Diffstat (limited to 'kernel/bpf/syscall.c')
-rw-r--r-- | kernel/bpf/syscall.c | 20
1 file changed, 18 insertions, 2 deletions
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index bcc97613de768..cda8d00f37622 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -309,7 +309,7 @@ static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
	 * __GFP_RETRY_MAYFAIL to avoid such situations.
	 */
 
-	const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_ACCOUNT;
+	gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO);
 	unsigned int flags = 0;
 	unsigned long align = 1;
 	void *area;
@@ -418,7 +418,8 @@ static void bpf_map_save_memcg(struct bpf_map *map)
	 * So we have to check map->objcg for being NULL each time it's
	 * being used.
	 */
-	map->objcg = get_obj_cgroup_from_current();
+	if (memcg_bpf_enabled())
+		map->objcg = get_obj_cgroup_from_current();
 }
 
 static void bpf_map_release_memcg(struct bpf_map *map)
@@ -464,6 +465,21 @@ void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
 	return ptr;
 }
 
+void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
+		       gfp_t flags)
+{
+	struct mem_cgroup *memcg, *old_memcg;
+	void *ptr;
+
+	memcg = bpf_map_get_memcg(map);
+	old_memcg = set_active_memcg(memcg);
+	ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
+	set_active_memcg(old_memcg);
+	mem_cgroup_put(memcg);
+
+	return ptr;
+}
+
 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags)
 {
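
For context, a minimal caller sketch for the new helper; the function name, the counter array, and the GFP flag choice below are illustrative assumptions, not taken from this patch:

/* Hypothetical caller sketch (not from this patch): allocate a zeroed
 * array of per-entry counters charged to the map's memory cgroup.
 * bpf_map_kvcalloc() ORs in __GFP_ACCOUNT and temporarily switches the
 * active memcg to the one saved in the map, so the charge lands on the
 * map's owner regardless of which task triggers the allocation.
 * The result must eventually be freed with kvfree().
 */
static u64 *example_alloc_counters(struct bpf_map *map, u32 max_entries)
{
	return bpf_map_kvcalloc(map, max_entries, sizeof(u64),
				GFP_USER | __GFP_NOWARN);
}

The helper follows the same pattern as the existing bpf_map_kzalloc(): fetch the map's saved memcg, make it the active one, allocate with __GFP_ACCOUNT, then restore the previous memcg, so kvcalloc-style array allocations get the same per-map accounting.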