bpf: Use try_alloc_pages() to allocate pages for bpf needs.
author    Alexei Starovoitov <ast@kernel.org>
          Sat, 22 Feb 2025 02:44:27 +0000 (18:44 -0800)
committer Alexei Starovoitov <ast@kernel.org>
          Thu, 27 Feb 2025 17:39:44 +0000 (09:39 -0800)
Use try_alloc_pages() and free_pages_nolock() for BPF needs
when context doesn't allow using normal alloc_pages.
This is a prerequisite for further work.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/r/20250222024427.30294-7-alexei.starovoitov@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
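For reference, a minimal caller sketch (not part of this commit): the gfp argument is gone from bpf_map_alloc_pages(), which now picks the allocator internally, and pages it hands out can be returned with free_pages_nolock() even from contexts where a normal free would be unsafe. The bpf_demo_map_fill() helper and its populate_ok error path below are hypothetical and only illustrate the changed signatures.

#include <linux/bpf.h>
#include <linux/gfp.h>

/* Hypothetical caller, for illustration only. */
static int bpf_demo_map_fill(struct bpf_map *map, struct page **pages,
                             unsigned long nr_pages, bool populate_ok)
{
        unsigned long i;
        int err;

        /* No gfp flags: the helper chooses the allocator based on context. */
        err = bpf_map_alloc_pages(map, NUMA_NO_NODE, nr_pages, pages);
        if (err)
                return err;

        if (!populate_ok) {
                /* free_pages_nolock() is usable where __free_page() is not. */
                for (i = 0; i < nr_pages; i++)
                        free_pages_nolock(pages[i], 0);
                return -EINVAL;
        }
        return 0;
}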
include/linux/bpf.h
kernel/bpf/arena.c
kernel/bpf/syscall.c

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f3f50e29d63929acaf12c81f8356173f1f5e154b..e1838a34181731b4e0d29e778e1661f7c8882a81 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2348,7 +2348,7 @@ int  generic_map_delete_batch(struct bpf_map *map,
 struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
 
-int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
+int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
                        unsigned long nr_pages, struct page **page_array);
 #ifdef CONFIG_MEMCG
 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c
index 870aeb51d70add0b1fb238a0d91acd7baea171af..aa43d6c34c46d7100af8d12584aaede1df336180 100644
--- a/kernel/bpf/arena.c
+++ b/kernel/bpf/arena.c
@@ -287,7 +287,7 @@ static vm_fault_t arena_vm_fault(struct vm_fault *vmf)
                return VM_FAULT_SIGSEGV;
 
        /* Account into memcg of the process that created bpf_arena */
-       ret = bpf_map_alloc_pages(map, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, 1, &page);
+       ret = bpf_map_alloc_pages(map, NUMA_NO_NODE, 1, &page);
        if (ret) {
                range_tree_set(&arena->rt, vmf->pgoff, 1);
                return VM_FAULT_SIGSEGV;
@@ -465,8 +465,7 @@ static long arena_alloc_pages(struct bpf_arena *arena, long uaddr, long page_cnt
        if (ret)
                goto out_free_pages;
 
-       ret = bpf_map_alloc_pages(&arena->map, GFP_KERNEL | __GFP_ZERO,
-                                 node_id, page_cnt, pages);
+       ret = bpf_map_alloc_pages(&arena->map, node_id, page_cnt, pages);
        if (ret)
                goto out;
 
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index c420edbfb7c874ddc5f0ba97d120b16c4ee81c02..a7af8d0185d0af860e51a68a602ff2cdc0f99ddc 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -569,7 +569,24 @@ static void bpf_map_release_memcg(struct bpf_map *map)
 }
 #endif
 
-int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
+static bool can_alloc_pages(void)
+{
+       return preempt_count() == 0 && !irqs_disabled() &&
+               !IS_ENABLED(CONFIG_PREEMPT_RT);
+}
+
+static struct page *__bpf_alloc_page(int nid)
+{
+       if (!can_alloc_pages())
+               return try_alloc_pages(nid, 0);
+
+       return alloc_pages_node(nid,
+                               GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT
+                               | __GFP_NOWARN,
+                               0);
+}
+
+int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
                        unsigned long nr_pages, struct page **pages)
 {
        unsigned long i, j;
@@ -582,14 +599,14 @@ int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
        old_memcg = set_active_memcg(memcg);
 #endif
        for (i = 0; i < nr_pages; i++) {
-               pg = alloc_pages_node(nid, gfp | __GFP_ACCOUNT, 0);
+               pg = __bpf_alloc_page(nid);
 
                if (pg) {
                        pages[i] = pg;
                        continue;
                }
                for (j = 0; j < i; j++)
-                       __free_page(pages[j]);
+                       free_pages_nolock(pages[j], 0);
                ret = -ENOMEM;
                break;
        }