mm: alloc_pages_bulk: rename API
author Luiz Capitulino <luizcap@redhat.com>
Mon, 23 Dec 2024 22:00:38 +0000 (17:00 -0500)
committer Andrew Morton <akpm@linux-foundation.org>
Sun, 26 Jan 2025 04:22:31 +0000 (20:22 -0800)
The previous commit removed the page_list argument from
alloc_pages_bulk_noprof() along with the alloc_pages_bulk_list() function.

Now that only the *_array() flavour of the API remains, we can rename it
as follows (together with the corresponding _noprof() variants):

  alloc_pages_bulk_array -> alloc_pages_bulk
  alloc_pages_bulk_array_mempolicy -> alloc_pages_bulk_mempolicy
  alloc_pages_bulk_array_node -> alloc_pages_bulk_node

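For illustration, a caller under the new name might look like the sketch
below. This is a minimal sketch, not code from this commit: my_bulk_alloc()
is a hypothetical helper, and the retry-until-progress pattern mirrors
existing users such as xfs_buf_alloc_pages():

  #include <linux/gfp.h>
  #include <linux/slab.h>

  /* Hypothetical caller sketching the renamed API. */
  static struct page **my_bulk_alloc(unsigned int nr)
  {
          struct page **pages;
          unsigned long filled = 0;

          /* alloc_pages_bulk() fills only NULL slots, so start zeroed. */
          pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
          if (!pages)
                  return NULL;

          while (filled < nr) {
                  unsigned long last = filled;

                  /*
                   * Formerly alloc_pages_bulk_array(); returns the total
                   * number of populated entries in pages[].
                   */
                  filled = alloc_pages_bulk(GFP_KERNEL, nr, pages);
                  if (filled == last) {
                          /* No progress: free what we got and fail. */
                          while (last--)
                                  __free_page(pages[last]);
                          kfree(pages);
                          return NULL;
                  }
          }
          return pages;
  }

The per-node and mempolicy flavours keep the same calling convention via
alloc_pages_bulk_node() and alloc_pages_bulk_mempolicy().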
Link: https://lkml.kernel.org/r/275a3bbc0be20fbe9002297d60045e67ab3d4ada.1734991165.git.luizcap@redhat.com
Signed-off-by: Luiz Capitulino <luizcap@redhat.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
17 files changed:
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
drivers/vfio/pci/mlx5/cmd.c
drivers/vfio/pci/virtio/migrate.c
fs/btrfs/extent_io.c
fs/erofs/zutil.c
fs/splice.c
fs/xfs/xfs_buf.c
include/linux/gfp.h
kernel/bpf/arena.c
lib/alloc_tag.c
lib/kunit_iov_iter.c
lib/test_vmalloc.c
mm/mempolicy.c
mm/vmalloc.c
net/core/page_pool.c
net/sunrpc/svc.c
net/sunrpc/svc_xprt.c

diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
index 07ed33464d711672a23a2783640767a1e6d46dd4..224ca8d42721a68a01c148f20194cc6a173213ab 100644
@@ -624,10 +624,10 @@ static int alloc_private_pages(struct hmm_buffer_object *bo)
        const gfp_t gfp = __GFP_NOWARN | __GFP_RECLAIM | __GFP_FS;
        int ret;
 
-       ret = alloc_pages_bulk_array(gfp, bo->pgnr, bo->pages);
+       ret = alloc_pages_bulk(gfp, bo->pgnr, bo->pages);
        if (ret != bo->pgnr) {
                free_pages_bulk_array(ret, bo->pages);
-               dev_err(atomisp_dev, "alloc_pages_bulk_array() failed\n");
+               dev_err(atomisp_dev, "alloc_pages_bulk() failed\n");
                return -ENOMEM;
        }
 
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index eb7387ee6ebd10733362c181492f0497a015f7d4..11eda6b207f13fe895e3693f6acd2d146d455f2b 100644
@@ -408,7 +408,7 @@ void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf)
                                  buf->dma_dir, 0);
        }
 
-       /* Undo alloc_pages_bulk_array() */
+       /* Undo alloc_pages_bulk() */
        for_each_sgtable_page(&buf->table.sgt, &sg_iter, 0)
                __free_page(sg_page_iter_page(&sg_iter));
        sg_free_append_table(&buf->table);
@@ -431,8 +431,8 @@ static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
                return -ENOMEM;
 
        do {
-               filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, to_fill,
-                                               page_list);
+               filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT, to_fill,
+                                         page_list);
                if (!filled) {
                        ret = -ENOMEM;
                        goto err;
@@ -1342,7 +1342,7 @@ static void free_recv_pages(struct mlx5_vhca_recv_buf *recv_buf)
 {
        int i;
 
-       /* Undo alloc_pages_bulk_array() */
+       /* Undo alloc_pages_bulk() */
        for (i = 0; i < recv_buf->npages; i++)
                __free_page(recv_buf->page_list[i]);
 
@@ -1361,9 +1361,9 @@ static int alloc_recv_pages(struct mlx5_vhca_recv_buf *recv_buf,
                return -ENOMEM;
 
        for (;;) {
-               filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT,
-                                               npages - done,
-                                               recv_buf->page_list + done);
+               filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT,
+                                         npages - done,
+                                         recv_buf->page_list + done);
                if (!filled)
                        goto err;
 
diff --git a/drivers/vfio/pci/virtio/migrate.c b/drivers/vfio/pci/virtio/migrate.c
index ee54f4c178577191aa3d5902ea98cee9fa29bb37..ba92bb4e9af94e9e96d1574464809607fe193382 100644
@@ -77,8 +77,8 @@ static int virtiovf_add_migration_pages(struct virtiovf_data_buffer *buf,
                return -ENOMEM;
 
        do {
-               filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, to_fill,
-                                               page_list);
+               filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT, to_fill,
+                                         page_list);
                if (!filled) {
                        ret = -ENOMEM;
                        goto err;
@@ -112,7 +112,7 @@ static void virtiovf_free_data_buffer(struct virtiovf_data_buffer *buf)
 {
        struct sg_page_iter sg_iter;
 
-       /* Undo alloc_pages_bulk_array() */
+       /* Undo alloc_pages_bulk() */
        for_each_sgtable_page(&buf->table.sgt, &sg_iter, 0)
                __free_page(sg_page_iter_page(&sg_iter));
        sg_free_append_table(&buf->table);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index b923d0cec61c73274334d720ae98160a912ce973..d70e9461fea861857c174b57223d43842e50607f 100644
@@ -632,7 +632,7 @@ int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
        for (allocated = 0; allocated < nr_pages;) {
                unsigned int last = allocated;
 
-               allocated = alloc_pages_bulk_array(gfp, nr_pages, page_array);
+               allocated = alloc_pages_bulk(gfp, nr_pages, page_array);
                if (unlikely(allocated == last)) {
                        /* No progress, fail and do cleanup. */
                        for (int i = 0; i < allocated; i++) {
diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c
index 0dd65cefce33e22acb7a23e6bc91413fceeb0c7b..9c5aa9d536821835a5c5f2b03bde41d101a0e9f3 100644
@@ -87,8 +87,8 @@ int z_erofs_gbuf_growsize(unsigned int nrpages)
                        tmp_pages[j] = gbuf->pages[j];
                do {
                        last = j;
-                       j = alloc_pages_bulk_array(GFP_KERNEL, nrpages,
-                                                  tmp_pages);
+                       j = alloc_pages_bulk(GFP_KERNEL, nrpages,
+                                            tmp_pages);
                        if (last == j)
                                goto out;
                } while (j != nrpages);
diff --git a/fs/splice.c b/fs/splice.c
index 2898fa1e9e63804c1a5bb2f10ff6746edbc4098a..28cfa63aa236473255c6bdebbfe7cb18ae176aae 100644
@@ -342,7 +342,7 @@ ssize_t copy_splice_read(struct file *in, loff_t *ppos,
                return -ENOMEM;
 
        pages = (struct page **)(bv + npages);
-       npages = alloc_pages_bulk_array(GFP_USER, npages, pages);
+       npages = alloc_pages_bulk(GFP_USER, npages, pages);
        if (!npages) {
                kfree(bv);
                return -ENOMEM;
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index aa63b8efd7822856ba85407a810ad7963a551a9c..82db3ab0e8b40fa00d1a1ca58a624b66148fd981 100644
@@ -395,8 +395,8 @@ xfs_buf_alloc_pages(
        for (;;) {
                long    last = filled;
 
-               filled = alloc_pages_bulk_array(gfp_mask, bp->b_page_count,
-                                               bp->b_pages);
+               filled = alloc_pages_bulk(gfp_mask, bp->b_page_count,
+                                         bp->b_pages);
                if (filled == bp->b_page_count) {
                        XFS_STATS_INC(bp->b_mount, xb_page_found);
                        break;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index f8b33c5e7a1480ca244c7fbd9961dc7d3c5f93f8..6bb1a5a7a4ae3392c1cd39cb79271e05512adbeb 100644
@@ -215,18 +215,18 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
                                struct page **page_array);
 #define __alloc_pages_bulk(...)                        alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__))
 
-unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
+unsigned long alloc_pages_bulk_mempolicy_noprof(gfp_t gfp,
                                unsigned long nr_pages,
                                struct page **page_array);
-#define  alloc_pages_bulk_array_mempolicy(...)                         \
-       alloc_hooks(alloc_pages_bulk_array_mempolicy_noprof(__VA_ARGS__))
+#define  alloc_pages_bulk_mempolicy(...)                               \
+       alloc_hooks(alloc_pages_bulk_mempolicy_noprof(__VA_ARGS__))
 
 /* Bulk allocate order-0 pages */
-#define alloc_pages_bulk_array(_gfp, _nr_pages, _page_array)           \
+#define alloc_pages_bulk(_gfp, _nr_pages, _page_array)         \
        __alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _page_array)
 
 static inline unsigned long
-alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
+alloc_pages_bulk_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
                                   struct page **page_array)
 {
        if (nid == NUMA_NO_NODE)
@@ -235,8 +235,8 @@ alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
        return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array);
 }
 
-#define alloc_pages_bulk_array_node(...)                               \
-       alloc_hooks(alloc_pages_bulk_array_node_noprof(__VA_ARGS__))
+#define alloc_pages_bulk_node(...)                             \
+       alloc_hooks(alloc_pages_bulk_node_noprof(__VA_ARGS__))
 
 static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
 {
diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c
index 945a5680f6a54800be404c173a0a32ee0cc6dca6..9927cd4c9e0ea10e3ef820ac2af8e6f1a2c9b5d3 100644
@@ -443,7 +443,7 @@ static long arena_alloc_pages(struct bpf_arena *arena, long uaddr, long page_cnt
                        return 0;
        }
 
-       /* zeroing is needed, since alloc_pages_bulk_array() only fills in non-zero entries */
+       /* zeroing is needed, since alloc_pages_bulk() only fills in non-zero entries */
        pages = kvcalloc(page_cnt, sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return 0;
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 7dcebf118a3e64ac50a0ab9071623daca9a8aee8..4bb778be4476434e6316653391c03e0fb809329f 100644
@@ -420,8 +420,8 @@ static int vm_module_tags_populate(void)
                unsigned long nr;
 
                more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT;
-               nr = alloc_pages_bulk_array_node(GFP_KERNEL | __GFP_NOWARN,
-                                                NUMA_NO_NODE, more_pages, next_page);
+               nr = alloc_pages_bulk_node(GFP_KERNEL | __GFP_NOWARN,
+                                          NUMA_NO_NODE, more_pages, next_page);
                if (nr < more_pages ||
                    vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL,
                                     next_page, PAGE_SHIFT) < 0) {
diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c
index 13e15687675a84b7bf92a08513d8f05f6b3ea04e..830bf3eca4c2e7f0acfd69c5789d5479de07e394 100644
@@ -57,7 +57,7 @@ static void *__init iov_kunit_create_buffer(struct kunit *test,
         KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
        *ppages = pages;
 
-       got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages);
+       got = alloc_pages_bulk(GFP_KERNEL, npages, pages);
        if (got != npages) {
                release_pages(pages, got);
                KUNIT_ASSERT_EQ(test, got, npages);
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 4ddf769861ff769fd891b58b2547691ec9c82cc3..f585949ff696eec107d485d03d64f5267d54b313 100644
@@ -373,7 +373,7 @@ vm_map_ram_test(void)
        if (!pages)
                return -1;
 
-       nr_allocated = alloc_pages_bulk_array(GFP_KERNEL, map_nr_pages, pages);
+       nr_allocated = alloc_pages_bulk(GFP_KERNEL, map_nr_pages, pages);
        if (nr_allocated != map_nr_pages)
                goto cleanup;
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 0da6cf950f7b90611a47cfc61427defc47bd4f68..f83b73236ffe7285951bab5a530c2db3dac6986b 100644
@@ -2372,7 +2372,7 @@ struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
 }
 EXPORT_SYMBOL(folio_alloc_noprof);
 
-static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
+static unsigned long alloc_pages_bulk_interleave(gfp_t gfp,
                struct mempolicy *pol, unsigned long nr_pages,
                struct page **page_array)
 {
@@ -2407,7 +2407,7 @@ static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
        return total_allocated;
 }
 
-static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
+static unsigned long alloc_pages_bulk_weighted_interleave(gfp_t gfp,
                struct mempolicy *pol, unsigned long nr_pages,
                struct page **page_array)
 {
@@ -2522,7 +2522,7 @@ static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
        return total_allocated;
 }
 
-static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
+static unsigned long alloc_pages_bulk_preferred_many(gfp_t gfp, int nid,
                struct mempolicy *pol, unsigned long nr_pages,
                struct page **page_array)
 {
@@ -2548,7 +2548,7 @@ static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
  * It can accelerate memory allocation especially interleaving
  * allocate memory.
  */
-unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
+unsigned long alloc_pages_bulk_mempolicy_noprof(gfp_t gfp,
                unsigned long nr_pages, struct page **page_array)
 {
        struct mempolicy *pol = &default_policy;
@@ -2559,15 +2559,15 @@ unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
                pol = get_task_policy(current);
 
        if (pol->mode == MPOL_INTERLEAVE)
-               return alloc_pages_bulk_array_interleave(gfp, pol,
+               return alloc_pages_bulk_interleave(gfp, pol,
                                                         nr_pages, page_array);
 
        if (pol->mode == MPOL_WEIGHTED_INTERLEAVE)
-               return alloc_pages_bulk_array_weighted_interleave(
+               return alloc_pages_bulk_weighted_interleave(
                                  gfp, pol, nr_pages, page_array);
 
        if (pol->mode == MPOL_PREFERRED_MANY)
-               return alloc_pages_bulk_array_preferred_many(gfp,
+               return alloc_pages_bulk_preferred_many(gfp,
                                numa_node_id(), pol, nr_pages, page_array);
 
        nid = numa_node_id();
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 5c88d0e90c209ab80ed9479c1ea726743fbb565f..a6e7acebe9adf5e6c8abd52dcf7d02a6a1bc3030 100644
@@ -3562,11 +3562,11 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                         * but mempolicy wants to alloc memory by interleaving.
                         */
                        if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
-                               nr = alloc_pages_bulk_array_mempolicy_noprof(gfp,
+                               nr = alloc_pages_bulk_mempolicy_noprof(gfp,
                                                        nr_pages_request,
                                                        pages + nr_allocated);
                        else
-                               nr = alloc_pages_bulk_array_node_noprof(gfp, nid,
+                               nr = alloc_pages_bulk_node_noprof(gfp, nid,
                                                        nr_pages_request,
                                                        pages + nr_allocated);
 
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index f89cf93f6eb45a95f0a47d1592dde44caa78e711..8a91c1972dc50b65a5a35ba411be6530b99c73d8 100644
@@ -532,12 +532,11 @@ static noinline netmem_ref __page_pool_alloc_pages_slow(struct page_pool *pool,
        if (unlikely(pool->alloc.count > 0))
                return pool->alloc.cache[--pool->alloc.count];
 
-       /* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
+       /* Mark empty alloc.cache slots "empty" for alloc_pages_bulk */
        memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
 
-       nr_pages = alloc_pages_bulk_array_node(gfp,
-                                              pool->p.nid, bulk,
-                                              (struct page **)pool->alloc.cache);
+       nr_pages = alloc_pages_bulk_node(gfp, pool->p.nid, bulk,
+                                        (struct page **)pool->alloc.cache);
        if (unlikely(!nr_pages))
                return 0;
 
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 79879b7d39cb4f267b0d7ef54370bdaae83783d5..e7f9c295d13c03bf28a5eeec839fd85e24f5525f 100644
@@ -651,8 +651,8 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
        if (pages > RPCSVC_MAXPAGES)
                pages = RPCSVC_MAXPAGES;
 
-       ret = alloc_pages_bulk_array_node(GFP_KERNEL, node, pages,
-                                         rqstp->rq_pages);
+       ret = alloc_pages_bulk_node(GFP_KERNEL, node, pages,
+                                   rqstp->rq_pages);
        return ret == pages;
 }
 
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 43c57124de52f4fdacbb80e5c9ed9b6d6f4457ca..aebc0d8ddff5c7083c23f862340dd9b565bed22b 100644
@@ -671,8 +671,7 @@ static bool svc_alloc_arg(struct svc_rqst *rqstp)
        }
 
        for (filled = 0; filled < pages; filled = ret) {
-               ret = alloc_pages_bulk_array(GFP_KERNEL, pages,
-                                            rqstp->rq_pages);
+               ret = alloc_pages_bulk(GFP_KERNEL, pages, rqstp->rq_pages);
                if (ret > filled)
                        /* Made progress, don't sleep yet */
                        continue;