vfio/mlx5: Explicitly store page list
author    Leon Romanovsky <leonro@nvidia.com>
          Sun, 21 Apr 2024 13:50:40 +0000 (16:50 +0300)
committer Leon Romanovsky <leon@kernel.org>
          Thu, 3 Oct 2024 16:05:53 +0000 (19:05 +0300)
As a preparation for removing the scatter-gather table and unifying
the receive and send lists, explicitly store the page list.

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
drivers/vfio/pci/mlx5/cmd.c
drivers/vfio/pci/mlx5/cmd.h
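
The change in a nutshell: the page list used to be a temporary scratch
array, freed at the end of mlx5vf_add_migration_pages(); after this patch
it is owned by the buffer and lives until the buffer itself is freed.
A minimal kernel-style sketch of that ownership model (hypothetical
demo_* names and simplified fields; not the driver code):

    #include <linux/mm.h>
    #include <linux/slab.h>

    struct demo_buffer {
            struct page **page_list;        /* persists with the buffer */
            unsigned int npages;            /* pages filled so far */
    };

    /* Free the pages first, then the list itself, mirroring the kvfree()
     * added to mlx5vf_free_data_buffer() in the diff below. */
    static void demo_free_buffer(struct demo_buffer *buf)
    {
            unsigned int i;

            for (i = 0; i < buf->npages; i++)
                    __free_page(buf->page_list[i]);
            kvfree(buf->page_list);
            kfree(buf);
    }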

diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index 1832a6c1f35d3e1f1805f53bad2902d0bb6831d5..34ae3e299a9ed9d6faa929612f6466e1887c0871 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -422,6 +422,7 @@ void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf)
        for_each_sgtable_page(&buf->table.sgt, &sg_iter, 0)
                __free_page(sg_page_iter_page(&sg_iter));
        sg_free_append_table(&buf->table);
+       kvfree(buf->page_list);
        kfree(buf);
 }
 
@@ -434,39 +435,33 @@ static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
        unsigned int to_fill;
        int ret;
 
-       to_fill = min_t(unsigned int, npages, PAGE_SIZE / sizeof(*page_list));
-       page_list = kvzalloc(to_fill * sizeof(*page_list), GFP_KERNEL_ACCOUNT);
+       to_fill = min_t(unsigned int, npages, PAGE_SIZE / sizeof(*buf->page_list));
+       page_list = kvzalloc(to_fill * sizeof(*buf->page_list), GFP_KERNEL_ACCOUNT);
        if (!page_list)
                return -ENOMEM;
 
+       buf->page_list = page_list;
+
        do {
                filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, to_fill,
-                                               page_list);
-               if (!filled) {
-                       ret = -ENOMEM;
-                       goto err;
-               }
+                               buf->page_list + buf->npages);
+               if (!filled)
+                       return -ENOMEM;
+
                to_alloc -= filled;
                ret = sg_alloc_append_table_from_pages(
-                       &buf->table, page_list, filled, 0,
+                       &buf->table, buf->page_list + buf->npages, filled, 0,
                        filled << PAGE_SHIFT, UINT_MAX, SG_MAX_SINGLE_ALLOC,
                        GFP_KERNEL_ACCOUNT);
 
                if (ret)
-                       goto err;
+                       return ret;
                buf->npages += filled;
-               /* clean input for another bulk allocation */
-               memset(page_list, 0, filled * sizeof(*page_list));
                to_fill = min_t(unsigned int, to_alloc,
-                               PAGE_SIZE / sizeof(*page_list));
+                               PAGE_SIZE / sizeof(*buf->page_list));
        } while (to_alloc > 0);
 
-       kvfree(page_list);
        return 0;
-
-err:
-       kvfree(page_list);
-       return ret;
 }
 
 struct mlx5_vhca_data_buffer *
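
Why the memset and the err label could go away: alloc_pages_bulk_array()
only populates NULL entries of the array it is given, so the old code had
to re-zero its single scratch array between rounds. The new code appends
each round at buf->page_list + buf->npages, which is still zero from
kvzalloc(), and the error paths can simply return, since
mlx5vf_free_data_buffer() now frees the list and any pages already added.
A sketch of the append pattern, assuming a zero-initialized @list sized
for @total entries (hypothetical demo_* helper, not the driver code):

    #include <linux/gfp.h>

    static int demo_fill_pages(struct page **list, unsigned long total)
    {
            unsigned long done = 0;

            while (done < total) {
                    /* fills only the NULL slots in list[done..total-1] */
                    unsigned long got;

                    got = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT,
                                                 total - done, list + done);
                    if (!got)
                            return -ENOMEM; /* caller frees list and pages */
                    done += got;
            }
            return 0;
    }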
diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h
index 25dd6ff5459186f81d674e6fc3c013b16b95c773..5b764199db539fd5bd2cb2a907cbef7f8e8fefee 100644
--- a/drivers/vfio/pci/mlx5/cmd.h
+++ b/drivers/vfio/pci/mlx5/cmd.h
@@ -53,6 +53,7 @@ struct mlx5_vf_migration_header {
 };
 
 struct mlx5_vhca_data_buffer {
+       struct page **page_list;
        struct sg_append_table table;
        loff_t start_pos;
        u64 length;
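
Where this is headed, per the commit message: once the receive and send
paths both consume the stored list, the sg_append_table can be dropped
and the pages walked directly. A hedged guess at that kind of iteration
(hypothetical helper, not part of this commit):

    /* Walk pages via the stored list instead of for_each_sgtable_page(). */
    static void demo_for_each_page(struct mlx5_vhca_data_buffer *buf,
                                   void (*fn)(struct page *page))
    {
            unsigned long i;

            for (i = 0; i < buf->npages; i++)
                    fn(buf->page_list[i]);
    }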