www.infradead.org Git - users/willy/pagecache.git/commitdiff
scatterlist: Extract get_user_sgtable() from ib_umem_get()
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Sat, 2 Jul 2022 19:14:01 +0000 (15:14 -0400)
committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 3 Jan 2023 14:00:33 +0000 (09:00 -0500)
This is a nice simplification on its own, as well as being a preparatory
step for the phyr.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
drivers/infiniband/core/umem.c
include/linux/scatterlist.h
lib/scatterlist.c

index 755a9c57db6f3a7e8809a5fd75b9a5182ee3a064..9902d7fe7ed379d0d4a84978394550ddaa30de1d 100644 (file)
@@ -148,14 +148,12 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
                            size_t size, int access)
 {
        struct ib_umem *umem;
-       struct page **page_list;
        unsigned long lock_limit;
        unsigned long new_pinned;
-       unsigned long cur_base;
        unsigned long dma_attr = 0;
        struct mm_struct *mm;
        unsigned long npages;
-       int pinned, ret;
+       int ret;
        unsigned int gup_flags = FOLL_LONGTERM;
 
        /*
@@ -187,12 +185,6 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
        umem->owning_mm = mm = current->mm;
        mmgrab(mm);
 
-       page_list = (struct page **) __get_free_page(GFP_KERNEL);
-       if (!page_list) {
-               ret = -ENOMEM;
-               goto umem_kfree;
-       }
-
        npages = ib_umem_num_pages(umem);
        if (npages == 0 || npages > UINT_MAX) {
                ret = -EINVAL;
@@ -208,34 +200,13 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
                goto out;
        }
 
-       cur_base = addr & PAGE_MASK;
-
-       if (umem->writable)
-               gup_flags |= FOLL_WRITE;
-
-       while (npages) {
-               cond_resched();
-               pinned = pin_user_pages_fast(cur_base,
-                                         min_t(unsigned long, npages,
-                                               PAGE_SIZE /
-                                               sizeof(struct page *)),
-                                         gup_flags, page_list);
-               if (pinned < 0) {
-                       ret = pinned;
-                       goto umem_release;
-               }
-
-               cur_base += pinned * PAGE_SIZE;
-               npages -= pinned;
-               ret = sg_alloc_append_table_from_pages(
-                       &umem->sgt_append, page_list, pinned, 0,
-                       pinned << PAGE_SHIFT, ib_dma_max_seg_size(device),
-                       npages, GFP_KERNEL);
-               if (ret) {
-                       unpin_user_pages_dirty_lock(page_list, pinned, 0);
-                       goto umem_release;
-               }
-       }
+       if (!umem->writable)
+               gup_flags |= FOLL_FORCE;
+
+       ret = get_user_sgtable(&umem->sgt_append, addr & PAGE_MASK,
+                       ib_dma_max_seg_size(device), npages, gup_flags);
+       if (ret < 0)
+               goto umem_release;
 
        if (access & IB_ACCESS_RELAXED_ORDERING)
                dma_attr |= DMA_ATTR_WEAK_ORDERING;
@@ -250,8 +221,6 @@ umem_release:
        __ib_umem_release(device, umem, 0);
        atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
 out:
-       free_page((unsigned long) page_list);
-umem_kfree:
        if (ret) {
                mmdrop(umem->owning_mm);
                kfree(umem);
index 375a5e90d86ac09be142c1debccc4e0461e38c05..2b59eac670f9542aab5e4c064bb0ad5afc3dfe26 100644 (file)
@@ -418,6 +418,10 @@ static inline int sg_alloc_table_from_pages(struct sg_table *sgt,
                                                 size, UINT_MAX, gfp_mask);
 }
 
+int get_user_sgtable(struct sg_append_table *sgt_append, unsigned long uaddr,
+               size_t max_seg_size, unsigned long npages,
+               unsigned int gup_flags);
+
 #ifdef CONFIG_SGL_ALLOC
 struct scatterlist *sgl_alloc_order(unsigned long long length,
                                    unsigned int order, bool chainable,
index a0ad2a7959b5d24d7e892fc69aabed60686ec782..3126d545f1aca172799b552880273cecd10606e9 100644 (file)
@@ -544,6 +544,59 @@ out:
 }
 EXPORT_SYMBOL(sg_alloc_append_table_from_pages);
 
+/**
+ * get_user_sgtable - Pin user pages and append them to a scatterlist table.
+ * @sgt_append: The sg append table to add the pinned pages to.
+ * @uaddr: Start of the user address range to pin; the caller passes a
+ *         page-aligned address.
+ * @max_seg_size: Maximum size of a single scatterlist segment.
+ * @npages: Number of pages to pin.
+ * @gup_flags: FOLL_* flags passed through to pin_user_pages_fast().
+ *
+ * Pins up to one page's worth of struct page pointers at a time, appending
+ * each batch to @sgt_append, until @npages pages have been added.
+ *
+ * Return: 0 on success, a negative errno on failure.  On failure, pages
+ * already appended to @sgt_append remain pinned; the caller is expected
+ * to release them when tearing down the table.
+ */
+int get_user_sgtable(struct sg_append_table *sgt_append, unsigned long uaddr,
+               size_t max_seg_size, unsigned long npages,
+               unsigned int gup_flags)
+{
+       struct page **page_list = (struct page **) __get_free_page(GFP_KERNEL);
+       int ret = 0;
+
+       if (!page_list)
+               return -ENOMEM;
+
+       while (npages) {
+               int pinned;
+
+               pinned = pin_user_pages_fast(uaddr,
+                               min_t(unsigned long, npages,
+                                     PAGE_SIZE / sizeof(struct page *)),
+                               gup_flags, page_list);
+               if (pinned < 0) {
+                       /* break, not return: page_list must be freed below */
+                       ret = pinned;
+                       break;
+               }
+
+               uaddr += pinned * PAGE_SIZE;
+               npages -= pinned;
+               ret = sg_alloc_append_table_from_pages(sgt_append, page_list,
+                               pinned, 0, pinned << PAGE_SHIFT, max_seg_size,
+                               npages, GFP_KERNEL);
+               if (ret) {
+                       unpin_user_pages_dirty_lock(page_list, pinned, 0);
+                       break;
+               }
+               cond_resched();
+       }
+
+       free_page((unsigned long) page_list);
+       return ret;
+}
+EXPORT_SYMBOL(get_user_sgtable);
+
 /**
  * sg_alloc_table_from_pages_segment - Allocate and initialize an sg table from
  *                                     an array of pages and given maximum