size_t size, int access)
{
struct ib_umem *umem;
- struct page **page_list;
unsigned long lock_limit;
unsigned long new_pinned;
- unsigned long cur_base;
unsigned long dma_attr = 0;
struct mm_struct *mm;
unsigned long npages;
- int pinned, ret;
+ int ret;
unsigned int gup_flags = FOLL_LONGTERM;
umem->owning_mm = mm = current->mm;
mmgrab(mm);
- page_list = (struct page **) __get_free_page(GFP_KERNEL);
- if (!page_list) {
- ret = -ENOMEM;
- goto umem_kfree;
- }
-
npages = ib_umem_num_pages(umem);
if (npages == 0 || npages > UINT_MAX) {
ret = -EINVAL;
goto out;
}
- cur_base = addr & PAGE_MASK;
-
- if (umem->writable)
- gup_flags |= FOLL_WRITE;
-
- while (npages) {
- cond_resched();
- pinned = pin_user_pages_fast(cur_base,
- min_t(unsigned long, npages,
- PAGE_SIZE /
- sizeof(struct page *)),
- gup_flags, page_list);
- if (pinned < 0) {
- ret = pinned;
- goto umem_release;
- }
-
- cur_base += pinned * PAGE_SIZE;
- npages -= pinned;
- ret = sg_alloc_append_table_from_pages(
- &umem->sgt_append, page_list, pinned, 0,
- pinned << PAGE_SHIFT, ib_dma_max_seg_size(device),
- npages, GFP_KERNEL);
- if (ret) {
- unpin_user_pages_dirty_lock(page_list, pinned, 0);
- goto umem_release;
- }
- }
+	if (umem->writable)
+		gup_flags |= FOLL_WRITE;
+
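+	/* Pin the user range and build the umem's scatter/gather table. */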
+	ret = get_user_sgtable(&umem->sgt_append, addr & PAGE_MASK,
+			       ib_dma_max_seg_size(device), npages, gup_flags);
+	if (ret)
+		goto umem_release;
if (access & IB_ACCESS_RELAXED_ORDERING)
dma_attr |= DMA_ATTR_WEAK_ORDERING;
__ib_umem_release(device, umem, 0);
atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
out:
- free_page((unsigned long) page_list);
-umem_kfree:
if (ret) {
mmdrop(umem->owning_mm);
kfree(umem);
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
						 size, UINT_MAX, gfp_mask);
}
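+
+/* Pin a user address range and build a scatter/gather append table from it. */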
+int get_user_sgtable(struct sg_append_table *sgt_append, unsigned long uaddr,
+		     size_t max_seg_size, unsigned long npages,
+		     unsigned int gup_flags);
+
#ifdef CONFIG_SGL_ALLOC
struct scatterlist *sgl_alloc_order(unsigned long long length,
unsigned int order, bool chainable,
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
}
EXPORT_SYMBOL(sg_alloc_append_table_from_pages);
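+
+/**
+ * get_user_sgtable - pin user pages and append them to a scatter/gather table
+ * @sgt_append: append table the pinned pages are added to
+ * @uaddr: page-aligned start of the user address range to pin
+ * @max_seg_size: maximum size of a single scatterlist segment
+ * @npages: number of pages to pin
+ * @gup_flags: flags passed to pin_user_pages_fast(); callers needing a
+ *	long-lived pin should include FOLL_LONGTERM
+ *
+ * Returns 0 on success or a negative errno on failure. On failure, pages
+ * already appended to @sgt_append remain pinned; the caller must unpin
+ * them and free the table.
+ */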
+int get_user_sgtable(struct sg_append_table *sgt_append, unsigned long uaddr,
+		     size_t max_seg_size, unsigned long npages,
+		     unsigned int gup_flags)
+{
+	struct page **page_list;
+	int ret = 0;
+
+	/* A single page of page pointers bounds each pinning batch. */
+	page_list = (struct page **) __get_free_page(GFP_KERNEL);
+	if (!page_list)
+		return -ENOMEM;
+
+	while (npages) {
+		int pinned;
+
+		pinned = pin_user_pages_fast(uaddr,
+					     min_t(unsigned long, npages,
+						   PAGE_SIZE / sizeof(struct page *)),
+					     gup_flags, page_list);
+		if (pinned < 0) {
+			ret = pinned;
+			goto out;
+		}
+
+		uaddr += pinned * PAGE_SIZE;
+		npages -= pinned;
+		ret = sg_alloc_append_table_from_pages(sgt_append, page_list,
+				pinned, 0, pinned << PAGE_SHIFT, max_seg_size,
+				npages, GFP_KERNEL);
+		if (ret) {
+			/*
+			 * Unpin only the batch that was never appended;
+			 * pages from earlier iterations stay pinned and are
+			 * the caller's responsibility to release.
+			 */
+			unpin_user_pages_dirty_lock(page_list, pinned, 0);
+			goto out;
+		}
+		cond_resched();
+	}
+
+out:
+	free_page((unsigned long) page_list);
+	return ret;
+}
+EXPORT_SYMBOL(get_user_sgtable);
+
/**
* sg_alloc_table_from_pages_segment - Allocate and initialize an sg table from
* an array of pages and given maximum