/* Acquire the mm page semaphore. */
        down_read(&current->mm->mmap_sem);
 
-       err = get_user_pages(current,
-                            current->mm,
-                            (unsigned long int)(oper.indata + prev_ix),
+       err = get_user_pages((unsigned long int)(oper.indata + prev_ix),
                             noinpages,
                             0,  /* read access only for in data */
                             0, /* no force */
        }
        noinpages = err;
        if (oper.do_cipher){
-               err = get_user_pages(current,
-                                    current->mm,
-                                    (unsigned long int)oper.cipher_outdata,
+               err = get_user_pages((unsigned long int)oper.cipher_outdata,
                                     nooutpages,
                                     1, /* write access for out data */
                                     0, /* no force */
 
        u64 virt_addr=simple_strtoull(buf, NULL, 16);
        int ret;
 
-        ret = get_user_pages(current, current->mm, virt_addr,
-                        1, VM_READ, 0, NULL, NULL);
+       ret = get_user_pages(virt_addr, 1, VM_READ, 0, NULL, NULL);
        if (ret<=0) {
 #ifdef ERR_INJ_DEBUG
                printk("Virtual address %lx is not existing.\n",virt_addr);
 
        start += nr << PAGE_SHIFT;
        pages += nr;
 
-       ret = get_user_pages_unlocked(current, mm, start,
-                                     (end - start) >> PAGE_SHIFT,
+       ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT,
                                      write, 0, pages);
 
        /* Have to be a bit careful with return values */
 
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages)
 {
-       struct mm_struct *mm = current->mm;
        int nr, ret;
 
        might_sleep();
        /* Try to get the remaining pages with get_user_pages */
        start += nr << PAGE_SHIFT;
        pages += nr;
-       ret = get_user_pages_unlocked(current, mm, start,
-                            nr_pages - nr, write, 0, pages);
+       ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);
        /* Have to be a bit careful with return values */
        if (nr > 0)
                ret = (ret < 0) ? nr : ret + nr;
 
                start += nr << PAGE_SHIFT;
                pages += nr;
 
-               ret = get_user_pages_unlocked(current, mm, start,
+               ret = get_user_pages_unlocked(start,
                        (end - start) >> PAGE_SHIFT, write, 0, pages);
 
                /* Have to be a bit careful with return values */
 
                start += nr << PAGE_SHIFT;
                pages += nr;
 
-               ret = get_user_pages_unlocked(current, mm, start,
+               ret = get_user_pages_unlocked(start,
                        (end - start) >> PAGE_SHIFT, write, 0, pages);
 
                /* Have to be a bit careful with return values */
 
                start += nr << PAGE_SHIFT;
                pages += nr;
 
-               ret = get_user_pages_unlocked(current, mm, start,
+               ret = get_user_pages_unlocked(start,
                                              (end - start) >> PAGE_SHIFT,
                                              write, 0, pages);
 
 
        int nr_pages = 1;
        int force = 0;
 
-       gup_ret = get_user_pages(current, current->mm, (unsigned long)addr,
-                                nr_pages, write, force, NULL, NULL);
+       gup_ret = get_user_pages((unsigned long)addr, nr_pages, write,
+                       force, NULL, NULL);
        /*
         * get_user_pages() returns number of pages gotten.
         * 0 means we failed to fault in and get anything,
 
                uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
                struct page **pages = ttm->pages + pinned;
 
-               r = get_user_pages(current, current->mm, userptr, num_pages,
-                                  write, 0, pages, NULL);
+               r = get_user_pages(userptr, num_pages, write, 0, pages, NULL);
                if (r < 0)
                        goto release_pages;
 
 
                uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
                struct page **pages = ttm->pages + pinned;
 
-               r = get_user_pages(current, current->mm, userptr, num_pages,
-                                  write, 0, pages, NULL);
+               r = get_user_pages(userptr, num_pages, write, 0, pages, NULL);
                if (r < 0)
                        goto release_pages;
 
 
        if (NULL == vsg->pages)
                return -ENOMEM;
        down_read(&current->mm->mmap_sem);
-       ret = get_user_pages(current, current->mm,
-                            (unsigned long)xfer->mem_addr,
+       ret = get_user_pages((unsigned long)xfer->mem_addr,
                             vsg->num_pages,
                             (vsg->direction == DMA_FROM_DEVICE),
                             0, vsg->pages, NULL);
 
        sg_list_start = umem->sg_head.sgl;
 
        while (npages) {
-               ret = get_user_pages(current, current->mm, cur_base,
+               ret = get_user_pages(cur_base,
                                     min_t(unsigned long, npages,
                                           PAGE_SIZE / sizeof (struct page *)),
                                     1, !umem->writable, page_list, vma_list);
 
                goto out;
        }
 
-       ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
-                            pages, NULL);
+       ret = get_user_pages(uaddr & PAGE_MASK, 1, 1, 0, pages, NULL);
        if (ret < 0)
                goto out;
 
 
        }
 
        for (got = 0; got < num_pages; got += ret) {
-               ret = get_user_pages(current, current->mm,
-                                    start_page + got * PAGE_SIZE,
+               ret = get_user_pages(start_page + got * PAGE_SIZE,
                                     num_pages - got, 1, 1,
                                     p + got, NULL);
                if (ret < 0)
 
        ret = 0;
 
        while (npages) {
-               ret = get_user_pages(current, current->mm, cur_base,
+               ret = get_user_pages(cur_base,
                                        min_t(unsigned long, npages,
                                        PAGE_SIZE / sizeof(struct page *)),
                                        1, !writable, page_list, NULL);
 
        }
 
        /* Get user pages for DMA Xfer */
-       err = get_user_pages_unlocked(current, current->mm,
-                       user_dma.uaddr, user_dma.page_count, 0, 1, dma->map);
+       err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count, 0,
+                       1, dma->map);
 
        if (user_dma.page_count != err) {
                IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
 
        ivtv_udma_get_page_info (&uv_dma, (unsigned long)args->uv_source, 360 * uv_decode_height);
 
        /* Get user pages for DMA Xfer */
-       y_pages = get_user_pages_unlocked(current, current->mm,
-                               y_dma.uaddr, y_dma.page_count, 0, 1,
-                               &dma->map[0]);
+       y_pages = get_user_pages_unlocked(y_dma.uaddr,
+                       y_dma.page_count, 0, 1, &dma->map[0]);
        uv_pages = 0; /* silence gcc. value is set and consumed only if: */
        if (y_pages == y_dma.page_count) {
-               uv_pages = get_user_pages_unlocked(current, current->mm,
-                                       uv_dma.uaddr, uv_dma.page_count, 0, 1,
-                                       &dma->map[y_pages]);
+               uv_pages = get_user_pages_unlocked(uv_dma.uaddr,
+                               uv_dma.page_count, 0, 1, &dma->map[y_pages]);
        }
 
        if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {
 
        dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
                data, size, dma->nr_pages);
 
-       err = get_user_pages(current, current->mm,
-                            data & PAGE_MASK, dma->nr_pages,
+       err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
                             rw == READ, 1, /* force */
                             dma->pages, NULL);
 
 
                }
 
                pinned_pages->nr_pages = get_user_pages(
-                               current,
-                               mm,
                                (u64)addr,
                                nr_pages,
                                !!(prot & SCIF_PROT_WRITE),
 
 #else
        *pageshift = PAGE_SHIFT;
 #endif
-       if (get_user_pages
-           (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
+       if (get_user_pages(vaddr, 1, write, 0, &page, NULL) <= 0)
                return -EFAULT;
        *paddr = page_to_phys(page);
        put_page(page);
 
         /* Try to fault in all of the necessary pages */
         /* rw==READ means read from drive, write into memory area */
        res = get_user_pages_unlocked(
-               current,
-               current->mm,
                uaddr,
                nr_pages,
                rw == READ,
 
        if (!pages)
                return -ENOMEM;
 
-       ret = get_user_pages_unlocked(current, current->mm, (unsigned long)buf,
-                                     nr_pages, WRITE, 0, pages);
+       ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, WRITE,
+                       0, pages);
 
        if (ret < nr_pages) {
                nr_pages = ret;
 
 
        /* Get the physical addresses of the source buffer */
        down_read(&current->mm->mmap_sem);
-       num_pinned = get_user_pages(current, current->mm,
-               param.local_vaddr - lb_offset, num_pages,
-               (param.source == -1) ? READ : WRITE,
+       num_pinned = get_user_pages(param.local_vaddr - lb_offset,
+               num_pages, (param.source == -1) ? READ : WRITE,
                0, pages, NULL);
        up_read(&current->mm->mmap_sem);
 
 
        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
                vec->got_ref = true;
                vec->is_pfns = false;
-               ret = get_user_pages_locked(current, mm, start, nr_frames,
+               ret = get_user_pages_locked(start, nr_frames,
                        write, force, (struct page **)(vec->ptrs), &locked);
                goto out;
        }
 
 EXPORT_SYMBOL(get_user_pages_remote);
 
 /*
- * This is the same as get_user_pages_remote() for the time
- * being.
+ * This is the same as get_user_pages_remote(), just with a
+ * less-flexible calling convention where we assume that the task
+ * and mm being operated on are the current task's.  We also
+ * obviously don't pass FOLL_REMOTE in here.
  */
 long get_user_pages6(unsigned long start, unsigned long nr_pages,
                int write, int force, struct page **pages,
 
 /*
  * We use break_ksm to break COW on a ksm page: it's a stripped down
  *
- *     if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
+ *     if (get_user_pages(addr, 1, 1, 1, &page, NULL) == 1)
  *             put_page(page);
  *
  * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
 
        }
 }
 
-static int lookup_node(struct mm_struct *mm, unsigned long addr)
+static int lookup_node(unsigned long addr)
 {
        struct page *p;
        int err;
 
-       err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
+       err = get_user_pages(addr & PAGE_MASK, 1, 0, 0, &p, NULL);
        if (err >= 0) {
                err = page_to_nid(p);
                put_page(p);
 
        if (flags & MPOL_F_NODE) {
                if (flags & MPOL_F_ADDR) {
-                       err = lookup_node(mm, addr);
+                       err = lookup_node(addr);
                        if (err < 0)
                                goto out;
                        *policy = err;
 
                return ERR_PTR(-ENOMEM);
 
        while (got < num_pages) {
-               rc = get_user_pages_unlocked(current, current->mm,
+               rc = get_user_pages_unlocked(
                    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
                    num_pages - got, write_page, 0, pages + got);
                if (rc < 0)
 
        return gfn_to_hva_memslot_prot(slot, gfn, writable);
 }
 
-static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
-       unsigned long start, int write, struct page **page)
+static int get_user_page_nowait(unsigned long start, int write,
+               struct page **page)
 {
        int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;
 
        if (write)
                flags |= FOLL_WRITE;
 
-       return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
+       return __get_user_pages(current, current->mm, start, 1, flags, page,
+                       NULL, NULL);
 }
 
 static inline int check_user_page_hwpoison(unsigned long addr)
 
        if (async) {
                down_read(&current->mm->mmap_sem);
-               npages = get_user_page_nowait(current, current->mm,
-                                             addr, write_fault, page);
+               npages = get_user_page_nowait(addr, write_fault, page);
                up_read(&current->mm->mmap_sem);
        } else
                npages = __get_user_pages_unlocked(current, current->mm, addr, 1,