* does not include FOLL_NOWAIT, the mmap_lock may be released.  If it
  * is, *@locked will be set to 0 and -EBUSY returned.
  */
-static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
+static int faultin_page(struct vm_area_struct *vma,
                unsigned long address, unsigned int *flags, int *locked)
 {
        unsigned int fault_flags = 0;
 
 /**
  * __get_user_pages() - pin user pages in memory
- * @tsk:       task_struct of target task
  * @mm:                mm_struct of target mm
  * @start:     starting user address
  * @nr_pages:  number of pages from start to pin
  * instead of __get_user_pages. __get_user_pages should be used only if
  * you need some special @gup_flags.
  */
-static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+static long __get_user_pages(struct mm_struct *mm,
                unsigned long start, unsigned long nr_pages,
                unsigned int gup_flags, struct page **pages,
                struct vm_area_struct **vmas, int *locked)
 
                page = follow_page_mask(vma, start, foll_flags, &ctx);
                if (!page) {
-                       ret = faultin_page(tsk, vma, start, &foll_flags,
-                                          locked);
+                       ret = faultin_page(vma, start, &foll_flags, locked);
                        switch (ret) {
                        case 0:
                                goto retry;
 
 /**
  * fixup_user_fault() - manually resolve a user page fault
- * @tsk:       the task_struct to use for page fault accounting, or
- *             NULL if faults are not to be recorded.
  * @mm:                mm_struct of target mm
  * @address:   user address
  * @fault_flags:flags to pass down to handle_mm_fault()
  * This function will not return with an unlocked mmap_lock. So it has not the
  * same semantics wrt the @mm->mmap_lock as does filemap_fault().
  */
-int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
+int fixup_user_fault(struct mm_struct *mm,
                     unsigned long address, unsigned int fault_flags,
                     bool *unlocked)
 {
  * Please note that this function, unlike __get_user_pages will not
  * return 0 for nr_pages > 0 without FOLL_NOWAIT
  */
-static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
-                                               struct mm_struct *mm,
+static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
                                                unsigned long start,
                                                unsigned long nr_pages,
                                                struct page **pages,
        pages_done = 0;
        lock_dropped = false;
        for (;;) {
-               ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
+               ret = __get_user_pages(mm, start, nr_pages, flags, pages,
                                       vmas, locked);
                if (!locked)
                        /* VM_FAULT_RETRY couldn't trigger, bypass */
                }
 
                *locked = 1;
-               ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
+               ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
                                       pages, NULL, locked);
                if (!*locked) {
                        /* Continue to retry until we succeeded */
         * We made sure addr is within a VMA, so the following will
         * not result in a stack expansion that recurses back here.
         */
-       return __get_user_pages(current, mm, start, nr_pages, gup_flags,
+       return __get_user_pages(mm, start, nr_pages, gup_flags,
                                NULL, NULL, locked);
 }
 
        struct vm_area_struct *vma;
        struct page *page;
 
-       if (__get_user_pages(current, current->mm, addr, 1,
+       if (__get_user_pages(current->mm, addr, 1,
                             FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
                             NULL) < 1)
                return NULL;
 }
 #endif /* CONFIG_ELF_CORE */
 #else /* CONFIG_MMU */
-static long __get_user_pages_locked(struct task_struct *tsk,
-               struct mm_struct *mm, unsigned long start,
+static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
                unsigned long nr_pages, struct page **pages,
                struct vm_area_struct **vmas, int *locked,
                unsigned int foll_flags)
 }
 
 #ifdef CONFIG_CMA
-static long check_and_migrate_cma_pages(struct task_struct *tsk,
-                                       struct mm_struct *mm,
+static long check_and_migrate_cma_pages(struct mm_struct *mm,
                                        unsigned long start,
                                        unsigned long nr_pages,
                                        struct page **pages,
                 * again migrating any new CMA pages which we failed to isolate
                 * earlier.
                 */
-               ret = __get_user_pages_locked(tsk, mm, start, nr_pages,
+               ret = __get_user_pages_locked(mm, start, nr_pages,
                                                   pages, vmas, NULL,
                                                   gup_flags);
 
        return ret;
 }
 #else
-static long check_and_migrate_cma_pages(struct task_struct *tsk,
-                                       struct mm_struct *mm,
+static long check_and_migrate_cma_pages(struct mm_struct *mm,
                                        unsigned long start,
                                        unsigned long nr_pages,
                                        struct page **pages,
  * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
  * allows us to process the FOLL_LONGTERM flag.
  */
-static long __gup_longterm_locked(struct task_struct *tsk,
-                                 struct mm_struct *mm,
+static long __gup_longterm_locked(struct mm_struct *mm,
                                  unsigned long start,
                                  unsigned long nr_pages,
                                  struct page **pages,
                flags = memalloc_nocma_save();
        }
 
-       rc = __get_user_pages_locked(tsk, mm, start, nr_pages, pages,
+       rc = __get_user_pages_locked(mm, start, nr_pages, pages,
                                     vmas_tmp, NULL, gup_flags);
 
        if (gup_flags & FOLL_LONGTERM) {
                        goto out;
                }
 
-               rc = check_and_migrate_cma_pages(tsk, mm, start, rc, pages,
+               rc = check_and_migrate_cma_pages(mm, start, rc, pages,
                                                 vmas_tmp, gup_flags);
 out:
                memalloc_nocma_restore(flags);
        return rc;
 }
 #else /* !CONFIG_FS_DAX && !CONFIG_CMA */
-static __always_inline long __gup_longterm_locked(struct task_struct *tsk,
-                                                 struct mm_struct *mm,
+static __always_inline long __gup_longterm_locked(struct mm_struct *mm,
                                                  unsigned long start,
                                                  unsigned long nr_pages,
                                                  struct page **pages,
                                                  struct vm_area_struct **vmas,
                                                  unsigned int flags)
 {
-       return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
+       return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
                                       NULL, flags);
 }
 #endif /* CONFIG_FS_DAX || CONFIG_CMA */
 
 #ifdef CONFIG_MMU
-static long __get_user_pages_remote(struct task_struct *tsk,
-                                   struct mm_struct *mm,
+static long __get_user_pages_remote(struct mm_struct *mm,
                                    unsigned long start, unsigned long nr_pages,
                                    unsigned int gup_flags, struct page **pages,
                                    struct vm_area_struct **vmas, int *locked)
                 * This will check the vmas (even if our vmas arg is NULL)
                 * and return -ENOTSUPP if DAX isn't allowed in this case:
                 */
-               return __gup_longterm_locked(tsk, mm, start, nr_pages, pages,
+               return __gup_longterm_locked(mm, start, nr_pages, pages,
                                             vmas, gup_flags | FOLL_TOUCH |
                                             FOLL_REMOTE);
        }
 
-       return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
+       return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
                                       locked,
                                       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
 }
 
 /**
  * get_user_pages_remote() - pin user pages in memory
- * @tsk:       the task_struct to use for page fault accounting, or
- *             NULL if faults are not to be recorded.
  * @mm:                mm_struct of target mm
  * @start:     starting user address
  * @nr_pages:  number of pages from start to pin
  * should use get_user_pages_remote because it cannot pass
  * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
  */
-long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+long get_user_pages_remote(struct mm_struct *mm,
                unsigned long start, unsigned long nr_pages,
                unsigned int gup_flags, struct page **pages,
                struct vm_area_struct **vmas, int *locked)
        if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
                return -EINVAL;
 
-       return __get_user_pages_remote(tsk, mm, start, nr_pages, gup_flags,
+       return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
                                       pages, vmas, locked);
 }
 EXPORT_SYMBOL(get_user_pages_remote);
 
 #else /* CONFIG_MMU */
-long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+long get_user_pages_remote(struct mm_struct *mm,
                           unsigned long start, unsigned long nr_pages,
                           unsigned int gup_flags, struct page **pages,
                           struct vm_area_struct **vmas, int *locked)
        return 0;
 }
 
-static long __get_user_pages_remote(struct task_struct *tsk,
-                                   struct mm_struct *mm,
+static long __get_user_pages_remote(struct mm_struct *mm,
                                    unsigned long start, unsigned long nr_pages,
                                    unsigned int gup_flags, struct page **pages,
                                    struct vm_area_struct **vmas, int *locked)
  * @vmas:       array of pointers to vmas corresponding to each page.
  *              Or NULL if the caller does not require them.
  *
- * This is the same as get_user_pages_remote(), just with a
- * less-flexible calling convention where we assume that the task
- * and mm being operated on are the current task's and don't allow
- * passing of a locked parameter.  We also obviously don't pass
- * FOLL_REMOTE in here.
+ * This is the same as get_user_pages_remote(), just with a less-flexible
+ * calling convention: we assume that the mm being operated on belongs to
+ * the current task, and we do not allow passing of a locked parameter.  We
+ * also obviously don't pass FOLL_REMOTE in here.
  */
 long get_user_pages(unsigned long start, unsigned long nr_pages,
                unsigned int gup_flags, struct page **pages,
        if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
                return -EINVAL;
 
-       return __gup_longterm_locked(current, current->mm, start, nr_pages,
+       return __gup_longterm_locked(current->mm, start, nr_pages,
                                     pages, vmas, gup_flags | FOLL_TOUCH);
 }
 EXPORT_SYMBOL(get_user_pages);
  *
  *      mmap_read_lock(mm);
  *      do_something()
- *      get_user_pages(tsk, mm, ..., pages, NULL);
+ *      get_user_pages(start, nr_pages, gup_flags, pages, NULL);
  *      mmap_read_unlock(mm);
  *
  *  to:
  *      int locked = 1;
  *      mmap_read_lock(mm);
  *      do_something()
- *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
+ *      get_user_pages_locked(start, nr_pages, gup_flags, pages, &locked);
  *      if (locked)
  *          mmap_read_unlock(mm);
  *
        if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
                return -EINVAL;
 
-       return __get_user_pages_locked(current, current->mm, start, nr_pages,
+       return __get_user_pages_locked(current->mm, start, nr_pages,
                                       pages, NULL, locked,
                                       gup_flags | FOLL_TOUCH);
 }
  * get_user_pages_unlocked() is suitable to replace the form:
  *
  *      mmap_read_lock(mm);
- *      get_user_pages(tsk, mm, ..., pages, NULL);
+ *      get_user_pages(start, nr_pages, gup_flags, pages, NULL);
  *      mmap_read_unlock(mm);
  *
  *  with:
  *
- *      get_user_pages_unlocked(tsk, mm, ..., pages);
+ *      get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
  *
  * It is functionally equivalent to get_user_pages_fast so
  * get_user_pages_fast should be used instead if specific gup_flags
                return -EINVAL;
 
        mmap_read_lock(mm);
-       ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
+       ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL,
                                      &locked, gup_flags | FOLL_TOUCH);
        if (locked)
                mmap_read_unlock(mm);
         */
        if (gup_flags & FOLL_LONGTERM) {
                mmap_read_lock(current->mm);
-               ret = __gup_longterm_locked(current, current->mm,
+               ret = __gup_longterm_locked(current->mm,
                                            start, nr_pages,
                                            pages, NULL, gup_flags);
                mmap_read_unlock(current->mm);
 EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
 
 /**
- * pin_user_pages_remote() - pin pages of a remote process (task != current)
+ * pin_user_pages_remote() - pin pages of a remote process
  *
- * @tsk:       the task_struct to use for page fault accounting, or
- *             NULL if faults are not to be recorded.
  * @mm:                mm_struct of target mm
  * @start:     starting user address
  * @nr_pages:  number of pages from start to pin
  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
  * see Documentation/core-api/pin_user_pages.rst for details.
  */
-long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+long pin_user_pages_remote(struct mm_struct *mm,
                           unsigned long start, unsigned long nr_pages,
                           unsigned int gup_flags, struct page **pages,
                           struct vm_area_struct **vmas, int *locked)
                return -EINVAL;
 
        gup_flags |= FOLL_PIN;
-       return __get_user_pages_remote(tsk, mm, start, nr_pages, gup_flags,
+       return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
                                       pages, vmas, locked);
 }
 EXPORT_SYMBOL(pin_user_pages_remote);
                return -EINVAL;
 
        gup_flags |= FOLL_PIN;
-       return __gup_longterm_locked(current, current->mm, start, nr_pages,
+       return __gup_longterm_locked(current->mm, start, nr_pages,
                                     pages, vmas, gup_flags);
 }
 EXPORT_SYMBOL(pin_user_pages);
                return -EINVAL;
 
        gup_flags |= FOLL_PIN;
-       return __get_user_pages_locked(current, current->mm, start, nr_pages,
+       return __get_user_pages_locked(current->mm, start, nr_pages,
                                       pages, NULL, locked,
                                       gup_flags | FOLL_TOUCH);
 }