 #define FOLL_FORCE     0x10    /* get_user_pages read/write w/o permission */
 #define FOLL_NOWAIT    0x20    /* if a disk transfer is needed, start the IO
                                 * and return without waiting upon it */
-#define FOLL_POPULATE  0x40    /* fault in pages (with FOLL_MLOCK) */
 #define FOLL_NOFAULT   0x80    /* do not fault in pages */
 #define FOLL_HWPOISON  0x100   /* check page is hwpoisoned */
 #define FOLL_NUMA      0x200   /* force NUMA hinting page fault */
 #define FOLL_MIGRATION 0x400   /* wait for page to replace migration entry */
 #define FOLL_TRIED     0x800   /* a retry, previous pass started an IO */
-#define FOLL_MLOCK     0x1000  /* lock present pages */
 #define FOLL_REMOTE    0x2000  /* we are working on non-current tsk/mm */
 #define FOLL_COW       0x4000  /* internal GUP flag */
 #define FOLL_ANON      0x8000  /* don't do file mappings */
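
Aside, for anyone tracking the flag values: these FOLL_* bits are ORed
together into the gup_flags argument of the get_user_pages() family, so
deleting FOLL_POPULATE (0x40) and FOLL_MLOCK (0x1000) simply frees those
bits. A minimal sketch of a GUP caller, assuming kernel context --
get_user_pages_fast(), FOLL_WRITE and put_page() are the real API, but the
helper itself is hypothetical:

/* Hypothetical helper: pin one writable user page at addr.
 * On success, returns 0 and stores the page in *pagep; the caller
 * must drop the reference with put_page() when finished.
 */
static int pin_one_user_page(unsigned long addr, struct page **pagep)
{
	int nr = get_user_pages_fast(addr, 1, FOLL_WRITE, pagep);

	if (nr == 1)
		return 0;
	return nr < 0 ? nr : -EFAULT;
}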
 
                 */
                mark_page_accessed(page);
        }
-       if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
-               /* Do not mlock pte-mapped THP */
-               if (PageTransCompound(page))
-                       goto out;
-
-               /*
-                * The preliminary mapping check is mainly to avoid the
-                * pointless overhead of lock_page on the ZERO_PAGE
-                * which might bounce very badly if there is contention.
-                *
-                * If the page is already locked, we don't need to
-                * handle it now - vmscan will handle it later if and
-                * when it attempts to reclaim the page.
-                */
-               if (page->mapping && trylock_page(page)) {
-                       lru_add_drain();  /* push cached pages to LRU */
-                       /*
-                        * Because we lock page here, and migration is
-                        * blocked by the pte's page reference, and we
-                        * know the page is still mapped, we don't even
-                        * need to check for file-cache page truncation.
-                        */
-                       mlock_vma_page(page);
-                       unlock_page(page);
-               }
-       }
 out:
        pte_unmap_unlock(ptep, ptl);
        return page;
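
The block deleted above is from follow_page_pte() in mm/gup.c; removing it
changes where the mlock bookkeeping happens, not the user-visible mlock()
contract. A runnable userspace sanity check, using only standard calls
(nothing in it depends on this patch):

#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4 * (size_t)sysconf(_SC_PAGESIZE);
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	if (mlock(buf, len))	/* populates and locks the whole range */
		return 1;
	memset(buf, 1, len);	/* locked pages cannot be reclaimed under us */
	munlock(buf, len);
	return munmap(buf, len) ? 1 : 0;
}
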
        unsigned int fault_flags = 0;
        vm_fault_t ret;
 
-       /* mlock all present pages, but do not fault in new pages */
-       if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
-               return -ENOENT;
        if (*flags & FOLL_NOFAULT)
                return -EFAULT;
        if (*flags & FOLL_WRITE)
                        case -ENOMEM:
                        case -EHWPOISON:
                                goto out;
-                       case -ENOENT:
-                               goto next_page;
                        }
                        BUG();
                } else if (PTR_ERR(page) == -EEXIST) {
        VM_BUG_ON_VMA(end   > vma->vm_end, vma);
        mmap_assert_locked(mm);
 
-       gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
+       /*
+        * Rightly or wrongly, the VM_LOCKONFAULT case has never used
+        * faultin_page() to break COW, so it has no work to do here.
+        */
        if (vma->vm_flags & VM_LOCKONFAULT)
-               gup_flags &= ~FOLL_POPULATE;
+               return nr_pages;
+
+       gup_flags = FOLL_TOUCH;
        /*
         * We want to touch writable mappings with a write fault in order
         * to break COW, except for shared mappings because these don't COW
         * and we would not want to dirty them for nothing.
         */
        if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
                gup_flags |= FOLL_WRITE;

        /*
         * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
         *             the page dirty with FOLL_WRITE -- which doesn't make a
         *             difference with !FOLL_FORCE, because the page is writable
         *             in the page table.
         * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
         *                a poisoned page.
-        * FOLL_POPULATE: Always populate memory with VM_LOCKONFAULT.
         * !FOLL_FORCE: Require proper access permissions.
         */
-       gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK | FOLL_HWPOISON;
+       gup_flags = FOLL_TOUCH | FOLL_HWPOISON;
        if (write)
                gup_flags |= FOLL_WRITE;
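
The two gup_flags assignments above are in populate_vma_page_range() (used
by mlock() and MAP_POPULATE) and faultin_vma_page_range() (used by
madvise(MADV_POPULATE_READ/WRITE)) respectively. The new early return for
VM_LOCKONFAULT matches mlock2(..., MLOCK_ONFAULT), which defers population
to fault time. A runnable userspace sketch of that behaviour -- mlock2()
and MLOCK_ONFAULT are the real API:

#define _GNU_SOURCE
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4 * (size_t)sysconf(_SC_PAGESIZE);
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	if (mlock2(buf, len, MLOCK_ONFAULT))	/* locks, populates nothing */
		return 1;
	buf[0] = 1;	/* first touch faults the page in, already locked */
	munlock(buf, len);
	return munmap(buf, len) ? 1 : 0;
}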
 
 
        if (flags & FOLL_TOUCH)
                touch_pmd(vma, addr, pmd, flags);
 
-       if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
-               /*
-                * We don't mlock() pte-mapped THPs. This way we can avoid
-                * leaking mlocked pages into non-VM_LOCKED VMAs.
-                *
-                * For anon THP:
-                *
-                * In most cases the pmd is the only mapping of the page as we
-                * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for
-                * writable private mappings in populate_vma_page_range().
-                *
-                * The only scenario when we have the page shared here is if we
-                * mlocking read-only mapping shared over fork(). We skip
-                * mlocking such pages.
-                *
-                * For file THP:
-                *
-                * We can expect PageDoubleMap() to be stable under page lock:
-                * for file pages we set it in page_add_file_rmap(), which
-                * requires page to be locked.
-                */
-
-               if (PageAnon(page) && compound_mapcount(page) != 1)
-                       goto skip_mlock;
-               if (PageDoubleMap(page) || !page->mapping)
-                       goto skip_mlock;
-               if (!trylock_page(page))
-                       goto skip_mlock;
-               if (page->mapping && !PageDoubleMap(page))
-                       mlock_vma_page(page);
-               unlock_page(page);
-       }
-skip_mlock:
        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
        VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
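
This last hunk is follow_trans_huge_pmd() in mm/huge_memory.c: with
FOLL_MLOCK gone, GUP no longer has to reason about pte-mapped THPs,
PageDoubleMap() or page-lock ordering at all; the series moves that mlock
accounting to the fault and rmap paths instead. From userspace, the THP +
mlock combination is exercised with ordinary calls. A runnable sketch
(MADV_HUGEPAGE is only advisory, so whether a PMD-sized page actually backs
the range depends on system THP settings):

#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL << 20;		/* one PMD-sized (2 MiB) span on x86-64 */
	void *buf;

	if (posix_memalign(&buf, len, len))	/* PMD-aligned allocation */
		return 1;
	madvise(buf, len, MADV_HUGEPAGE);	/* hint: back the range with a THP */
	if (mlock(buf, len))			/* populate and lock it */
		return 1;
	munlock(buf, len);
	free(buf);
	return 0;
}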