@@ ... @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                             struct mm_walk *walk)
 {
-       struct vm_area_struct *vma;
+       struct vm_area_struct *vma = walk->vma;
        struct pagemapread *pm = walk->private;
        spinlock_t *ptl;
        pte_t *pte, *orig_pte;
        int err = 0;
 
-       /* find the first VMA at or above 'addr' */
-       vma = find_vma(walk->mm, addr);
-       if (vma && pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+       if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
                int pmd_flags2;
 
                if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
@@ ... @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
        if (pmd_trans_unstable(pmd))
                return 0;
 
-       while (1) {
-               /* End of address space hole, which we mark as non-present. */
-               unsigned long hole_end;
-
-               if (vma)
-                       hole_end = min(end, vma->vm_start);
-               else
-                       hole_end = end;
-
-               for (; addr < hole_end; addr += PAGE_SIZE) {
-                       pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
-
-                       err = add_to_pagemap(addr, &pme, pm);
-                       if (err)
-                               return err;
-               }
-
-               if (!vma || vma->vm_start >= end)
-                       break;
-               /*
-                * We can't possibly be in a hugetlb VMA. In general,
-                * for a mm_walk with a pmd_entry and a hugetlb_entry,
-                * the pmd_entry can only be called on addresses in a
-                * hugetlb if the walk starts in a non-hugetlb VMA and
-                * spans a hugepage VMA. Since pagemap_read walks are
-                * PMD-sized and PMD-aligned, this will never be true.
-                */
-               BUG_ON(is_vm_hugetlb_page(vma));
-
-               /* Addresses in the VMA. */
-               orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
-               for (; addr < min(end, vma->vm_end); pte++, addr += PAGE_SIZE) {
-                       pagemap_entry_t pme;
-
-                       pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
-                       err = add_to_pagemap(addr, &pme, pm);
-                       if (err)
-                               break;
-               }
-               pte_unmap_unlock(orig_pte, ptl);
+       /*
+        * We can assume that @vma always points to a valid VMA and that @end
+        * never goes beyond vma->vm_end.
+        */
+       orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+       for (; addr < end; pte++, addr += PAGE_SIZE) {
+               pagemap_entry_t pme;
 
+               pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
+               err = add_to_pagemap(addr, &pme, pm);
                if (err)
-                       return err;
-
-               if (addr == end)
                        break;
-
-               vma = find_vma(walk->mm, addr);
        }
+       pte_unmap_unlock(orig_pte, ptl);
 
        cond_resched();
 
@@ ... @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
                                 struct mm_walk *walk)
 {
        struct pagemapread *pm = walk->private;
-       struct vm_area_struct *vma;
+       struct vm_area_struct *vma = walk->vma;
        int err = 0;
        int flags2;
        pagemap_entry_t pme;
 
-       vma = find_vma(walk->mm, addr);
-       WARN_ON_ONCE(!vma);
-
-       if (vma && (vma->vm_flags & VM_SOFTDIRTY))
+       if (vma->vm_flags & VM_SOFTDIRTY)
                flags2 = __PM_SOFT_DIRTY;
        else
                flags2 = 0;
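
Why both callbacks can drop their find_vma() calls: the reworked page walker
resolves the VMA once per range, records it in walk->vma, and clamps each
callback invocation to vma->vm_end, which is exactly the invariant the new
comment in pagemap_pte_range() states. Below is a minimal sketch of that
caller-side loop, not part of the patch; walk_range_sketch is a hypothetical
name, the page-table descent is reduced to a comment, and locking and error
handling are omitted.

/*
 * Illustrative sketch only: how a walker can guarantee that entry
 * callbacks see a valid walk->vma and an @end that never crosses
 * vma->vm_end.
 */
static int walk_range_sketch(unsigned long start, unsigned long end,
                             struct mm_walk *walk)
{
        struct vm_area_struct *vma = find_vma(walk->mm, start);
        unsigned long next;

        while (start < end) {
                if (!vma || start < vma->vm_start) {
                        /* Hole: the real walker hands this range to
                         * the ->pte_hole callback instead. */
                        walk->vma = NULL;
                        next = vma ? min(end, vma->vm_start) : end;
                } else {
                        /* Inside a VMA: clamp so callbacks never walk
                         * past vm_end. */
                        walk->vma = vma;
                        next = min(end, vma->vm_end);
                        if (next == vma->vm_end)
                                vma = vma->vm_next;
                }
                /* ... descend pgd/pud/pmd/pte for [start, next) ... */
                start = next;
        }
        return 0;
}

Under this invariant the hole-filling duty moves out of pagemap_pte_range():
the removed while (1) loop that emitted PM_NOT_PRESENT entries for unmapped
ranges is covered by the walker dispatching holes to the pte_hole callback
(pagemap_pte_hole), so the callback body shrinks to a single PTE loop over
[addr, end).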