spinlock_t *ptl;
        pte_t *pte;
        int err = 0;
-       pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
 
        /* find the first VMA at or above 'addr' */
        vma = find_vma(walk->mm, addr);
 
                for (; addr != end; addr += PAGE_SIZE) {
                        unsigned long offset;
+                       pagemap_entry_t pme;
 
                        offset = (addr & ~PAGEMAP_WALK_MASK) >>
                                        PAGE_SHIFT;
 
        if (pmd_trans_unstable(pmd))
                return 0;
-       for (; addr != end; addr += PAGE_SIZE) {
-               int flags2;
-
-               /* check to see if we've left 'vma' behind
-                * and need a new, higher one */
-               if (vma && (addr >= vma->vm_end)) {
-                       vma = find_vma(walk->mm, addr);
-                       if (vma && (vma->vm_flags & VM_SOFTDIRTY))
-                               flags2 = __PM_SOFT_DIRTY;
-                       else
-                               flags2 = 0;
-                       pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
+
+       while (1) {
+               /* End of address space hole, which we mark as non-present. */
+               unsigned long hole_end;
+
+               if (vma)
+                       hole_end = min(end, vma->vm_start);
+               else
+                       hole_end = end;
+
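+               /*
+                * Note that hole entries carry no flags at all: unlike
+                * the code removed above, an unmapped address between
+                * VMAs is never reported as soft-dirty.
+                */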
+               for (; addr < hole_end; addr += PAGE_SIZE) {
+                       pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
+
+                       err = add_to_pagemap(addr, &pme, pm);
+                       if (err)
+                               return err;
                }
 
-               /* check that 'vma' actually covers this address,
-                * and that it isn't a huge page vma */
-               if (vma && (vma->vm_start <= addr) &&
-                   !is_vm_hugetlb_page(vma)) {
+               if (!vma || vma->vm_start >= end)
+                       break;
+               /*
+                * We can't possibly be in a hugetlb VMA. In general,
+                * for a mm_walk with a pmd_entry and a hugetlb_entry,
+                * the pmd_entry can only be called on addresses in a
+                * hugetlb VMA if the walk starts in a non-hugetlb VMA and
+                * spans a hugepage VMA. Since pagemap_read walks are
+                * PMD-sized and PMD-aligned, this will never be true.
+                */
+               BUG_ON(is_vm_hugetlb_page(vma));
+
+               /* Addresses in the VMA. */
+               for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
+                       pagemap_entry_t pme;
+
                        pte = pte_offset_map(pmd, addr);
                        pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
-                       /* unmap before userspace copy */
                        pte_unmap(pte);
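+                       /* unmapped above, before the userspace copy */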
+                       err = add_to_pagemap(addr, &pme, pm);
+                       if (err)
+                               return err;
                }
-               err = add_to_pagemap(addr, &pme, pm);
-               if (err)
-                       return err;
+
+               if (addr == end)
+                       break;
+
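+               /*
+                * Find the next VMA at or above addr; any hole before
+                * it is handled on the next pass through the loop.
+                */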
+               vma = find_vma(walk->mm, addr);
        }
 
        cond_resched();
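
For context (not part of the patch), here is a minimal userspace sketch of how
the change is visible through /proc/pid/pagemap. It assumes the documented
entry format: one 64-bit word per page, with bit 63 = present, bit 62 =
swapped, and bit 55 = soft-dirty. With this patch applied, an entry for an
unmapped hole between VMAs reads back with none of these bits set.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        unsigned long vaddr;
        long psize = sysconf(_SC_PAGESIZE);
        uint64_t ent;
        int fd;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <address>\n", argv[0]);
                return 1;
        }
        vaddr = strtoul(argv[1], NULL, 0);

        fd = open("/proc/self/pagemap", O_RDONLY);
        if (fd < 0)
                return 1;

        /* One 64-bit entry per page, indexed by virtual page number. */
        if (pread(fd, &ent, sizeof(ent),
                  (off_t)(vaddr / psize) * sizeof(ent)) != sizeof(ent)) {
                close(fd);
                return 1;
        }
        close(fd);

        printf("present=%d swapped=%d soft-dirty=%d\n",
               (int)((ent >> 63) & 1), (int)((ent >> 62) & 1),
               (int)((ent >> 55) & 1));
        return 0;
}

Querying an address inside a mapped, recently written page should report
soft-dirty=1 (until the bits are cleared); an address in a hole now reports
all zeroes regardless of the flags on the neighboring VMAs.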