ptent = pte_file_clear_soft_dirty(ptent);
        }
 
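+       /*
+        * Drop the vma-level flag as well; the check avoids
+        * rewriting vm_flags on every pte in the range.
+        */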
+       if (vma->vm_flags & VM_SOFTDIRTY)
+               vma->vm_flags &= ~VM_SOFTDIRTY;
+
        set_pte_at(vma->vm_mm, addr, pte, ptent);
 #endif
 }
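
For context on the other half of the interface: user space arms and resets the
tracker through /proc/pid/clear_refs, where writing "4" clears the soft-dirty
bits (and, with the hunk above, the VM_SOFTDIRTY vma flag). A minimal sketch of
that side; the helper name is ours:

#include <fcntl.h>
#include <unistd.h>

/* Hypothetical helper: reset soft-dirty state for the calling task. */
static int clear_soft_dirty_bits(void)
{
        int fd = open("/proc/self/clear_refs", O_WRONLY);

        if (fd < 0)
                return -1;
        /* "4" asks the kernel to clear the soft-dirty bits */
        if (write(fd, "4", 1) != 1) {
                close(fd);
                return -1;
        }
        close(fd);
        return 0;
}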
                if (is_migration_entry(entry))
                        page = migration_entry_to_page(entry);
        } else {
-               *pme = make_pme(PM_NOT_PRESENT(pm->v2));
+               if (vma->vm_flags & VM_SOFTDIRTY)
+                       flags2 |= __PM_SOFT_DIRTY;
+               *pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
                return;
        }
 
        if (page && !PageAnon(page))
                flags |= PM_FILE;
-       if (pte_soft_dirty(pte))
+       if ((vma->vm_flags & VM_SOFTDIRTY) || pte_soft_dirty(pte))
                flags2 |= __PM_SOFT_DIRTY;
 
        *pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags);
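
PM_STATUS2() packs flags2 into the upper status bits of the pagemap entry, so
__PM_SOFT_DIRTY surfaces to user space as bit 55 of the 64-bit word read from
/proc/pid/pagemap. A sketch of the corresponding check; the helper name is
ours:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

/*
 * Hypothetical helper: returns 1 if the page backing vaddr is reported
 * soft-dirty, 0 if clean, -1 on error. pagemap_fd is an open fd for
 * /proc/<pid>/pagemap, which holds one 64-bit entry per page.
 */
static int page_soft_dirty(int pagemap_fd, unsigned long vaddr)
{
        uint64_t pme;
        off_t off = (off_t)(vaddr / sysconf(_SC_PAGESIZE)) * sizeof(pme);

        if (pread(pagemap_fd, &pme, sizeof(pme), off) != sizeof(pme))
                return -1;
        return (int)((pme >> 55) & 1);  /* bit 55: soft-dirty */
}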
                *pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
                                | PM_STATUS2(pm->v2, pmd_flags2) | PM_PRESENT);
        else
-               *pme = make_pme(PM_NOT_PRESENT(pm->v2));
+               *pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, pmd_flags2));
 }
 #else
 static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
        if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
                int pmd_flags2;
 
-               pmd_flags2 = (pmd_soft_dirty(*pmd) ? __PM_SOFT_DIRTY : 0);
+               if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
+                       pmd_flags2 = __PM_SOFT_DIRTY;
+               else
+                       pmd_flags2 = 0;
+
                for (; addr != end; addr += PAGE_SIZE) {
                        unsigned long offset;
 
        if (pmd_trans_unstable(pmd))
                return 0;
        for (; addr != end; addr += PAGE_SIZE) {
+               int flags2;
 
                /* check to see if we've left 'vma' behind
                 * and need a new, higher one */
                if (vma && (addr >= vma->vm_end)) {
                        vma = find_vma(walk->mm, addr);
-                       pme = make_pme(PM_NOT_PRESENT(pm->v2));
+                       if (vma && (vma->vm_flags & VM_SOFTDIRTY))
+                               flags2 = __PM_SOFT_DIRTY;
+                       else
+                               flags2 = 0;
+                       pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
                }
 
                /* check that 'vma' actually covers this address,
 
 #ifdef CONFIG_HUGETLB_PAGE
 static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
-                                       pte_t pte, int offset)
+                                       pte_t pte, int offset, int flags2)
 {
        if (pte_present(pte))
-               *pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)
-                               | PM_STATUS2(pm->v2, 0) | PM_PRESENT);
+               *pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)        |
+                               PM_STATUS2(pm->v2, flags2)              |
+                               PM_PRESENT);
        else
-               *pme = make_pme(PM_NOT_PRESENT(pm->v2));
+               *pme = make_pme(PM_NOT_PRESENT(pm->v2)                  |
+                               PM_STATUS2(pm->v2, flags2));
 }
 
 /* This function walks within one hugetlb entry in the single call */
                                 struct mm_walk *walk)
 {
        struct pagemapread *pm = walk->private;
+       struct vm_area_struct *vma;
        int err = 0;
+       int flags2;
        pagemap_entry_t pme;
 
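+       /*
+        * The hugetlb walk does not pass the vma down, so look
+        * it up here to check the vma-level soft-dirty flag.
+        */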
+       vma = find_vma(walk->mm, addr);
+       WARN_ON_ONCE(!vma);
+
+       if (vma && (vma->vm_flags & VM_SOFTDIRTY))
+               flags2 = __PM_SOFT_DIRTY;
+       else
+               flags2 = 0;
+
        for (; addr != end; addr += PAGE_SIZE) {
                int offset = (addr & ~hmask) >> PAGE_SHIFT;
-               huge_pte_to_pagemap_entry(&pme, pm, *pte, offset);
+               huge_pte_to_pagemap_entry(&pme, pm, *pte, offset, flags2);
                err = add_to_pagemap(addr, &pme, pm);
                if (err)
                        return err;
 
        if (file)
                uprobe_mmap(vma);
 
+       /*
+        * A new (or expanded) vma always gets soft-dirty status.
+        * Otherwise a user-space soft-dirty page tracker would not
+        * be able to tell when a vma is unmapped and a new one is
+        * mapped in its place, which must be treated as a completely
+        * new data area.
+        */
+       vma->vm_flags |= VM_SOFTDIRTY;
+
        return addr;
 
 unmap_and_free_vma:
        mm->total_vm += len >> PAGE_SHIFT;
        if (flags & VM_LOCKED)
                mm->locked_vm += (len >> PAGE_SHIFT);
+       vma->vm_flags |= VM_SOFTDIRTY;
        return addr;
 }
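
The ambiguity described in the comment above can be seen from user space:
clear soft-dirty state, unmap a region, then map a new one at the same
address. Without the vma-level flag the untouched new pages would read back
clean. An illustrative sketch of that sequence (error handling elided; the
final state would be observed with the bit-55 test sketched earlier):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long psz = sysconf(_SC_PAGESIZE);
        int fd = open("/proc/self/clear_refs", O_WRONLY);
        char *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        p[0] = 1;               /* dirty the page */
        write(fd, "4", 1);      /* clear soft-dirty everywhere */

        munmap(p, psz);         /* drop the area ... */
        p = mmap(p, psz, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

        /*
         * ... and map a new one in its place: with VM_SOFTDIRTY set
         * on the fresh vma, its pages report soft-dirty (pagemap
         * bit 55) even though nothing has written to them yet.
         */
        close(fd);
        return 0;
}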
 
        vma->vm_start = addr;
        vma->vm_end = addr + len;
 
-       vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
+       vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 
        vma->vm_ops = &special_mapping_vmops;