www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/userfaultfd: fix a few thp pmd missing uffd-wp bit
author Peter Xu <peterx@redhat.com>
Wed, 2 Jun 2021 03:52:49 +0000 (13:52 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Wed, 2 Jun 2021 03:52:49 +0000 (13:52 +1000)
These include:

1. When removing a migration pmd entry, we should keep the uffd-wp bit from
   the swap pte.  Note that we need to do this after setting the write bit,
   just in case we need to remove it.

2. When changing a huge pmd and converting a write -> read migration entry,
   persist the same uffd-wp bit.

3. When converting a pmd to a swap entry, we should always drop the uffd-wp bit.

Link: https://lkml.kernel.org/r/20210428225030.9708-4-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Brian Geffon <bgeffon@google.com>
Cc: "Dr . David Alan Gilbert" <dgilbert@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Joe Perches <joe@perches.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Lokesh Gidra <lokeshgidra@google.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Oliver Upton <oupton@google.com>
Cc: Shaohua Li <shli@fb.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Wang Qing <wangqing@vivo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
include/linux/swapops.h
mm/huge_memory.c

index 3ed89479cf1b10979e87ebfcb2d4198c07aed7ca..af3d2661e41e8887d9eacc5d92b1c56d4b76cd4a 100644 (file)
@@ -281,6 +281,8 @@ static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
 
        if (pmd_swp_soft_dirty(pmd))
                pmd = pmd_swp_clear_soft_dirty(pmd);
+       if (pmd_swp_uffd_wp(pmd))
+               pmd = pmd_swp_clear_uffd_wp(pmd);
        arch_entry = __pmd_to_swp_entry(pmd);
        return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
 }
index 0c86ad8f1b3d82934a988034e881c92fc781e252..68c78f72380ca83b7eeb0c5e340c692b2f793ccd 100644 (file)
@@ -1831,6 +1831,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        newpmd = swp_entry_to_pmd(entry);
                        if (pmd_swp_soft_dirty(*pmd))
                                newpmd = pmd_swp_mksoft_dirty(newpmd);
+                       if (pmd_swp_uffd_wp(*pmd))
+                               newpmd = pmd_swp_mkuffd_wp(newpmd);
                        set_pmd_at(mm, addr, pmd, newpmd);
                }
                goto unlock;
@@ -3255,6 +3257,8 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
                pmde = pmd_mksoft_dirty(pmde);
        if (is_writable_migration_entry(entry))
                pmde = maybe_pmd_mkwrite(pmde, vma);
+       if (pmd_swp_uffd_wp(*pvmw->pmd))
+               pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde));
 
        flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
        if (PageAnon(new))