mm/migrate: fix wrongly apply write bit after mkdirty on sparc64
author     Peter Xu <peterx@redhat.com>
           Thu, 16 Feb 2023 15:30:59 +0000 (10:30 -0500)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 22 Feb 2023 11:59:49 +0000 (12:59 +0100)
commit 96a9c287e25d690fd9623b5133703b8e310fbed1 upstream.

Nick Bowler reported another sparc64 breakage after the young/dirty bit
persistence work for page migration (per the "Link:" below).  It follows a
similar earlier report [1].

It turns out page migration was overlooked, and it wasn't failing before
only because page migration was not enabled in the test environment of the
initial report.

David proposed another way [2] to fix this from the sparc64 side, but that
patch never landed.  Nor have I checked whether any other architecture has
the same issue.

Let's fix it for now simply by moving the write bit handling to after the
dirty bit handling, as we did before.
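
For context: the ordering hazard is that on sparc64 pte_mkdirty() can also
set the hardware write bit, so applying mkwrite/wrprotect before mkdirty
lets the dirty step wrongly re-enable write access on an entry that should
stay write-protected.  The stand-alone user-space sketch below is
illustrative only (the PTE_* bit values and the mocked pte_mkdirty() and
pte_wrprotect() helpers are assumptions made for the example, not the real
sparc64 definitions); it shows why the fix moves the write bit handling to
after mkdirty:

  #include <stdio.h>

  #define PTE_WRITE 0x1UL
  #define PTE_DIRTY 0x2UL

  typedef unsigned long pte_t;

  /* Assumed sparc64-like behaviour: dirtying an entry also makes it writable. */
  static pte_t pte_mkdirty(pte_t pte)   { return pte | PTE_DIRTY | PTE_WRITE; }
  static pte_t pte_wrprotect(pte_t pte) { return pte & ~PTE_WRITE; }

  int main(void)
  {
          /* Old order: wrprotect before mkdirty, the write bit comes back. */
          pte_t old_order = pte_mkdirty(pte_wrprotect(0));
          /* New order: mkdirty before wrprotect, the entry stays read-only. */
          pte_t new_order = pte_wrprotect(pte_mkdirty(0));

          printf("old order writable: %lu\n", old_order & PTE_WRITE);  /* 1, wrong   */
          printf("new order writable: %lu\n", new_order & PTE_WRITE);  /* 0, correct */
          return 0;
  }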

Note: this is based on mm-unstable, because the breakage has existed since
6.1 and we're at a very late stage of 6.2 (-rc8), so I assume that for this
specific case we should target the fix at 6.3.

[1] https://lore.kernel.org/all/20221021160603.GA23307@u164.east.ru/
[2] https://lore.kernel.org/all/20221212130213.136267-1-david@redhat.com/

Link: https://lkml.kernel.org/r/20230216153059.256739-1-peterx@redhat.com
Fixes: 2e3468778dbe ("mm: remember young/dirty bit for page migrations")
Link: https://lore.kernel.org/all/CADyTPExpEqaJiMGoV+Z6xVgL50ZoMJg49B10LcZ=8eg19u34BA@mail.gmail.com/
Signed-off-by: Peter Xu <peterx@redhat.com>
Reported-by: Nick Bowler <nbowler@draconx.ca>
Acked-by: David Hildenbrand <david@redhat.com>
Tested-by: Nick Bowler <nbowler@draconx.ca>
Cc: <regressions@lists.linux.dev>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 mm/huge_memory.c | 6 ++++--
 mm/migrate.c     | 2 ++
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 811d19b5c4f606f4bf6e31df751d376ee000aaef..e7cf013a0efd0131ae0d8ebf96f41cb3b1f93947 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3253,8 +3253,6 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
        pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
        if (pmd_swp_soft_dirty(*pvmw->pmd))
                pmde = pmd_mksoft_dirty(pmde);
-       if (is_writable_migration_entry(entry))
-               pmde = maybe_pmd_mkwrite(pmde, vma);
        if (pmd_swp_uffd_wp(*pvmw->pmd))
                pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde));
        if (!is_migration_entry_young(entry))
@@ -3262,6 +3260,10 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
        /* NOTE: this may contain setting soft-dirty on some archs */
        if (PageDirty(new) && is_migration_entry_dirty(entry))
                pmde = pmd_mkdirty(pmde);
+       if (is_writable_migration_entry(entry))
+               pmde = maybe_pmd_mkwrite(pmde, vma);
+       else
+               pmde = pmd_wrprotect(pmde);
 
        if (PageAnon(new)) {
                rmap_t rmap_flags = RMAP_COMPOUND;
diff --git a/mm/migrate.c b/mm/migrate.c
index dff333593a8ae20aeea333d23c11db26b02bb8ce..8d5c0dc618a574d86dcf7f2dd74d19b47c34999e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -215,6 +215,8 @@ static bool remove_migration_pte(struct folio *folio,
                        pte = maybe_mkwrite(pte, vma);
                else if (pte_swp_uffd_wp(*pvmw.pte))
                        pte = pte_mkuffd_wp(pte);
+               else
+                       pte = pte_wrprotect(pte);
 
                if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
                        rmap_flags |= RMAP_EXCLUSIVE;