www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/rmap: extend rmap and migration support device-private entries
author: Balbir Singh <balbirs@nvidia.com>
Mon, 8 Sep 2025 00:04:36 +0000 (10:04 +1000)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 12 Sep 2025 00:26:05 +0000 (17:26 -0700)
Add device-private THP support to reverse mapping infrastructure, enabling
proper handling during migration and walk operations.

The key changes are:
- add_migration_pmd()/remove_migration_pmd(): Handle device-private
  entries during folio migration and splitting
- page_vma_mapped_walk(): Recognize device-private THP entries during
  VMA traversal operations

This change supports folio splitting and migration operations on
device-private entries.

Link: https://lkml.kernel.org/r/20250908000448.180088-4-balbirs@nvidia.com
Signed-off-by: Balbir Singh <balbirs@nvidia.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Ying Huang <ying.huang@linux.alibaba.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Lyude Paul <lyude@redhat.com>
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: David Airlie <airlied@gmail.com>
Cc: Simona Vetter <simona@ffwll.ch>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mika Penttilä <mpenttil@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/damon/ops-common.c
mm/huge_memory.c
mm/page_idle.c
mm/page_vma_mapped.c
mm/rmap.c

index 998c5180a603472307c573ddcee8ac0b18770044..eda4de5536119ee1ed2a5492afd4aa4d2b3936b3 100644 (file)
@@ -75,12 +75,24 @@ void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr
 void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       struct folio *folio = damon_get_folio(pmd_pfn(pmdp_get(pmd)));
+       pmd_t pmdval = pmdp_get(pmd);
+       struct folio *folio;
+       bool young = false;
+       unsigned long pfn;
+
+       if (likely(pmd_present(pmdval)))
+               pfn = pmd_pfn(pmdval);
+       else
+               pfn = swp_offset_pfn(pmd_to_swp_entry(pmdval));
 
+       folio = damon_get_folio(pfn);
        if (!folio)
                return;
 
-       if (pmdp_clear_young_notify(vma, addr, pmd))
+       if (likely(pmd_present(pmdval)))
+               young |= pmdp_clear_young_notify(vma, addr, pmd);
+       young |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE);
+       if (young)
                folio_set_young(folio);
 
        folio_set_idle(folio);
@@ -203,7 +215,9 @@ static bool damon_folio_young_one(struct folio *folio,
                                mmu_notifier_test_young(vma->vm_mm, addr);
                } else {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-                       *accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
+                       pmd_t pmd = pmdp_get(pvmw.pmd);
+
+                       *accessed = (pmd_present(pmd) && pmd_young(pmd)) ||
                                !folio_test_idle(folio) ||
                                mmu_notifier_test_young(vma->vm_mm, addr);
 #else
index 03babad18978b94cf452e369a768e635cae1d150..c41aa1718293acf800cab303323c441b15d21563 100644 (file)
@@ -4645,7 +4645,10 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
                return 0;
 
        flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
-       pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
+       if (unlikely(!pmd_present(*pvmw->pmd)))
+               pmdval = pmdp_huge_get_and_clear(vma->vm_mm, address, pvmw->pmd);
+       else
+               pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
 
        /* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */
        anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
@@ -4695,6 +4698,17 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
        entry = pmd_to_swp_entry(*pvmw->pmd);
        folio_get(folio);
        pmde = folio_mk_pmd(folio, READ_ONCE(vma->vm_page_prot));
+
+       if (folio_is_device_private(folio)) {
+               if (pmd_write(pmde))
+                       entry = make_writable_device_private_entry(
+                                                       page_to_pfn(new));
+               else
+                       entry = make_readable_device_private_entry(
+                                                       page_to_pfn(new));
+               pmde = swp_entry_to_pmd(entry);
+       }
+
        if (pmd_swp_soft_dirty(*pvmw->pmd))
                pmde = pmd_mksoft_dirty(pmde);
        if (is_writable_migration_entry(entry))
index a82b340dc20447e760889b588cf71288525a7275..9030c31800ce946cd5506a19b05274c0c42937fc 100644 (file)
@@ -71,8 +71,9 @@ static bool page_idle_clear_pte_refs_one(struct folio *folio,
                                referenced |= ptep_test_and_clear_young(vma, addr, pvmw.pte);
                        referenced |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE);
                } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
-                       if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
-                               referenced = true;
+                       if (likely(pmd_present(pmdp_get(pvmw.pmd))))
+                               referenced |= pmdp_clear_young_notify(vma, addr, pvmw.pmd);
+                       referenced |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE);
                } else {
                        /* unexpected pmd-mapped page? */
                        WARN_ON_ONCE(1);
index e981a1a292d25f303621f3213c69322d2c9e2a59..7ab46a2b4e15df1b5dc13b7108f15d628db40f3c 100644 (file)
@@ -250,12 +250,11 @@ restart:
                        pvmw->ptl = pmd_lock(mm, pvmw->pmd);
                        pmde = *pvmw->pmd;
                        if (!pmd_present(pmde)) {
-                               swp_entry_t entry;
+                               swp_entry_t entry = pmd_to_swp_entry(pmde);
 
                                if (!thp_migration_supported() ||
                                    !(pvmw->flags & PVMW_MIGRATION))
                                        return not_found(pvmw);
-                               entry = pmd_to_swp_entry(pmde);
                                if (!is_migration_entry(entry) ||
                                    !check_pmd(swp_offset_pfn(entry), pvmw))
                                        return not_found(pvmw);
@@ -277,6 +276,15 @@ restart:
                         * cannot return prematurely, while zap_huge_pmd() has
                         * cleared *pmd but not decremented compound_mapcount().
                         */
+                       swp_entry_t entry;
+
+                       entry = pmd_to_swp_entry(pmde);
+
+                       if (is_device_private_entry(entry)) {
+                               pvmw->ptl = pmd_lock(mm, pvmw->pmd);
+                               return true;
+                       }
+
                        if ((pvmw->flags & PVMW_SYNC) &&
                            thp_vma_suitable_order(vma, pvmw->address,
                                                   PMD_ORDER) &&
index 236ceff5b276b66c3b8923742c4bb1c91049caa4..6de1baf7a4f18ff9f67472afcf868ec1ef664646 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1063,8 +1063,10 @@ static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
                } else {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
                        pmd_t *pmd = pvmw->pmd;
-                       pmd_t entry;
+                       pmd_t entry = pmdp_get(pmd);
 
+                       if (!pmd_present(entry))
+                               continue;
                        if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
                                continue;
 
@@ -2330,6 +2332,11 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
        while (page_vma_mapped_walk(&pvmw)) {
                /* PMD-mapped THP migration entry */
                if (!pvmw.pte) {
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+                       unsigned long pfn;
+                       pmd_t pmdval;
+#endif
+
                        if (flags & TTU_SPLIT_HUGE_PMD) {
                                split_huge_pmd_locked(vma, pvmw.address,
                                                      pvmw.pmd, true);
@@ -2338,8 +2345,14 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                                break;
                        }
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
-                       subpage = folio_page(folio,
-                               pmd_pfn(*pvmw.pmd) - folio_pfn(folio));
+                       pmdval = pmdp_get(pvmw.pmd);
+                       if (likely(pmd_present(pmdval)))
+                               pfn = pmd_pfn(pmdval);
+                       else
+                               pfn = swp_offset_pfn(pmd_to_swp_entry(pmdval));
+
+                       subpage = folio_page(folio, pfn - folio_pfn(folio));
+
                        VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
                                        !folio_test_pmd_mappable(folio), folio);