mm/rmap: pass vma to __folio_add_rmap()
author David Hildenbrand <david@redhat.com>
Mon, 3 Mar 2025 16:30:01 +0000 (17:30 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Tue, 4 Mar 2025 05:50:43 +0000 (21:50 -0800)
We'll need access to the destination MM when modifying the mapcount of
large folios next.  So pass in the VMA.

Link: https://lkml.kernel.org/r/20250303163014.1128035-9-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Koutný <mkoutny@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zefan Li <lizefan.x@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/rmap.c

index bcec8677f68dfaf93b1504fe62b568dfff988f23..8a7d023b02e0c44cf2cf0279e0a4cf2c05158206 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1242,8 +1242,8 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
 }
 
 static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
-               struct page *page, int nr_pages, enum rmap_level level,
-               int *nr_pmdmapped)
+               struct page *page, int nr_pages, struct vm_area_struct *vma,
+               enum rmap_level level, int *nr_pmdmapped)
 {
        atomic_t *mapped = &folio->_nr_pages_mapped;
        const int orig_nr_pages = nr_pages;
@@ -1411,7 +1411,7 @@ static __always_inline void __folio_add_anon_rmap(struct folio *folio,
 
        VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
 
-       nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped);
+       nr = __folio_add_rmap(folio, page, nr_pages, vma, level, &nr_pmdmapped);
 
        if (likely(!folio_test_ksm(folio)))
                __page_check_anon_rmap(folio, page, vma, address);
@@ -1582,7 +1582,7 @@ static __always_inline void __folio_add_file_rmap(struct folio *folio,
 
        VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
 
-       nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped);
+       nr = __folio_add_rmap(folio, page, nr_pages, vma, level, &nr_pmdmapped);
        __folio_mod_stat(folio, nr, nr_pmdmapped);
 
        /* See comments in folio_add_anon_rmap_*() */
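
For readers following the interface change: the point of passing the VMA is that the callee can reach the destination MM through vma->vm_mm, so no separate mm argument is needed. The following is a minimal, self-contained C sketch of that idea built on simplified stand-in structs, not the real kernel definitions; the function and field names below (other than vm_mm) are hypothetical.

/*
 * Userspace model of the interface change: the callee receives the VMA
 * and derives the MM from it (vma->vm_mm).  All types here are
 * simplified stand-ins, not kernel code.
 */
#include <stdio.h>

struct mm_struct {
	int id;				/* stand-in for the owning MM's identity */
};

struct vm_area_struct {
	struct mm_struct *vm_mm;	/* mirrors the kernel's vma->vm_mm link */
};

struct folio {
	int mapcount;			/* stand-in for the folio's mapcount state */
};

/*
 * Before the change, a callee like this only saw the folio and page range
 * and could not tell which MM the mapping belongs to.  With the VMA passed
 * in, the destination MM is one pointer dereference away.
 */
static unsigned int folio_add_rmap_model(struct folio *folio, int nr_pages,
					 struct vm_area_struct *vma)
{
	struct mm_struct *dst_mm = vma->vm_mm;	/* destination MM, now reachable */

	folio->mapcount += nr_pages;
	printf("mapped %d pages into MM %d\n", nr_pages, dst_mm->id);
	return folio->mapcount;
}

int main(void)
{
	struct mm_struct mm = { .id = 42 };
	struct vm_area_struct vma = { .vm_mm = &mm };
	struct folio folio = { .mapcount = 0 };

	folio_add_rmap_model(&folio, 4, &vma);
	return 0;
}

The design choice this models is the same one the patch makes: rather than threading an extra mm_struct parameter through every caller, the VMA (which callers already have) carries the MM, keeping the __folio_add_rmap() signature compact.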