www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mips: mm: convert __flush_dcache_pages() to __flush_dcache_folio_pages()
author David Hildenbrand <david@redhat.com>
Mon, 1 Sep 2025 15:03:42 +0000 (17:03 +0200)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 12 Sep 2025 00:25:34 +0000 (17:25 -0700)
Let's make it clearer that we are operating within a single folio by
providing both the folio and the page.

This implies that for flush_dcache_folio() we'll now avoid one more
page->folio lookup, and that we can safely drop the "nth_page" usage.

While at it, drop the "extern" from the function declaration.

Link: https://lkml.kernel.org/r/20250901150359.867252-22-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/mips/include/asm/cacheflush.h
arch/mips/mm/cache.c

index 5d283ef89d90d61802693e1679762d9236c42949..5099c1b65a584deebeb3dccc78bce3327fc947b0 100644 (file)
@@ -50,13 +50,14 @@ extern void (*flush_cache_mm)(struct mm_struct *mm);
 extern void (*flush_cache_range)(struct vm_area_struct *vma,
        unsigned long start, unsigned long end);
 extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
-extern void __flush_dcache_pages(struct page *page, unsigned int nr);
+void __flush_dcache_folio_pages(struct folio *folio, struct page *page, unsigned int nr);
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 static inline void flush_dcache_folio(struct folio *folio)
 {
        if (cpu_has_dc_aliases)
-               __flush_dcache_pages(&folio->page, folio_nr_pages(folio));
+               __flush_dcache_folio_pages(folio, folio_page(folio, 0),
+                                          folio_nr_pages(folio));
        else if (!cpu_has_ic_fills_f_dc)
                folio_set_dcache_dirty(folio);
 }
@@ -64,10 +65,12 @@ static inline void flush_dcache_folio(struct folio *folio)
 
 static inline void flush_dcache_page(struct page *page)
 {
+       struct folio *folio = page_folio(page);
+
        if (cpu_has_dc_aliases)
-               __flush_dcache_pages(page, 1);
+               __flush_dcache_folio_pages(folio, page, 1);
        else if (!cpu_has_ic_fills_f_dc)
-               folio_set_dcache_dirty(page_folio(page));
+               folio_set_dcache_dirty(folio);
 }
 
 #define flush_dcache_mmap_lock(mapping)                do { } while (0)
index bf9a37c60e9f091ad3012d398cbe16856b8552e5..e3b4224c9a4061764897cbeae0bb51e2adc9d74a 100644 (file)
@@ -99,9 +99,9 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
        return 0;
 }
 
-void __flush_dcache_pages(struct page *page, unsigned int nr)
+void __flush_dcache_folio_pages(struct folio *folio, struct page *page,
+               unsigned int nr)
 {
-       struct folio *folio = page_folio(page);
        struct address_space *mapping = folio_flush_mapping(folio);
        unsigned long addr;
        unsigned int i;
@@ -117,12 +117,12 @@ void __flush_dcache_pages(struct page *page, unsigned int nr)
         * get faulted into the tlb (and thus flushed) anyways.
         */
        for (i = 0; i < nr; i++) {
-               addr = (unsigned long)kmap_local_page(nth_page(page, i));
+               addr = (unsigned long)kmap_local_page(page + i);
                flush_data_cache_page(addr);
                kunmap_local((void *)addr);
        }
 }
-EXPORT_SYMBOL(__flush_dcache_pages);
+EXPORT_SYMBOL(__flush_dcache_folio_pages);
 
 void __flush_anon_page(struct page *page, unsigned long vmaddr)
 {