extern void (*flush_cache_range)(struct vm_area_struct *vma,
	unsigned long start, unsigned long end);
extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
-extern void __flush_dcache_pages(struct page *page, unsigned int nr);
+void __flush_dcache_folio_pages(struct folio *folio, struct page *page, unsigned int nr);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_folio(struct folio *folio)
{
	if (cpu_has_dc_aliases)
-		__flush_dcache_pages(&folio->page, folio_nr_pages(folio));
+		__flush_dcache_folio_pages(folio, folio_page(folio, 0),
+					   folio_nr_pages(folio));
	else if (!cpu_has_ic_fills_f_dc)
		folio_set_dcache_dirty(folio);
}
static inline void flush_dcache_page(struct page *page)
{
+	struct folio *folio = page_folio(page);
+
	if (cpu_has_dc_aliases)
-		__flush_dcache_pages(page, 1);
+		__flush_dcache_folio_pages(folio, page, 1);
	else if (!cpu_has_ic_fills_f_dc)
-		folio_set_dcache_dirty(page_folio(page));
+		folio_set_dcache_dirty(folio);
}
#define flush_dcache_mmap_lock(mapping) do { } while (0)
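
For reference, a minimal caller sketch (not part of the patch): example_fill_folio() is hypothetical, and only flush_dcache_folio() above is real. It shows the usual pattern of dirtying a folio through a kernel-side mapping and then flushing, so that user mappings on aliasing D-caches observe the writes.

/*
 * Hypothetical illustration only: write to a folio via its kernel
 * mapping, then call the real flush_dcache_folio() declared above so
 * aliasing D-caches stay coherent with later user mappings.
 */
#include <linux/cacheflush.h>
#include <linux/highmem.h>
#include <linux/string.h>

static void example_fill_folio(struct folio *folio)
{
	void *kaddr = kmap_local_folio(folio, 0);	/* map the first page */

	memset(kaddr, 0, PAGE_SIZE);			/* dirty the kernel alias */
	kunmap_local(kaddr);
	flush_dcache_folio(folio);			/* flush now, or mark dirty for later */
}
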
	return 0;
}
-void __flush_dcache_pages(struct page *page, unsigned int nr)
+void __flush_dcache_folio_pages(struct folio *folio, struct page *page,
+		unsigned int nr)
{
-	struct folio *folio = page_folio(page);
	struct address_space *mapping = folio_flush_mapping(folio);
	unsigned long addr;
	unsigned int i;
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
	for (i = 0; i < nr; i++) {
-		addr = (unsigned long)kmap_local_page(nth_page(page, i));
+		addr = (unsigned long)kmap_local_page(page + i);
		flush_data_cache_page(addr);
		kunmap_local((void *)addr);
	}
}
-EXPORT_SYMBOL(__flush_dcache_pages);
+EXPORT_SYMBOL(__flush_dcache_folio_pages);
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
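
The loop above can switch from nth_page(page, i) to plain page + i because the struct pages of a single folio are always contiguous in the memmap. Passing the folio alongside the page also spares the callee a page_folio() lookup and lets a caller flush just a sub-range of a large folio. A hedged usage sketch: flush_folio_subrange() is hypothetical, the rest mirrors the inline helpers above.

/*
 * Hypothetical caller (not in the patch): flush nr pages of a large
 * folio starting at page index start, dispatching the same way the
 * flush_dcache_folio() helper above does.
 */
static void flush_folio_subrange(struct folio *folio, unsigned int start,
				 unsigned int nr)
{
	if (cpu_has_dc_aliases)
		__flush_dcache_folio_pages(folio, folio_page(folio, start), nr);
	else if (!cpu_has_ic_fills_f_dc)
		folio_set_dcache_dirty(folio);	/* defer the flush */
}
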