dirty. Again, see sparc64 for examples of how
to deal with this.
+ ``void flush_dcache_pageset(struct pageset *pageset)``
+ This function is called under the same circumstances as
+ flush_dcache_page(). It allows the architecture to
+ optimise flushing the entire pageset in a single operation
+ instead of flushing its pages one at a time.
+
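For example, a caller that has just written into a pageset through the
kernel direct map might follow the pattern sketched below.  The sketch is
illustrative only: ``fs_write_pageset()`` is a made-up name,
``pageset_page()`` comes from this series, and the single ``memcpy()``
assumes a lowmem pageset whose kernel mapping is contiguous::

	static void fs_write_pageset(struct pageset *pageset,
				     const void *src, size_t len)
	{
		/* Write through the kernel mapping... */
		memcpy(page_address(pageset_page(pageset, 0)), src, len);
		/* ...then flush every page of the pageset in one call. */
		flush_dcache_pageset(pageset);
	}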
``void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len)``
``void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
+void flush_dcache_pageset(struct pageset *pageset);
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz);
void dma_cache_inv(phys_addr_t start, unsigned long sz);
*/
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
+void flush_dcache_pageset(struct pageset *pageset);
static inline void flush_kernel_vmap_range(void *addr, int size)
{
SetPageDcacheDirty(page);
}
+void flush_dcache_pageset(struct pageset *pageset);
+
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
+void flush_dcache_pageset(struct pageset *pageset);
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, void *src, int len);
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
unsigned long pfn);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-extern void flush_dcache_page(struct page *page);
+void flush_dcache_page(struct page *page);
+void flush_dcache_pageset(struct pageset *pageset);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
#define flush_cache_vunmap(start, end) flush_cache_all()
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-extern void flush_dcache_page(struct page *page);
+void flush_dcache_page(struct page *page);
+void flush_dcache_pageset(struct pageset *pageset);
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
extern void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-extern void flush_dcache_page(struct page *page);
+void flush_dcache_page(struct page *page);
+void flush_dcache_pageset(struct pageset *pageset);
extern void flush_icache_range(unsigned long start, unsigned long end);
#define flush_icache_user_range flush_icache_range
extern void flush_icache_page(struct vm_area_struct *vma,
#define flush_cache_vunmap(start,end) flush_cache_all()
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-extern void flush_dcache_page(struct page*);
+void flush_dcache_page(struct page *);
+void flush_dcache_pageset(struct pageset *);
void local_flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
static inline void flush_dcache_page(struct page *page)
{
}
+
+static inline void flush_dcache_pageset(struct pageset *pageset) { }
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGESET
#endif
+#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGESET
+void flush_dcache_pageset(struct pageset *pageset);
+#endif
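The ``#ifndef`` above is the opt-in hook: an architecture that can flush a
pageset more efficiently than one page at a time defines
ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGESET in its own asm/cacheflush.h and
supplies its own definition, which suppresses both this declaration and the
generic per-page loop in mm/.  A hypothetical arch fragment (the path is a
placeholder):

	/* arch/<arch>/include/asm/cacheflush.h (hypothetical) */
	#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGESET
	void flush_dcache_pageset(struct pageset *pageset);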
#ifndef flush_dcache_mmap_lock
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);
+
+#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGESET
+void flush_dcache_pageset(struct pageset *pageset)
+{
+ long i, nr = pageset_nr_pages(pageset);
+
+ for (i = 0; i < nr; i++)
+ flush_dcache_page(pageset_page(pageset, i));
+}
+EXPORT_SYMBOL(flush_dcache_pageset);
+#endif
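This generic fallback just iterates, so every architecture that already
implements flush_dcache_page() gets a correct flush_dcache_pageset() with
no extra work; one that can do better may override it with a single range
flush, since a pageset (a compound page) is physically contiguous.  A
hypothetical override (my_flush_dcache_range() is an illustrative
primitive, not an existing kernel API):

	/* Built instead of the loop above when the architecture defines
	 * ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGESET in its asm/cacheflush.h.
	 */
	void flush_dcache_pageset(struct pageset *pageset)
	{
		phys_addr_t start = page_to_phys(pageset_page(pageset, 0));

		/* One writeback+invalidate over the whole physical range;
		 * my_flush_dcache_range() is illustrative only.
		 */
		my_flush_dcache_range(start,
				      pageset_nr_pages(pageset) * PAGE_SIZE);
	}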