void (*coherent_kern_range)(unsigned long, unsigned long);
        void (*coherent_user_range)(unsigned long, unsigned long);
-       void (*flush_kern_dcache_page)(void *);
+       void (*flush_kern_dcache_area)(void *, size_t);
 
        void (*dma_inv_range)(const void *, const void *);
        void (*dma_clean_range)(const void *, const void *);
 #define __cpuc_flush_user_range                cpu_cache.flush_user_range
 #define __cpuc_coherent_kern_range     cpu_cache.coherent_kern_range
 #define __cpuc_coherent_user_range     cpu_cache.coherent_user_range
-#define __cpuc_flush_dcache_page       cpu_cache.flush_kern_dcache_page
+#define __cpuc_flush_dcache_area       cpu_cache.flush_kern_dcache_area
 
 /*
  * These are private to the dma-mapping API.  Do not use directly.
 #define __cpuc_flush_user_range                __glue(_CACHE,_flush_user_cache_range)
 #define __cpuc_coherent_kern_range     __glue(_CACHE,_coherent_kern_range)
 #define __cpuc_coherent_user_range     __glue(_CACHE,_coherent_user_range)
-#define __cpuc_flush_dcache_page       __glue(_CACHE,_flush_kern_dcache_page)
+#define __cpuc_flush_dcache_area       __glue(_CACHE,_flush_kern_dcache_area)
 
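Both dispatch paths end up at the same renamed entry point. As a rough illustration (not part of the patch), and assuming the usual token-pasting behaviour of __glue from asm/glue.h, the macro resolves either through the cpu_cache table or directly to the CPU-specific symbol:

        /* Illustration only: how __cpuc_flush_dcache_area(addr, size) resolves. */

        /* Multi-cache kernels: indirect call through the function-pointer table. */
        cpu_cache.flush_kern_dcache_area(addr, size);

        /* Single-cache kernels: __glue pastes the _CACHE prefix onto the name,   */
        /* so with _CACHE == v7 the macro becomes a direct call such as:          */
        v7_flush_kern_dcache_area(addr, size);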
 extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
 extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
-extern void __cpuc_flush_dcache_page(void *);
+extern void __cpuc_flush_dcache_area(void *, size_t);
 
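With the size argument in the prototype, callers state explicitly how much to flush rather than implying one page. A hedged before/after example of the calling convention (buf and len are illustrative names, not taken from the patch):

        /* Old interface: always one page, address assumed page aligned. */
        __cpuc_flush_dcache_page(page_address(page));

        /* New interface: an explicit region, so a full page and a smaller */
        /* kernel-space buffer are both expressible.                        */
        __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
        __cpuc_flush_dcache_area(buf, len);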
 /*
  * These are private to the dma-mapping API.  Do not use directly.
 {
        /* highmem pages are always flushed upon kunmap already */
        if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
-               __cpuc_flush_dcache_page(page_address(page));
+               __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
 
 #define flush_dcache_mmap_lock(mapping) \
 
        mov     pc, lr
 
 /*
- *     flush_kern_dcache_page(kaddr)
+ *     flush_kern_dcache_area(void *addr, size_t size)
  *
- *     Ensure that the data held in the page kaddr is written back
- *     to the page in question.
+ *     Ensure that the data held in the region described by "addr"
+ *     and "size" is written back to memory.
  *
- *     - kaddr   - kernel address (guaranteed to be page aligned)
+ *     - addr  - kernel address
+ *     - size  - size of region
  */
-ENTRY(fa_flush_kern_dcache_page)
-       add     r1, r0, #PAGE_SZ
+ENTRY(fa_flush_kern_dcache_area)
+       add     r1, r0, r1
 1:     mcr     p15, 0, r0, c7, c14, 1          @ clean & invalidate D line
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
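The FA implementation above, and most of the proc-* implementations below, share the same loop shape: advance from addr one D-cache line at a time, cleaning and invalidating each line, until addr + size is reached. A rough C equivalent (a sketch only; clean_and_invalidate_dline() and drain_write_buffer() are stand-ins for the coprocessor operations):

        void *end = addr + size;                  /* add  r1, r0, r1 */
        do {
                clean_and_invalidate_dline(addr); /* mcr p15, 0, r0, c7, c14, 1 */
                addr += CACHE_DLINESIZE;
        } while (addr < end);
        drain_write_buffer();                     /* on cores that need it */

Replacing the old #PAGE_SZ bound with the size in r1 is the whole functional change in these routines; the loop bodies are untouched.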
        .long   fa_flush_user_cache_range
        .long   fa_coherent_kern_range
        .long   fa_coherent_user_range
-       .long   fa_flush_kern_dcache_page
+       .long   fa_flush_kern_dcache_area
        .long   fa_dma_inv_range
        .long   fa_dma_clean_range
        .long   fa_dma_flush_range
 
        mov     pc, lr
 
 /*
- *     flush_kern_dcache_page(void *page)
+ *     flush_kern_dcache_area(void *addr, size_t size)
  *
  *     Ensure no D cache aliasing occurs, either with itself or
  *     the I cache
  *
- *     - addr  - page aligned address
+ *     - addr  - kernel address
+ *     - size  - region size
  */
-ENTRY(v3_flush_kern_dcache_page)
+ENTRY(v3_flush_kern_dcache_area)
        /* FALLTHROUGH */
 
 /*
        .long   v3_flush_user_cache_range
        .long   v3_coherent_kern_range
        .long   v3_coherent_user_range
-       .long   v3_flush_kern_dcache_page
+       .long   v3_flush_kern_dcache_area
        .long   v3_dma_inv_range
        .long   v3_dma_clean_range
        .long   v3_dma_flush_range
 
        mov     pc, lr
 
 /*
- *     flush_kern_dcache_page(void *page)
+ *     flush_kern_dcache_area(void *addr, size_t size)
  *
  *     Ensure no D cache aliasing occurs, either with itself or
  *     the I cache
  *
- *     - addr  - page aligned address
+ *     - addr  - kernel address
+ *     - size  - region size
  */
-ENTRY(v4_flush_kern_dcache_page)
+ENTRY(v4_flush_kern_dcache_area)
        /* FALLTHROUGH */
 
 /*
        .long   v4_flush_user_cache_range
        .long   v4_coherent_kern_range
        .long   v4_coherent_user_range
-       .long   v4_flush_kern_dcache_page
+       .long   v4_flush_kern_dcache_area
        .long   v4_dma_inv_range
        .long   v4_dma_clean_range
        .long   v4_dma_flush_range
 
        mov     pc, lr
 
 /*
- *     flush_kern_dcache_page(void *page)
+ *     flush_kern_dcache_area(void *addr, size_t size)
  *
  *     Ensure no D cache aliasing occurs, either with itself or
  *     the I cache
  *
- *     - addr  - page aligned address
+ *     - addr  - kernel address
+ *     - size  - region size
  */
-ENTRY(v4wb_flush_kern_dcache_page)
-       add     r1, r0, #PAGE_SZ
+ENTRY(v4wb_flush_kern_dcache_area)
+       add     r1, r0, r1
        /* fall through */
 
 /*
        .long   v4wb_flush_user_cache_range
        .long   v4wb_coherent_kern_range
        .long   v4wb_coherent_user_range
-       .long   v4wb_flush_kern_dcache_page
+       .long   v4wb_flush_kern_dcache_area
        .long   v4wb_dma_inv_range
        .long   v4wb_dma_clean_range
        .long   v4wb_dma_flush_range
 
        mov     pc, lr
 
 /*
- *     flush_kern_dcache_page(void *page)
+ *     flush_kern_dcache_area(void *addr, size_t size)
  *
  *     Ensure no D cache aliasing occurs, either with itself or
  *     the I cache
  *
- *     - addr  - page aligned address
+ *     - addr  - kernel address
+ *     - size  - region size
  */
-ENTRY(v4wt_flush_kern_dcache_page)
+ENTRY(v4wt_flush_kern_dcache_area)
        mov     r2, #0
        mcr     p15, 0, r2, c7, c5, 0           @ invalidate I cache
-       add     r1, r0, #PAGE_SZ
+       add     r1, r0, r1
        /* fallthrough */
 
 /*
        .long   v4wt_flush_user_cache_range
        .long   v4wt_coherent_kern_range
        .long   v4wt_coherent_user_range
-       .long   v4wt_flush_kern_dcache_page
+       .long   v4wt_flush_kern_dcache_area
        .long   v4wt_dma_inv_range
        .long   v4wt_dma_clean_range
        .long   v4wt_dma_flush_range
 
 ENDPROC(v6_coherent_kern_range)
 
 /*
- *     v6_flush_kern_dcache_page(kaddr)
+ *     v6_flush_kern_dcache_area(void *addr, size_t size)
  *
- *     Ensure that the data held in the page kaddr is written back
- *     to the page in question.
+ *     Ensure that the data held in the region described by "addr"
+ *     and "size" is written back to memory.
  *
- *     - kaddr   - kernel address (guaranteed to be page aligned)
+ *     - addr  - kernel address
+ *     - size  - region size
  */
-ENTRY(v6_flush_kern_dcache_page)
-       add     r1, r0, #PAGE_SZ
+ENTRY(v6_flush_kern_dcache_area)
+       add     r1, r0, r1
 1:
 #ifdef HARVARD_CACHE
        mcr     p15, 0, r0, c7, c14, 1          @ clean & invalidate D line
        .long   v6_flush_user_cache_range
        .long   v6_coherent_kern_range
        .long   v6_coherent_user_range
-       .long   v6_flush_kern_dcache_page
+       .long   v6_flush_kern_dcache_area
        .long   v6_dma_inv_range
        .long   v6_dma_clean_range
        .long   v6_dma_flush_range
 
 ENDPROC(v7_coherent_user_range)
 
 /*
- *     v7_flush_kern_dcache_page(kaddr)
+ *     v7_flush_kern_dcache_area(void *addr, size_t size)
  *
- *     Ensure that the data held in the page kaddr is written back
- *     to the page in question.
+ *     Ensure that the data held in the region described by "addr"
+ *     and "size" is written back to memory.
  *
- *     - kaddr   - kernel address (guaranteed to be page aligned)
+ *     - addr  - kernel address
+ *     - size  - region size
  */
-ENTRY(v7_flush_kern_dcache_page)
+ENTRY(v7_flush_kern_dcache_area)
        dcache_line_size r2, r3
-       add     r1, r0, #PAGE_SZ
+       add     r1, r0, r1
 1:
        mcr     p15, 0, r0, c7, c14, 1          @ clean & invalidate D line / unified line
        add     r0, r0, r2
        blo     1b
        dsb
        mov     pc, lr
-ENDPROC(v7_flush_kern_dcache_page)
+ENDPROC(v7_flush_kern_dcache_area)
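v7 differs from the fixed-line-size cores only in how the stride is obtained: the dcache_line_size macro determines the D-cache line size at run time (derived from the Cache Type Register, as far as I can tell), so the same code works across parts with different geometries. A sketch in the same style as above, with read_dcache_line_size() standing in for that macro:

        size_t line = read_dcache_line_size();    /* dcache_line_size r2, r3 */
        void *end = addr + size;
        do {
                clean_and_invalidate_dline(addr); /* DCCIMVAC: c7, c14, 1 */
                addr += line;
        } while (addr < end);
        dsb();                                    /* ensure completion before returning */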
 
 /*
  *     v7_dma_inv_range(start,end)
        .long   v7_flush_user_cache_range
        .long   v7_coherent_kern_range
        .long   v7_coherent_user_range
-       .long   v7_flush_kern_dcache_page
+       .long   v7_flush_kern_dcache_area
        .long   v7_dma_inv_range
        .long   v7_dma_clean_range
        .long   v7_dma_flush_range
 
         */
        if (addr)
 #endif
-               __cpuc_flush_dcache_page(addr);
+               __cpuc_flush_dcache_area(addr, PAGE_SIZE);
 
        /*
         * If this is a page cache page, and we have an aliasing VIPT cache,
         * in this mapping of the page.  FIXME: this is overkill
         * since we actually ask for a write-back and invalidate.
         */
-       __cpuc_flush_dcache_page(page_address(page));
+       __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
 
        unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
 
        if (kvaddr >= (void *)FIXADDR_START) {
-               __cpuc_flush_dcache_page((void *)vaddr);
+               __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
                BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
                set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
 
 
 void flush_dcache_page(struct page *page)
 {
-       __cpuc_flush_dcache_page(page_address(page));
+       __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
 EXPORT_SYMBOL(flush_dcache_page);
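For context, flush_dcache_page() is the higher-level hook the VM and drivers call after writing to a page that may also be mapped into user space; the definition shown here simply delegates to the new area flush for the whole page. A hedged usage sketch (src and len are illustrative):

        void *kaddr = kmap(page);
        memcpy(kaddr, src, len);        /* modify the page through the kernel mapping */
        kunmap(page);
        flush_dcache_page(page);        /* make the update visible to user mappings */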
 
 
        mov     pc, lr
 
 /*
- *     flush_kern_dcache_page(void *page)
+ *     flush_kern_dcache_area(void *addr, size_t size)
  *
  *     Ensure no D cache aliasing occurs, either with itself or
  *     the I cache
  *
- *     - page  - page aligned address
+ *     - addr  - kernel address
+ *     - size  - region size
  */
-ENTRY(arm1020_flush_kern_dcache_page)
+ENTRY(arm1020_flush_kern_dcache_area)
        mov     ip, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-       add     r1, r0, #PAGE_SZ
+       add     r1, r0, r1
 1:     mcr     p15, 0, r0, c7, c14, 1          @ clean+invalidate D entry
        mcr     p15, 0, ip, c7, c10, 4          @ drain WB
        add     r0, r0, #CACHE_DLINESIZE
        .long   arm1020_flush_user_cache_range
        .long   arm1020_coherent_kern_range
        .long   arm1020_coherent_user_range
-       .long   arm1020_flush_kern_dcache_page
+       .long   arm1020_flush_kern_dcache_area
        .long   arm1020_dma_inv_range
        .long   arm1020_dma_clean_range
        .long   arm1020_dma_flush_range
 
        mov     pc, lr
 
 /*
- *     flush_kern_dcache_page(void *page)
+ *     flush_kern_dcache_area(void *addr, size_t size)
  *
  *     Ensure no D cache aliasing occurs, either with itself or
  *     the I cache
  *
- *     - page  - page aligned address
+ *     - addr  - kernel address
+ *     - size  - region size
  */
-ENTRY(arm1020e_flush_kern_dcache_page)
+ENTRY(arm1020e_flush_kern_dcache_area)
        mov     ip, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-       add     r1, r0, #PAGE_SZ
+       add     r1, r0, r1
 1:     mcr     p15, 0, r0, c7, c14, 1          @ clean+invalidate D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        .long   arm1020e_flush_user_cache_range
        .long   arm1020e_coherent_kern_range
        .long   arm1020e_coherent_user_range
-       .long   arm1020e_flush_kern_dcache_page
+       .long   arm1020e_flush_kern_dcache_area
        .long   arm1020e_dma_inv_range
        .long   arm1020e_dma_clean_range
        .long   arm1020e_dma_flush_range
 
        mov     pc, lr
 
 /*
- *     flush_kern_dcache_page(void *page)
+ *     flush_kern_dcache_area(void *addr, size_t size)
  *
  *     Ensure no D cache aliasing occurs, either with itself or
  *     the I cache
  *
- *     - page  - page aligned address
+ *     - addr  - kernel address
+ *     - size  - region size
  */
-ENTRY(arm1022_flush_kern_dcache_page)
+ENTRY(arm1022_flush_kern_dcache_area)
        mov     ip, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-       add     r1, r0, #PAGE_SZ
+       add     r1, r0, r1
 1:     mcr     p15, 0, r0, c7, c14, 1          @ clean+invalidate D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        .long   arm1022_flush_user_cache_range
        .long   arm1022_coherent_kern_range
        .long   arm1022_coherent_user_range
-       .long   arm1022_flush_kern_dcache_page
+       .long   arm1022_flush_kern_dcache_area
        .long   arm1022_dma_inv_range
        .long   arm1022_dma_clean_range
        .long   arm1022_dma_flush_range
 
        mov     pc, lr
 
 /*
- *     flush_kern_dcache_page(void *page)
+ *     flush_kern_dcache_area(void *addr, size_t size)
  *
  *     Ensure no D cache aliasing occurs, either with itself or
  *     the I cache
  *
- *     - page  - page aligned address
+ *     - addr  - kernel address
+ *     - size  - region size
  */
-ENTRY(arm1026_flush_kern_dcache_page)
+ENTRY(arm1026_flush_kern_dcache_area)
        mov     ip, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-       add     r1, r0, #PAGE_SZ
+       add     r1, r0, r1
 1:     mcr     p15, 0, r0, c7, c14, 1          @ clean+invalidate D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        .long   arm1026_flush_user_cache_range
        .long   arm1026_coherent_kern_range
        .long   arm1026_coherent_user_range
-       .long   arm1026_flush_kern_dcache_page
+       .long   arm1026_flush_kern_dcache_area
        .long   arm1026_dma_inv_range
        .long   arm1026_dma_clean_range
        .long   arm1026_dma_flush_range
 
        mov     pc, lr
 
 /*
- *     flush_kern_dcache_page(void *page)
+ *     flush_kern_dcache_area(void *addr, size_t size)
  *
  *     Ensure no D cache aliasing occurs, either with itself or
  *     the I cache
  *
- *     - addr  - page aligned address
+ *     - addr  - kernel address
+ *     - size  - region size
  */
-ENTRY(arm920_flush_kern_dcache_page)
-       add     r1, r0, #PAGE_SZ
+ENTRY(arm920_flush_kern_dcache_area)
+       add     r1, r0, r1
 1:     mcr     p15, 0, r0, c7, c14, 1          @ clean+invalidate D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        .long   arm920_flush_user_cache_range
        .long   arm920_coherent_kern_range
        .long   arm920_coherent_user_range
-       .long   arm920_flush_kern_dcache_page
+       .long   arm920_flush_kern_dcache_area
        .long   arm920_dma_inv_range
        .long   arm920_dma_clean_range
        .long   arm920_dma_flush_range
 
        mov     pc, lr
 
 /*
- *     flush_kern_dcache_page(void *page)
+ *     flush_kern_dcache_area(void *addr, size_t size)
  *
  *     Ensure no D cache aliasing occurs, either with itself or
  *     the I cache
  *
- *     - addr  - page aligned address
+ *     - addr  - kernel address
+ *     - size  - region size
  */
-ENTRY(arm922_flush_kern_dcache_page)
-       add     r1, r0, #PAGE_SZ
+ENTRY(arm922_flush_kern_dcache_area)
+       add     r1, r0, r1
 1:     mcr     p15, 0, r0, c7, c14, 1          @ clean+invalidate D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        .long   arm922_flush_user_cache_range
        .long   arm922_coherent_kern_range
        .long   arm922_coherent_user_range
-       .long   arm922_flush_kern_dcache_page
+       .long   arm922_flush_kern_dcache_area
        .long   arm922_dma_inv_range
        .long   arm922_dma_clean_range
        .long   arm922_dma_flush_range
 
        mov     pc, lr
 
 /*
- *     flush_kern_dcache_page(void *page)
+ *     flush_kern_dcache_area(void *addr, size_t size)
  *
  *     Ensure no D cache aliasing occurs, either with itself or
  *     the I cache
  *
- *     - addr  - page aligned address
+ *     - addr  - kernel address
+ *     - size  - region size
  */
-ENTRY(arm925_flush_kern_dcache_page)
-       add     r1, r0, #PAGE_SZ
+ENTRY(arm925_flush_kern_dcache_area)
+       add     r1, r0, r1
 1:     mcr     p15, 0, r0, c7, c14, 1          @ clean+invalidate D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        .long   arm925_flush_user_cache_range
        .long   arm925_coherent_kern_range
        .long   arm925_coherent_user_range
-       .long   arm925_flush_kern_dcache_page
+       .long   arm925_flush_kern_dcache_area
        .long   arm925_dma_inv_range
        .long   arm925_dma_clean_range
        .long   arm925_dma_flush_range
 
        mov     pc, lr
 
 /*
- *     flush_kern_dcache_page(void *page)
+ *     flush_kern_dcache_area(void *addr, size_t size)
  *
  *     Ensure no D cache aliasing occurs, either with itself or
  *     the I cache
  *
- *     - addr  - page aligned address
+ *     - addr  - kernel address
+ *     - size  - region size
  */
-ENTRY(arm926_flush_kern_dcache_page)
-       add     r1, r0, #PAGE_SZ
+ENTRY(arm926_flush_kern_dcache_area)
+       add     r1, r0, r1
 1:     mcr     p15, 0, r0, c7, c14, 1          @ clean+invalidate D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        .long   arm926_flush_user_cache_range
        .long   arm926_coherent_kern_range
        .long   arm926_coherent_user_range
-       .long   arm926_flush_kern_dcache_page
+       .long   arm926_flush_kern_dcache_area
        .long   arm926_dma_inv_range
        .long   arm926_dma_clean_range
        .long   arm926_dma_flush_range
 
        /* FALLTHROUGH */
 
 /*
- *     flush_kern_dcache_page(void *page)
+ *     flush_kern_dcache_area(void *addr, size_t size)
  *
  *     Ensure no D cache aliasing occurs, either with itself or
  *     the I cache
  *
- *     - addr  - page aligned address
+ *     - addr  - kernel address
+ *     - size  - region size
  */
-ENTRY(arm940_flush_kern_dcache_page)
+ENTRY(arm940_flush_kern_dcache_area)
        mov     ip, #0
        mov     r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
 1:     orr     r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
        .long   arm940_flush_user_cache_range
        .long   arm940_coherent_kern_range
        .long   arm940_coherent_user_range
-       .long   arm940_flush_kern_dcache_page
+       .long   arm940_flush_kern_dcache_area
        .long   arm940_dma_inv_range
        .long   arm940_dma_clean_range
        .long   arm940_dma_flush_range
 
        mov     pc, lr
 
 /*
- *     flush_kern_dcache_page(void *page)
+ *     flush_kern_dcache_area(void *addr, size_t size)
  *
  *     Ensure no D cache aliasing occurs, either with itself or
  *     the I cache
  *
- *     - addr  - page aligned address
+ *     - addr  - kernel address
+ *     - size  - region size
  * (same as arm926)
  */
-ENTRY(arm946_flush_kern_dcache_page)
-       add     r1, r0, #PAGE_SZ
+ENTRY(arm946_flush_kern_dcache_area)
+       add     r1, r0, r1
 1:     mcr     p15, 0, r0, c7, c14, 1          @ clean+invalidate D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        .long   arm946_flush_user_cache_range
        .long   arm946_coherent_kern_range
        .long   arm946_coherent_user_range
-       .long   arm946_flush_kern_dcache_page
+       .long   arm946_flush_kern_dcache_area
        .long   arm946_dma_inv_range
        .long   arm946_dma_clean_range
        .long   arm946_dma_flush_range
 
        mov     pc, lr
 
 /*
- *     flush_kern_dcache_page(void *page)
+ *     flush_kern_dcache_area(void *addr, size_t size)
  *
  *     Ensure no D cache aliasing occurs, either with itself or
  *     the I cache
  *
- *     - addr  - page aligned address
+ *     - addr  - kernel address
+ *     - size  - region size
  */
        .align  5
-ENTRY(feroceon_flush_kern_dcache_page)
-       add     r1, r0, #PAGE_SZ
+ENTRY(feroceon_flush_kern_dcache_area)
+       add     r1, r0, r1
 1:     mcr     p15, 0, r0, c7, c14, 1          @ clean+invalidate D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        mov     pc, lr
 
        .align  5
-ENTRY(feroceon_range_flush_kern_dcache_page)
+ENTRY(feroceon_range_flush_kern_dcache_area)
        mrs     r2, cpsr
-       add     r1, r0, #PAGE_SZ - CACHE_DLINESIZE      @ top addr is inclusive
+       add     r1, r0, r1
+       sub     r1, r1, #CACHE_DLINESIZE                @ top addr is inclusive
        orr     r3, r2, #PSR_I_BIT
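The range variant hands the hardware a start address and an inclusive top address rather than looping line by line, which is why the end pointer is pulled back by one cache line after adding the size. A sketch of the address arithmetic, matching the correction above (clean_and_invalidate_dlines() is a stand-in for the range operation issued with interrupts masked):

        void *start = addr;
        void *top   = addr + size - CACHE_DLINESIZE;   /* last line, inclusive */
        clean_and_invalidate_dlines(start, top);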
        .long   feroceon_flush_user_cache_range
        .long   feroceon_coherent_kern_range
        .long   feroceon_coherent_user_range
-       .long   feroceon_flush_kern_dcache_page
+       .long   feroceon_flush_kern_dcache_area
        .long   feroceon_dma_inv_range
        .long   feroceon_dma_clean_range
        .long   feroceon_dma_flush_range
        .long   feroceon_flush_user_cache_range
        .long   feroceon_coherent_kern_range
        .long   feroceon_coherent_user_range
-       .long   feroceon_range_flush_kern_dcache_page
+       .long   feroceon_range_flush_kern_dcache_area
        .long   feroceon_range_dma_inv_range
        .long   feroceon_range_dma_clean_range
        .long   feroceon_range_dma_flush_range
 
        mov     pc, lr
 
 /*
- *     flush_kern_dcache_page(void *page)
+ *     flush_kern_dcache_area(void *addr, size_t size)
  *
  *     Ensure no D cache aliasing occurs, either with itself or
  *     the I cache
  *
- *     - addr  - page aligned address
+ *     - addr  - kernel address
+ *     - size  - region size
  */
-ENTRY(mohawk_flush_kern_dcache_page)
-       add     r1, r0, #PAGE_SZ
+ENTRY(mohawk_flush_kern_dcache_area)
+       add     r1, r0, r1
 1:     mcr     p15, 0, r0, c7, c14, 1          @ clean+invalidate D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        .long   mohawk_flush_user_cache_range
        .long   mohawk_coherent_kern_range
        .long   mohawk_coherent_user_range
-       .long   mohawk_flush_kern_dcache_page
+       .long   mohawk_flush_kern_dcache_area
        .long   mohawk_dma_inv_range
        .long   mohawk_dma_clean_range
        .long   mohawk_dma_flush_range
 
 EXPORT_SYMBOL(__cpuc_flush_user_all);
 EXPORT_SYMBOL(__cpuc_flush_user_range);
 EXPORT_SYMBOL(__cpuc_coherent_kern_range);
-EXPORT_SYMBOL(__cpuc_flush_dcache_page);
+EXPORT_SYMBOL(__cpuc_flush_dcache_area);
 #else
 EXPORT_SYMBOL(cpu_cache);
 #endif
 
        mov     pc, lr
 
 /*
- *     flush_kern_dcache_page(void *page)
+ *     flush_kern_dcache_area(void *addr, size_t size)
  *
  *     Ensure no D cache aliasing occurs, either with itself or
  *     the I cache.
  *
- *     - addr  - page aligned address
+ *     - addr  - kernel address
+ *     - size  - region size
  */
-ENTRY(xsc3_flush_kern_dcache_page)
-       add     r1, r0, #PAGE_SZ
+ENTRY(xsc3_flush_kern_dcache_area)
+       add     r1, r0, r1
 1:     mcr     p15, 0, r0, c7, c14, 1          @ clean/invalidate L1 D line
        add     r0, r0, #CACHELINESIZE
        cmp     r0, r1
        .long   xsc3_flush_user_cache_range
        .long   xsc3_coherent_kern_range
        .long   xsc3_coherent_user_range
-       .long   xsc3_flush_kern_dcache_page
+       .long   xsc3_flush_kern_dcache_area
        .long   xsc3_dma_inv_range
        .long   xsc3_dma_clean_range
        .long   xsc3_dma_flush_range
 
        mov     pc, lr
 
 /*
- *     flush_kern_dcache_page(void *page)
+ *     flush_kern_dcache_area(void *addr, size_t size)
  *
  *     Ensure no D cache aliasing occurs, either with itself or
  *     the I cache
  *
- *     - addr  - page aligned address
+ *     - addr  - kernel address
+ *     - size  - region size
  */
-ENTRY(xscale_flush_kern_dcache_page)
-       add     r1, r0, #PAGE_SZ
+ENTRY(xscale_flush_kern_dcache_area)
+       add     r1, r0, r1
 1:     mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
        add     r0, r0, #CACHELINESIZE
        .long   xscale_flush_user_cache_range
        .long   xscale_coherent_kern_range
        .long   xscale_coherent_user_range
-       .long   xscale_flush_kern_dcache_page
+       .long   xscale_flush_kern_dcache_area
        .long   xscale_dma_inv_range
        .long   xscale_dma_clean_range
        .long   xscale_dma_flush_range
        .long   xscale_flush_user_cache_range
        .long   xscale_coherent_kern_range
        .long   xscale_coherent_user_range
-       .long   xscale_flush_kern_dcache_page
+       .long   xscale_flush_kern_dcache_area
        .long   xscale_dma_flush_range
        .long   xscale_dma_clean_range
        .long   xscale_dma_flush_range