#define kmap_prot              PAGE_KERNEL
 
-#define flush_cache_kmaps()    flush_cache_all()
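+/*
+ * The VIVT cache of a highmem page must be flushed before the page is
+ * unmapped, while a VIPT cache gets explicit maintenance when needed,
+ * so only VIVT requires a wholesale flush here.
+ */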
+#define flush_cache_kmaps() \
+       do { \
+               if (cache_is_vivt()) \
+                       flush_cache_all(); \
+       } while (0)
 
 extern pte_t *pkmap_page_table;
 
 extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);
 
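+/*
+ * Map/unmap a highmem page through a transient per-CPU fixmap slot so
+ * that cache maintenance can be performed on a VIPT cache.  The pair
+ * may nest; the previous fixmap entry is carried through *saved_pte.
+ */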
+extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
+extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
+
+/*
+ * The following functions are already defined by <linux/highmem.h>
+ * when CONFIG_HIGHMEM is not set.
+ */
+#ifdef CONFIG_HIGHMEM
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
 extern void *kmap_atomic(struct page *page, enum km_type type);
 extern void kunmap_atomic(void *kvaddr, enum km_type type);
 extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
 extern struct page *kmap_atomic_to_page(const void *ptr);
+#endif
 
 #endif
 
        kfrom = kmap_atomic(from, KM_USER0);
        kto = kmap_atomic(to, KM_USER1);
        copy_page(kto, kfrom);
-#ifdef CONFIG_HIGHMEM
-       /*
-        * kmap_atomic() doesn't set the page virtual address, and
-        * kunmap_atomic() takes care of cache flushing already.
-        */
-       if (page_address(to) != NULL)
-#endif
-               __cpuc_flush_dcache_area(kto, PAGE_SIZE);
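+       /* kunmap_atomic() flushes only VIVT caches, so flush the alias here */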
+       __cpuc_flush_dcache_area(kto, PAGE_SIZE);
        kunmap_atomic(kto, KM_USER1);
        kunmap_atomic(kfrom, KM_USER0);
 }
 
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
+#include <asm/highmem.h>
 #include <asm/smp_plat.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
-       void *addr = page_address(page);
-
        /*
         * Writeback any data associated with the kernel mapping of this
         * page.  This ensures that data in the physical page is mutually
         * coherent with the kernel's mapping.
         */
-#ifdef CONFIG_HIGHMEM
-       /*
-        * kmap_atomic() doesn't set the page virtual address, and
-        * kunmap_atomic() takes care of cache flushing already.
-        */
-       if (addr)
-#endif
-               __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+       if (!PageHighMem(page)) {
+               __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+       } else {
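+               /* pin and reuse an existing kmap of this page, if any */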
+               void *addr = kmap_high_get(page);
+               if (addr) {
+                       __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+                       kunmap_high(page);
+               } else if (cache_is_vipt()) {
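+                       /* no kmap exists: set up a transient alias for the flush */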
+                       pte_t saved_pte;
+                       addr = kmap_high_l1_vipt(page, &saved_pte);
+                       __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+                       kunmap_high_l1_vipt(page, saved_pte);
+               }
+       }
 
        /*
         * If this is a page cache page, and we have an aliasing VIPT cache,
 
        unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
 
        if (kvaddr >= (void *)FIXADDR_START) {
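+               /* VIVT caches must be flushed before the mapping is torn down */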
-               __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+               if (cache_is_vivt())
+                       __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
                BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
                set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
        pte = TOP_PTE(vaddr);
        return pte_page(*pte);
 }
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+
+#include <linux/percpu.h>
+
+/*
+ * The VIVT cache of a highmem page is always flushed before the page
+ * is unmapped. Hence unmapped highmem pages need no cache maintenance
+ * in that case.
+ *
+ * However, unmapped pages may still be cached with a VIPT cache, and
+ * unfortunately it is not possible to perform cache maintenance on
+ * them using physical addresses.  So we have no choice but to set up
+ * a temporary virtual mapping for that purpose.
+ *
+ * Yet this VIPT cache maintenance may be triggered from DMA support
+ * functions which may be called from interrupt context. As we don't
+ * want to keep interrupts disabled the whole time such maintenance is
+ * taking place, we allow for some reentrancy by preserving and
+ * restoring the previous fixmap entry before the interrupted context
+ * is resumed.  If the reentrancy depth is 0 then there is no need to
+ * restore the previous fixmap, and leaving the current one in place
+ * allows it to be reused the next time without a TLB flush (common
+ * with DMA).
+ */
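+
+/*
+ * Nesting example: a task maps page A (depth 0 -> 1, fixmap slot = A).
+ * An interrupt then maps page B (depth 1 -> 2, saved_pte = A's pte).
+ * Unmapping B leaves depth at 1, so A's pte is written back and the
+ * interrupted context resumes with its mapping intact.  Unmapping A
+ * brings depth back to 0 and the entry is simply left in place.
+ */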
+
+static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
+
+void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
+{
+       unsigned int idx, cpu = smp_processor_id();
+       int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+       unsigned long vaddr, flags;
+       pte_t pte, *ptep;
+
+       idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+       ptep = TOP_PTE(vaddr);
+       pte = mk_pte(page, kmap_prot);
+
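+       /*
+        * We rely on a per-CPU fixmap slot, so migration to another CPU
+        * must be prevented.  Interrupt context is non-preemptible already.
+        */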
+       if (!in_interrupt())
+               preempt_disable();
+
+       raw_local_irq_save(flags);
+       (*depth)++;
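+       /* if the slot already maps this page, reuse it without a TLB flush */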
+       if (pte_val(*ptep) == pte_val(pte)) {
+               *saved_pte = pte;
+       } else {
+               *saved_pte = *ptep;
+               set_pte_ext(ptep, pte, 0);
+               local_flush_tlb_kernel_page(vaddr);
+       }
+       raw_local_irq_restore(flags);
+
+       return (void *)vaddr;
+}
+
+void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
+{
+       unsigned int idx, cpu = smp_processor_id();
+       int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+       unsigned long vaddr, flags;
+       pte_t pte, *ptep;
+
+       idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+       ptep = TOP_PTE(vaddr);
+       pte = mk_pte(page, kmap_prot);
+
+       BUG_ON(pte_val(*ptep) != pte_val(pte));
+       BUG_ON(*depth <= 0);
+
+       raw_local_irq_save(flags);
+       (*depth)--;
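+       /* restore the interrupted context's entry only when still nested */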
+       if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
+               set_pte_ext(ptep, saved_pte, 0);
+               local_flush_tlb_kernel_page(vaddr);
+       }
+       raw_local_irq_restore(flags);
+
+       if (!in_interrupt())
+               preempt_enable();
+}
+
+#endif  /* CONFIG_CPU_CACHE_VIPT */
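
For illustration, cache maintenance on a highmem page from a
possibly-atomic context would use the new pair as sketched below. The
helper name is a hypothetical stand-in, but the call sequence mirrors
the __flush_dcache_page() hunk above:

	/* flush the D-cache lines of a highmem page on a VIPT cache */
	static void example_flush_highmem_page(struct page *page)
	{
		if (cache_is_vipt()) {
			pte_t saved_pte;
			void *vaddr = kmap_high_l1_vipt(page, &saved_pte);

			__cpuc_flush_dcache_area(vaddr, PAGE_SIZE);
			kunmap_high_l1_vipt(page, saved_pte);
		}
	}

This is safe from interrupt context: if it interrupts another user of
the fixmap slot, the preserved pte is restored on unmap and the
interrupted context resumes with its mapping intact.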