raw_spin_lock(&minicache_lock);
 
-       set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), mk_pte(from, minicache_pgprot), 0);
-       flush_tlb_kernel_page(COPYPAGE_MINICACHE);
+       set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
 
        mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
 
 
        kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
        kto   = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);
 
-       set_pte_ext(TOP_PTE(kfrom), mk_pte(from, PAGE_KERNEL), 0);
-       set_pte_ext(TOP_PTE(kto), mk_pte(to, PAGE_KERNEL), 0);
-
-       flush_tlb_kernel_page(kfrom);
-       flush_tlb_kernel_page(kto);
+       set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));
+       set_top_pte(kto, mk_pte(to, PAGE_KERNEL));
 
        copy_page((void *)kto, (void *)kfrom);
 
         */
        raw_spin_lock(&v6_lock);
 
-       set_pte_ext(TOP_PTE(to), mk_pte(page, PAGE_KERNEL), 0);
-       flush_tlb_kernel_page(to);
+       set_top_pte(to, mk_pte(page, PAGE_KERNEL));
        clear_page((void *)to);
 
        raw_spin_unlock(&v6_lock);
 
 
        raw_spin_lock(&minicache_lock);
 
-       set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), mk_pte(from, minicache_pgprot), 0);
-       flush_tlb_kernel_page(COPYPAGE_MINICACHE);
+       set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
 
        mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
 
 
        unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
        const int zero = 0;
 
-       set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
-       flush_tlb_kernel_page(to);
+       set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));
 
        asm(    "mcrr   p15, 0, %1, %0, c14\n"
        "       mcr     p15, 0, %2, c7, c10, 4"
 
 static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
 {
-       unsigned long colour = CACHE_COLOUR(vaddr);
+       /*
+        * Map the page at a kernel alias address that shares the cache
+        * colour of the user address, so the icache flush below hits the
+        * same cache lines the user mapping uses.
+        */
+       unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
        unsigned long offset = vaddr & (PAGE_SIZE - 1);
        unsigned long to;
 
-       set_pte_ext(TOP_PTE(FLUSH_ALIAS_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0);
-       to = FLUSH_ALIAS_START + (colour << PAGE_SHIFT) + offset;
-       flush_tlb_kernel_page(to);
+       /* set_top_pte() installs the PTE and flushes the stale TLB entry;
+        * 'to' lies in the same page as 'va', so the flush covers it. */
+       set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
+       to = va + offset;
        flush_icache_range(to, to + len);
 }
 
 
         */
        BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
 #endif
-       set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
        /*
         * When debugging is off, kunmap_atomic leaves the previous mapping
-        * in place, so this TLB flush ensures the TLB is updated with the
-        * new mapping.
+        * in place, so the contained TLB flush ensures the TLB is updated
+        * with the new mapping.
         */
-       local_flush_tlb_kernel_page(vaddr);
+       set_top_pte(vaddr, mk_pte(page, kmap_prot));
 
        return (void *)vaddr;
 }
                        __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
                BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-               set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
-               local_flush_tlb_kernel_page(vaddr);
+               set_top_pte(vaddr, __pte(0));
 #else
                (void) idx;  /* to kill a warning */
 #endif
 #ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
 #endif
-       set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
-       local_flush_tlb_kernel_page(vaddr);
+       set_top_pte(vaddr, pfn_pte(pfn, kmap_prot));
 
        return (void *)vaddr;
 }
 
 /* PFN alias flushing, for VIPT caches */
 #define FLUSH_ALIAS_START      0xffff4000
 
+/*
+ * Install 'pte' in the top (vectors) page table slot for 'va' and
+ * invalidate the corresponding TLB entry on the local CPU.
+ *
+ * NOTE(review): several converted call sites previously used the
+ * broadcast flush_tlb_kernel_page(); this helper does a local-only
+ * flush. That is safe only if every mapping established through it is
+ * used strictly on the installing CPU (e.g. under the spinlocks held
+ * at those call sites) — confirm for each caller.
+ */
+static inline void set_top_pte(unsigned long va, pte_t pte)
+{
+       set_pte_ext(TOP_PTE(va), pte, 0);
+       local_flush_tlb_kernel_page(va);
+}
+
 static inline pmd_t *pmd_off_k(unsigned long virt)
 {
        return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);