bool user);
 extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
                                    struct kvm_vcpu *vcpu);
-extern void kvm_local_flush_tlb_all(void);
 extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);
 extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
 
 static inline void
 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 {
-       extern void kvm_local_flush_tlb_all(void);
        unsigned long asid = asid_cache(cpu);
 
        if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();
-#ifdef CONFIG_KVM
-               kvm_local_flush_tlb_all();      /* start new asid cycle */
-#else
                local_flush_tlb_all();  /* start new asid cycle */
-#endif
                if (!asid)              /* fix version if needed */
                        asid = asid_first_version(cpu);
        }
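The net effect of the hunk above is that the generic MMU context code no longer special-cases KVM: the ASID-rollover path always calls local_flush_tlb_all(). Reassembled from the hunk's own context lines (a sketch, not the complete function), the rollover block now reads roughly:

        unsigned long asid = asid_cache(cpu);

        if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();     /* virtually-tagged I-cache must be flushed before ASIDs are reused */
                local_flush_tlb_all();          /* start new asid cycle */
                if (!asid)                      /* fix version if needed */
                        asid = asid_first_version(cpu);
        }

The next hunk makes the matching substitution on the KVM side, presumably in kvm_get_new_mmu_context() (declared in the first hunk above):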
 
                if (cpu_has_vtag_icache)
                        flush_icache_all();
 
-               kvm_local_flush_tlb_all();      /* start new asid cycle */
+               local_flush_tlb_all();      /* start new asid cycle */
 
                if (!asid)      /* fix version if needed */
                        asid = asid_first_version(cpu);
 
 }
 EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);
 
-void kvm_local_flush_tlb_all(void)
-{
-       unsigned long flags;
-       unsigned long old_ctx;
-       int entry = 0;
-
-       local_irq_save(flags);
-       /* Save old context and create impossible VPN2 value */
-       old_ctx = read_c0_entryhi();
-       write_c0_entrylo0(0);
-       write_c0_entrylo1(0);
-
-       /* Blast 'em all away. */
-       while (entry < current_cpu_data.tlbsize) {
-               /* Make sure all entries differ. */
-               write_c0_entryhi(UNIQUE_ENTRYHI(entry));
-               write_c0_index(entry);
-               mtc0_tlbw_hazard();
-               tlb_write_indexed();
-               tlbw_use_hazard();
-               entry++;
-       }
-       write_c0_entryhi(old_ctx);
-       mtc0_tlbw_hazard();
-
-       local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);
-
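For reference, the helper deleted above is the standard MIPS indexed TLB blast: save EntryHi, zero both EntryLo registers, then write a distinct "impossible" EntryHi into every TLB slot so no entry can match a real address. The generic local_flush_tlb_all() does much the same walk (it skips wired entries, which presumably no longer matters for KVM here), so the private copy is redundant. Condensed and commented, the removed loop amounts to this sketch:

        unsigned long flags, old_ctx;
        int entry;

        local_irq_save(flags);
        old_ctx = read_c0_entryhi();            /* preserve the current ASID/VPN2 */
        write_c0_entrylo0(0);                   /* dummy entries map to nothing */
        write_c0_entrylo1(0);

        for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));        /* unique, impossible VPN2 per slot */
                write_c0_index(entry);
                mtc0_tlbw_hazard();             /* CP0 write -> tlbwi hazard barrier */
                tlb_write_indexed();
                tlbw_use_hazard();              /* tlbwi -> CP0 use hazard barrier */
        }

        write_c0_entryhi(old_ctx);              /* restore the saved ASID */
        mtc0_tlbw_hazard();
        local_irq_restore(flags);
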
 /**
  * kvm_mips_suspend_mm() - Suspend the active mm.
  * @cpu                The CPU we're running on.