        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
 
        void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
+       int  (*tlb_remote_flush)(struct kvm *kvm);
 
        /*
         * Flush any TLB entries associated with the given GVA.
        return kvm_x86_ops->vm_free(kvm);
 }
 
+#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
+static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
+{
+       /*
+        * Return 0 if the backend flushed all remote TLBs; return
+        * -ENOTSUPP so the caller falls back to IPI-based flushing.
+        */
+       if (kvm_x86_ops->tlb_remote_flush &&
+           !kvm_x86_ops->tlb_remote_flush(kvm))
+               return 0;
+
+       return -ENOTSUPP;
+}
+
 int kvm_mmu_module_init(void);
 void kvm_mmu_module_exit(void);
 
 
 }
 #endif
 
+#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
+static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
+{
+       return -ENOTSUPP;
+}
+#endif
+
 #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
 void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
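An architecture that supplies its own kvm_arch_flush_remote_tlb()
suppresses this -ENOTSUPP fallback by defining
__KVM_HAVE_ARCH_FLUSH_REMOTE_TLB in its asm/kvm_host.h, exactly as the
x86 hunk above does. A sketch of the pattern, with
arch_specific_flush() as a hypothetical helper:

/* In arch/<arch>/include/asm/kvm_host.h (sketch): */
#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
        return arch_specific_flush(kvm);        /* hypothetical arch hook */
}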
 
         * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
         * barrier here.
         */
-       if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
+       if (!kvm_arch_flush_remote_tlb(kvm) ||
+           kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
                ++kvm->stat.remote_tlb_flush;
        cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
 }
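The edited condition relies on short-circuit evaluation: when the arch
flush returns 0, kvm_make_all_cpus_request() is never called and the
per-vCPU KVM_REQ_TLB_FLUSH broadcast is skipped entirely; on -ENOTSUPP
(or a backend failure) the old IPI path runs as before. An unrolled
equivalent for clarity (sketch only; the patch keeps the single test):

static void example_flush_path(struct kvm *kvm)
{
        if (!kvm_arch_flush_remote_tlb(kvm)) {
                /* hypervisor-assisted flush succeeded; no IPIs sent */
                ++kvm->stat.remote_tlb_flush;
        } else if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) {
                /* fallback: at least one vCPU was actually kicked */
                ++kvm->stat.remote_tlb_flush;
        }
}

Either way the flush is counted in remote_tlb_flush, so the stat keeps
its old meaning regardless of which mechanism did the work.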