 #define KVM_REQ_GET_VMCS12_PAGES       KVM_ARCH_REQ(24)
 #define KVM_REQ_APICV_UPDATE \
        KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_HV_TLB_FLUSH \
+       KVM_ARCH_REQ_FLAGS(26, KVM_REQUEST_NO_WAKEUP)
 
 #define CR0_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
 
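For context, a simplified sketch of how these request macros compose a value (condensed from include/linux/kvm_host.h, dropping the BUILD_BUG_ON range check of the real definition): the low byte selects a bit in vcpu->requests, while modifier flags such as KVM_REQUEST_NO_WAKEUP and KVM_REQUEST_WAIT ride in the upper bits and are masked off before the bit is set.

#define KVM_REQUEST_MASK	GENMASK(7, 0)	/* bit index into vcpu->requests */
#define KVM_REQUEST_NO_WAKEUP	BIT(8)		/* don't wake a blocked vCPU */
#define KVM_REQUEST_WAIT	BIT(9)		/* wait for kicked vCPUs to ack */

#define KVM_REQUEST_ARCH_BASE	8		/* arch requests follow the generic ones */

#define KVM_ARCH_REQ_FLAGS(nr, flags) \
	((unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)))
#define KVM_ARCH_REQ(nr)	KVM_ARCH_REQ_FLAGS(nr, 0)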
         * vcpu->arch.cr3 may not be up-to-date for running vCPUs, so we can't
         * analyze it here; flush the TLB regardless of the specified address space.
         */
-       kvm_make_vcpus_request_mask(kvm,
-                                   KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP,
+       kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH,
                                    vcpu_mask, &hv_vcpu->tlb_flush);
 
 ret_success:
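For reference, roughly what kvm_make_vcpus_request_mask() does with the new request (condensed from virt/kvm/kvm_main.c; the kvm_request_needs_ipi() filtering is omitted here): set the request bit on every vCPU selected by the bitmap and IPI the ones currently running, so they exit and service the request before reentering the guest. Since KVM_REQ_HV_TLB_FLUSH carries KVM_REQUEST_NO_WAKEUP, blocked vCPUs are not woken just to flush; they pick up the pending request on their next entry.

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 unsigned long *vcpu_bitmap, cpumask_var_t tmp)
{
	struct kvm_vcpu *vcpu;
	int i, cpu, me = get_cpu();
	bool called;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
			continue;

		kvm_make_request(req, vcpu);

		/* NO_WAKEUP: leave blocked vCPUs asleep. */
		if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
			continue;

		/* Collect running vCPUs into the caller's scratch mask. */
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != me)
			__cpumask_set_cpu(cpu, tmp);
	}

	/* IPI everything collected; WAIT requests block until acked. */
	called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
	put_cpu();

	return called;
}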
 
        kvm_x86_ops.tlb_flush(vcpu, invalidate_gpa);
 }
 
+static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
+{
+       ++vcpu->stat.tlb_flush;
+       kvm_x86_ops.tlb_flush_guest(vcpu);
+}
+
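With the request and helper in place, any host-side path that wants a vCPU's guest-visible TLB flushed can defer the work instead of flushing synchronously; the flush itself then runs in vcpu_enter_guest() right before reentry, as in the hunk below. A usage sketch (not part of this patch):

	/* Mark the flush pending; it is serviced in vcpu_enter_guest(). */
	kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);

	/* Force a running vCPU out of guest mode to notice the request. */
	kvm_vcpu_kick(vcpu);

The point of the split is that tlb_flush_guest() only has to invalidate translations the guest can see, which is cheaper than the full tlb_flush() on hardware that tags guest translations (e.g. VPID on VMX).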
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
        struct kvm_host_map map;
        trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
                st->preempted & KVM_VCPU_FLUSH_TLB);
        if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
-               kvm_x86_ops.tlb_flush_guest(vcpu);
+               kvm_vcpu_flush_tlb_guest(vcpu);
 
        vcpu->arch.st.preempted = 0;
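For context, the guest half of the KVM_VCPU_FLUSH_TLB handshake seen above, condensed into a hypothetical helper (the real logic lives in the PV TLB-flush path in arch/x86/kernel/kvm.c; the helper name is illustrative): instead of IPIing a preempted vCPU, the guest tags that vCPU's steal-time page and lets the host flush on its behalf in record_steal_time().

static bool kvm_pv_defer_tlb_flush(int cpu, struct cpumask *flushmask)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
	u8 state = READ_ONCE(src->preempted);

	/* Only a preempted vCPU may have its flush done by the host. */
	if (!(state & KVM_VCPU_PREEMPTED))
		return false;

	/* Races with the host clearing .preempted; cmpxchg decides. */
	if (cmpxchg(&src->preempted, state,
		    state | KVM_VCPU_FLUSH_TLB) != state)
		return false;

	/* The host will flush for us; no IPI needed for this cpu. */
	__cpumask_clear_cpu(cpu, flushmask);
	return true;
}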
 
                        kvm_mmu_load_pgd(vcpu);
                if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                        kvm_vcpu_flush_tlb(vcpu, true);
+               if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
+                       kvm_vcpu_flush_tlb_guest(vcpu);
                if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
                        vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
                        r = 0;
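And for completeness, kvm_check_request() above is essentially a test-and-clear of the request bit, simplified from include/linux/kvm_host.h (modifier flags such as KVM_REQUEST_NO_WAKEUP are masked off, so only the bit number matters at this point):

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (!test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests))
		return false;

	clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
	/* Order the clear before servicing the request. */
	smp_mb__after_atomic();
	return true;
}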