        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
 
-       void (*tlb_flush)(struct kvm_vcpu *vcpu);
+       void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
 
        void (*run)(struct kvm_vcpu *vcpu);
        int (*handle_exit)(struct kvm_vcpu *vcpu);
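
The hunk above is the whole patch in miniature: the vendor tlb_flush hook gains a hint saying whether guest-physical (GPA) mappings must be dropped as well. Every call site converted in this patch passes true, preserving the old full-flush behaviour; the false case is presumably for callers that only need guest-virtual translations dropped. A minimal user-space sketch of the two call shapes, using hypothetical demo_* stand-ins rather than the kernel's types:

#include <stdbool.h>
#include <stdio.h>

struct demo_vcpu { int id; };

/* New hook shape: the second argument hints whether guest-physical
 * (GPA) translations must be invalidated too. */
typedef void (*demo_tlb_flush_t)(struct demo_vcpu *vcpu, bool invalidate_gpa);

static void demo_tlb_flush(struct demo_vcpu *vcpu, bool invalidate_gpa)
{
        printf("vcpu%d: flush%s\n", vcpu->id,
               invalidate_gpa ? " (drop GPA mappings too)" : " (guest-virtual only)");
}

int main(void)
{
        struct demo_vcpu vcpu = { .id = 0 };
        demo_tlb_flush_t flush = demo_tlb_flush;

        flush(&vcpu, true);     /* what every converted call site in this patch passes */
        flush(&vcpu, false);    /* the cheaper case the new argument makes possible */
        return 0;
}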
 
 module_param(vgif, int, 0444);
 
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-static void svm_flush_tlb(struct kvm_vcpu *vcpu);
+static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
 static void svm_complete_interrupts(struct vcpu_svm *svm);
 
 static int nested_svm_exit_handled(struct vcpu_svm *svm);
                return 1;
 
        if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
-               svm_flush_tlb(vcpu);
+               svm_flush_tlb(vcpu, true);
 
        vcpu->arch.cr4 = cr4;
        if (!npt_enabled)
 
        svm->vmcb->control.nested_cr3 = __sme_set(root);
        mark_dirty(svm->vmcb, VMCB_NPT);
-       svm_flush_tlb(vcpu);
+       svm_flush_tlb(vcpu, true);
 }
 
 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
        svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
        svm->nested.intercept            = nested_vmcb->control.intercept;
 
-       svm_flush_tlb(&svm->vcpu);
+       svm_flush_tlb(&svm->vcpu, true);
        svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
        if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
                svm->vcpu.arch.hflags |= HF_VINTR_MASK;
        return 0;
 }
 
-static void svm_flush_tlb(struct kvm_vcpu *vcpu)
+static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
 
        svm->vmcb->save.cr3 = __sme_set(root);
        mark_dirty(svm->vmcb, VMCB_CR);
-       svm_flush_tlb(vcpu);
+       svm_flush_tlb(vcpu, true);
 }
 
 static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
        svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
        mark_dirty(svm->vmcb, VMCB_CR);
 
-       svm_flush_tlb(vcpu);
+       svm_flush_tlb(vcpu, true);
 }
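
On the SVM side the hunks above only thread the new argument through: every converted caller passes true, and the body of svm_flush_tlb (trimmed from this excerpt) does not need to branch on the hint, since the flush is requested per ASID either way. A simplified user-space model of that shape, with hypothetical demo_* names and constants:

#include <stdbool.h>

/* Simplified model of the AMD-side flush: the hint is accepted for
 * interface symmetry, but the flush is by ASID regardless. */
struct demo_svm {
        int tlb_ctl;            /* stands in for vmcb->control.tlb_ctl  */
        int asid_generation;    /* forces a fresh ASID when decremented */
};

#define DEMO_TLB_CONTROL_FLUSH_ASID 3

static void demo_svm_flush_tlb(struct demo_svm *svm, bool invalidate_gpa,
                               bool cpu_has_flush_by_asid)
{
        (void)invalidate_gpa;   /* unused in this model */

        if (cpu_has_flush_by_asid)
                svm->tlb_ctl = DEMO_TLB_CONTROL_FLUSH_ASID;
        else
                svm->asid_generation--;
}

int main(void)
{
        struct demo_svm svm = { 0, 1 };

        /* All SVM call sites in this patch pass true; the result is the same. */
        demo_svm_flush_tlb(&svm, true, true);
        return 0;
}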
 
 static int is_disabled(void)
 
 
 #endif
 
-static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid)
+static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
+                               bool invalidate_gpa)
 {
-       if (enable_ept) {
+       if (enable_ept && (invalidate_gpa || !enable_vpid)) {
                if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                        return;
                ept_sync_context(construct_eptp(vcpu, vcpu->arch.mmu.root_hpa));
        }
 }
 
-static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
+static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
 {
-       __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
+       __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
 }
 
 static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
 {
        if (enable_ept)
-               vmx_flush_tlb(vcpu);
+               vmx_flush_tlb(vcpu, true);
 }
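
The behavioural change sits in the new condition in __vmx_flush_tlb above: with EPT enabled, the relatively expensive ept_sync_context() is only forced when the caller asks for guest-physical mappings to go as well, or when VPID is unavailable; the remaining cases fall through to a VPID-scoped sync (vpid_sync_context() in the full source, trimmed from this excerpt). A standalone model of that decision, using hypothetical demo_* names:

#include <stdbool.h>
#include <stdio.h>

enum demo_flush {
        DEMO_FLUSH_EPT_CONTEXT,         /* drops combined GPA->HPA mappings    */
        DEMO_FLUSH_VPID_CONTEXT,        /* drops linear mappings for one vpid  */
};

/* Mirrors: if (enable_ept && (invalidate_gpa || !enable_vpid)) ... */
static enum demo_flush demo_pick_flush(bool enable_ept, bool enable_vpid,
                                       bool invalidate_gpa)
{
        if (enable_ept && (invalidate_gpa || !enable_vpid))
                return DEMO_FLUSH_EPT_CONTEXT;
        return DEMO_FLUSH_VPID_CONTEXT;
}

int main(void)
{
        /* With EPT and VPID both on, a flush that does not need to drop
         * GPA mappings can stay on the cheaper VPID-scoped path. */
        printf("ept+vpid, gpa=0 -> %d\n", demo_pick_flush(true, true, false));
        printf("ept+vpid, gpa=1 -> %d\n", demo_pick_flush(true, true, true));
        printf("ept only,  gpa=0 -> %d\n", demo_pick_flush(true, false, false));
        return 0;
}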
 
 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
                ept_load_pdptrs(vcpu);
        }
 
-       vmx_flush_tlb(vcpu);
+       vmx_flush_tlb(vcpu, true);
        vmcs_writel(GUEST_CR3, guest_cr3);
 }
 
                return kvm_skip_emulated_instruction(vcpu);
        }
 
-       __vmx_flush_tlb(vcpu, vmx->nested.vpid02);
+       __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
        nested_vmx_succeed(vcpu);
 
        return kvm_skip_emulated_instruction(vcpu);
                        vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
                        if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
                                vmx->nested.last_vpid = vmcs12->virtual_processor_id;
-                               __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
+                               __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02, true);
                        }
                } else {
                        vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
-                       vmx_flush_tlb(vcpu);
+                       vmx_flush_tlb(vcpu, true);
                }
 
        }
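
In the nested hunk above, L0 runs L2 under its own hardware vpid (vpid02) and only flushes that vpid when L1 hands L2 a different virtual_processor_id than last time; otherwise cached translations are reused. A small model of that caching, with hypothetical demo_* names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_nested {
        uint16_t last_vpid;     /* last vmcs12 vpid seen for this vcpu    */
        uint16_t vpid02;        /* hardware vpid L0 uses while running L2 */
};

/* Returns true when the translations tagged with vpid02 must go,
 * i.e. when L1 changed the vpid it exposes to L2. */
static bool demo_l2_vpid_needs_flush(struct demo_nested *n, uint16_t vmcs12_vpid)
{
        if (vmcs12_vpid != n->last_vpid) {
                n->last_vpid = vmcs12_vpid;
                return true;    /* would call __vmx_flush_tlb(vcpu, vpid02, true) */
        }
        return false;
}

int main(void)
{
        struct demo_nested n = { .last_vpid = 0, .vpid02 = 2 };

        printf("%d\n", demo_l2_vpid_needs_flush(&n, 5));        /* 1: new vpid, flush  */
        printf("%d\n", demo_l2_vpid_needs_flush(&n, 5));        /* 0: unchanged, reuse */
        return 0;
}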
                 * L1's vpid. TODO: move to a more elaborate solution, giving
                 * each L2 its own vpid and exposing the vpid feature to L1.
                 */
-               vmx_flush_tlb(vcpu);
+               vmx_flush_tlb(vcpu, true);
        }
        /* Restore posted intr vector. */
        if (nested_cpu_has_posted_intr(vmcs12))
 
        kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
 }
 
-static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
+static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
 {
        ++vcpu->stat.tlb_flush;
-       kvm_x86_ops->tlb_flush(vcpu);
+       kvm_x86_ops->tlb_flush(vcpu, invalidate_gpa);
 }
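
kvm_vcpu_flush_tlb() above is the common choke point: it accounts the flush and forwards the hint to whichever vendor hook is installed, and the KVM_REQ_TLB_FLUSH path in the final hunk keeps passing true, so behaviour is unchanged for now. A user-space model of the wrapper, with hypothetical demo_* names:

#include <stdbool.h>

struct demo_vcpu;

struct demo_x86_ops {
        void (*tlb_flush)(struct demo_vcpu *vcpu, bool invalidate_gpa);
};

struct demo_vcpu {
        struct { unsigned long tlb_flush; } stat;
        const struct demo_x86_ops *ops;
};

/* Account the flush, then defer to the vendor implementation. */
static void demo_vcpu_flush_tlb(struct demo_vcpu *vcpu, bool invalidate_gpa)
{
        ++vcpu->stat.tlb_flush;
        vcpu->ops->tlb_flush(vcpu, invalidate_gpa);
}

static void demo_vendor_flush(struct demo_vcpu *vcpu, bool invalidate_gpa)
{
        (void)vcpu;
        (void)invalidate_gpa;
}

int main(void)
{
        static const struct demo_x86_ops ops = { .tlb_flush = demo_vendor_flush };
        struct demo_vcpu vcpu = { .stat = { 0 }, .ops = &ops };

        /* The request path (KVM_REQ_TLB_FLUSH) still asks for a full flush. */
        demo_vcpu_flush_tlb(&vcpu, true);
        return 0;
}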
 
 void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
                if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
                        kvm_mmu_sync_roots(vcpu);
                if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
-                       kvm_vcpu_flush_tlb(vcpu);
+                       kvm_vcpu_flush_tlb(vcpu, true);
                if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
                        vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
                        r = 0;