static bool vmx_invpcid_supported(void)
 {
-       return cpu_has_vmx_invpcid() && enable_ept;
+       return cpu_has_vmx_invpcid();
 }
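With the enable_ept check gone, INVPCID is reported as supported whenever the hardware has it, so the guest can be shown CPUID.7.EBX[INVPCID] even under shadow paging; the new handle_invpcid() below does the emulation in that case. For orientation, a minimal sketch of the guest side, closely modeled on the kernel's own __invpcid() helper (the raw opcode bytes and register constraints are taken from that helper; treat this as illustrative, not part of the patch):

	static inline void guest_invpcid(unsigned long type, u64 pcid, u64 addr)
	{
		/* 16-byte descriptor: PCID in bits 11:0 of the first
		 * quadword, the target linear address in the second. */
		struct { u64 pcid, gla; } desc = { pcid, addr };

		/* invpcid (%rcx), %rax, emitted as raw opcode bytes */
		asm volatile(".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
			     : : "m" (desc), "a" (type), "c" (&desc) : "memory");
	}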
 
 /*
@@ ... @@
        if (!enable_ept) {
                exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
                enable_unrestricted_guest = 0;
-               /* Enable INVPCID for non-ept guests may cause performance regression. */
-               exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
        }
        if (!enable_unrestricted_guest)
                exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
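Dropping the second statement is what wires guest INVPCID into the handler added below: under shadow paging KVM keeps the primary control CPU_BASED_INVLPG_EXITING set, and the SDM specifies that INVPCID causes a VM exit exactly when both "INVLPG exiting" and "enable INVPCID" are 1 (with "enable INVPCID" clear it raises #UD instead). A sketch of that decision, using the real control names but a hypothetical helper:

	/* Hypothetical helper, not in the patch: does a guest INVPCID
	 * trap, given the current VM-execution controls? */
	static bool invpcid_causes_vmexit(u32 cpu_based, u32 secondary)
	{
		return (cpu_based & CPU_BASED_INVLPG_EXITING) &&
		       (secondary & SECONDARY_EXEC_ENABLE_INVPCID);
	}

With EPT enabled, INVLPG exiting is clear, so the guest runs INVPCID natively and none of the emulation below is involved.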
@@ ... @@
        return kvm_skip_emulated_instruction(vcpu);
 }
 
+static int handle_invpcid(struct kvm_vcpu *vcpu)
+{
+       u32 vmx_instruction_info;
+       unsigned long type;
+       bool pcid_enabled;
+       gva_t gva;
+       struct x86_exception e;
+       struct {
+               u64 pcid;
+               u64 gla;
+       } operand;
+
+       if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
+               kvm_queue_exception(vcpu, UD_VECTOR);
+               return 1;
+       }
+
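+       /* The invalidation type comes from the instruction's register
+        * operand, identified by bits 31:28 of the VM-exit
+        * instruction-information field (the same decoding used for
+        * INVVPID). */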
+       vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+       type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
+
+       if (type > 3) {
+               kvm_inject_gp(vcpu, 0);
+               return 1;
+       }
+
+       /*
+        * According to the Intel instruction reference, the memory operand
+        * is read even if it isn't needed (e.g., for type==all).
+        */
+       if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+                               vmx_instruction_info, false, &gva))
+               return 1;
+
+       if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
+               kvm_inject_page_fault(vcpu, &e);
+               return 1;
+       }
+
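+       /* The PCID occupies bits 11:0 of the descriptor; bits 63:12 are
+        * reserved and must be zero, else #GP. */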
+       if (operand.pcid >> 12 != 0) {
+               kvm_inject_gp(vcpu, 0);
+               return 1;
+       }
+
+       pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
+
+       switch (type) {
+       case INVPCID_TYPE_INDIV_ADDR:
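+               /* Per the SDM, a non-zero PCID with CR4.PCIDE clear and a
+                * non-canonical target address both take #GP, as on bare
+                * metal. */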
+               if ((!pcid_enabled && (operand.pcid != 0)) ||
+                   is_noncanonical_address(operand.gla, vcpu)) {
+                       kvm_inject_gp(vcpu, 0);
+                       return 1;
+               }
+               kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
+               return kvm_skip_emulated_instruction(vcpu);
+
+       case INVPCID_TYPE_SINGLE_CTXT:
+               if (!pcid_enabled && (operand.pcid != 0)) {
+                       kvm_inject_gp(vcpu, 0);
+                       return 1;
+               }
+
+               if (kvm_get_active_pcid(vcpu) == operand.pcid) {
+                       kvm_mmu_sync_roots(vcpu);
+                       kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+               }
+
+               /*
+                * If the current cr3 does not use the given PCID, then nothing
+                * needs to be done here because a resync will happen anyway
+                * before switching to any other CR3.
+                */
+
+               return kvm_skip_emulated_instruction(vcpu);
+
+       case INVPCID_TYPE_ALL_NON_GLOBAL:
+               /*
+                * Currently, KVM doesn't mark global entries in the shadow
+                * page tables, so a non-global flush just degenerates to a
+                * global flush. If needed, we could optimize this later by
+                * keeping track of global entries in shadow page tables.
+                */
+
+               /* fall-through */
+       case INVPCID_TYPE_ALL_INCL_GLOBAL:
+               kvm_mmu_unload(vcpu);
+               return kvm_skip_emulated_instruction(vcpu);
+
+       default:
+               BUG(); /* We have already checked above that type <= 3 */
+       }
+}
+
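For reference, the four architectural type values the switch above dispatches on, as the INVPCID_TYPE_* constants define them in the x86 headers (the exact header they live in is an assumption):

	#define INVPCID_TYPE_INDIV_ADDR		0	/* one address, one PCID */
	#define INVPCID_TYPE_SINGLE_CTXT	1	/* everything for one PCID */
	#define INVPCID_TYPE_ALL_INCL_GLOBAL	2	/* all PCIDs, incl. globals */
	#define INVPCID_TYPE_ALL_NON_GLOBAL	3	/* all PCIDs, minus globals */

Types 2 and 3 deliberately share the kvm_mmu_unload() path: the shadow MMU does not tag global entries, so the non-global variant is over-invalidated into a full root reload, which is safe if slower.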
 static int handle_pml_full(struct kvm_vcpu *vcpu)
 {
        unsigned long exit_qualification;
@@ ... @@
        [EXIT_REASON_XSAVES]                  = handle_xsaves,
        [EXIT_REASON_XRSTORS]                 = handle_xrstors,
        [EXIT_REASON_PML_FULL]                = handle_pml_full,
+       [EXIT_REASON_INVPCID]                 = handle_invpcid,
        [EXIT_REASON_VMFUNC]                  = handle_vmfunc,
        [EXIT_REASON_PREEMPTION_TIMER]        = handle_preemption_timer,
 };