 #define SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE   VMCS_CONTROL_BIT(USR_WAIT_PAUSE)
 #define SECONDARY_EXEC_BUS_LOCK_DETECTION      VMCS_CONTROL_BIT(BUS_LOCK_DETECTION)
 
+/*
+ * Definitions of Tertiary Processor-Based VM-Execution Controls.
+ */
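+/* IPI virtualization is bit 4 of the tertiary controls (see the SDM). */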
+#define TERTIARY_EXEC_IPI_VIRT                 VMCS_CONTROL_BIT(IPI_VIRT)
+
 #define PIN_BASED_EXT_INTR_MASK                 VMCS_CONTROL_BIT(INTR_EXITING)
 #define PIN_BASED_NMI_EXITING                   VMCS_CONTROL_BIT(NMI_EXITING)
 #define PIN_BASED_VIRTUAL_NMIS                  VMCS_CONTROL_BIT(VIRTUAL_NMIS)


 enum vmcs_field {
        VIRTUAL_PROCESSOR_ID            = 0x00000000,
        POSTED_INTR_NV                  = 0x00000002,
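+       /* 16-bit control: index of the last valid PID-pointer table entry. */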
+       LAST_PID_POINTER_INDEX          = 0x00000008,
        GUEST_ES_SELECTOR               = 0x00000800,
        GUEST_CS_SELECTOR               = 0x00000802,
        GUEST_SS_SELECTOR               = 0x00000804,


        TSC_MULTIPLIER_HIGH             = 0x00002033,
        TERTIARY_VM_EXEC_CONTROL        = 0x00002034,
        TERTIARY_VM_EXEC_CONTROL_HIGH   = 0x00002035,
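+       /* 64-bit control: physical address of the PID-pointer table. */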
+       PID_POINTER_TABLE               = 0x00002042,
+       PID_POINTER_TABLE_HIGH          = 0x00002043,
        GUEST_PHYSICAL_ADDRESS          = 0x00002400,
        GUEST_PHYSICAL_ADDRESS_HIGH     = 0x00002401,
        VMCS_LINK_POINTER               = 0x00002800,
 
 
 module_param(enable_apicv, bool, S_IRUGO);
 
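+/* IPI virtualization defaults on; 0444 keeps it read-only, like enable_apicv. */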
+bool __read_mostly enable_ipiv = true;
+module_param(enable_ipiv, bool, 0444);
+
 /*
  * If nested=1, nested virtualization is supported, i.e., guests may use
  * VMX and be a hypervisor for its own guests. If nested=0, guests may not


        }
 
        if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_TERTIARY_CONTROLS) {
-               u64 opt3 = 0;
+               u64 opt3 = TERTIARY_EXEC_IPI_VIRT;
 
                _cpu_based_3rd_exec_control = adjust_vmx_controls64(opt3,
                                              MSR_IA32_VMX_PROCBASED_CTLS3);


                vmx_enable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_RW);
                vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
                vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
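+               /*
+                * With IPI virtualization enabled, valid ICR writes are
+                * virtualized by the CPU, so the ICR no longer needs to be
+                * intercepted.
+                */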
+               if (enable_ipiv)
+                       vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_ICR), MSR_TYPE_RW);
        }
 }
 
 
        pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
 
-       if (kvm_vcpu_apicv_active(vcpu))
+       if (kvm_vcpu_apicv_active(vcpu)) {
                secondary_exec_controls_setbit(vmx,
                                               SECONDARY_EXEC_APIC_REGISTER_VIRT |
                                               SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
-       else
+               if (enable_ipiv)
+                       tertiary_exec_controls_setbit(vmx, TERTIARY_EXEC_IPI_VIRT);
+       } else {
                secondary_exec_controls_clearbit(vmx,
                                                 SECONDARY_EXEC_APIC_REGISTER_VIRT |
                                                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
+               if (enable_ipiv)
+                       tertiary_exec_controls_clearbit(vmx, TERTIARY_EXEC_IPI_VIRT);
+       }
 
        vmx_update_msr_bitmap_x2apic(vcpu);
 }
 
 static u64 vmx_tertiary_exec_control(struct vcpu_vmx *vmx)
 {
-       return vmcs_config.cpu_based_3rd_exec_ctrl;
+       u64 exec_control = vmcs_config.cpu_based_3rd_exec_ctrl;
+
+       /*
+        * IPI virtualization relies on APICv. Disable IPI virtualization if
+        * APICv is inhibited.
+        */
+       if (!enable_ipiv || !kvm_vcpu_apicv_active(&vmx->vcpu))
+               exec_control &= ~TERTIARY_EXEC_IPI_VIRT;
+
+       return exec_control;
 }
 
 /*


        return exec_control;
 }
 
+static inline int vmx_get_pid_table_order(struct kvm *kvm)
+{
+       return get_order(kvm->arch.max_vcpu_ids * sizeof(*to_kvm_vmx(kvm)->pid_table));
+}
+
+static int vmx_alloc_ipiv_pid_table(struct kvm *kvm)
+{
+       struct page *pages;
+       struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
+
+       if (!irqchip_in_kernel(kvm) || !enable_ipiv)
+               return 0;
+
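+       /* The table is shared by all vCPUs and only needs to be allocated once. */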
+       if (kvm_vmx->pid_table)
+               return 0;
+
+       pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, vmx_get_pid_table_order(kvm));
+       if (!pages)
+               return -ENOMEM;
+
+       kvm_vmx->pid_table = (void *)page_address(pages);
+       return 0;
+}
+
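+/*
+ * Called before each vCPU is created, i.e. after userspace's last chance
+ * to set KVM_CAP_MAX_VCPU_ID, so the PID-pointer table is sized against
+ * the final maximum vCPU ID.
+ */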
+static int vmx_vcpu_precreate(struct kvm *kvm)
+{
+       return vmx_alloc_ipiv_pid_table(kvm);
+}
+
 #define VMX_XSS_EXIT_BITMAP 0
 
 static void init_vmcs(struct vcpu_vmx *vmx)
 {
+       struct kvm *kvm = vmx->vcpu.kvm;
+       struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
+
        if (nested)
                nested_vmx_set_vmcs_shadowing_bitmap();
 
                vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
        }
 
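+       /*
+        * Point the VMCS at the VM-wide PID-pointer table and bound the
+        * range of valid indices; each vCPU publishes its own entry at
+        * creation time.
+        */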
-       if (!kvm_pause_in_guest(vmx->vcpu.kvm)) {
+       if (vmx_can_use_ipiv(&vmx->vcpu)) {
+               vmcs_write64(PID_POINTER_TABLE, __pa(kvm_vmx->pid_table));
+               vmcs_write16(LAST_PID_POINTER_INDEX, kvm->arch.max_vcpu_ids - 1);
+       }
+
+       if (!kvm_pause_in_guest(kvm)) {
                vmcs_write32(PLE_GAP, ple_gap);
                vmx->ple_window = ple_window;
                vmx->ple_window_dirty = true;


                        goto free_vmcs;
        }
 
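+       /*
+        * Publish this vCPU's posted-interrupt descriptor in the PID-pointer
+        * table; the low bit marks the entry as valid for hardware use.
+        */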
+       if (vmx_can_use_ipiv(vcpu))
+               WRITE_ONCE(to_kvm_vmx(vcpu->kvm)->pid_table[vcpu->vcpu_id],
+                          __pa(&vmx->pi_desc) | PID_TABLE_ENTRY_VALID);
+
        return 0;
 
 free_vmcs:


        return supported & BIT(reason);
 }
 
+static void vmx_vm_destroy(struct kvm *kvm)
+{
+       struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
+
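+       /* free_pages() is a no-op when the table was never allocated (NULL). */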
+       free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm));
+}
+
 static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .name = "kvm_intel",
 
 
        .vm_size = sizeof(struct kvm_vmx),
        .vm_init = vmx_vm_init,
+       .vm_destroy = vmx_vm_destroy,
 
+       .vcpu_precreate = vmx_vcpu_precreate,
        .vcpu_create = vmx_vcpu_create,
        .vcpu_free = vmx_vcpu_free,
        .vcpu_reset = vmx_vcpu_reset,


        if (!enable_apicv)
                vmx_x86_ops.sync_pir_to_irr = NULL;
 
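+       /* IPI virtualization is an APICv extension and needs CPU support for it. */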
+       if (!enable_apicv || !cpu_has_vmx_ipiv())
+               enable_ipiv = false;
+
        if (cpu_has_vmx_tsc_scaling())
                kvm_has_tsc_control = true;