        u64 guest_pdptr1;
        u64 guest_pdptr2;
        u64 guest_pdptr3;
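+       /* Guest BNDCFGS (MPX bound-configuration) state tracked in vmcs12. */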
+       u64 guest_bndcfgs;
        u64 host_ia32_pat;
        u64 host_ia32_efer;
        u64 host_ia32_perf_global_ctrl;
        GUEST_CS_LIMIT,
        GUEST_CS_BASE,
        GUEST_ES_BASE,
+       GUEST_BNDCFGS,
        CR0_GUEST_HOST_MASK,
        CR0_READ_SHADOW,
        CR4_READ_SHADOW,
        FIELD64(GUEST_PDPTR1, guest_pdptr1),
        FIELD64(GUEST_PDPTR2, guest_pdptr2),
        FIELD64(GUEST_PDPTR3, guest_pdptr3),
+       FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
        FIELD64(HOST_IA32_PAT, host_ia32_pat),
        FIELD64(HOST_IA32_EFER, host_ia32_efer),
        FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
 static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu);
 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
+static bool vmx_mpx_supported(void);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
        nested_vmx_exit_ctls_high |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
                VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
                VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
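+       /* Only advertise VM_EXIT_CLEAR_BNDCFGS to L1 when MPX is supported. */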
+       if (vmx_mpx_supported())
+               nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
 
        /* entry controls */
        rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
                VM_ENTRY_LOAD_IA32_PAT;
        nested_vmx_entry_ctls_high |= (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR |
                                       VM_ENTRY_LOAD_IA32_EFER);
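+       /* Only advertise VM_ENTRY_LOAD_BNDCFGS to L1 when MPX is supported. */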
+       if (vmx_mpx_supported())
+               nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
 
        /* cpu-based controls */
        rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
 
        set_cr4_guest_host_mask(vmx);
 
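+       /* If L1 set VM_ENTRY_LOAD_BNDCFGS, load L2's BNDCFGS from vmcs12. */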
+       if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)
+               vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+
        if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
                vmcs_write64(TSC_OFFSET,
                        vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
        vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
        vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
        vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
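+       /* Save L2's BNDCFGS into vmcs12 so L1 can observe the value it left. */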
+       if (vmx_mpx_supported())
+               vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
 
        /* update exit information fields: */
 
        vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
        vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
 
+       /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1.  */
+       if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
+               vmcs_write64(GUEST_BNDCFGS, 0);
+
        if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
                vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
                vcpu->arch.pat = vmcs12->host_ia32_pat;