 static unsigned long *vmx_msr_bitmap_legacy;
 static unsigned long *vmx_msr_bitmap_longmode;
 
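+/* Set if the CPU allows the VM_ENTRY/VM_EXIT_LOAD_IA32_EFER controls. */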
+static bool cpu_has_load_ia32_efer;
+
 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
 static DEFINE_SPINLOCK(vmx_vpid_lock);
 
@@ ... @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
        unsigned i;
        struct msr_autoload *m = &vmx->msr_autoload;
 
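+       /* With the dedicated EFER controls, there is no autoload slot to clear. */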
+       if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
+               vmcs_clear_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
+               vmcs_clear_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
+               return;
+       }
+
        for (i = 0; i < m->nr; ++i)
                if (m->guest[i].index == msr)
                        break;
@@ ... @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, u64 guest_val, u64 host_val)
        unsigned i;
        struct msr_autoload *m = &vmx->msr_autoload;
 
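+       /* Prefer the dedicated EFER load controls over an autoload slot. */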
+       if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
+               vmcs_write64(GUEST_IA32_EFER, guest_val);
+               vmcs_write64(HOST_IA32_EFER, host_val);
+               vmcs_set_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
+               vmcs_set_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
+               return;
+       }
+
        for (i = 0; i < m->nr; ++i)
                if (m->guest[i].index == msr)
                        break;
@@ ... @@
        return 0;
 }
 
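+/*
+ * The VMX capability MSRs report allowed 0-settings in bits 31:0 and
+ * allowed 1-settings in bits 63:32: a control may be set to 1 only if
+ * its bit in the high dword is 1.
+ */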
+static __init bool allow_1_setting(u32 msr, u32 ctl)
+{
+       u32 vmx_msr_low, vmx_msr_high;
+
+       rdmsr(msr, vmx_msr_low, vmx_msr_high);
+       return vmx_msr_high & ctl;
+}
+
 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 {
        u32 vmx_msr_low, vmx_msr_high;
@@ ... @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
        vmcs_conf->vmexit_ctrl         = _vmexit_control;
        vmcs_conf->vmentry_ctrl        = _vmentry_control;
 
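+       /* Use the EFER load controls only if both entry and exit allow them. */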
+       cpu_has_load_ia32_efer =
+               allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
+                               VM_ENTRY_LOAD_IA32_EFER)
+               && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
+                                  VM_EXIT_LOAD_IA32_EFER);
+
        return 0;
 }