        find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
        vmcs_write32(VM_ENTRY_CONTROLS,
                     vmcs_read32(VM_ENTRY_CONTROLS)
-                    | VM_ENTRY_CONTROLS_IA32E_MASK);
+                    | VM_ENTRY_IA32E_MODE);
 }
 
 static void exit_lmode(struct kvm_vcpu *vcpu)
 {
        vcpu->shadow_efer &= ~EFER_LMA;

        vmcs_write32(VM_ENTRY_CONTROLS,
                     vmcs_read32(VM_ENTRY_CONTROLS)
-                    & ~VM_ENTRY_CONTROLS_IA32E_MASK);
+                    & ~VM_ENTRY_IA32E_MODE);
 }
 
 #endif

        if (efer & EFER_LMA) {
                vmcs_write32(VM_ENTRY_CONTROLS,
                                     vmcs_read32(VM_ENTRY_CONTROLS) |
-                                    VM_ENTRY_CONTROLS_IA32E_MASK);
+                                    VM_ENTRY_IA32E_MODE);
                msr->data = efer;
 
        } else {
                vmcs_write32(VM_ENTRY_CONTROLS,
                                     vmcs_read32(VM_ENTRY_CONTROLS) &
-                                    ~VM_ENTRY_CONTROLS_IA32E_MASK);
+                                    ~VM_ENTRY_IA32E_MODE);
 
                msr->data = efer & ~EFER_LME;
        }
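
The same read-modify-write pattern appears in all three hunks: the
"IA-32e mode guest" VM-entry control (bit 9) is set when the guest's
EFER.LMA is set and cleared otherwise, because this control tells the
CPU whether the guest runs in long mode after VM entry. A minimal
sketch of that invariant as a hypothetical helper
(sync_ia32e_entry_control is not in the patch; it only reuses
identifiers that are):

/*
 * Hypothetical helper (not part of this patch): keep the "IA-32e
 * mode guest" VM-entry control in sync with the guest's EFER.LMA,
 * mirroring the open-coded sequences above.
 */
static void sync_ia32e_entry_control(u64 efer)
{
        u32 entry_ctls = vmcs_read32(VM_ENTRY_CONTROLS);

        if (efer & EFER_LMA)
                entry_ctls |= VM_ENTRY_IA32E_MODE;
        else
                entry_ctls &= ~VM_ENTRY_IA32E_MODE;
        vmcs_write32(VM_ENTRY_CONTROLS, entry_ctls);
}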
 
 /* segment AR */
 #define SEGMENT_AR_L_MASK (1 << 13)
 
-/* entry controls */
-#define VM_ENTRY_CONTROLS_IA32E_MASK (1 << 9)
-
 #define AR_TYPE_ACCESSES_MASK 1
 #define AR_TYPE_READABLE_MASK (1 << 1)
 #define AR_TYPE_WRITEABLE_MASK (1 << 2)
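
The definition of the new name is outside this excerpt; presumably it
keeps the value of the mask it replaces. A sketch of the assumed
definition (its exact location, e.g. a shared vmx header, is an
assumption):

/*
 * Assumed definition of the renamed constant (not shown in this
 * excerpt): the same bit-9 value as the removed
 * VM_ENTRY_CONTROLS_IA32E_MASK.
 */
#define VM_ENTRY_IA32E_MODE (1 << 9)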