         * failValid writes the error number to the current VMCS, which
         * can't be done if there isn't a current VMCS.
         */
-       if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
+       if (vmx->nested.current_vmptr == -1ull &&
+           !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
                return nested_vmx_failInvalid(vcpu);
 
        return nested_vmx_failValid(vcpu, vm_instruction_error);
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-       if (!vmx->nested.hv_evmcs)
-               return;
+       if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
+               kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
+               vmx->nested.hv_evmcs = NULL;
+       }
 
-       kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
-       vmx->nested.hv_evmcs_vmptr = 0;
-       vmx->nested.hv_evmcs = NULL;
+       vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
 }
 
 static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
        if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa))
                return EVMPTRLD_DISABLED;
 
-       if (unlikely(!vmx->nested.hv_evmcs ||
-                    evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
-               if (!vmx->nested.hv_evmcs)
-                       vmx->nested.current_vmptr = -1ull;
+       if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
+               vmx->nested.current_vmptr = -1ull;
 
                nested_release_evmcs(vcpu);
 
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-       if (vmx->nested.hv_evmcs) {
+       if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
                copy_vmcs12_to_enlightened(vmx);
                /* All fields are clean */
                vmx->nested.hv_evmcs->hv_clean_fields |=
        u32 exec_control;
        u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
 
-       if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
+       if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
                prepare_vmcs02_early_rare(vmx, vmcs12);
 
        /*
                          enum vm_entry_failure_code *entry_failure_code)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
        bool load_guest_pdptrs_vmcs12 = false;
 
-       if (vmx->nested.dirty_vmcs12 || hv_evmcs) {
+       if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
                prepare_vmcs02_rare(vmx, vmcs12);
                vmx->nested.dirty_vmcs12 = false;
 
-               load_guest_pdptrs_vmcs12 = !hv_evmcs ||
-                       !(hv_evmcs->hv_clean_fields &
+               load_guest_pdptrs_vmcs12 = !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) ||
+                       !(vmx->nested.hv_evmcs->hv_clean_fields &
                          HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
        }
 
         * L2 was running), map it here to make sure vmcs12 changes are
         * properly reflected.
         */
-       if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs) {
+       if (vmx->nested.enlightened_vmcs_enabled &&
+           !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
                enum nested_evmptrld_status evmptrld_status =
                        nested_vmx_handle_enlightened_vmptrld(vcpu, false);
 
 
        load_vmcs12_host_state(vcpu, vmcs12);
        vmcs12->vm_exit_reason = exit_reason.full;
-       if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
+       if (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
                vmx->nested.need_vmcs12_to_shadow_sync = true;
        return NVMX_VMENTRY_VMEXIT;
 }
                return nested_vmx_failInvalid(vcpu);
        }
 
-       if (CC(!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull))
+       if (CC(!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) &&
+              vmx->nested.current_vmptr == -1ull))
                return nested_vmx_failInvalid(vcpu);
 
        vmcs12 = get_vmcs12(vcpu);
        if (CC(vmcs12->hdr.shadow_vmcs))
                return nested_vmx_failInvalid(vcpu);
 
-       if (vmx->nested.hv_evmcs) {
+       if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
                copy_enlightened_to_vmcs12(vmx);
                /* Enlightened VMCS doesn't have launch state */
                vmcs12->launch_state = !launch;
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-       if (vmx->nested.hv_evmcs)
+       if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
                sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
 
-       vmx->nested.need_sync_vmcs02_to_vmcs12_rare = !vmx->nested.hv_evmcs;
+       vmx->nested.need_sync_vmcs02_to_vmcs12_rare =
+               !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr);
 
        vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
        vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
        }
 
        if ((vm_exit_reason != -1) &&
-           (enable_shadow_vmcs || vmx->nested.hv_evmcs))
+           (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)))
                vmx->nested.need_vmcs12_to_shadow_sync = true;
 
        /* in case we halted in L2 */
                return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
 
        /* Forbid normal VMPTRLD if Enlightened version was used */
-       if (vmx->nested.hv_evmcs)
+       if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
                return 1;
 
        if (vmx->nested.current_vmptr != vmptr) {
        if (!nested_vmx_check_permission(vcpu))
                return 1;
 
-       if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
+       if (unlikely(evmptr_is_valid(to_vmx(vcpu)->nested.hv_evmcs_vmptr)))
                return 1;
 
        if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
                if (vmx_has_valid_vmcs12(vcpu)) {
                        kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
 
-                       if (vmx->nested.hv_evmcs)
+                       if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
                                kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
 
                        if (is_guest_mode(vcpu) &&
        } else  {
                copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
                if (!vmx->nested.need_vmcs12_to_shadow_sync) {
-                       if (vmx->nested.hv_evmcs)
+                       if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
                                copy_enlightened_to_vmcs12(vmx);
                        else if (enable_shadow_vmcs)
                                copy_shadow_to_vmcs12(vmx);
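
For reference, the hunks above never show the definitions they switch to. Assuming the usual companion change in the eVMCS header (arch/x86/kvm/vmx/evmcs.h in upstream KVM), EVMPTR_INVALID and evmptr_is_valid() would look roughly like the minimal sketch below; treat it as illustrative, not as part of this excerpt:

/* Sketch only: marker and helper assumed by the hunks above. */
#define EVMPTR_INVALID (-1ULL)

static inline bool evmptr_is_valid(u64 evmptr)
{
        /* Anything other than the invalid marker is treated as a live eVMCS pointer. */
        return evmptr != EVMPTR_INVALID;
}

With such a helper, nested_release_evmcs() can reset hv_evmcs_vmptr to EVMPTR_INVALID unconditionally, so "eVMCS pointer is valid" is tracked by the single hv_evmcs_vmptr field rather than the old NULL check on hv_evmcs.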