* failValid writes the error number to the current VMCS, which
         * can't be done if there isn't a current VMCS.
         */
-       if (vmx->nested.current_vmptr == -1ull &&
+       if (vmx->nested.current_vmptr == INVALID_GPA &&
            !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
                return nested_vmx_failInvalid(vcpu);
 
 static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
 {
        secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
-       vmcs_write64(VMCS_LINK_POINTER, -1ull);
+       vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA);
        vmx->nested.need_vmcs12_to_shadow_sync = false;
 }
 
        vmx->nested.smm.vmxon = false;
        free_vpid(vmx->nested.vpid02);
        vmx->nested.posted_intr_nv = -1;
-       vmx->nested.current_vmptr = -1ull;
+       vmx->nested.current_vmptr = INVALID_GPA;
        if (enable_shadow_vmcs) {
                vmx_disable_shadow_vmcs(vmx);
                vmcs_clear(vmx->vmcs01.shadow_vmcs);
        struct vmcs12 *shadow;
 
        if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
-           vmcs12->vmcs_link_pointer == -1ull)
+           vmcs12->vmcs_link_pointer == INVALID_GPA)
                return;
 
        shadow = get_shadow_vmcs12(vcpu);
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
        if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
-           vmcs12->vmcs_link_pointer == -1ull)
+           vmcs12->vmcs_link_pointer == INVALID_GPA)
                return;
 
        kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
        }
 
        if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
-               vmx->nested.current_vmptr = -1ull;
+               vmx->nested.current_vmptr = INVALID_GPA;
 
                nested_release_evmcs(vcpu);
 
        }
 
        if (cpu_has_vmx_encls_vmexit())
        vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
 
        /*
         * Set the MSR load/store lists to match L0's settings.  Only the
 {
        prepare_vmcs02_constant_state(vmx);
 
-       vmcs_write64(VMCS_LINK_POINTER, -1ull);
+       vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA);
 
        if (enable_vpid) {
                if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
        struct vmcs12 *shadow;
        struct kvm_host_map map;
 
-       if (vmcs12->vmcs_link_pointer == -1ull)
+       if (vmcs12->vmcs_link_pointer == INVALID_GPA)
                return 0;
 
        if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)))
                         * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to
                         * force VM-Entry to fail.
                         */
-                       vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
+                       vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, INVALID_GPA);
                }
        }
 
        }
 
        if (CC(!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) &&
-              vmx->nested.current_vmptr == -1ull))
+              vmx->nested.current_vmptr == INVALID_GPA))
                return nested_vmx_failInvalid(vcpu);
 
        vmcs12 = get_vmcs12(vcpu);
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-       if (vmx->nested.current_vmptr == -1ull)
+       if (vmx->nested.current_vmptr == INVALID_GPA)
                return;
 
        copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
 
        kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
 
-       vmx->nested.current_vmptr = -1ull;
+       vmx->nested.current_vmptr = INVALID_GPA;
 }
 
 /* Emulate the VMXOFF instruction */
                return 1;
 
        /*
-        * In VMX non-root operation, when the VMCS-link pointer is -1ull,
+        * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA,
         * any VMREAD sets the ALU flags for VMfailInvalid.
         */
-       if (vmx->nested.current_vmptr == -1ull ||
+       if (vmx->nested.current_vmptr == INVALID_GPA ||
            (is_guest_mode(vcpu) &&
-            get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
+            get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA))
                return nested_vmx_failInvalid(vcpu);
 
        /* Decode instruction info and find the field to read */
                return 1;
 
        /*
-        * In VMX non-root operation, when the VMCS-link pointer is -1ull,
+        * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA,
         * any VMWRITE sets the ALU flags for VMfailInvalid.
         */
-       if (vmx->nested.current_vmptr == -1ull ||
+       if (vmx->nested.current_vmptr == INVALID_GPA ||
            (is_guest_mode(vcpu) &&
-            get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
+            get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA))
                return nested_vmx_failInvalid(vcpu);
 
        if (instr_info & BIT(10))
        gpa_t bitmap, last_bitmap;
        u8 b;
 
-       last_bitmap = (gpa_t)-1;
+       last_bitmap = INVALID_GPA;
        b = -1;
 
        while (size > 0) {
                .format = KVM_STATE_NESTED_FORMAT_VMX,
                .size = sizeof(kvm_state),
                .hdr.vmx.flags = 0,
-               .hdr.vmx.vmxon_pa = -1ull,
-               .hdr.vmx.vmcs12_pa = -1ull,
+               .hdr.vmx.vmxon_pa = INVALID_GPA,
+               .hdr.vmx.vmcs12_pa = INVALID_GPA,
                .hdr.vmx.preemption_timer_deadline = 0,
        };
        struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
 
                        if (is_guest_mode(vcpu) &&
                            nested_cpu_has_shadow_vmcs(vmcs12) &&
-                           vmcs12->vmcs_link_pointer != -1ull)
+                           vmcs12->vmcs_link_pointer != INVALID_GPA)
                                kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12);
                }
 
                return -EFAULT;
 
        if (nested_cpu_has_shadow_vmcs(vmcs12) &&
-           vmcs12->vmcs_link_pointer != -1ull) {
+           vmcs12->vmcs_link_pointer != INVALID_GPA) {
                if (copy_to_user(user_vmx_nested_state->shadow_vmcs12,
                                 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
                        return -EFAULT;
        if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX)
                return -EINVAL;
 
-       if (kvm_state->hdr.vmx.vmxon_pa == -1ull) {
+       if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA) {
                if (kvm_state->hdr.vmx.smm.flags)
                        return -EINVAL;
 
-               if (kvm_state->hdr.vmx.vmcs12_pa != -1ull)
+               if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA)
                        return -EINVAL;
 
                /*
 
        vmx_leave_nested(vcpu);
 
-       if (kvm_state->hdr.vmx.vmxon_pa == -1ull)
+       if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA)
                return 0;
 
        vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
                /* See vmx_has_valid_vmcs12.  */
                if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) ||
                    (kvm_state->flags & KVM_STATE_NESTED_EVMCS) ||
-                   (kvm_state->hdr.vmx.vmcs12_pa != -1ull))
+                   (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA))
                        return -EINVAL;
                else
                        return 0;
        }
 
-       if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) {
+       if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA) {
                if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
                    !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa))
                        return -EINVAL;
 
        ret = -EINVAL;
        if (nested_cpu_has_shadow_vmcs(vmcs12) &&
-           vmcs12->vmcs_link_pointer != -1ull) {
+           vmcs12->vmcs_link_pointer != INVALID_GPA) {
                struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
 
                if (kvm_state->size <