        }
        return 0;
 fail:
+       /* Note, max_msr_list_size is at most 4096, i.e. this can't wrap. */
        return i + 1;
 }
 
  * @entry_failure_code.
  */
 static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
-                              u32 *entry_failure_code)
+                              enum vm_entry_failure_code *entry_failure_code)
 {
        if (CC(!nested_cr3_valid(vcpu, cr3))) {
                *entry_failure_code = ENTRY_FAIL_DEFAULT;
  * is assigned to entry_failure_code on failure.
  */
 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
-                         u32 *entry_failure_code)
+                         enum vm_entry_failure_code *entry_failure_code)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
 
 static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
                                        struct vmcs12 *vmcs12,
-                                       u32 *exit_qual)
+                                       enum vm_entry_failure_code *entry_failure_code)
 {
        bool ia32e;
 
-       *exit_qual = ENTRY_FAIL_DEFAULT;
+       *entry_failure_code = ENTRY_FAIL_DEFAULT;
 
        if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) ||
            CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)))
                return -EINVAL;
 
        if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
-               *exit_qual = ENTRY_FAIL_VMCS_LINK_PTR;
+               *entry_failure_code = ENTRY_FAIL_VMCS_LINK_PTR;
                return -EINVAL;
        }
 
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+       enum vm_entry_failure_code entry_failure_code;
        bool evaluate_pending_interrupts;
-       u32 exit_reason = EXIT_REASON_INVALID_STATE;
-       u32 exit_qual;
+       u32 exit_reason, failed_index;
 
        if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
                kvm_vcpu_flush_tlb_current(vcpu);
                        return NVMX_VMENTRY_VMFAIL;
                }
 
-               if (nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
+               if (nested_vmx_check_guest_state(vcpu, vmcs12,
+                                                &entry_failure_code)) {
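+                       /* Report the failure code to L1 via the exit qualification. */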
+                       exit_reason = EXIT_REASON_INVALID_STATE;
+                       vmcs12->exit_qualification = entry_failure_code;
                        goto vmentry_fail_vmexit;
+               }
        }
 
        enter_guest_mode(vcpu);
        if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
                vcpu->arch.tsc_offset += vmcs12->tsc_offset;
 
-       if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
+       if (prepare_vmcs02(vcpu, vmcs12, &entry_failure_code)) {
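+               /* As above, the failure code becomes vmcs12's exit qualification. */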
+               exit_reason = EXIT_REASON_INVALID_STATE;
+               vmcs12->exit_qualification = entry_failure_code;
                goto vmentry_fail_vmexit_guest_mode;
+       }
 
        if (from_vmentry) {
-               exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
-               exit_qual = nested_vmx_load_msr(vcpu,
-                                               vmcs12->vm_entry_msr_load_addr,
-                                               vmcs12->vm_entry_msr_load_count);
-               if (exit_qual)
+               failed_index = nested_vmx_load_msr(vcpu,
+                                                  vmcs12->vm_entry_msr_load_addr,
+                                                  vmcs12->vm_entry_msr_load_count);
+               if (failed_index) {
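+                       /* The 1-based index of the bad MSR entry is the exit qualification. */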
+                       exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
+                       vmcs12->exit_qualification = failed_index;
                        goto vmentry_fail_vmexit_guest_mode;
+               }
        } else {
                /*
                 * The MMU is not initialized to point at the right entities yet and
 
        load_vmcs12_host_state(vcpu, vmcs12);
        vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
-       vmcs12->exit_qualification = exit_qual;
        if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
                vmx->nested.need_vmcs12_to_shadow_sync = true;
        return NVMX_VMENTRY_VMEXIT;
 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
                                   struct vmcs12 *vmcs12)
 {
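+       /* The failure code is unused; a host PDPTE load failure triggers a VMX abort. */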
+       enum vm_entry_failure_code ignored;
        struct kvm_segment seg;
-       u32 entry_failure_code;
 
        if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
                vcpu->arch.efer = vmcs12->host_ia32_efer;
         * Only PDPTE load can fail as the value of cr3 was checked on entry and
         * couldn't have changed.
         */
-       if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
+       if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &ignored))
                nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
 
        if (!enable_ept)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct vmcs12 *vmcs12;
-       u32 exit_qual;
+       enum vm_entry_failure_code ignored;
        struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
                &user_kvm_nested_state->data.vmx[0];
        int ret;
 
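+       /* The exact failure code is irrelevant when revalidating restored nested state. */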
        if (nested_vmx_check_controls(vcpu, vmcs12) ||
            nested_vmx_check_host_state(vcpu, vmcs12) ||
-           nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
+           nested_vmx_check_guest_state(vcpu, vmcs12, &ignored))
                goto error_guest_mode;
 
        vmx->nested.dirty_vmcs12 = true;