        u32 exit_reason = vmx->exit_reason;
        u32 vectoring_info = vmx->idt_vectoring_info;
 
-       trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
-
        /*
         * Flush logged GPAs PML buffer, this will make dirty_bitmap more
         * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before
        }
 }
 
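+/*
+ * Fastpath dispatcher: complete exits that can be fully handled right
+ * here, with interrupts still disabled, without going through the full
+ * vmx_handle_exit() path.  Anything not recognized falls back to
+ * EXIT_FASTPATH_NONE and is handled on the slow path as before.
+ */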
+static enum exit_fastpath_completion vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
+{
+       switch (to_vmx(vcpu)->exit_reason) {
+       case EXIT_REASON_MSR_WRITE:
+               return handle_fastpath_set_msr_irqoff(vcpu);
+       default:
+               return EXIT_FASTPATH_NONE;
+       }
+}
+
 bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
 
 static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
        if (unlikely((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY))
                kvm_machine_check();
 
+       trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
+
        if (unlikely(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
                return EXIT_FASTPATH_NONE;
 
-       if (!is_guest_mode(vcpu) && vmx->exit_reason == EXIT_REASON_MSR_WRITE)
-               exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu);
-       else
-               exit_fastpath = EXIT_FASTPATH_NONE;
-
        vmx->loaded_vmcs->launched = 1;
        vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
 
        vmx_recover_nmi_blocking(vmx);
        vmx_complete_interrupts(vmx);
 
+       if (is_guest_mode(vcpu))
+               return EXIT_FASTPATH_NONE;
+
+       exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
        return exit_fastpath;
 }
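
To make the shape of the new dispatch easier to see outside the kernel tree, here is a small standalone C sketch of the same pattern: a switch on the exit reason that either completes the exit in a per-reason fastpath handler or returns a "none" value so the caller falls back to the full exit handler. All names here (fastpath_result, handle_msr_write_fastpath, demo_vcpu, and so on) are illustrative stand-ins rather than KVM APIs; only the structure mirrors vmx_exit_handlers_fastpath() and the tail of vmx_vcpu_run() above.

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-ins; the real enum exit_fastpath_completion and the
 * EXIT_REASON_* constants live in the KVM/VMX headers. */
enum fastpath_result {
	FASTPATH_RESULT_NONE,		/* fall back to the full exit handler */
	FASTPATH_RESULT_HANDLED,	/* exit fully handled on the fast path */
};

enum exit_reason {
	REASON_MSR_WRITE,
	REASON_EPT_VIOLATION,
};

struct demo_vcpu {
	enum exit_reason exit_reason;
	bool guest_mode;		/* nested guest: never take the fastpath */
};

/* Hypothetical per-reason handler, standing in for
 * handle_fastpath_set_msr_irqoff(). */
static enum fastpath_result handle_msr_write_fastpath(struct demo_vcpu *vcpu)
{
	(void)vcpu;
	return FASTPATH_RESULT_HANDLED;
}

/* Same shape as vmx_exit_handlers_fastpath(): one switch per exit reason,
 * defaulting to "not handled". */
static enum fastpath_result exit_handlers_fastpath(struct demo_vcpu *vcpu)
{
	switch (vcpu->exit_reason) {
	case REASON_MSR_WRITE:
		return handle_msr_write_fastpath(vcpu);
	default:
		return FASTPATH_RESULT_NONE;
	}
}

int main(void)
{
	struct demo_vcpu vcpu = {
		.exit_reason = REASON_MSR_WRITE,
		.guest_mode = false,
	};
	enum fastpath_result fp;

	/* Mirrors the tail of vmx_vcpu_run(): nested guests skip the
	 * fastpath entirely, everyone else asks the dispatcher. */
	if (vcpu.guest_mode)
		fp = FASTPATH_RESULT_NONE;
	else
		fp = exit_handlers_fastpath(&vcpu);

	printf("fastpath %s\n",
	       fp == FASTPATH_RESULT_NONE ? "not taken" : "handled the exit");
	return 0;
}

One advantage of centralizing the dispatch in a single switch is that further exit reasons can later be handled on the fast path by adding a case label, without reshaping the tail of vmx_vcpu_run() again.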