static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
-       fastpath_t exit_fastpath;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long cr3, cr4;
 
-reenter_guest:
        /* Record the guest's net vcpu time for enforced NMI injections. */
        if (unlikely(!enable_vnmi &&
                     vmx->loaded_vmcs->soft_vnmi_blocked))
                vmx->loaded_vmcs->entry_time = ktime_get();

        if (is_guest_mode(vcpu))
                return EXIT_FASTPATH_NONE;
 
-       exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
-       if (exit_fastpath == EXIT_FASTPATH_REENTER_GUEST) {
-               if (!kvm_vcpu_exit_request(vcpu)) {
-                       /*
-                        * FIXME: this goto should be a loop in vcpu_enter_guest,
-                        * but it would incur the cost of a retpoline for now.
-                        * Revisit once static calls are available.
-                        */
-                       if (vcpu->arch.apicv_active)
-                               vmx_sync_pir_to_irr(vcpu);
-                       goto reenter_guest;
-               }
-               exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
-       }
-
-       return exit_fastpath;
+       return vmx_exit_handlers_fastpath(vcpu);
 }
 
 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
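
That is the whole arch/x86/kvm/vmx/vmx.c side of the change: with the label and the local variable gone, vmx_vcpu_run() simply returns whatever completion code the fastpath dispatcher chose. For orientation, the dispatcher looked roughly like this at the time (a sketch reconstructed from the surrounding kernel sources, not part of this diff):

static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
	switch (to_vmx(vcpu)->exit_reason.basic) {
	case EXIT_REASON_MSR_WRITE:
		return handle_fastpath_set_msr_irqoff(vcpu);
	case EXIT_REASON_PREEMPTION_TIMER:
		return handle_fastpath_preemption_timer(vcpu);
	default:
		return EXIT_FASTPATH_NONE;
	}
}

The remaining hunks are in arch/x86/kvm/x86.c, picking up at the tail of kvm_emulate_wrmsr():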
 
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
 
-bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
+static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
 {
        return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) ||
                xfer_to_guest_mode_work_pending();
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_exit_request);
 
 /*
  * The fast path for frequent and performance sensitive wrmsr emulation,
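
That comment heads handle_fastpath_set_msr_irqoff(), the wrmsr-side producer of EXIT_FASTPATH_REENTER_GUEST (the VMX preemption-timer fastpath is the other). A condensed sketch of the upstream handler, with tracing elided:

fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
{
	u32 msr = kvm_rcx_read(vcpu);
	u64 data;
	fastpath_t ret = EXIT_FASTPATH_NONE;

	switch (msr) {
	case APIC_BASE_MSR + (APIC_ICR >> 4):	/* x2APIC IPI */
		data = kvm_read_edx_eax(vcpu);
		if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data)) {
			kvm_skip_emulated_instruction(vcpu);
			ret = EXIT_FASTPATH_EXIT_HANDLED;
		}
		break;
	case MSR_IA32_TSC_DEADLINE:	/* can go straight back into the guest */
		data = kvm_read_edx_eax(vcpu);
		if (!handle_fastpath_set_tscdeadline(vcpu, data)) {
			kvm_skip_emulated_instruction(vcpu);
			ret = EXIT_FASTPATH_REENTER_GUEST;
		}
		break;
	default:
		break;
	}

	return ret;
}

The heart of the patch is the new run loop in vcpu_enter_guest():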
                vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
        }
 
-       exit_fastpath = static_call(kvm_x86_run)(vcpu);
+       for (;;) {
+               exit_fastpath = static_call(kvm_x86_run)(vcpu);
+               if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
+                       break;
+
+               if (unlikely(kvm_vcpu_exit_request(vcpu))) {
+                       exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
+                       break;
+               }
+
+               if (vcpu->arch.apicv_active)
+                       static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+       }
 
        /*
         * Do this here before restoring debug registers on the host.  And
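
Two ordering details in the loop matter. The exit-request check must come before re-entry so that a pending request, signal, or mode change wins over the fastpath; in that case the REENTER result is downgraded to EXIT_FASTPATH_EXIT_HANDLED and control falls out to the normal exit path. And with APICv active, posted interrupts that accumulated in the PIR while the vCPU ran must be folded into the vAPIC IRR before VM-entry, or they would be delivered late; that sync now goes through the static call instead of the direct vmx_sync_pir_to_irr() call that made the old goto-based version a FIXME. The request half of kvm_vcpu_exit_request() is trivial; for reference (include/linux/kvm_host.h):

static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->requests);
}

Finally, since kvm_vcpu_exit_request() is no longer used outside x86.c, its declaration is dropped from arch/x86/kvm/x86.h: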
 
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
 int kvm_spec_ctrl_test_value(u64 value);
 bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
-bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
 int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
                              struct x86_exception *e);
 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
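
For completeness, the completion codes threaded through all of the above, as defined in arch/x86/include/asm/kvm_host.h (unchanged by this patch):

enum kvm_exit_fastpath_completion {
	EXIT_FASTPATH_NONE,
	EXIT_FASTPATH_REENTER_GUEST,
	EXIT_FASTPATH_EXIT_HANDLED,
};
typedef enum kvm_exit_fastpath_completion fastpath_t;

EXIT_FASTPATH_NONE falls through to the full exit handlers, EXIT_FASTPATH_REENTER_GUEST short-circuits back into the guest via the loop above, and EXIT_FASTPATH_EXIT_HANDLED skips the heavyweight handlers but still unwinds out of vcpu_enter_guest().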