if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE) {
                trace_kvm_wfx(*vcpu_pc(vcpu), true);
                vcpu->stat.wfe_exit_stat++;
-               kvm_vcpu_on_spin(vcpu);
+               kvm_vcpu_on_spin(vcpu, false);
        } else {
                trace_kvm_wfx(*vcpu_pc(vcpu), false);
                vcpu->stat.wfi_exit_stat++;
 
        if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
                trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
                vcpu->stat.wfe_exit_stat++;
-               kvm_vcpu_on_spin(vcpu);
+               kvm_vcpu_on_spin(vcpu, false);
        } else {
                trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
                vcpu->stat.wfi_exit_stat++;
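
Both the 32-bit and 64-bit ARM WFE handlers above pass false, so a
spinning guest still yields to any preempted vCPU, exactly as before.
Once an architecture can report guest privilege through the new hook, a
handler could opt in; a minimal sketch, assuming the existing
vcpu_mode_priv() helper and not part of this patch:

/*
 * Hypothetical follow-up: prefer in-kernel yield targets on WFE exits,
 * on the theory that a spinning guest usually waits on a kernel lock.
 */
static int handle_wfe_sketch(struct kvm_vcpu *vcpu)
{
        /* vcpu_mode_priv() is true when the guest runs at EL1/SVC */
        kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
        return 1;
}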
 
        return !!(vcpu->arch.pending_exceptions);
 }
 
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
        return 1;
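
Each architecture gains the same kvm_arch_vcpu_in_kernel() stub (MIPS
here; PowerPC, s390, x86 and ARM follow the identical pattern below),
and every caller passes false, so the patch is deliberately
behavior-neutral: the new filter cannot reject any candidate until an
architecture implements the hook and a call site asks for it. As an
illustration only, an x86 implementation could plausibly report guest
privilege via the existing get_cpl callback:

/* Illustrative sketch, not part of this patch: "in kernel" == CPL 0.
 * A real implementation might instead sample the privilege level at
 * preemption time to avoid calling into the vendor module here. */
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
        return kvm_x86_ops->get_cpl(vcpu) == 0;
}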
 
        return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
 }
 
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
        return 1;
 
 {
        VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
        vcpu->stat.diagnose_44++;
-       kvm_vcpu_on_spin(vcpu);
+       kvm_vcpu_on_spin(vcpu, false);
        return 0;
 }
 
 
        return kvm_s390_vcpu_has_irq(vcpu, 0);
 }
 
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
+
 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
 {
        atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 
 
        switch (code) {
        case HVCALL_NOTIFY_LONG_SPIN_WAIT:
-               kvm_vcpu_on_spin(vcpu);
+               kvm_vcpu_on_spin(vcpu, false);
                break;
        case HVCALL_POST_MESSAGE:
        case HVCALL_SIGNAL_EVENT:
 
 
 static int pause_interception(struct vcpu_svm *svm)
 {
-       kvm_vcpu_on_spin(&(svm->vcpu));
+       kvm_vcpu_on_spin(&svm->vcpu, false);
        return 1;
 }
 
 
        if (ple_gap)
                grow_ple_window(vcpu);
 
-       kvm_vcpu_on_spin(vcpu);
+       kvm_vcpu_on_spin(vcpu, false);
        return kvm_skip_emulated_instruction(vcpu);
 }
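
The x86 pause-loop exits (SVM's pause_interception and VMX's
handle_pause above) are the same story as the WFE and Hyper-V spin-wait
paths: the guest has told us it is busy-waiting, and false keeps the
candidate set unchanged. Since a bare boolean literal reads poorly at a
call site, an inline argument comment is a cheap clarifier (style
suggestion, not patch content):

        kvm_vcpu_on_spin(vcpu, false /* yield_to_kernel_mode */);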
 
 
        return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
 }
 
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
        return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
 
 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 int kvm_vcpu_yield_to(struct kvm_vcpu *target);
-void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
+void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
 
 void kvm_arch_hardware_unsetup(void);
 void kvm_arch_check_processor_compat(void *rtn);
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
 
 #ifndef __KVM_HAVE_ARCH_VM_ALLOC
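
The header only declares the hook; its contract is implied by the stubs
and by the new check in kvm_vcpu_on_spin(). A kernel-doc comment
capturing that contract might read (my wording, not the patch's):

/**
 * kvm_arch_vcpu_in_kernel() - report guest privilege for directed yield
 * @vcpu: candidate vCPU being considered as a yield target
 *
 * Return: true if @vcpu was last executing guest kernel-mode code.
 * Architectures that cannot (or do not yet) track this return false,
 * which makes every vCPU ineligible whenever a spinner requests
 * yield_to_kernel_mode.
 */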
 
                && !v->arch.power_off && !v->arch.pause);
 }
 
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
+
 /* Just ensure a guest exit from a particular CPU */
 static void exit_vm_noop(void *info)
 {
 
 #endif
 }
 
-void kvm_vcpu_on_spin(struct kvm_vcpu *me)
+void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
 {
        struct kvm *kvm = me->kvm;
        struct kvm_vcpu *vcpu;

                                continue;
                        if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
                                continue;
+                       if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
+                               continue;
                        if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
                                continue;
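
Read as a predicate, the candidate loop now rejects a vCPU for any of
four reasons. A distilled paraphrase of the checks (mine, not patch
code; the vcpu == me test sits just above the visible context):

static bool spin_yield_candidate(struct kvm_vcpu *me, struct kvm_vcpu *vcpu,
                                 bool yield_to_kernel_mode)
{
        if (vcpu == me)
                return false;   /* never yield to ourselves */
        if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
                return false;   /* halted with nothing pending */
        if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
                return false;   /* new: skip user-mode vCPUs on request */
        return kvm_vcpu_eligible_for_directed_yield(vcpu);
}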