Add helpers to wake and query a blocking vCPU.  In addition to providing
nice names, the helpers reduce the probability of KVM neglecting to use
kvm_arch_vcpu_get_wait().
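
For illustration only, a minimal sketch of how the helpers are meant to be
used at call sites (simplified from the hunks below, not part of the
functional change):

	if (kvm_vcpu_is_blocking(vcpu))		/* was: rcuwait_active(wait) */
		kvm_timer_blocking(vcpu);

	if (__kvm_vcpu_wake_up(vcpu)) {		/* was: rcuwait_wake_up(waitp) */
		WRITE_ONCE(vcpu->ready, true);
		++vcpu->stat.generic.halt_wakeup;
	}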
No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20211009021236.4122790-20-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
 {
        struct arch_timer_cpu *timer = vcpu_timer(vcpu);
        struct timer_map map;
-       struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
 
        if (unlikely(!timer->enabled))
                return;
        if (map.emul_ptimer)
                soft_timer_cancel(&map.emul_ptimer->hrtimer);
 
-       if (rcuwait_active(wait))
+       if (kvm_vcpu_is_blocking(vcpu))
                kvm_timer_blocking(vcpu);
 
        /*
 
 
        kvm_for_each_vcpu(i, vcpu, kvm) {
                vcpu->arch.pause = false;
-               rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
+               __kvm_vcpu_wake_up(vcpu);
        }
 }
 
 
        /* If the preempt notifier has already run, it also called apic_timer_expired */
        if (!apic->lapic_timer.hv_timer_in_use)
                goto out;
-       WARN_ON(rcuwait_active(&vcpu->wait));
+       WARN_ON(kvm_vcpu_is_blocking(vcpu));
        apic_timer_expired(apic, false);
        cancel_hv_timer(apic);
 
 
 #endif
 }
 
+/*
+ * Wake a vCPU if necessary, but don't do any stats/metadata updates.  Returns
+ * true if the vCPU was blocking and was awakened, false otherwise.
+ */
+static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
+{
+       return !!rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
+}
+
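+/* Returns true if the vCPU is blocking, i.e. its rcuwait has an active waiter. */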
+static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu)
+{
+       return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu));
+}
+
 #ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
 /*
  * returns true if the virtual interrupt controller is initialized and
 
 
        trace_kvm_async_pf_completed(addr, cr2_or_gpa);
 
-       rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
+       __kvm_vcpu_wake_up(vcpu);
 
        mmput(mm);
        kvm_put_kvm(vcpu->kvm);
 
 
 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
 {
-       struct rcuwait *waitp;
-
-       waitp = kvm_arch_vcpu_get_wait(vcpu);
-       if (rcuwait_wake_up(waitp)) {
+       if (__kvm_vcpu_wake_up(vcpu)) {
                WRITE_ONCE(vcpu->ready, true);
                ++vcpu->stat.generic.halt_wakeup;
                return true;
                                continue;
                        if (vcpu == me)
                                continue;
-                       if (rcuwait_active(kvm_arch_vcpu_get_wait(vcpu)) &&
-                           !vcpu_dy_runnable(vcpu))
+                       if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu))
                                continue;
                        if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
                            !kvm_arch_dy_has_pending_interrupt(vcpu) &&