 {
 }
 
-static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
+static inline bool kvm_kick_many_cpus(cpumask_var_t tmp, bool wait)
 {
-       if (unlikely(!cpus))
+       const struct cpumask *cpus;
+
+       if (likely(cpumask_available(tmp)))
+               cpus = tmp;
+       else
                cpus = cpu_online_mask;
 
        if (cpumask_empty(cpus))
@@ ... @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
                if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
                        continue;
 
+               /*
+                * tmp can be "unavailable" if cpumasks are allocated off stack
+                * as allocation of the mask is deliberately not fatal and is
+                * handled by falling back to kicking all online CPUs.
+                */
+               if (!cpumask_available(tmp))
+                       continue;
+
                /*
                 * Note, the vCPU could get migrated to a different pCPU at any
                 * point after kvm_request_needs_ipi(), which could result in
@@ ... @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
                 * were reading SPTEs _before_ any changes were finalized.  See
                 * kvm_vcpu_kick() for more details on handling requests.
                 */
-               if (tmp != NULL && kvm_request_needs_ipi(vcpu, req)) {
+               if (kvm_request_needs_ipi(vcpu, req)) {
                        cpu = READ_ONCE(vcpu->cpu);
                        if (cpu != -1 && cpu != me)
                                __cpumask_set_cpu(cpu, tmp);
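
For reference, the function below is roughly what kvm_kick_many_cpus() looks like once the first hunk above is applied. The no-op ack_flush() IPI callback (whose empty body is the braces at the top of this excerpt) and the tail of the function are reconstructed from the surrounding kvm_main.c source rather than taken from this diff, so read it as a sketch, not as the authoritative post-patch code:

static void ack_flush(void *_completed)
{
}

static inline bool kvm_kick_many_cpus(cpumask_var_t tmp, bool wait)
{
        const struct cpumask *cpus;

        /*
         * Fall back to kicking all online CPUs if the off-stack cpumask
         * could not be allocated; allocation failure is deliberately
         * non-fatal (see the comment added in the second hunk).
         */
        if (likely(cpumask_available(tmp)))
                cpus = tmp;
        else
                cpus = cpu_online_mask;

        if (cpumask_empty(cpus))
                return false;

        /*
         * IPI every CPU in the mask; ack_flush() is a no-op, the point is
         * the kick itself (and, when @wait is true, waiting for the IPIs
         * to be handled before returning).
         */
        smp_call_function_many(cpus, ack_flush, NULL, wait);
        return true;
}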