{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
-       struct kvm_task_sleep_node *n;
+       struct kvm_task_sleep_node *n, *dummy = NULL;
 
        if (token == ~0) {
                apf_task_wake_all();
                return;
        }

 again:
        raw_spin_lock(&b->lock);
        n = _find_apf_task(b, token);
        if (!n) {
                /*
-                * async PF was not yet handled.
-                * Add dummy entry for the token.
+                * Async #PF not yet handled, add a dummy entry for the token.
+                * Allocating the token must be done outside of the raw lock
+                * as the allocator is preemptible on PREEMPT_RT kernels.
                 */
-               n = kzalloc(sizeof(*n), GFP_ATOMIC);
-               if (!n) {
+               if (!dummy) {
+                       raw_spin_unlock(&b->lock);
+                       dummy = kzalloc(sizeof(*dummy), GFP_ATOMIC);
+
                        /*
-                        * Allocation failed! Busy wait while other cpu
-                        * handles async PF.
+                        * Continue looping on allocation failure, eventually
+                        * the async #PF will be handled and allocating a new
+                        * node will be unnecessary.
+                        */
+                       if (!dummy)
+                               cpu_relax();
+
+                       /*
+                        * Recheck for async #PF completion before enqueueing
+                        * the dummy token to avoid duplicate list entries.
                         */
-                       raw_spin_unlock(&b->lock);
-                       cpu_relax();
                        goto again;
                }
-               n->token = token;
-               n->cpu = smp_processor_id();
-               init_swait_queue_head(&n->wq);
-               hlist_add_head(&n->link, &b->list);
+               dummy->token = token;
+               dummy->cpu = smp_processor_id();
+               init_swait_queue_head(&dummy->wq);
+               hlist_add_head(&dummy->link, &b->list);
+               dummy = NULL;
        } else {
                apf_task_wake_one(n);
        }
        raw_spin_unlock(&b->lock);
-       return;
+
+       /* A dummy token might be allocated and ultimately not used.  */
+       if (dummy)
+               kfree(dummy);
 }
 EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
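
For readers who want the locking pattern in isolation: below is a minimal
userspace sketch of the same drop-the-lock/allocate/recheck dance, written
against pthreads. The names here (wait_list, find_node, insert_dummy) are
hypothetical illustrations, not kernel APIs, and the retry loop is the
simplest rendition of the patch's "loop until the allocation is either used
or unnecessary" logic.

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct node {
        uint32_t token;
        struct node *next;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *wait_list;

/* Caller must hold table_lock. */
static struct node *find_node(uint32_t token)
{
        struct node *n;

        for (n = wait_list; n; n = n->next)
                if (n->token == token)
                        return n;
        return NULL;
}

void insert_dummy(uint32_t token)
{
        struct node *dummy = NULL;

again:
        pthread_mutex_lock(&table_lock);
        if (!find_node(token)) {
                if (!dummy) {
                        /*
                         * Allocate with the lock dropped, then loop back and
                         * recheck: another thread may have inserted the token
                         * while the lock was released.
                         */
                        pthread_mutex_unlock(&table_lock);
                        dummy = calloc(1, sizeof(*dummy));
                        goto again;     /* also retries a failed allocation */
                }
                dummy->token = token;
                dummy->next = wait_list;
                wait_list = dummy;
                dummy = NULL;           /* ownership passed to the list */
        }
        pthread_mutex_unlock(&table_lock);

        /* Allocated, but another thread beat us to the insertion. */
        free(dummy);
}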