 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
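+/*
+ * Racing swapins that find the entry transiently pinned by swapcache_prepare()
+ * park here; the task holding the pin wakes them right after swapcache_clear().
+ */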
+static DECLARE_WAIT_QUEUE_HEAD(swapcache_wq);
+
 /*
  * We enter with non-exclusive mmap_lock (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
 {
        struct vm_area_struct *vma = vmf->vma;
        struct folio *swapcache, *folio = NULL;
+       DECLARE_WAITQUEUE(wait, current);
        struct page *page;
        struct swap_info_struct *si = NULL;
        rmap_t rmap_flags = RMAP_NONE;
                                         * Relax a bit to prevent rapid
                                         * repeated page faults.
                                         */
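+                                       /* Woken early once the pin is cleared */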
+                                       add_wait_queue(&swapcache_wq, &wait);
                                        schedule_timeout_uninterruptible(1);
+                                       remove_wait_queue(&swapcache_wq, &wait);
                                        goto out_page;
                                }
                                need_clear_cache = true;
                pte_unmap_unlock(vmf->pte, vmf->ptl);
 out:
        /* Clear the swap cache pin for direct swapin after PTL unlock */
-       if (need_clear_cache)
+       if (need_clear_cache) {
                swapcache_clear(si, entry, nr_pages);
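+               /* Wake any fault parked in the swapcache_prepare() retry path */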
+               if (waitqueue_active(&swapcache_wq))
+                       wake_up(&swapcache_wq);
+       }
        if (si)
                put_swap_device(si);
        return ret;
                folio_unlock(swapcache);
                folio_put(swapcache);
        }
-       if (need_clear_cache)
+       if (need_clear_cache) {
                swapcache_clear(si, entry, nr_pages);
+               if (waitqueue_active(&swapcache_wq))
+                       wake_up(&swapcache_wq);
+       }
        if (si)
                put_swap_device(si);
        return ret;
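
For reference, a minimal standalone sketch of the same park/wake handshake,
with made-up names (pin_wq, wait_for_pin_holder, release_pin) that are not
part of the patch. The waiter bounds its sleep to one tick, so a missed
wakeup only degrades to the previous behaviour instead of stalling:

  /* Illustrative only; not part of the patch. */
  #include <linux/wait.h>
  #include <linux/sched.h>
  #include <linux/delay.h>

  static DECLARE_WAIT_QUEUE_HEAD(pin_wq);  /* plays the role of swapcache_wq */

  /* Loser of the race: mirrors the swapcache_prepare() failure path. */
  static void wait_for_pin_holder(void)
  {
          DECLARE_WAITQUEUE(wait, current);

          add_wait_queue(&pin_wq, &wait);
          /* Bounded sleep: a missed wakeup costs at most one tick. */
          schedule_timeout_uninterruptible(1);
          remove_wait_queue(&pin_wq, &wait);
  }

  /* Winner of the race: mirrors the exit paths that call swapcache_clear(). */
  static void release_pin(void)
  {
          if (waitqueue_active(&pin_wq))   /* lockless check; cheap when idle */
                  wake_up(&pin_wq);
  }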