* Bring missing pages in from swap, to complete THP collapse.
  * Only done if khugepaged_scan_pmd believes it is worthwhile.
  *
- * Called and returns without pte mapped or spinlocks held,
- * but with mmap_lock held to protect against vma changes.
+ * Called and returns without pte mapped or spinlocks held.
+ * Note that if false is returned, mmap_lock will be released.
  */
 
 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
                        pte_unmap(vmf.pte);
                        continue;
                }
-               swapped_in++;
                ret = do_swap_page(&vmf);
 
-               /* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
+               /*
+                * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
+                * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
+                * we do not retry here, so the swap entry will remain in the
+                * page table, resulting in a later collapse failure.
+                */
                if (ret & VM_FAULT_RETRY) {
-                       mmap_read_lock(mm);
-                       if (hugepage_vma_revalidate(mm, haddr, &vma)) {
-                               /* vma is no longer available, don't continue to swapin */
-                               trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
-                               return false;
-                       }
-                       /* check if the pmd is still valid */
-                       if (mm_find_pmd(mm, haddr) != pmd) {
-                               trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
-                               return false;
-                       }
+                       trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
+                       return false;
                }
                if (ret & VM_FAULT_ERROR) {
+                       mmap_read_unlock(mm);
                        trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
                        return false;
                }
+               swapped_in++;
        }
 
        /* Drain LRU add pagevec to remove extra pin on the swapped in pages */
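Taken together, the hunks above make the fault handling in the swapin loop read roughly as follows (a condensed view of the post-patch loop body; the vmf setup, pte handling and the enclosing for loop are elided):

		ret = do_swap_page(&vmf);

		if (ret & VM_FAULT_RETRY) {
			/* do_swap_page already released mmap_lock */
			trace_mm_collapse_huge_page_swapin(mm, swapped_in,
							   referenced, 0);
			return false;
		}
		if (ret & VM_FAULT_ERROR) {
			/* release mmap_lock ourselves so both failure paths match */
			mmap_read_unlock(mm);
			trace_mm_collapse_huge_page_swapin(mm, swapped_in,
							   referenced, 0);
			return false;
		}
		/* only count pages that were actually swapped in */
		swapped_in++;

Either way the function now fails with mmap_lock released, which is the contract the caller change below depends on.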
        }
 
        /*
-        * __collapse_huge_page_swapin always returns with mmap_lock locked.
-        * If it fails, we release mmap_lock and jump out_nolock.
+        * __collapse_huge_page_swapin returns with mmap_lock released
+        * when it fails, so in that case we jump straight to out_nolock.
         * Continuing to collapse causes inconsistency.
         */
        if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
                                                     pmd, referenced)) {
-               mmap_read_unlock(mm);
                goto out_nolock;
        }
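The callee-unlocks-on-failure / caller-unlocks-on-success split is easy to get wrong in later refactors, so here is a minimal, self-contained userspace sketch of the same contract, with a pthread rwlock standing in for mmap_lock (illustration only, not kernel code; collapse_swapin() and fake_swap_page() are hypothetical stand-ins for __collapse_huge_page_swapin() and do_swap_page()):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t mmap_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Hypothetical stand-in for do_swap_page(): pretend odd pages fail. */
static bool fake_swap_page(int page)
{
	return page % 2 == 0;
}

/*
 * Mirrors the post-patch __collapse_huge_page_swapin() contract:
 * entered with mmap_lock held for read; any failure returns false
 * with the lock already released.
 */
static bool collapse_swapin(int npages)
{
	for (int i = 0; i < npages; i++) {
		if (!fake_swap_page(i)) {
			pthread_rwlock_unlock(&mmap_lock);
			return false;
		}
	}
	return true;	/* success: the caller still owns the lock */
}

int main(void)
{
	pthread_rwlock_rdlock(&mmap_lock);
	if (!collapse_swapin(4)) {
		/* the "goto out_nolock" path: must not unlock again here */
		puts("swapin failed, lock was dropped by the callee");
		return 1;
	}
	pthread_rwlock_unlock(&mmap_lock);	/* success path unlocks here */
	puts("swapin succeeded");
	return 0;
}

The point the patch enforces is visible in main(): the failure branch must not unlock again, because the callee already did.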