int migrate_huge_page_move_mapping(struct address_space *mapping,
                struct folio *dst, struct folio *src);
-void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
-                               spinlock_t *ptl);
+void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
+               __releases(ptl);
 void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
 void folio_migrate_copy(struct folio *newfolio, struct folio *folio);
 int folio_migrate_mapping(struct address_space *mapping,
 
        return false;
 }
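
The prototype change above shifts the pte unmap to the callers: the entry is
read and the pte unmapped while the ptl is still held, and only the lock is
handed to migration_entry_wait_on_locked(), which drops it before sleeping.
In caller-shaped form (a sketch only; ptep and ptl stand for whatever mapping
and lock the caller already holds):

pte = *ptep;                            /* read the entry under the ptl */
pte_unmap(ptep);                        /* drop the kmap; ptl stays held */
if (is_swap_pte(pte) && is_migration_entry(pte_to_swp_entry(pte)))
        migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
else
        spin_unlock(ptl);               /* no migration entry: unlock here */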
 
-extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
-                                       spinlock_t *ptl);
 extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                        unsigned long address);
-#ifdef CONFIG_HUGETLB_PAGE
-extern void __migration_entry_wait_huge(struct vm_area_struct *vma,
-                                       pte_t *ptep, spinlock_t *ptl);
 extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte);
-#endif /* CONFIG_HUGETLB_PAGE */
 #else  /* CONFIG_MIGRATION */
 static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
 {
        return swp_entry(0, 0);
 }
 
-static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
-                                       spinlock_t *ptl) { }
 static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
-                                        unsigned long address) { }
-#ifdef CONFIG_HUGETLB_PAGE
-static inline void __migration_entry_wait_huge(struct vm_area_struct *vma,
-                                              pte_t *ptep, spinlock_t *ptl) { }
-static inline void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) { }
-#endif /* CONFIG_HUGETLB_PAGE */
+                                       unsigned long address) { }
+static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
+                                       pte_t *pte) { }
 static inline int is_writable_migration_entry(swp_entry_t entry)
 {
        return 0;
}
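
The __releases(ptl) added to the declaration is a sparse annotation: it
records that the function is entered with ptl held and exits with it
unlocked, so sparse (make C=1) can check callers against that contract.
A minimal generic illustration of the acquire/release pair (not part of
this patch):

static void take_lock(spinlock_t *lock) __acquires(lock)
{
        spin_lock(lock);                /* sparse: lock held on return */
}

static void drop_lock(spinlock_t *lock) __releases(lock)
{
        spin_unlock(lock);              /* sparse: lock dropped on return */
}
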
 /**
  * migration_entry_wait_on_locked - Wait for a migration entry to be removed
  * @entry: migration swap entry.
- * @ptep: mapped pte pointer. Will return with the ptep unmapped. Only required
- *        for pte entries, pass NULL for pmd entries.
  * @ptl: already locked ptl. This function will drop the lock.
  *
 * Wait for a migration entry referencing the given page to be removed. This
  * should be called while holding the ptl for the migration entry referencing
  * the page.
  *
- * Returns after unmapping and unlocking the pte/ptl with pte_unmap_unlock().
+ * Returns after unlocking the ptl.
  *
  * This follows the same logic as folio_wait_bit_common() so see the comments
  * there.
  */
-void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
-                               spinlock_t *ptl)
+void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
+       __releases(ptl)
 {
        struct wait_page_queue wait_page;
        wait_queue_entry_t *wait = &wait_page.wait;
         * a valid reference to the page, and it must take the ptl to remove the
         * migration entry. So the page is valid until the ptl is dropped.
         */
-       if (ptep)
-               pte_unmap_unlock(ptep, ptl);
-       else
-               spin_unlock(ptl);
+       spin_unlock(ptl);
 
        for (;;) {
                unsigned int flags;
 
  * get to the page and wait until migration is finished.
  * When we return from this function the fault will be retried.
  */
-void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
-                               spinlock_t *ptl)
+void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+                         unsigned long address)
 {
+       spinlock_t *ptl;
+       pte_t *ptep;
        pte_t pte;
        swp_entry_t entry;
 
-       spin_lock(ptl);
+       ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        pte = *ptep;
+       pte_unmap(ptep);
+
        if (!is_swap_pte(pte))
                goto out;
 
        entry = pte_to_swp_entry(pte);
        if (!is_migration_entry(entry))
                goto out;
 
-       migration_entry_wait_on_locked(entry, ptep, ptl);
+       migration_entry_wait_on_locked(entry, ptl);
        return;
 out:
-       pte_unmap_unlock(ptep, ptl);
-}
-
-void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
-                               unsigned long address)
-{
-       spinlock_t *ptl = pte_lockptr(mm, pmd);
-       pte_t *ptep = pte_offset_map(pmd, address);
-       __migration_entry_wait(mm, ptep, ptl);
+       spin_unlock(ptl);
 }
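
Folding __migration_entry_wait() into its only caller also replaces the
separate pte_lockptr() + pte_offset_map() steps with pte_offset_map_lock(),
which returns the mapped ptep with its page-table lock already taken.
Roughly (a sketch of the equivalence, using the identifiers from the patch):

/* before: compute the lock, map the pte, then lock */
ptl = pte_lockptr(mm, pmd);
ptep = pte_offset_map(pmd, address);
spin_lock(ptl);

/* after: one helper maps the pte and takes the matching lock */
ptep = pte_offset_map_lock(mm, pmd, address, &ptl);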
 
 #ifdef CONFIG_HUGETLB_PAGE
  *
  * This function will release the vma lock before returning.
  */
-void __migration_entry_wait_huge(struct vm_area_struct *vma,
-                                pte_t *ptep, spinlock_t *ptl)
+void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
 {
+       spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
        pte_t pte;
 
        hugetlb_vma_assert_locked(vma);
                 * lock release in migration_entry_wait_on_locked().
                 */
                hugetlb_vma_unlock_read(vma);
-               migration_entry_wait_on_locked(pte_to_swp_entry(pte), NULL, ptl);
+               migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
        }
 }
-
-void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte)
-{
-       spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, pte);
-
-       __migration_entry_wait_huge(vma, pte, ptl);
-}
 #endif
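
The hugetlb fold leaves a two-lock ordering worth noting: the caller enters
with the vma read lock held, migration_entry_wait_huge() now computes and
takes the ptl itself, and on a migration entry the vma lock is dropped first
while migration_entry_wait_on_locked() drops the ptl. Condensed from the
resulting function (non-migration and error paths omitted):

hugetlb_vma_assert_locked(vma);         /* entered with vma read lock */
spin_lock(ptl);                         /* ptl now taken locally */
pte = huge_ptep_get(ptep);
if (is_hugetlb_entry_migration(pte)) {
        hugetlb_vma_unlock_read(vma);   /* vma lock released first */
        migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
}                                       /* ...which releases the ptl */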
 
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
        ptl = pmd_lock(mm, pmd);
        if (!is_pmd_migration_entry(*pmd))
                goto unlock;
-       migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl);
+       migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
        return;
 unlock:
        spin_unlock(ptl);
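
For context, these waits are normally reached from the fault path: a fault
that finds a migration entry waits for the migration to finish and the fault
is then retried. Condensed from do_swap_page() in mm/memory.c (paraphrased
here, not part of this patch):

entry = pte_to_swp_entry(vmf->orig_pte);
if (is_migration_entry(entry))
        migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);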