struct vm_area_struct *vma,
                             unsigned long addr, unsigned long end,
                             struct zap_details *details);
+int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
+                          gfp_t gfp);
 
 void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
                unsigned int order);
 
 }
 EXPORT_SYMBOL(invalidate_mapping_pages);
 
+/*
+ * Give the filesystem a chance to write back a dirty folio via its
+ * ->launder_folio op so the folio can subsequently be invalidated.
+ * Returns 0 when there is nothing to do (folio clean, folio no longer
+ * attached to @mapping, or no ->launder_folio op), otherwise whatever
+ * status ->launder_folio reports.
+ */
+static int folio_launder(struct address_space *mapping, struct folio *folio)
+{
+       if (!folio_test_dirty(folio))
+               return 0;
+       if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
+               return 0;
+       return mapping->a_ops->launder_folio(folio);
+}
+
 /*
  * This is like mapping_evict_folio(), except it ignores the folio's
  * refcount.  We do this because invalidate_inode_pages2() needs stronger
  * invalidation guarantees, and cannot afford to leave folios behind because
  * shrink_folio_list() has a temp ref on them, or because they're transiently
  * sitting in the folio_add_lru() caches.
  */
-static int invalidate_complete_folio2(struct address_space *mapping,
-                                       struct folio *folio)
+/**
+ * folio_unmap_invalidate - unmap and invalidate a locked folio
+ * @mapping: address_space the folio is expected to belong to
+ * @folio: locked folio to unmap and invalidate
+ * @gfp: allocation mask used while releasing the folio's private data
+ *
+ * Unmap the folio from all user page tables, launder it if it is dirty,
+ * release its private data and try to evict it from the page cache.
+ * The caller must hold the folio lock.
+ *
+ * Return: 0 on success, a negative errno (-EBUSY, or whatever
+ * ->launder_folio returned) on failure.
+ */
+int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
+                          gfp_t gfp)
 {
-       if (folio->mapping != mapping)
-               return 0;
+       int ret;
+
+       VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 
-       if (!filemap_release_folio(folio, GFP_KERNEL))
-               return 0;
+       if (folio_mapped(folio))
+               unmap_mapping_folio(folio);
+       BUG_ON(folio_mapped(folio));
+
+       /*
+        * Do NOT bail out early on a dirty folio here: folio_launder() is
+        * what gives ->launder_folio the chance to write it back so it can
+        * be invalidated, preserving the old folio_launder() +
+        * invalidate_complete_folio2() sequence of the callers.
+        */
+       ret = folio_launder(mapping, folio);
+       if (ret)
+               return ret;
+       if (folio->mapping != mapping)
+               return -EBUSY;
+       if (!filemap_release_folio(folio, gfp))
+               return -EBUSY;
 
        spin_lock(&mapping->host->i_lock);
        xa_lock_irq(&mapping->i_pages);
 failed:
        xa_unlock_irq(&mapping->i_pages);
        spin_unlock(&mapping->host->i_lock);
-       return 0;
-}
-
-static int folio_launder(struct address_space *mapping, struct folio *folio)
-{
-       if (!folio_test_dirty(folio))
-               return 0;
-       if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
-               return 0;
-       return mapping->a_ops->launder_folio(folio);
+       return -EBUSY;
 }
 
 /**
                        }
                        VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
                        folio_wait_writeback(folio);
-
-                       if (folio_mapped(folio))
-                               unmap_mapping_folio(folio);
-                       BUG_ON(folio_mapped(folio));
-
-                       ret2 = folio_launder(mapping, folio);
-                       if (ret2 == 0) {
-                               if (!invalidate_complete_folio2(mapping, folio))
-                                       ret2 = -EBUSY;
-                       }
+                       ret2 = folio_unmap_invalidate(mapping, folio, GFP_KERNEL);
                        if (ret2 < 0)
                                ret = ret2;
                        folio_unlock(folio);