return ret;
 }
 
+/*
+ * Wait for @page to become idle, i.e. for the DAX mapping's reference to
+ * be the only one remaining. @cb runs in place of the sleep itself; it is
+ * expected to drop the filesystem locks, schedule() and re-acquire them
+ * (see ext4_wait_dax_page() and friends).
+ */
+static int wait_page_idle(struct page *page,
+                       void (cb)(struct inode *),
+                       struct inode *inode)
+{
+       return ___wait_var_event(page, dax_page_is_idle(page),
+                               TASK_INTERRUPTIBLE, 0, 0, cb(inode));
+}
+
+/*
+ * Unmaps the given range of the inode and waits for any DMA on pages in
+ * that range to complete, so that the caller can then safely delete the
+ * DAX mapping entries for the range.
+ */
+int dax_break_layout(struct inode *inode, loff_t start, loff_t end,
+               void (cb)(struct inode *))
+{
+       struct page *page;
+       int error = 0;
+
+       if (!dax_mapping(inode->i_mapping))
+               return 0;
+
+       do {
+               page = dax_layout_busy_page_range(inode->i_mapping, start, end);
+               if (!page)
+                       break;
+
+               error = wait_page_idle(page, cb, inode);
+       } while (error == 0);
+
+       return error;
+}
+EXPORT_SYMBOL_GPL(dax_break_layout);
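For reference, ___wait_var_event() keys its waitqueue on the address passed as its first argument, here the struct page pointer itself, so whichever path drops the last DMA reference must issue a matching wake_up_var() on that same address or the loop above never wakes. A minimal sketch of that waker side, with a hypothetical release helper (the name and placement are illustrative, not part of this patch):

#include <linux/page_ref.h>
#include <linux/wait_bit.h>

/* Hypothetical: drop a DMA reference and wake DAX layout waiters. */
static void hypothetical_dax_page_put(struct page *page)
{
	/*
	 * One remaining reference means only the DAX mapping itself still
	 * holds the page, i.e. dax_page_is_idle() is now true.
	 */
	if (page_ref_dec_return(page) == 1)
		wake_up_var(page);
}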
+
 /*
  * Invalidate DAX entry if it is clean.
  */
 
 int ext4_break_layouts(struct inode *inode)
 {
-       struct page *page;
-       int error;
-
        if (WARN_ON_ONCE(!rwsem_is_locked(&inode->i_mapping->invalidate_lock)))
                return -EINVAL;
 
-       do {
-               page = dax_layout_busy_page(inode->i_mapping);
-               if (!page)
-                       return 0;
-
-               error = dax_wait_page_idle(page, ext4_wait_dax_page, inode);
-       } while (error == 0);
-
-       return error;
+       return dax_break_layout_inode(inode, ext4_wait_dax_page);
 }
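ext4_break_layouts() only asserts the locking contract; taking mapping->invalidate_lock is the caller's job, and the wait callback may transiently drop and retake it. A sketch of the expected calling pattern, modelled on ext4's truncate and punch-hole paths (example_punch_range() is a made-up name for illustration):

#include <linux/fs.h>

static int example_punch_range(struct inode *inode)
{
	int error;

	filemap_invalidate_lock(inode->i_mapping);
	/* May drop and retake invalidate_lock via ext4_wait_dax_page(). */
	error = ext4_break_layouts(inode);
	if (!error) {
		/* ...now safe to unmap and free the file range... */
	}
	filemap_invalidate_unlock(inode->i_mapping);
	return error;
}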
 
 /*
 
        filemap_invalidate_lock(inode->i_mapping);
 }
 
-/* Should be called with mapping->invalidate_lock held exclusively */
-static int __fuse_dax_break_layouts(struct inode *inode, bool *retry,
-                                   loff_t start, loff_t end)
-{
-       struct page *page;
-
-       page = dax_layout_busy_page_range(inode->i_mapping, start, end);
-       if (!page)
-               return 0;
-
-       *retry = true;
-       return dax_wait_page_idle(page, fuse_wait_dax_page, inode);
-}
-
+/* Should be called with mapping->invalidate_lock held exclusively. */
 int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start,
                                  u64 dmap_end)
 {
-       bool    retry;
-       int     ret;
-
-       do {
-               retry = false;
-               ret = __fuse_dax_break_layouts(inode, &retry, dmap_start,
-                                              dmap_end);
-       } while (ret == 0 && retry);
-
-       return ret;
+       return dax_break_layout(inode, dmap_start, dmap_end,
+                               fuse_wait_dax_page);
 }
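The cb threaded through dax_break_layout() is the filesystem's "drop locks and sleep" hook. The tail of fuse's hook is visible in the context above; its full shape, reproduced here for reference, is:

static void fuse_wait_dax_page(struct inode *inode)
{
	filemap_invalidate_unlock(inode->i_mapping);
	schedule();
	filemap_invalidate_lock(inode->i_mapping);
}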
 
 ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 
        struct xfs_inode        *ip2)
 {
        int                     error;
-       bool                    retry;
        struct page             *page;
 
        if (ip1->i_ino > ip2->i_ino)
                swap(ip1, ip2);
 
 again:
-       retry = false;
        /* Lock the first inode */
        xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
-       error = xfs_break_dax_layouts(VFS_I(ip1), &retry);
-       if (error || retry) {
+       error = xfs_break_dax_layouts(VFS_I(ip1));
+       if (error) {
                xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
-               if (error == 0 && retry)
-                       goto again;
                return error;
        }
 
         * for this nested lock case.
         */
        page = dax_layout_busy_page(VFS_I(ip2)->i_mapping);
-       if (page && page_ref_count(page) != 1) {
+       if (page && !dax_page_is_idle(page)) {
                xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
                xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
                goto again;
 
 int
 xfs_break_dax_layouts(
-       struct inode            *inode,
-       bool                    *retry)
+       struct inode            *inode)
 {
-       struct page             *page;
-
        xfs_assert_ilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL);
 
-       page = dax_layout_busy_page(inode->i_mapping);
-       if (!page)
-               return 0;
-
-       *retry = true;
-       return dax_wait_page_idle(page, xfs_wait_dax_page, inode);
+       return dax_break_layout_inode(inode, xfs_wait_dax_page);
 }
 
 int
                retry = false;
                switch (reason) {
                case BREAK_UNMAP:
-                       error = xfs_break_dax_layouts(inode, &retry);
-                       if (error || retry)
+                       error = xfs_break_dax_layouts(inode);
+                       if (error)
                                break;
                        fallthrough;
                case BREAK_WRITE:
 
        return xfs_itruncate_extents_flags(tpp, ip, whichfork, new_size, 0);
 }
 
-int    xfs_break_dax_layouts(struct inode *inode, bool *retry);
+int    xfs_break_dax_layouts(struct inode *inode);
 int    xfs_break_layouts(struct inode *inode, uint *iolock,
                enum layout_break_reason reason);
 
 
 int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
                const struct iomap_ops *ops);
 
-static inline int dax_wait_page_idle(struct page *page,
-                               void (cb)(struct inode *),
-                               struct inode *inode)
+/*
+ * True when the only remaining reference to @page is the DAX mapping's
+ * own. Note that a NULL page reads as "not idle": callers that may get
+ * NULL from dax_layout_busy_page() (i.e. nothing is busy) must check
+ * for NULL themselves.
+ */
+static inline bool dax_page_is_idle(struct page *page)
 {
-       return ___wait_var_event(page, page_ref_count(page) == 1,
-                               TASK_INTERRUPTIBLE, 0, 0, cb(inode));
+       return page && page_ref_count(page) == 1;
 }
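A tiny illustration of that NULL contract (example_range_is_idle() is hypothetical, mirroring the check the xfs nested-lock path above performs):

#include <linux/dax.h>

static bool example_range_is_idle(struct address_space *mapping)
{
	struct page *page = dax_layout_busy_page(mapping);

	/* No busy page found means the range is trivially idle. */
	return !page || dax_page_is_idle(page);
}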
 
 #if IS_ENABLED(CONFIG_DAX)
 {
 }
 #endif /* CONFIG_DAX */
+
+#if !IS_ENABLED(CONFIG_FS_DAX)
+static inline int __must_check dax_break_layout(struct inode *inode,
+                           loff_t start, loff_t end, void (cb)(struct inode *))
+{
+       return 0;
+}
+#endif
+
 bool dax_alive(struct dax_device *dax_dev);
 void *dax_get_private(struct dax_device *dax_dev);
 long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
                                      pgoff_t index);
+int __must_check dax_break_layout(struct inode *inode, loff_t start,
+                               loff_t end, void (cb)(struct inode *));
+static inline int __must_check dax_break_layout_inode(struct inode *inode,
+                                               void (cb)(struct inode *))
+{
+       return dax_break_layout(inode, 0, LLONG_MAX, cb);
+}
 int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
                                  struct inode *dest, loff_t destoff,
                                  loff_t len, bool *is_same,