* This is a library function for filesystems to check if the block device
   * can be mounted with dax option.
   *
 - * Return: negative errno if unsupported, 0 if supported.
 + * Return: true if supported, false if unsupported
   */
 -int __bdev_dax_supported(struct super_block *sb, int blocksize)
 +bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
  {
 -      struct block_device *bdev = sb->s_bdev;
        struct dax_device *dax_dev;
+       bool dax_enabled = false;
        pgoff_t pgoff;
        int err, id;
        void *kaddr;
                 * on being able to do (page_address(pfn_to_page())).
                 */
                WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
+               dax_enabled = true;
        } else if (pfn_t_devmap(pfn)) {
-               /* pass */;
-       } else {
+               struct dev_pagemap *pgmap;
+ 
+               pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
+               if (pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX)
+                       dax_enabled = true;
+               put_dev_pagemap(pgmap);
+       }
+ 
+       if (!dax_enabled) {
 -              pr_debug("VFS (%s): error: dax support not enabled\n",
 -                              sb->s_id);
 -              return -EOPNOTSUPP;
 +              pr_debug("%s: error: dax support not enabled\n",
 +                              bdevname(bdev, buf));
 +              return false;
        }
- 
 -      return 0;
 +      return true;
  }
  EXPORT_SYMBOL_GPL(__bdev_dax_supported);
  #endif
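For reference, a minimal sketch of how a filesystem mount path might consume the new boolean interface. The helper name and surrounding logic below are illustrative only, not taken from the patch:

#include <linux/dax.h>
#include <linux/fs.h>

/*
 * Illustrative caller (not part of the patch): at mount time, verify that
 * the "dax" option can be honoured for the backing block device.
 */
static int example_check_dax(struct super_block *sb)
{
	/*
	 * New calling convention: pass the block device and block size and
	 * get a plain yes/no back instead of an errno.
	 */
	if (!bdev_dax_supported(sb->s_bdev, sb->s_blocksize))
		return -EINVAL;

	return 0;
}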
 
  {
        struct inode *inode = mapping->host;
        unsigned long vaddr = vmf->address;
 -      int ret = VM_FAULT_NOPAGE;
 +      vm_fault_t ret = VM_FAULT_NOPAGE;
        struct page *zero_page;
-       void *entry2;
        pfn_t pfn;
  
        zero_page = ZERO_PAGE(0);
        }
  
        pfn = page_to_pfn_t(zero_page);
-       entry2 = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
-                       RADIX_DAX_ZERO_PAGE, false);
-       if (IS_ERR(entry2)) {
-               ret = VM_FAULT_SIGBUS;
-               goto out;
-       }
- 
+       dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
+                       false);
 -      vm_insert_mixed(vmf->vma, vaddr, pfn);
 +      ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
  out:
        trace_dax_load_hole(inode, vmf, ret);
        return ret;
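The effect of the vmf_insert_mixed() conversion, reduced to its essentials: the helper already returns a vm_fault_t, so the fault handler no longer translates an errno by hand. A simplified sketch, not the dax code itself; the function name is made up:

#include <linux/mm.h>
#include <linux/pfn_t.h>

/* Hypothetical fault-path helper mapping the shared zero page. */
static vm_fault_t example_load_zero_page(struct vm_fault *vmf)
{
	pfn_t pfn = page_to_pfn_t(ZERO_PAGE(0));

	/* vmf_insert_mixed() hands back VM_FAULT_* codes directly. */
	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
}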
 
                 * allow an operation to fall back to buffered mode.
                 */
                ret = xfs_file_dio_aio_write(iocb, from);
 -              if (ret == -EREMCHG)
 -                      goto buffered;
 -      } else {
 -buffered:
 -              ret = xfs_file_buffered_aio_write(iocb, from);
 +              if (ret != -EREMCHG)
 +                      return ret;
        }
  
 -      if (ret > 0) {
 -              XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
 -
 -              /* Handle various SYNC-type writes */
 -              ret = generic_write_sync(iocb, ret);
 -      }
 -      return ret;
 +      return xfs_file_buffered_aio_write(iocb, from);
  }
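With the common tail gone from ->write_iter, the assumption in this refactoring is that byte accounting and generic_write_sync() are handled inside the individual write helpers. For comparison, the generic shape where the sync call follows a successful write, modeled loosely on the VFS generic_file_write_iter(); shown only to illustrate where the removed tail logic normally lives:

#include <linux/fs.h>

/* Illustrative ->write_iter, mirroring the generic VFS helper's structure. */
static ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	/* Sync handling lives with the write path, not in a shared tail. */
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}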
  
+ static void
+ xfs_wait_dax_page(
+       struct inode            *inode,
+       bool                    *did_unlock)
+ {
+       struct xfs_inode        *ip = XFS_I(inode);
+ 
+       *did_unlock = true;
+       xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
+       schedule();
+       xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
+ }
+ 
+ static int
+ xfs_break_dax_layouts(
+       struct inode            *inode,
+       uint                    iolock,
+       bool                    *did_unlock)
+ {
+       struct page             *page;
+ 
+       ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));
+ 
+       page = dax_layout_busy_page(inode->i_mapping);
+       if (!page)
+               return 0;
+ 
+       return ___wait_var_event(&page->_refcount,
+                       atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
+                       0, 0, xfs_wait_dax_page(inode, did_unlock));
+ }
+ 
+ int
+ xfs_break_layouts(
+       struct inode            *inode,
+       uint                    *iolock,
+       enum layout_break_reason reason)
+ {
+       bool                    retry;
+       int                     error;
+ 
+       ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));
+ 
+       do {
+               retry = false;
+               switch (reason) {
+               case BREAK_UNMAP:
+                       error = xfs_break_dax_layouts(inode, *iolock, &retry);
+                       if (error || retry)
+                               break;
+                       /* fall through */
+               case BREAK_WRITE:
+                       error = xfs_break_leased_layouts(inode, iolock, &retry);
+                       break;
+               default:
+                       WARN_ON_ONCE(1);
+                       error = -EINVAL;
+               }
+       } while (error == 0 && retry);
+ 
+       return error;
+ }
+ 
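A simplified sketch of a caller of the new helper: before an operation that invalidates the block map (truncate or hole punch style paths), take the IO and mmap locks exclusively and request BREAK_UNMAP so busy DAX pages and any leased layouts are drained first. The function below is hypothetical and assumes the usual fs/xfs internal headers (xfs.h, xfs_inode.h) are included:

/* Hypothetical caller, not from the patch. */
static int example_prepare_unmap(struct xfs_inode *ip)
{
	struct inode	*inode = VFS_I(ip);
	uint		iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	int		error;

	xfs_ilock(ip, iolock);

	/*
	 * BREAK_UNMAP first waits for DAX page references to drop via
	 * xfs_break_dax_layouts(), then falls through to breaking leased
	 * layouts; the lock may be cycled while waiting, hence &iolock.
	 */
	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
	if (!error) {
		/* ... safe to modify the extent map here ... */
	}

	xfs_iunlock(ip, iolock);
	return error;
}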
  #define       XFS_FALLOC_FL_SUPPORTED                                         \
                (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |           \
                 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |      \
 
  struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
  int dax_writeback_mapping_range(struct address_space *mapping,
                struct block_device *bdev, struct writeback_control *wbc);
+ 
+ struct page *dax_layout_busy_page(struct address_space *mapping);
  #else
 -static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
 +static inline bool bdev_dax_supported(struct block_device *bdev,
 +              int blocksize)
  {
 -      return -EOPNOTSUPP;
 +      return false;
  }
  
  static inline struct dax_device *fs_dax_get_by_host(const char *host)