--- a/fs/dax.c
+++ b/fs/dax.c
@@ ... @@
 static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
                        struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-       struct address_space *mapping = inode->i_mapping;
        sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
        unsigned long vaddr = (unsigned long)vmf->virtual_address;
        void *addr;
        unsigned long pfn;
        pgoff_t size;
        int error;
 
-       i_mmap_lock_read(mapping);
-
        /*
         * Check truncate didn't happen while we were allocating a block.
         * If it did, this block may or may not be still allocated to the
@@ ... @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
        error = vm_insert_mixed(vma, vaddr, pfn);
 
  out:
-       i_mmap_unlock_read(mapping);
-
        return error;
 }
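The two hunks above pull the truncate check in dax_insert_mapping() out from under its own i_mmap_lock_read(): the function now relies on the caller holding the i_mmap lock across both the check and the PTE insertion. As a hedged sketch of the resulting calling convention in the no-pagecache-page case (the wrapper name is hypothetical; every other identifier appears in the patch):

    /*
     * Illustrative only, not part of the patch: the fault handler takes
     * i_mmap_lock_write() before calling dax_insert_mapping() and holds
     * it over the truncate check and vm_insert_mixed().
     */
    static int dax_fault_locked_sketch(struct inode *inode,
                                       struct buffer_head *bh,
                                       struct vm_area_struct *vma,
                                       struct vm_fault *vmf)
    {
            struct address_space *mapping = inode->i_mapping;
            int error;

            i_mmap_lock_write(mapping);     /* was i_mmap_lock_read() inside */
            error = dax_insert_mapping(inode, bh, vma, vmf);
            i_mmap_unlock_write(mapping);   /* held across check + insert */
            return error;
    }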
 
@@ ... @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                         * from a read fault and we've raced with a truncate
                         */
                        error = -EIO;
-                       goto unlock_page;
+                       goto unlock;
                }
+       } else {
+               i_mmap_lock_write(mapping);
        }
 
        error = get_block(inode, block, &bh, 0);
        if (!error && (bh.b_size < PAGE_SIZE))
                error = -EIO;           /* fs corruption? */
        if (error)
-               goto unlock_page;
+               goto unlock;
 
        if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
                if (vmf->flags & FAULT_FLAG_WRITE) {
@@ ... @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                        if (!error && (bh.b_size < PAGE_SIZE))
                                error = -EIO;
                        if (error)
-                               goto unlock_page;
+                               goto unlock;
                } else {
+                       i_mmap_unlock_write(mapping);
                        return dax_load_hole(mapping, page, vmf);
                }
        }
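The new `} else { i_mmap_lock_write(mapping); }` arm above is the heart of the fix: with the old read lock, two write faults could both observe the same unallocated block, and the slower one would zero the block again after the faster one had already completed its store. The following standalone pthreads program (an analogy, not kernel code) makes that interleaving deterministic; build with `cc -pthread`:

    /* Models the fault-vs-fault race: under a shared lock both "faults"
     * see the same hole, and the slow thread's zeroing clobbers the fast
     * thread's store; an exclusive lock serialises them. */
    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
    static char block[2];           /* the shared "block" */
    static int hole;                /* 1: block not yet allocated */
    static int exclusive;           /* 1: model i_mmap_lock_write() */

    struct faulter {
            int off;                /* byte this thread stores */
            char c;
            useconds_t delay;       /* stretch the race window */
    };

    static void *fault(void *arg)
    {
            struct faulter *f = arg;

            if (exclusive)
                    pthread_rwlock_wrlock(&lock);   /* new: serialised */
            else
                    pthread_rwlock_rdlock(&lock);   /* old: concurrent */
            if (hole) {
                    usleep(f->delay);               /* dawdle in the window */
                    memset(block, 0, sizeof(block));/* "allocate": zero it */
                    hole = 0;
            }
            pthread_rwlock_unlock(&lock);
            block[f->off] = f->c;   /* userspace store after the fault */
            return NULL;
    }

    static void run(int excl)
    {
            struct faulter slow = { 0, 'A', 100000 }, fast = { 1, 'B', 0 };
            pthread_t t1, t2;

            hole = 1;
            memset(block, '?', sizeof(block));
            exclusive = excl;
            pthread_create(&t1, NULL, fault, &slow);
            usleep(10000);          /* let slow pass the hole check first */
            pthread_create(&t2, NULL, fault, &fast);
            pthread_join(t1, NULL);
            pthread_join(t2, NULL);
            printf("%s lock: block = [%c][%c]\n",
                   excl ? "exclusive" : "shared   ",
                   block[0] ? block[0] : '0', block[1] ? block[1] : '0');
    }

    int main(void)
    {
            run(0);                 /* shared: 'B' is wiped, [A][0] */
            run(1);                 /* exclusive: always [A][B] */
            return 0;
    }

With the shared lock the slow thread re-zeroes the block after the fast thread's store has landed, so 'B' is lost; with the exclusive lock the second fault sees the hole already filled and skips the zeroing.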
@@ ... @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                else
                        clear_user_highpage(new_page, vaddr);
                if (error)
-                       goto unlock_page;
+                       goto unlock;
                vmf->page = page;
                if (!page) {
-                       i_mmap_lock_read(mapping);
                        /* Check we didn't race with truncate */
                        size = (i_size_read(inode) + PAGE_SIZE - 1) >>
                                                                PAGE_SHIFT;
                        if (vmf->pgoff >= size) {
-                               i_mmap_unlock_read(mapping);
                                error = -EIO;
-                               goto out;
+                               goto unlock;
                        }
                }
                return VM_FAULT_LOCKED;
@@ ... @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                        WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
        }
 
+       if (!page)
+               i_mmap_unlock_write(mapping);
  out:
        if (error == -ENOMEM)
                return VM_FAULT_OOM | major;
@@ ... @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                return VM_FAULT_SIGBUS | major;
        return VM_FAULT_NOPAGE | major;
 
- unlock_page:
+ unlock:
        if (page) {
                unlock_page(page);
                page_cache_release(page);
+       } else {
+               i_mmap_unlock_write(mapping);
        }
+
        goto out;
 }
 EXPORT_SYMBOL(__dax_fault);
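A hypothetical helper (not in the patch) restating the invariant the renamed `unlock:` label enforces in __dax_fault(): on the slow paths exactly one of two things is held, the locked pagecache page found at entry or the i_mmap_lock_write() taken when no page existed, and every error exit must drop whichever one it is:

    /*
     * Illustrative only: mirrors the 'unlock:' label in the hunk above.
     */
    static void dax_fault_unwind(struct address_space *mapping,
                                 struct page *page)
    {
            if (page) {
                    unlock_page(page);              /* locked at lookup */
                    page_cache_release(page);       /* and referenced */
            } else {
                    i_mmap_unlock_write(mapping);   /* taken in the else arm */
            }
    }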
@@ ... @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
        block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);
 
        bh.b_size = PMD_SIZE;
+       i_mmap_lock_write(mapping);
        length = get_block(inode, block, &bh, write);
-       if (length)
-               return VM_FAULT_SIGBUS;
-       i_mmap_lock_read(mapping);
+       if (length) {
+               i_mmap_unlock_write(mapping);
+               return VM_FAULT_SIGBUS;
+       }
 
        /*
         * If the filesystem isn't willing to tell us the length of a hole,
@@ ... @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
        }
 
  out:
-       i_mmap_unlock_read(mapping);
-
        if (buffer_unwritten(&bh))
                complete_unwritten(&bh, !(result & VM_FAULT_ERROR));
 
+       i_mmap_unlock_write(mapping);
+
        return result;
 
  fallback:
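For the PMD path, the net effect of the two hunks above is a wider lock scope: the lock is now taken before get_block() so the block allocation itself is covered, and released only after the unwritten-extent conversion, with the early SIGBUS exit dropping it explicitly. A hedged skeleton, assuming the era's get_block_t and dax_iodone_t typedefs (the function name is hypothetical; real logic lives in __dax_pmd_fault):

    static int dax_pmd_scope_sketch(struct address_space *mapping,
                                    struct inode *inode, sector_t block,
                                    struct buffer_head *bh, int write,
                                    get_block_t get_block,
                                    dax_iodone_t complete_unwritten)
    {
            int result = 0;

            i_mmap_lock_write(mapping);
            if (get_block(inode, block, bh, write)) {
                    i_mmap_unlock_write(mapping);   /* don't leak on error */
                    return VM_FAULT_SIGBUS;
            }
            /* ... insert the PMD mapping, or fall back to PTEs ... */
            if (buffer_unwritten(bh))
                    complete_unwritten(bh, !(result & VM_FAULT_ERROR));
            i_mmap_unlock_write(mapping);           /* covers the conversion */
            return result;
    }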
 
--- a/mm/memory.c
+++ b/mm/memory.c
@@ ... @@ void unmap_mapping_range(struct address_space *mapping,
                details.last_index = ULONG_MAX;
 
 
-       /* DAX uses i_mmap_lock to serialise file truncate vs page fault */
-       i_mmap_lock_write(mapping);
+       /*
+        * DAX already holds i_mmap_lock to serialise file truncate vs
+        * page fault and page fault vs page fault.
+        */
+       if (!IS_DAX(mapping->host))
+               i_mmap_lock_write(mapping);
        if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
                unmap_mapping_range_tree(&mapping->i_mmap, &details);
-       i_mmap_unlock_write(mapping);
+       if (!IS_DAX(mapping->host))
+               i_mmap_unlock_write(mapping);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
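The IS_DAX() test exists because i_mmap_rwsem is not recursive: per the new comment, DAX code already holds the lock when unmap_mapping_range() runs against a DAX inode, so taking it again here would self-deadlock. A hypothetical condensation of that caller pattern (the helper name and the stale-page detail are illustrative, not from the patch):

    /*
     * Illustrative only: a DAX fault path holding i_mmap_lock_write()
     * and zapping other mappings of a stale pagecache page.
     */
    static void dax_zap_mapping_sketch(struct address_space *mapping,
                                       pgoff_t pgoff)
    {
            i_mmap_lock_write(mapping);
            /* ... fault handling finds a pagecache page to replace ... */
            unmap_mapping_range(mapping, (loff_t)pgoff << PAGE_SHIFT,
                                PAGE_SIZE, 0);  /* skips the lock for DAX */
            /* ... */
            i_mmap_unlock_write(mapping);
    }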