}
 EXPORT_SYMBOL(end_buffer_write_sync);
 
-/*
- * Various filesystems appear to want __find_get_block to be non-blocking.
- * But it's the page lock which protects the buffers.  To get around this,
- * we get exclusion from try_to_free_buffers with the blockdev mapping's
- * i_private_lock.
- *
- * Hack idea: for the blockdev mapping, i_private_lock contention
- * may be quite high.  This code could TryLock the page, and if that
- * succeeds, there is no need to take i_private_lock.
- */
 static struct buffer_head *
-__find_get_block_slow(struct block_device *bdev, sector_t block)
+__find_get_block_slow(struct block_device *bdev, sector_t block, bool atomic)
 {
        struct address_space *bd_mapping = bdev->bd_mapping;
        const int blkbits = bd_mapping->host->i_blkbits;
        if (IS_ERR(folio))
                goto out;
 
-       spin_lock(&bd_mapping->i_private_lock);
+       /*
+        * The folio lock protects the buffers. Callers that cannot
+        * block fall back to serializing against try_to_free_buffers()
+        * via the i_private_lock.
+        */
+       if (atomic)
+               spin_lock(&bd_mapping->i_private_lock);
+       else
+               folio_lock(folio);
+
        head = folio_buffers(folio);
        if (!head)
                goto out_unlock;
                       1 << blkbits);
        }
 out_unlock:
-       spin_unlock(&bd_mapping->i_private_lock);
+       if (atomic)
+               spin_unlock(&bd_mapping->i_private_lock);
+       else
+               folio_unlock(folio);
        folio_put(folio);
 out:
        return ret;
  * it in the LRU and mark it as accessed.  If it is not present then return
  * NULL
  */
-struct buffer_head *
-__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
+static struct buffer_head *
+find_get_block_common(struct block_device *bdev, sector_t block,
+                       unsigned size, bool atomic)
 {
        struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
 
        if (bh == NULL) {
                /* __find_get_block_slow will mark the page accessed */
-               bh = __find_get_block_slow(bdev, block);
+               bh = __find_get_block_slow(bdev, block, atomic);
                if (bh)
                        bh_lru_install(bh);
        } else
 
        return bh;
 }
+
+struct buffer_head *
+__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
+{
+       return find_get_block_common(bdev, block, size, true);
+}
 EXPORT_SYMBOL(__find_get_block);
 
 /**