www.infradead.org Git - users/hch/misc.git/commitdiff
xfs: remove xfs_buf_cache.bc_lock
author	Christoph Hellwig <hch@lst.de>
Sun, 12 Jan 2025 06:00:04 +0000 (07:00 +0100)
committer	Christoph Hellwig <hch@lst.de>
Mon, 3 Feb 2025 09:18:36 +0000 (10:18 +0100)
xfs_buf_cache.bc_lock serializes adding buffers to and removing them from
the hashtable.  But as the rhashtable code already uses fine-grained
internal locking for inserts and removals, the extra protection isn't
actually required.

It also happens to fix a lock order inversion vs b_lock added by the
recent lookup race fix.
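
As an illustration of the pattern this change relies on, here is a minimal,
self-contained sketch of a lockless insert-or-lookup on an rhashtable.  It is
not the XFS code; struct demo_obj, demo_params and demo_insert are made-up
names.  The table's internal bucket locks serialize the insert itself, and the
caller's rcu_read_lock() only has to keep a returned pre-existing object alive
until a reference has been taken on it.  Objects must be freed with
kfree_rcu() for concurrent lookups to remain safe.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>

struct demo_obj {
	u64			key;
	refcount_t		ref;	/* set to 1 by the allocator */
	struct rhash_head	node;
	struct rcu_head		rcu;	/* freed with kfree_rcu() */
};

static const struct rhashtable_params demo_params = {
	.key_len	= sizeof(u64),
	.key_offset	= offsetof(struct demo_obj, key),
	.head_offset	= offsetof(struct demo_obj, node),
};

/*
 * Insert @new, or return a referenced pre-existing object with the same key.
 * No external lock is taken around the hash table operation.
 */
static int demo_insert(struct rhashtable *ht, struct demo_obj *new,
		struct demo_obj **out)
{
	struct demo_obj *old;

	rcu_read_lock();
	old = rhashtable_lookup_get_insert_fast(ht, &new->node, demo_params);
	if (IS_ERR(old)) {
		rcu_read_unlock();
		return PTR_ERR(old);
	}
	if (old) {
		/* Found a matching object; try to take a reference on it. */
		bool alive = refcount_inc_not_zero(&old->ref);

		rcu_read_unlock();
		if (!alive)
			return -EAGAIN;	/* it is being torn down, retry */
		kfree(new);		/* @new was never published */
		*out = old;
		return 0;
	}
	rcu_read_unlock();

	/* @new is now visible in the table and already holds a reference. */
	*out = new;
	return 0;
}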

Fixes: ee10f6fcdb96 ("xfs: fix buffer lookup vs release race")
Reported-by: "Lai, Yi" <yi1.lai@linux.intel.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Carlos Maiolino <cmaiolino@redhat.com>
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf.h

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index d1d4a0a22e1370f9a5802f112fac45e1df6a44a3..5db1b9022865b9e93b175ffec34435b9d0caf4aa 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -41,8 +41,7 @@ struct kmem_cache *xfs_buf_cache;
  *
  * xfs_buf_rele:
  *     b_lock
- *       pag_buf_lock
- *         lru_lock
+ *       lru_lock
  *
  * xfs_buftarg_drain_rele
  *     lru_lock
@@ -220,14 +219,21 @@ _xfs_buf_alloc(
         */
        flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
 
-       spin_lock_init(&bp->b_lock);
+       /*
+        * A new buffer is held and locked by the owner.  This ensures that the
+        * buffer is owned by the caller and racing RCU lookups right after
+        * inserting into the hash table are safe (and will have to wait for
+        * the unlock to do anything non-trivial).
+        */
        bp->b_hold = 1;
+       sema_init(&bp->b_sema, 0); /* held, no waiters */
+
+       spin_lock_init(&bp->b_lock);
        atomic_set(&bp->b_lru_ref, 1);
        init_completion(&bp->b_iowait);
        INIT_LIST_HEAD(&bp->b_lru);
        INIT_LIST_HEAD(&bp->b_list);
        INIT_LIST_HEAD(&bp->b_li_list);
-       sema_init(&bp->b_sema, 0); /* held, no waiters */
        bp->b_target = target;
        bp->b_mount = target->bt_mount;
        bp->b_flags = flags;
@@ -502,7 +508,6 @@ int
 xfs_buf_cache_init(
        struct xfs_buf_cache    *bch)
 {
-       spin_lock_init(&bch->bc_lock);
        return rhashtable_init(&bch->bc_hash, &xfs_buf_hash_params);
 }
 
@@ -652,17 +657,20 @@ xfs_buf_find_insert(
        if (error)
                goto out_free_buf;
 
-       spin_lock(&bch->bc_lock);
+       /* The new buffer keeps the perag reference until it is freed. */
+       new_bp->b_pag = pag;
+
+       rcu_read_lock();
        bp = rhashtable_lookup_get_insert_fast(&bch->bc_hash,
                        &new_bp->b_rhash_head, xfs_buf_hash_params);
        if (IS_ERR(bp)) {
+               rcu_read_unlock();
                error = PTR_ERR(bp);
-               spin_unlock(&bch->bc_lock);
                goto out_free_buf;
        }
        if (bp && xfs_buf_try_hold(bp)) {
                /* found an existing buffer */
-               spin_unlock(&bch->bc_lock);
+               rcu_read_unlock();
                error = xfs_buf_find_lock(bp, flags);
                if (error)
                        xfs_buf_rele(bp);
@@ -670,10 +678,8 @@ xfs_buf_find_insert(
                        *bpp = bp;
                goto out_free_buf;
        }
+       rcu_read_unlock();
 
-       /* The new buffer keeps the perag reference until it is freed. */
-       new_bp->b_pag = pag;
-       spin_unlock(&bch->bc_lock);
        *bpp = new_bp;
        return 0;
 
@@ -1090,7 +1096,6 @@ xfs_buf_rele_cached(
        }
 
        /* we are asked to drop the last reference */
-       spin_lock(&bch->bc_lock);
        __xfs_buf_ioacct_dec(bp);
        if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
                /*
@@ -1102,7 +1107,6 @@ xfs_buf_rele_cached(
                        bp->b_state &= ~XFS_BSTATE_DISPOSE;
                else
                        bp->b_hold--;
-               spin_unlock(&bch->bc_lock);
        } else {
                bp->b_hold--;
                /*
@@ -1120,7 +1124,6 @@ xfs_buf_rele_cached(
                ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
                rhashtable_remove_fast(&bch->bc_hash, &bp->b_rhash_head,
                                xfs_buf_hash_params);
-               spin_unlock(&bch->bc_lock);
                if (pag)
                        xfs_perag_put(pag);
                freebuf = true;
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 7e73663c5d4a580b35c75480aec24c88de3cd20d..3b4ed42e11c015d24af1882e4d95c675d431d455 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -80,7 +80,6 @@ typedef unsigned int xfs_buf_flags_t;
 #define XFS_BSTATE_IN_FLIGHT    (1 << 1)       /* I/O in flight */
 
 struct xfs_buf_cache {
-       spinlock_t              bc_lock;
        struct rhashtable       bc_hash;
 };