spin_lock(&bp->b_lock);
atomic_set(&bp->b_lru_ref, 0);
- if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
- (list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru)))
- bp->b_hold--;
-
- ASSERT(bp->b_hold >= 1);
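+ /* Buffers marked dead (negative b_hold) were already isolated from the LRU. */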
+ if (bp->b_hold >= 0)
+ list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru);
spin_unlock(&bp->b_lock);
}
struct xfs_buf *bp)
{
spin_lock(&bp->b_lock);
- if (bp->b_hold == 0) {
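+ /* A negative b_hold marks a buffer that is being torn down. */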
+ if (bp->b_hold == -1) {
spin_unlock(&bp->b_lock);
return false;
}
}
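+/*
+ * Tear down a dead buffer (negative b_hold): remove it from the buffer cache
+ * and free it. The buffer must already have been removed from the LRU.
+ */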
static void
-xfs_buf_rele_uncached(
- struct xfs_buf *bp)
-{
- ASSERT(list_empty(&bp->b_lru));
-
- spin_lock(&bp->b_lock);
- if (--bp->b_hold) {
- spin_unlock(&bp->b_lock);
- return;
- }
- spin_unlock(&bp->b_lock);
- xfs_buf_free(bp);
-}
-
-static void
-xfs_buf_rele_cached(
+xfs_buf_destroy(
struct xfs_buf *bp)
{
- struct xfs_buftarg *btp = bp->b_target;
- struct xfs_perag *pag = bp->b_pag;
- struct xfs_buf_cache *bch = xfs_buftarg_buf_cache(btp, pag);
- bool freebuf = false;
-
- trace_xfs_buf_rele(bp, _RET_IP_);
-
- spin_lock(&bp->b_lock);
- ASSERT(bp->b_hold >= 1);
- if (bp->b_hold > 1) {
- bp->b_hold--;
- goto out_unlock;
- }
+ ASSERT(bp->b_hold < 0);
+ ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
- /* we are asked to drop the last reference */
- if (atomic_read(&bp->b_lru_ref)) {
- /*
- * If the buffer is added to the LRU, keep the reference to the
- * buffer for the LRU and clear the (now stale) dispose list
- * state flag, else drop the reference.
- */
- if (list_lru_add_obj(&btp->bt_lru, &bp->b_lru))
- bp->b_state &= ~XFS_BSTATE_DISPOSE;
- else
- bp->b_hold--;
- } else {
- bp->b_hold--;
- /*
- * most of the time buffers will already be removed from the
- * LRU, so optimise that case by checking for the
- * XFS_BSTATE_DISPOSE flag indicating the last list the buffer
- * was on was the disposal list
- */
- if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
- list_lru_del_obj(&btp->bt_lru, &bp->b_lru);
- } else {
- ASSERT(list_empty(&bp->b_lru));
- }
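+ /* Cached buffers: drop the hash table entry and the perag reference. */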
+ if (!xfs_buf_is_uncached(bp)) {
+ struct xfs_buf_cache *bch =
+ xfs_buftarg_buf_cache(bp->b_target, bp->b_pag);
- ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
rhashtable_remove_fast(&bch->bc_hash, &bp->b_rhash_head,
xfs_buf_hash_params);
- if (pag)
- xfs_perag_put(pag);
- freebuf = true;
- }
-out_unlock:
- spin_unlock(&bp->b_lock);
+ if (bp->b_pag)
+ xfs_perag_put(bp->b_pag);
+ }
- if (freebuf)
- xfs_buf_free(bp);
+ xfs_buf_free(bp);
}
/*
struct xfs_buf *bp)
{
trace_xfs_buf_rele(bp, _RET_IP_);
- if (xfs_buf_is_uncached(bp))
- xfs_buf_rele_uncached(bp);
- else
- xfs_buf_rele_cached(bp);
+
+ spin_lock(&bp->b_lock);
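+ /*
+  * Drop the reference. If it was the last one, park cached buffers with a
+  * positive b_lru_ref on the LRU; everything else is torn down right away.
+  */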
+ if (!--bp->b_hold) {
+ if (xfs_buf_is_uncached(bp) || !atomic_read(&bp->b_lru_ref))
+ goto kill;
+ list_lru_add_obj(&bp->b_target->bt_lru, &bp->b_lru);
+ }
+ spin_unlock(&bp->b_lock);
+ return;
+
+kill:
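+ /*
+  * Mark the buffer dead so no new references can be taken, take it off
+  * the LRU and free it.
+  */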
+ bp->b_hold = -1;
+ list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru);
+ spin_unlock(&bp->b_lock);
+
+ xfs_buf_destroy(bp);
}
/*
/*
* To simulate an I/O failure, the buffer must be locked and held with at least
- * three references. The LRU reference is dropped by the stale call. The buf
- * item reference is dropped via ioend processing. The third reference is owned
- * by the caller and is dropped on I/O completion if the buffer is XBF_ASYNC.
+ * two references.
+ *
+ * The buf item reference is dropped via ioend processing. The second reference
+ * is owned by the caller and is dropped on I/O completion if the buffer is
+ * XBF_ASYNC.
*/
void
xfs_buf_ioend_fail(
if (!spin_trylock(&bp->b_lock))
return LRU_SKIP;
- if (bp->b_hold > 1) {
+ if (bp->b_hold > 0) {
/* need to wait, so skip it this pass */
spin_unlock(&bp->b_lock);
trace_xfs_buf_drain_buftarg(bp, _RET_IP_);
return LRU_SKIP;
}
- /*
- * clear the LRU reference count so the buffer doesn't get
- * ignored in xfs_buf_rele().
- */
- atomic_set(&bp->b_lru_ref, 0);
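+ /* Mark the buffer dead and move it to the dispose list for freeing. */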
- bp->b_state |= XFS_BSTATE_DISPOSE;
+ bp->b_hold = -1;
list_lru_isolate_move(lru, item, dispose);
spin_unlock(&bp->b_lock);
return LRU_REMOVED;
"Corruption Alert: Buffer at daddr 0x%llx had permanent write failures!",
(long long)xfs_buf_daddr(bp));
}
- xfs_buf_rele(bp);
+ xfs_buf_destroy(bp);
}
if (loop++ != 0)
delay(100);
struct list_head *dispose = arg;
/*
- * we are inverting the lru lock/bp->b_lock here, so use a trylock.
- * If we fail to get the lock, just skip it.
+ * We are inverting the lru lock vs bp->b_lock order here, so use a
+ * trylock. If we fail to get the lock, just skip the buffer.
*/
if (!spin_trylock(&bp->b_lock))
return LRU_SKIP;
+
+ /*
+ * If the buffer is in use, remove it from the LRU for now as we can't
+ * free it. It will be added to the LRU again when the reference count
+ * hits zero.
+ */
+ if (bp->b_hold > 0) {
+ list_lru_isolate(lru, &bp->b_lru);
+ spin_unlock(&bp->b_lock);
+ return LRU_REMOVED;
+ }
+
/*
* Decrement the b_lru_ref count unless the value is already
* zero. If the value is already zero, we need to reclaim the
return LRU_ROTATE;
}
- bp->b_state |= XFS_BSTATE_DISPOSE;
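+ /* The buffer is idle and its last LRU reference is gone; mark it dead. */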
+ bp->b_hold = -1;
list_lru_isolate_move(lru, item, dispose);
spin_unlock(&bp->b_lock);
return LRU_REMOVED;
struct xfs_buf *bp;
bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
list_del_init(&bp->b_lru);
- xfs_buf_rele(bp);
+ xfs_buf_destroy(bp);
}
return freed;