*
* xfs_buf_stale:
*   b_sema (caller holds)
- *     b_lock
+ *     b_lockref.lock
*       lru_lock
*
* xfs_buf_rele:
- *   b_lock
+ *   b_lockref.lock
*     lru_lock
*
* xfs_buftarg_drain_rele
*   lru_lock
- *     b_lock (trylock due to inversion)
+ *     b_lockref.lock (trylock due to inversion)
*
* xfs_buftarg_isolate
*   lru_lock
- *     b_lock (trylock due to inversion)
+ *     b_lockref.lock (trylock due to inversion)
*/
static void xfs_buf_submit(struct xfs_buf *bp);
*/
bp->b_flags &= ~_XBF_DELWRI_Q;
- spin_lock(&bp->b_lock);
+ spin_lock(&bp->b_lockref.lock);
atomic_set(&bp->b_lru_ref, 0);
- if (bp->b_hold >= 0)
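+ /*
+ * A dead lockref means the final release has already removed the buffer
+ * from the LRU, so only do the removal for live buffers.
+ */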
+ if (!__lockref_is_dead(&bp->b_lockref))
list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru);
- spin_unlock(&bp->b_lock);
+ spin_unlock(&bp->b_lockref.lock);
}
static int
*/
flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
- spin_lock_init(&bp->b_lock);
- bp->b_hold = 1;
+ /* a lockref_init helper would be nice */
+ bp->b_lockref.count = 1;
+ spin_lock_init(&bp->b_lockref.lock);
+
atomic_set(&bp->b_lru_ref, 1);
init_completion(&bp->b_iowait);
INIT_LIST_HEAD(&bp->b_lru);
return 0;
}
-static bool
-xfs_buf_try_hold(
- struct xfs_buf *bp)
-{
- spin_lock(&bp->b_lock);
- if (bp->b_hold == -1) {
- spin_unlock(&bp->b_lock);
- return false;
- }
- bp->b_hold++;
- spin_unlock(&bp->b_lock);
- return true;
-}
-
static inline int
xfs_buf_lookup(
struct xfs_buf_cache *bch,
rcu_read_lock();
bp = rhashtable_lookup(&bch->bc_hash, map, xfs_buf_hash_params);
- if (!bp || !xfs_buf_try_hold(bp)) {
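+ /*
+ * The buffer may still be visible in the hash table while it is being
+ * torn down, so only take a reference if the lockref has not been
+ * marked dead yet.
+ */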
+ if (!bp || !lockref_get_not_dead(&bp->b_lockref)) {
rcu_read_unlock();
return -ENOENT;
}
error = PTR_ERR(bp);
goto out_free_buf;
}
- if (bp && xfs_buf_try_hold(bp)) {
+ if (bp && lockref_get_not_dead(&bp->b_lockref)) {
/* found an existing buffer */
rcu_read_unlock();
error = xfs_buf_find_lock(bp, flags);
{
trace_xfs_buf_hold(bp, _RET_IP_);
- spin_lock(&bp->b_lock);
- bp->b_hold++;
- spin_unlock(&bp->b_lock);
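+ /* The caller must already hold a reference, so the lockref cannot be dead. */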
+ lockref_get(&bp->b_lockref);
}
static void
xfs_buf_destroy(
struct xfs_buf *bp)
{
- ASSERT(bp->b_hold < 0);
+ ASSERT(__lockref_is_dead(&bp->b_lockref));
ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
if (!xfs_buf_is_uncached(bp)) {
{
trace_xfs_buf_rele(bp, _RET_IP_);
- spin_lock(&bp->b_lock);
- if (!--bp->b_hold) {
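+ /*
+ * Drop the reference without touching the spinlock as long as it is not
+ * the last one.  Otherwise lockref_put_or_lock returns with
+ * b_lockref.lock held and the count still undecremented, and the
+ * release is finished below.
+ */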
+ if (lockref_put_or_lock(&bp->b_lockref))
+ return;
+ if (!--bp->b_lockref.count) {
if (xfs_buf_is_uncached(bp) || !atomic_read(&bp->b_lru_ref))
goto kill;
list_lru_add_obj(&bp->b_target->bt_lru, &bp->b_lru);
}
- spin_unlock(&bp->b_lock);
+ spin_unlock(&bp->b_lockref.lock);
return;
kill:
- bp->b_hold = -1;
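+ /*
+ * Mark the lockref dead while holding b_lockref.lock so that concurrent
+ * lookups fail in lockref_get_not_dead and leave freeing the buffer
+ * to us.
+ */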
+ lockref_mark_dead(&bp->b_lockref);
list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru);
- spin_unlock(&bp->b_lock);
+ spin_unlock(&bp->b_lockref.lock);
xfs_buf_destroy(bp);
}
struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
struct list_head *dispose = arg;
- if (!spin_trylock(&bp->b_lock))
+ if (!spin_trylock(&bp->b_lockref.lock))
return LRU_SKIP;
- if (bp->b_hold > 0) {
+ if (bp->b_lockref.count > 0) {
/* need to wait, so skip it this pass */
- spin_unlock(&bp->b_lock);
+ spin_unlock(&bp->b_lockref.lock);
trace_xfs_buf_drain_buftarg(bp, _RET_IP_);
return LRU_SKIP;
}
- bp->b_hold = -1;
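+ /* No references are left, so mark the buffer dead before dropping the lock. */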
+ lockref_mark_dead(&bp->b_lockref);
list_lru_isolate_move(lru, item, dispose);
- spin_unlock(&bp->b_lock);
+ spin_unlock(&bp->b_lockref.lock);
return LRU_REMOVED;
}
struct list_head *dispose = arg;
/*
- * We are inverting the lru lock vs bp->b_lock order here, so use a
- * trylock. If we fail to get the lock, just skip the buffer.
+ * We are inverting the lru lock vs bp->b_lockref.lock order here, so
+ * use a trylock. If we fail to get the lock, just skip the buffer.
*/
- if (!spin_trylock(&bp->b_lock))
+ if (!spin_trylock(&bp->b_lockref.lock))
return LRU_SKIP;
/*
* free it. It will be added to the LRU again when the reference count
* hits zero.
*/
- if (bp->b_hold > 0) {
+ if (bp->b_lockref.count > 0) {
list_lru_isolate(lru, &bp->b_lru);
- spin_unlock(&bp->b_lock);
+ spin_unlock(&bp->b_lockref.lock);
return LRU_REMOVED;
}
* buffer, otherwise it gets another trip through the LRU.
*/
if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
- spin_unlock(&bp->b_lock);
+ spin_unlock(&bp->b_lockref.lock);
return LRU_ROTATE;
}
- bp->b_hold = -1;
+ lockref_mark_dead(&bp->b_lockref);
list_lru_isolate_move(lru, item, dispose);
- spin_unlock(&bp->b_lock);
+ spin_unlock(&bp->b_lockref.lock);
return LRU_REMOVED;
}
__entry->dev = bp->b_target->bt_dev;
__entry->bno = xfs_buf_daddr(bp);
__entry->nblks = bp->b_length;
- __entry->hold = bp->b_hold;
+ __entry->hold = bp->b_lockref.count;
__entry->pincount = atomic_read(&bp->b_pin_count);
__entry->lockval = bp->b_sema.count;
__entry->flags = bp->b_flags;
__entry->bno = xfs_buf_daddr(bp);
__entry->length = bp->b_length;
__entry->flags = flags;
- __entry->hold = bp->b_hold;
+ __entry->hold = bp->b_lockref.count;
__entry->pincount = atomic_read(&bp->b_pin_count);
__entry->lockval = bp->b_sema.count;
__entry->caller_ip = caller_ip;
__entry->dev = bp->b_target->bt_dev;
__entry->bno = xfs_buf_daddr(bp);
__entry->length = bp->b_length;
- __entry->hold = bp->b_hold;
+ __entry->hold = bp->b_lockref.count;
__entry->pincount = atomic_read(&bp->b_pin_count);
__entry->lockval = bp->b_sema.count;
__entry->error = error;
__entry->buf_bno = xfs_buf_daddr(bip->bli_buf);
__entry->buf_len = bip->bli_buf->b_length;
__entry->buf_flags = bip->bli_buf->b_flags;
- __entry->buf_hold = bip->bli_buf->b_hold;
+ __entry->buf_hold = bip->bli_buf->b_lockref.count;
__entry->buf_pincount = atomic_read(&bip->bli_buf->b_pin_count);
__entry->buf_lockval = bip->bli_buf->b_sema.count;
__entry->li_flags = bip->bli_item.li_flags;
__entry->xfino = file_inode(xfbt->target->bt_file)->i_ino;
__entry->bno = xfs_buf_daddr(bp);
__entry->nblks = bp->b_length;
- __entry->hold = bp->b_hold;
+ __entry->hold = bp->b_lockref.count;
__entry->pincount = atomic_read(&bp->b_pin_count);
__entry->lockval = bp->b_sema.count;
__entry->flags = bp->b_flags;