	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	btrfs_put_delayed_ref(ref);
-	atomic_dec(&delayed_refs->num_entries);
	btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
}
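
For reference, here is roughly how drop_delayed_ref() reads after the change. The signature and the lockdep assertion are reconstructed from the upstream file and may differ slightly between kernel versions; note the delayed_refs argument becomes unused on this path once the counter is gone:

/* Sketch: drop_delayed_ref() after the change (context reconstructed). */
static void drop_delayed_ref(struct btrfs_fs_info *fs_info,
			     struct btrfs_delayed_ref_root *delayed_refs,
			     struct btrfs_delayed_ref_head *head,
			     struct btrfs_delayed_ref_node *ref)
{
	lockdep_assert_held(&head->lock);

	rb_erase_cached(&ref->ref_node, &head->ref_tree);
	RB_CLEAR_NODE(&ref->ref_node);
	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	btrfs_put_delayed_ref(ref);
	/* Return the metadata space reserved for this one ref. */
	btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
}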

In btrfs_delete_ref_head() (fs/btrfs/delayed-ref.c), the matching decrement for a ref head leaving the rbtree goes away as well:

	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
	RB_CLEAR_NODE(&head->href_node);
-	atomic_dec(&delayed_refs->num_entries);
	delayed_refs->num_heads--;
	if (!head->processing)
		delayed_refs->num_heads_ready--;
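
The rb_erase_cached() plus RB_CLEAR_NODE() pair used above is a common kernel idiom: clearing the node after erasing it makes later RB_EMPTY_NODE() checks report that the node is no longer linked into any tree. A minimal illustration of the idiom (generic kernel-style code, not btrfs):

#include <linux/rbtree.h>

/* Unlink @node from @root and mark it empty, so RB_EMPTY_NODE(@node) holds. */
static void unlink_and_clear(struct rb_root_cached *root, struct rb_node *node)
{
	rb_erase_cached(node, root);
	RB_CLEAR_NODE(node);	/* points the parent pointer back at the node */
}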

In insert_delayed_ref() (fs/btrfs/delayed-ref.c), the increment for a newly inserted ref node is dropped:

	if (!exist) {
		if (ref->action == BTRFS_ADD_DELAYED_REF)
			list_add_tail(&ref->add_list, &href->ref_add_list);
-		atomic_inc(&root->num_entries);
		spin_unlock(&href->lock);
		trans->delayed_ref_updates++;
		return false;
	}
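
insert_delayed_ref() returns false when the node was linked into the head's ref tree, and true when it was merged into an existing ref, in which case the caller must free the node it allocated. A paraphrase of the caller pattern with a hypothetical free helper, not verbatim upstream code:

/* Paraphrased caller pattern; free_delayed_ref_node() is a hypothetical helper. */
bool merged = insert_delayed_ref(trans, head_ref, node);

if (merged)
	free_delayed_ref_node(node);	/* folded into an existing ref */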

In add_delayed_ref_head() (fs/btrfs/delayed-ref.c), the increment for a newly added ref head is dropped:

		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
-		atomic_inc(&delayed_refs->num_entries);
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;

In struct btrfs_delayed_ref_root (fs/btrfs/delayed-ref.h), the counter and its stale comment are removed:

	/* this spin lock protects the rbtree and the entries inside */
	spinlock_t lock;
-	/* how many delayed ref updates we've queued, used by the
-	 * throttling code
-	 */
-	atomic_t num_entries;
-
	/* total number of head nodes in tree */
	unsigned long num_heads;
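
The comment being deleted refers to a throttling heuristic that older kernels implemented in btrfs_should_throttle_delayed_refs(), which multiplied this counter by the measured average time to run one delayed ref. A rough reconstruction from older kernel sources, hedged, not the exact historical code:

/*
 * Rough sketch of the historical consumer (paraphrased from older kernels;
 * the exact code varied across releases and is gone from current trees).
 */
static int should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
{
	u64 num_entries =
		atomic_read(&trans->transaction->delayed_refs.num_entries);
	u64 avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
	u64 val = num_entries * avg_runtime;

	/* Throttle if flushing the queued refs would take around a second. */
	if (val >= NSEC_PER_SEC)
		return 1;
	if (val >= NSEC_PER_SEC / 2)
		return 2;
	return 0;
}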

Finally, join_transaction() (fs/btrfs/transaction.c) no longer initializes the counter:

	cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
	xa_init(&cur_trans->delayed_refs.dirty_extents);
-	atomic_set(&cur_trans->delayed_refs.num_entries, 0);
	/*
	 * although the tree mod log is per file system and not per transaction,