trans->paths_allocated[0] = 1;
+ static struct lock_class_key lockdep_key;
+ lockdep_init_map(&trans->dep_map, "bcachefs_btree", &lockdep_key, 0);
+
if (fn_idx < BCH_TRANSACTIONS_NR) {
trans->fn = bch2_btree_transaction_fns[fn_idx];
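The hunk above registers each btree_trans with lockdep as a pseudo-lock: a single static lock_class_key puts every transaction in the same lock class, and lockdep_init_map() attaches that class to the per-transaction dep_map. Both compile to nothing when lockdep is disabled. A minimal sketch of the same pattern for a hypothetical primitive (struct foo, foo_init() and the "foo" class name are illustrative, not bcachefs API):

	#include <linux/lockdep.h>

	struct foo {
		bool			locked;
		struct lockdep_map	dep_map;	/* zero-size stub without lockdep */
	};

	static void foo_init(struct foo *f)
	{
		/* one static key => one lockdep class shared by every foo */
		static struct lock_class_key foo_key;

		f->locked = false;
		lockdep_init_map(&f->dep_map, "foo", &foo_key, 0);
	}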
ret = mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
BTREE_TRANS_MEM_MAX) ?:
init_srcu_struct(&c->btree_trans_barrier);
- if (!ret)
- c->btree_trans_barrier_initialized = true;
- return ret;
+ if (ret)
+ return ret;
+
+ /*
+ * static annotation (hackily done) for lock ordering of reclaim vs.
+ * btree node locks:
+ */
+#ifdef CONFIG_LOCKDEP
+ fs_reclaim_acquire(GFP_KERNEL);
+ struct btree_trans *trans = bch2_trans_get(c);
+ trans_set_locked(trans);
+ bch2_trans_put(trans);
+ fs_reclaim_release(GFP_KERNEL);
+#endif
+
+ c->btree_trans_barrier_initialized = true;
+ return 0;
}
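This init-time block is the "static annotation" the comment describes: fs_reclaim_acquire() makes lockdep believe we are inside memory reclaim, and locking a throwaway transaction in that context records the ordering reclaim -> btree_trans. From then on, anything that can itself enter reclaim (e.g. a GFP_KERNEL allocation) while a transaction is locked is the inverse order and gets reported. The trick in isolation, reusing the hypothetical foo from the sketch above:

	#include <linux/gfp.h>
	#include <linux/sched/mm.h>	/* fs_reclaim_acquire()/fs_reclaim_release() */

	static void foo_record_reclaim_ordering(struct foo *f)
	{
	#ifdef CONFIG_LOCKDEP
		/* simulate reclaim, then take and drop foo exactly once */
		fs_reclaim_acquire(GFP_KERNEL);
		lock_acquire_exclusive(&f->dep_map, 0, 0, NULL, _THIS_IP_);
		lock_release(&f->dep_map, _THIS_IP_);
		fs_reclaim_release(GFP_KERNEL);
	#endif
	}

Note the two config symbols in play: the dep_map field added below is guarded by CONFIG_DEBUG_LOCK_ALLOC, which defines the map's real contents, while the fs_reclaim hooks need full CONFIG_LOCKDEP, hence the different #ifdef here.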
static inline void trans_set_locked(struct btree_trans *trans)
{
if (!trans->locked) {
+ lock_acquire_exclusive(&trans->dep_map, 0, 0, NULL, _THIS_IP_);
trans->locked = true;
trans->last_unlock_ip = 0;
static inline void trans_set_unlocked(struct btree_trans *trans)
{
if (trans->locked) {
+ lock_release(&trans->dep_map, _THIS_IP_);
trans->locked = false;
trans->last_unlock_ip = _RET_IP_;
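With the map in place, the lock and unlock paths bracket the state flip with lockdep's acquire/release hooks; because trans->locked is a plain bool rather than a real lock type, these have to be called by hand. A self-contained sketch of such a pair for the hypothetical foo:

	static inline void foo_lock(struct foo *f)
	{
		if (!f->locked) {
			/* args: map, subclass 0, no trylock, no nest_lock, caller IP */
			lock_acquire_exclusive(&f->dep_map, 0, 0, NULL, _THIS_IP_);
			f->locked = true;
		}
	}

	static inline void foo_unlock(struct foo *f)
	{
		if (f->locked) {
			lock_release(&f->dep_map, _THIS_IP_);
			f->locked = false;
		}
	}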
unsigned journal_u64s;
unsigned extra_disk_res; /* XXX kill */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
/* Entries before this are zeroed out on every bch2_trans_get() call */
struct list_head list;
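The placement of the new field is deliberate: dep_map sits above the "zeroed out on every bch2_trans_get() call" marker, so the map is wiped along with the rest of the per-use state. That is why __bch2_trans_get() calls lockdep_init_map() on every transaction get, rather than only once when the transaction is first allocated.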