struct bch_dev *ca = NULL;
int ret;
- down_read(&c->gc_lock);
-
if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
ret = for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
BTREE_ITER_prefetch, k, ({
bch2_dev_put(ca);
bch2_trans_put(trans);
- up_read(&c->gc_lock);
bch_err_fn(c, ret);
return ret;
/*
* Buckets:
* Per-bucket arrays are protected by c->mark_lock, bucket_lock and
- * gc_lock, for device resize - holding any is sufficient for access:
- * Or rcu_read_lock(), but only for dev_ptr_stale():
+ * state_lock, for device resize - holding any is sufficient for
+ * access; or rcu_read_lock(), but only for dev_ptr_stale():
*/
struct bucket_array __rcu *buckets_gc;
struct bucket_gens __rcu *bucket_gens;
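Illustration (a sketch, not part of the patch): a reader that already holds any one of the locks named in the comment above can use the lockdep-checked accessor directly, here under ca->bucket_lock; names are assumed from the surrounding code:

	struct bucket_gens *gens;
	size_t nbuckets;

	down_read(&ca->bucket_lock);
	gens = bucket_gens(ca);		/* lockdep-checked accessor, shown further down */
	nbuckets = gens->nbuckets;
	up_read(&ca->bucket_lock);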
int ret;
/*
- * Ideally we would be using state_lock and not gc_lock here, but that
+ * We can't take state_lock unconditionally here, because that
* introduces a deadlock in the RO path - we currently take the state
* lock at the start of going RO, thus the gc thread may get stuck:
*/
return 0;
trace_and_count(c, gc_gens_start, c);
- down_read(&c->gc_lock);
+
+ if (!down_read_trylock(&c->state_lock)) {
+ 	mutex_unlock(&c->gc_gens_lock);
+ 	return 0;
+ }
for_each_member_device(c, ca) {
struct bucket_gens *gens = bucket_gens(ca);
ca->oldest_gen = NULL;
}
- up_read(&c->gc_lock);
+ up_read(&c->state_lock);
mutex_unlock(&c->gc_gens_lock);
if (!bch2_err_matches(ret, EROFS))
bch_err_fn(c, ret);
bucket_gens->nbuckets - bucket_gens->first_bucket;
if (resize) {
- down_write(&c->gc_lock);
down_write(&ca->bucket_lock);
percpu_down_write(&c->mark_lock);
}
if (resize) {
percpu_up_write(&c->mark_lock);
up_write(&ca->bucket_lock);
- up_write(&c->gc_lock);
}
ret = 0;
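For context, a hedged sketch of the swap those write locks protect (assumptions: struct bucket_gens embeds a struct rcu_head named rcu so kvfree_rcu() applies, stores one u8 gen per bucket in b[], and the device-resize caller holds state_lock for write). The writer must hold every lock a reader might hold one of; RCU readers are handled by deferring the free:

	/* sketch: both write locks held; state_lock held by the resize caller */
	struct bucket_gens *old = rcu_dereference_protected(ca->bucket_gens,
				lockdep_is_held(&ca->bucket_lock));

	if (old)
		memcpy(bucket_gens->b, old->b,
		       min(old->nbuckets, bucket_gens->nbuckets));

	rcu_assign_pointer(ca->bucket_gens, bucket_gens);
	if (old)
		kvfree_rcu(old, rcu);	/* RCU readers may still see the old array */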
return rcu_dereference_check(ca->buckets_gc,
!ca->fs ||
percpu_rwsem_is_held(&ca->fs->mark_lock) ||
- lockdep_is_held(&ca->fs->gc_lock) ||
+ lockdep_is_held(&ca->fs->state_lock) ||
lockdep_is_held(&ca->bucket_lock));
}
return rcu_dereference_check(ca->bucket_gens,
!ca->fs ||
percpu_rwsem_is_held(&ca->fs->mark_lock) ||
- lockdep_is_held(&ca->fs->gc_lock) ||
+ lockdep_is_held(&ca->fs->state_lock) ||
lockdep_is_held(&ca->bucket_lock));
}
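Usage illustration with a hypothetical helper (not in the patch, and assuming the u8 b[] gen array): the RCU-only mode the struct comment reserves for dev_ptr_stale()-style checks works because rcu_dereference_check() also accepts rcu_read_lock(); the dereference and the copy-out stay inside the read-side critical section:

static inline u8 bucket_gen_sketch(struct bch_dev *ca, size_t b)
{
	u8 gen;

	rcu_read_lock();
	gen = bucket_gens(ca)->b[b];	/* no lock held: RCU read side only */
	rcu_read_unlock();
	return gen;
}

(Real code would bounds-check b against first_bucket and nbuckets first.)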
{
int ret = 0;
+ down_read(&c->state_lock);
+
for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) {
struct recovery_pass_fn *p = recovery_pass_fns + i;
break;
}
+ up_read(&c->state_lock);
+
return ret;
}