		a->k.version = journal_pos_to_bversion(&trans->journal_res,
						(u64 *) entry - (u64 *) trans->journal_entries);
		BUG_ON(bversion_zero(a->k.version));
- 		ret = bch2_accounting_mem_mod_locked(trans, accounting_i_to_s_c(a), false);
+ 		ret = bch2_accounting_mem_mod_locked(trans, accounting_i_to_s_c(a), false, false);
		if (ret)
			goto revert_fs_usage;
	}
		struct bkey_s_accounting a = bkey_i_to_s_accounting(entry2->start);
		bch2_accounting_neg(a);
- 		bch2_accounting_mem_mod_locked(trans, a.c, false);
+ 		bch2_accounting_mem_mod_locked(trans, a.c, false, false);
		bch2_accounting_neg(a);
	}
	percpu_up_read(&c->mark_lock);
	struct { __BKEY_PADDED(k, BCH_ACCOUNTING_MAX_COUNTERS); } k_i;
	accounting_key_init(&k_i.k, &acc_k, src_v, nr);
- 	bch2_accounting_mem_mod_locked(trans, bkey_i_to_s_c_accounting(&k_i.k), false);
+ 	bch2_accounting_mem_mod_locked(trans, bkey_i_to_s_c_accounting(&k_i.k), false, false);
	preempt_disable();
	struct bch_fs_usage_base *dst = this_cpu_ptr(c->usage);
	return 0;
	percpu_down_read(&c->mark_lock);
- 	int ret = __bch2_accounting_mem_mod(c, bkey_s_c_to_accounting(k), false);
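+ 	/* read: update the in-memory counters only; trans->fs_usage_delta is left untouched */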
+ 	int ret = bch2_accounting_mem_mod_locked(trans, bkey_s_c_to_accounting(k), false, true);
	percpu_up_read(&c->mark_lock);
	if (bch2_accounting_key_is_zero(bkey_s_c_to_accounting(k)) &&
int bch2_accounting_mem_insert(struct bch_fs *, struct bkey_s_c_accounting, bool);
void bch2_accounting_mem_gc(struct bch_fs *);
- static inline int __bch2_accounting_mem_mod(struct bch_fs *c, struct bkey_s_c_accounting a, bool gc)
- {
- 	struct bch_accounting_mem *acc = &c->accounting;
- 	unsigned idx;
-
- 	EBUG_ON(gc && !acc->gc_running);
-
- 	while ((idx = eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
- 			accounting_pos_cmp, &a.k->p)) >= acc->k.nr) {
- 		int ret = bch2_accounting_mem_insert(c, a, gc);
- 		if (ret)
- 			return ret;
- 	}
-
- 	struct accounting_mem_entry *e = &acc->k.data[idx];
-
- 	EBUG_ON(bch2_accounting_counters(a.k) != e->nr_counters);
-
- 	for (unsigned i = 0; i < bch2_accounting_counters(a.k); i++)
- 		this_cpu_add(e->v[gc][i], a.v->d[i]);
- 	return 0;
- }
-
/*
* Update in memory counters so they match the btree update we're doing; called
* from transaction commit path
*/
-static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, struct bkey_s_c_accounting a, bool gc)
+static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, struct bkey_s_c_accounting a, bool gc, bool read)
{
	struct bch_fs *c = trans->c;
+ 	struct disk_accounting_pos acc_k;
+ 	bpos_to_disk_accounting_pos(&acc_k, a.k->p);
- 	if (!gc) {
- 		struct disk_accounting_pos acc_k;
- 		bpos_to_disk_accounting_pos(&acc_k, a.k->p);
-
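+ 	/* gc and read don't update the transaction's fs_usage_delta: */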
+ 	if (!gc && !read) {
		switch (acc_k.type) {
		case BCH_DISK_ACCOUNTING_persistent_reserved:
			trans->fs_usage_delta.reserved += acc_k.persistent_reserved.nr_replicas * a.v->d[0];
		}
	}
- 	return __bch2_accounting_mem_mod(c, a, gc);
+ 	struct bch_accounting_mem *acc = &c->accounting;
+ 	unsigned idx;
+
+ 	EBUG_ON(gc && !acc->gc_running);
+
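+ 	/*
+ 	 * Find this key's slot in the eytzinger-ordered in-memory array,
+ 	 * inserting a new entry if it isn't present yet:
+ 	 */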
+ 	while ((idx = eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
+ 			accounting_pos_cmp, &a.k->p)) >= acc->k.nr) {
+ 		int ret = bch2_accounting_mem_insert(c, a, gc);
+ 		if (ret)
+ 			return ret;
+ 	}
+
+ 	struct accounting_mem_entry *e = &acc->k.data[idx];
+
+ 	EBUG_ON(bch2_accounting_counters(a.k) != e->nr_counters);
+
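+ 	/* Add each counter delta to the percpu counters ([gc] selects the gc set): */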
+ 	for (unsigned i = 0; i < bch2_accounting_counters(a.k); i++)
+ 		this_cpu_add(e->v[gc][i], a.v->d[i]);
+ 	return 0;
}
static inline int bch2_accounting_mem_add(struct btree_trans *trans, struct bkey_s_c_accounting a, bool gc)
{
	percpu_down_read(&trans->c->mark_lock);
- 	int ret = bch2_accounting_mem_mod_locked(trans, a, gc);
+ 	int ret = bch2_accounting_mem_mod_locked(trans, a, gc, false);
	percpu_up_read(&trans->c->mark_lock);
	return ret;
}