unsigned long *buckets_nouse;
struct rw_semaphore bucket_lock;
- struct bch_dev_usage *usage_base;
- struct bch_dev_usage __percpu *usage[JOURNAL_BUF_NR];
+ struct bch_dev_usage __percpu *usage;
struct bch_dev_usage __percpu *usage_gc;
/* Allocator: */
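Pieced together from the hunks below (every field name and the BCH_DATA_NR bound appear elsewhere in this patch), the per-device usage struct after this change reduces to a single percpu array of counters, one slot per data type:

struct bch_dev_usage {
	struct bch_dev_usage_type {
		u64 buckets;
		u64 sectors;	/* _compressed_ sectors */
		u64 fragmented;
	} d[BCH_DATA_NR];
};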
bch2_fs_usage_acc_to_base(c, i);
__for_each_member_device(c, ca) {
- struct bch_dev_usage *dst = ca->usage_base;
+ struct bch_dev_usage *dst = this_cpu_ptr(ca->usage);
struct bch_dev_usage *src = (void *)
bch2_acc_percpu_u64s((u64 __percpu *) ca->usage_gc,
dev_usage_u64s());
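bch2_acc_percpu_u64s() folds every other CPU's copy of the gc counters into this CPU's copy and returns a plain pointer to the result, which is why src can be treated as a flat struct here. A sketch of its semantics, assuming the util.c helper drains the other copies (the real one also guards the this_cpu_ptr() lookup against preemption):

u64 *bch2_acc_percpu_u64s(u64 __percpu *p, unsigned nr)
{
	u64 *ret = this_cpu_ptr(p);
	int cpu;

	for_each_possible_cpu(cpu) {
		u64 *i = per_cpu_ptr(p, cpu);

		if (i != ret) {
			/* drain this CPU's copy into the one we return */
			for (unsigned j = 0; j < nr; j++)
				ret[j] += i[j];
			memset(i, 0, nr * sizeof(u64));
		}
	}
	return ret;
}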
void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
{
- struct bch_fs *c = ca->fs;
- unsigned seq, i, u64s = dev_usage_u64s();
-
- do {
- seq = read_seqcount_begin(&c->usage_lock);
- memcpy(usage, ca->usage_base, u64s * sizeof(u64));
- for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
- acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage[i], u64s);
- } while (read_seqcount_retry(&c->usage_lock, seq));
+ memset(usage, 0, sizeof(*usage));
+ acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage, dev_usage_u64s());
}
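With the usage_base/delta split gone there is nothing for a reader to observe half-applied across two halves, so the usage_lock seqcount retry loop can go: a read is now a plain sum over each CPU's copy. The accumulation helpers in util.h are essentially:

static inline void acc_u64s(u64 *acc, const u64 *src, unsigned nr)
{
	for (unsigned i = 0; i < nr; i++)
		acc[i] += src[i];
}

static inline void acc_u64s_percpu(u64 *acc, const u64 __percpu *src, unsigned nr)
{
	int cpu;

	for_each_possible_cpu(cpu)
		acc_u64s(acc, per_cpu_ptr(src, cpu), nr);
}

A reader racing with this_cpu_add() may see individual counters from slightly different points in time, which is fine for usage statistics; it never has to retry.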
u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
(u64 __percpu *) c->usage[idx], u64s);
percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
- rcu_read_lock();
- for_each_member_device_rcu(c, ca, NULL) {
- u64s = dev_usage_u64s();
-
- acc_u64s_percpu((u64 *) ca->usage_base,
- (u64 __percpu *) ca->usage[idx], u64s);
- percpu_memset(ca->usage[idx], 0, u64s * sizeof(u64));
- }
- rcu_read_unlock();
-
write_seqcount_end(&c->usage_lock);
preempt_enable();
}
{
kvfree(ca->buckets_nouse);
kvfree(rcu_dereference_protected(ca->bucket_gens, 1));
-
- for (unsigned i = 0; i < ARRAY_SIZE(ca->usage); i++)
- free_percpu(ca->usage[i]);
- kfree(ca->usage_base);
+ free_percpu(ca->usage);
}
int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
{
- ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
- if (!ca->usage_base)
+ ca->usage = alloc_percpu(struct bch_dev_usage);
+ if (!ca->usage)
return -BCH_ERR_ENOMEM_usage_init;
- for (unsigned i = 0; i < ARRAY_SIZE(ca->usage); i++) {
- ca->usage[i] = alloc_percpu(struct bch_dev_usage);
- if (!ca->usage[i])
- return -BCH_ERR_ENOMEM_usage_init;
- }
-
return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
}
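dev_usage_u64s() sizes the flat-u64 view of struct bch_dev_usage that acc_u64s_percpu() and bch2_acc_percpu_u64s() operate on; judging by how it is used above, it is just:

static inline unsigned dev_usage_u64s(void)
{
	return sizeof(struct bch_dev_usage) / sizeof(u64);
}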
};
struct bch_dev_usage {
- struct {
+ struct bch_dev_usage_type {
u64 buckets;
u64 sectors; /* _compressed_ sectors: */
/*
case BCH_DISK_ACCOUNTING_replicas:
fs_usage_data_type_to_base(usage, k.replicas.data_type, v[0]);
break;
+ case BCH_DISK_ACCOUNTING_dev_data_type:
+ rcu_read_lock();
+ struct bch_dev *ca = bch2_dev_rcu(c, k.dev_data_type.dev);
+ if (ca) {
+ struct bch_dev_usage_type __percpu *d = &ca->usage->d[k.dev_data_type.data_type];
+ percpu_u64_set(&d->buckets, v[0]);
+ percpu_u64_set(&d->sectors, v[1]);
+ percpu_u64_set(&d->fragmented, v[2]);
+
+ if (k.dev_data_type.data_type == BCH_DATA_sb ||
+ k.dev_data_type.data_type == BCH_DATA_journal)
+ usage->hidden += v[0] * ca->mi.bucket_size;
+ }
+ rcu_read_unlock();
+ break;
}
}
preempt_enable();
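percpu_u64_set() is the write-side counterpart used above to reset a device counter to the on-disk value at accounting-read time: it must leave the distributed sum equal to the given value. A sketch of the behavior its call sites require:

/* After this, summing all CPUs' copies of *dst yields src. */
static inline void percpu_u64_set(u64 __percpu *dst, u64 src)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(dst, cpu) = 0;

	this_cpu_write(*dst, src);
}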
#define _BCACHEFS_DISK_ACCOUNTING_H
#include "eytzinger.h"
+#include "sb-members.h"
static inline void bch2_u64s_neg(u64 *v, unsigned nr)
{
static inline int bch2_accounting_mem_mod(struct btree_trans *trans,
					  struct bkey_s_c_accounting a)
{
+ struct bch_fs *c = trans->c;
struct disk_accounting_pos acc_k;
bpos_to_disk_accounting_pos(&acc_k, a.k->p);
case BCH_DISK_ACCOUNTING_replicas:
fs_usage_data_type_to_base(&trans->fs_usage_delta, acc_k.replicas.data_type, a.v->d[0]);
break;
+ case BCH_DISK_ACCOUNTING_dev_data_type:
+ rcu_read_lock();
+ struct bch_dev *ca = bch2_dev_rcu(c, acc_k.dev_data_type.dev);
+ if (ca) {
+ this_cpu_add(ca->usage->d[acc_k.dev_data_type.data_type].buckets, a.v->d[0]);
+ this_cpu_add(ca->usage->d[acc_k.dev_data_type.data_type].sectors, a.v->d[1]);
+ this_cpu_add(ca->usage->d[acc_k.dev_data_type.data_type].fragmented, a.v->d[2]);
+ }
+ rcu_read_unlock();
+ break;
}
- return __bch2_accounting_mem_mod(trans->c, a);
+ return __bch2_accounting_mem_mod(c, a);
}
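Per-device usage is now maintained transactionally alongside the accounting keys, and readers simply sum the percpu counters. A hypothetical caller (example_free_buckets() is illustration only, not part of this patch):

static u64 example_free_buckets(struct bch_dev *ca)
{
	struct bch_dev_usage u;

	bch2_dev_usage_read_fast(ca, &u);	/* sums every CPU's copy */
	return u.d[BCH_DATA_free].buckets;
}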
static inline void bch2_accounting_mem_read_counters(struct bch_fs *c,
le64_to_cpu(u->v));
break;
}
- case BCH_JSET_ENTRY_dev_usage: {
- struct jset_entry_dev_usage *u =
- container_of(entry, struct jset_entry_dev_usage, entry);
- unsigned nr_types = jset_entry_dev_usage_nr_types(u);
-
- rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu(c, le32_to_cpu(u->dev));
- if (ca)
- for (unsigned i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
- ca->usage_base->d[i].buckets = le64_to_cpu(u->d[i].buckets);
- ca->usage_base->d[i].sectors = le64_to_cpu(u->d[i].sectors);
- ca->usage_base->d[i].fragmented = le64_to_cpu(u->d[i].fragmented);
- }
- rcu_read_unlock();
-
- break;
- }
case BCH_JSET_ENTRY_blacklist: {
struct jset_entry_blacklist *bl_entry =
container_of(entry, struct jset_entry_blacklist, entry);
"embedded variable length struct");
}
- for_each_member_device(c, ca) {
- unsigned b = sizeof(struct jset_entry_dev_usage) +
- sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR;
- struct jset_entry_dev_usage *u =
- container_of(jset_entry_init(end, b),
- struct jset_entry_dev_usage, entry);
-
- u->entry.type = BCH_JSET_ENTRY_dev_usage;
- u->dev = cpu_to_le32(ca->dev_idx);
-
- for (unsigned i = 0; i < BCH_DATA_NR; i++) {
- u->d[i].buckets = cpu_to_le64(ca->usage_base->d[i].buckets);
- u->d[i].sectors = cpu_to_le64(ca->usage_base->d[i].sectors);
- u->d[i].fragmented = cpu_to_le64(ca->usage_base->d[i].fragmented);
- }
- }
-
percpu_up_read(&c->mark_lock);
for (unsigned i = 0; i < 2; i++) {