www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
bcachefs: Delete journal-buf-sharded old style accounting
author Kent Overstreet <kent.overstreet@linux.dev>
Thu, 28 Dec 2023 03:09:25 +0000 (22:09 -0500)
committer Kent Overstreet <kent.overstreet@linux.dev>
Sun, 14 Jul 2024 23:00:13 +0000 (19:00 -0400)
More deletion of dead code.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/bcachefs.h
fs/bcachefs/btree_gc.c
fs/bcachefs/buckets.c
fs/bcachefs/buckets.h
fs/bcachefs/disk_accounting.c
fs/bcachefs/recovery.c
fs/bcachefs/replicas.c
fs/bcachefs/replicas.h
fs/bcachefs/super.c

index 89ffe38b6bfea47dd791f0a767b83c91b29c71ed..50937f6aec0ac5d87029679e6eeb0fdd350f8e48 100644 (file)
@@ -891,8 +891,7 @@ struct bch_fs {
        struct percpu_rw_semaphore      mark_lock;
 
        seqcount_t                      usage_lock;
-       struct bch_fs_usage             *usage_base;
-       struct bch_fs_usage __percpu    *usage[JOURNAL_BUF_NR];
+       struct bch_fs_usage_base __percpu *usage;
        struct bch_fs_usage __percpu    *usage_gc;
        u64 __percpu            *online_reserved;
 
index fe7293166e3724646fa84076ce824ca8f40930e0..c79258e3e69cb7682646436cae15a8e361321c6a 100644 (file)
@@ -769,10 +769,8 @@ static int bch2_gc_done(struct bch_fs *c)
 #define copy_fs_field(_err, _f, _msg, ...)                                     \
        copy_field(_err, _f, "fs has wrong " _msg, ##__VA_ARGS__)
 
-       for (i = 0; i < ARRAY_SIZE(c->usage); i++)
-               bch2_fs_usage_acc_to_base(c, i);
-
        __for_each_member_device(c, ca) {
+               /* XXX */
                struct bch_dev_usage *dst = this_cpu_ptr(ca->usage);
                struct bch_dev_usage *src = (void *)
                        bch2_acc_percpu_u64s((u64 __percpu *) ca->usage_gc,
@@ -789,8 +787,10 @@ static int bch2_gc_done(struct bch_fs *c)
        }
 
        {
+#if 0
                unsigned nr = fs_usage_u64s(c);
-               struct bch_fs_usage *dst = c->usage_base;
+               /* XX: */
+               struct bch_fs_usage *dst = this_cpu_ptr(c->usage);
                struct bch_fs_usage *src = (void *)
                        bch2_acc_percpu_u64s((u64 __percpu *) c->usage_gc, nr);
 
@@ -823,6 +823,7 @@ static int bch2_gc_done(struct bch_fs *c)
                        copy_fs_field(fs_usage_replicas_wrong,
                                      replicas[i], "%s", buf.buf);
                }
+#endif
        }
 
 #undef copy_fs_field
index 07cdb429991820ba09e78d729dd0131fbfe124f4..240fc8185d4faaf12c459118f0d8492b984c8d90 100644 (file)
 
 #include <linux/preempt.h>
 
-static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
-                                               unsigned journal_seq,
-                                               bool gc)
-{
-       percpu_rwsem_assert_held(&c->mark_lock);
-       BUG_ON(!gc && !journal_seq);
-
-       return this_cpu_ptr(gc
-                           ? c->usage_gc
-                           : c->usage[journal_seq & JOURNAL_BUF_MASK]);
-}
-
 void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
 {
        memset(usage, 0, sizeof(*usage));
        acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage, dev_usage_u64s());
 }
 
-u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
-{
-       ssize_t offset = v - (u64 *) c->usage_base;
-       unsigned i, seq;
-       u64 ret;
-
-       BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
-       percpu_rwsem_assert_held(&c->mark_lock);
-
-       do {
-               seq = read_seqcount_begin(&c->usage_lock);
-               ret = *v;
-
-               for (i = 0; i < ARRAY_SIZE(c->usage); i++)
-                       ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset);
-       } while (read_seqcount_retry(&c->usage_lock, seq));
-
-       return ret;
-}
-
-void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
-{
-       unsigned u64s = fs_usage_u64s(c);
-
-       BUG_ON(idx >= ARRAY_SIZE(c->usage));
-
-       preempt_disable();
-       write_seqcount_begin(&c->usage_lock);
-
-       acc_u64s_percpu((u64 *) c->usage_base,
-                       (u64 __percpu *) c->usage[idx], u64s);
-       percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
-
-       write_seqcount_end(&c->usage_lock);
-       preempt_enable();
-}
-
 void bch2_fs_usage_to_text(struct printbuf *out,
                           struct bch_fs *c,
                           struct bch_fs_usage_online *fs_usage)
@@ -142,17 +93,17 @@ __bch2_fs_usage_read_short(struct bch_fs *c)
        u64 data, reserved;
 
        ret.capacity = c->capacity -
-               bch2_fs_usage_read_one(c, &c->usage_base->b.hidden);
+               percpu_u64_get(&c->usage->hidden);
 
-       data            = bch2_fs_usage_read_one(c, &c->usage_base->b.data) +
-               bch2_fs_usage_read_one(c, &c->usage_base->b.btree);
-       reserved        = bch2_fs_usage_read_one(c, &c->usage_base->b.reserved) +
+       data            = percpu_u64_get(&c->usage->data) +
+               percpu_u64_get(&c->usage->btree);
+       reserved        = percpu_u64_get(&c->usage->reserved) +
                percpu_u64_get(c->online_reserved);
 
        ret.used        = min(ret.capacity, data + reserve_factor(reserved));
        ret.free        = ret.capacity - ret.used;
 
-       ret.nr_inodes   = bch2_fs_usage_read_one(c, &c->usage_base->b.nr_inodes);
+       ret.nr_inodes   = percpu_u64_get(&c->usage->nr_inodes);
 
        return ret;
 }
@@ -673,7 +624,7 @@ void bch2_trans_account_disk_usage_change(struct btree_trans *trans)
 
        percpu_down_read(&c->mark_lock);
        preempt_disable();
-       struct bch_fs_usage_base *dst = &fs_usage_ptr(c, trans->journal_res.seq, false)->b;
+       struct bch_fs_usage_base *dst = this_cpu_ptr(c->usage);
        struct bch_fs_usage_base *src = &trans->fs_usage_delta;
 
        s64 added = src->btree + src->data + src->reserved;
index df73a47a41232f3e8dcaf5b5c950850ad6e4086c..711c85e24f6d1360a86cbfc902ae97d6cdfc3486 100644 (file)
@@ -298,10 +298,6 @@ static inline unsigned dev_usage_u64s(void)
        return sizeof(struct bch_dev_usage) / sizeof(u64);
 }
 
-u64 bch2_fs_usage_read_one(struct bch_fs *, u64 *);
-
-void bch2_fs_usage_acc_to_base(struct bch_fs *, unsigned);
-
 void bch2_fs_usage_to_text(struct printbuf *,
                           struct bch_fs *, struct bch_fs_usage_online *);
 
index eadf4f6392bf3266977f2e6d01dd89e4f590ed2b..f5b5d896979e8ae9606d461f31c724cc9559d08b 100644 (file)
@@ -384,7 +384,7 @@ int bch2_accounting_read(struct bch_fs *c)
 
        percpu_down_read(&c->mark_lock);
        preempt_disable();
-       struct bch_fs_usage_base *usage = &c->usage_base->b;
+       struct bch_fs_usage_base *usage = this_cpu_ptr(c->usage);
 
        for (unsigned i = 0; i < acc->k.nr; i++) {
                struct disk_accounting_pos k;
index abdb26d4506881003637a1cbc2991f2f3a4ee330..4006b8ec4fe89fa80f08509268a6cbc718f53cac 100644 (file)
@@ -427,28 +427,10 @@ static int journal_replay_entry_early(struct bch_fs *c,
                        container_of(entry, struct jset_entry_usage, entry);
 
                switch (entry->btree_id) {
-               case BCH_FS_USAGE_reserved:
-                       if (entry->level < BCH_REPLICAS_MAX)
-                               c->usage_base->persistent_reserved[entry->level] =
-                                       le64_to_cpu(u->v);
-                       break;
-               case BCH_FS_USAGE_inodes:
-                       c->usage_base->b.nr_inodes = le64_to_cpu(u->v);
-                       break;
                case BCH_FS_USAGE_key_version:
-                       atomic64_set(&c->key_version,
-                                    le64_to_cpu(u->v));
+                       atomic64_set(&c->key_version, le64_to_cpu(u->v));
                        break;
                }
-
-               break;
-       }
-       case BCH_JSET_ENTRY_data_usage: {
-               struct jset_entry_data_usage *u =
-                       container_of(entry, struct jset_entry_data_usage, entry);
-
-               ret = bch2_replicas_set_usage(c, &u->r,
-                                             le64_to_cpu(u->v));
                break;
        }
        case BCH_JSET_ENTRY_blacklist: {
index 06783f357f8d505edfb4ac80078fffeddfc436e6..05214ad2ad358b52569fb6d9dae28144971e8bda 100644 (file)
@@ -307,46 +307,23 @@ static void __replicas_table_update_pcpu(struct bch_fs_usage __percpu *dst_p,
 static int replicas_table_update(struct bch_fs *c,
                                 struct bch_replicas_cpu *new_r)
 {
-       struct bch_fs_usage __percpu *new_usage[JOURNAL_BUF_NR];
        struct bch_fs_usage __percpu *new_gc = NULL;
-       struct bch_fs_usage *new_base = NULL;
-       unsigned i, bytes = sizeof(struct bch_fs_usage) +
+       unsigned bytes = sizeof(struct bch_fs_usage) +
                sizeof(u64) * new_r->nr;
        int ret = 0;
 
-       memset(new_usage, 0, sizeof(new_usage));
-
-       for (i = 0; i < ARRAY_SIZE(new_usage); i++)
-               if (!(new_usage[i] = __alloc_percpu_gfp(bytes,
-                                       sizeof(u64), GFP_KERNEL)))
-                       goto err;
-
-       if (!(new_base = kzalloc(bytes, GFP_KERNEL)) ||
-           (c->usage_gc &&
+       if ((c->usage_gc &&
             !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_KERNEL))))
                goto err;
 
-       for (i = 0; i < ARRAY_SIZE(new_usage); i++)
-               if (c->usage[i])
-                       __replicas_table_update_pcpu(new_usage[i], new_r,
-                                                    c->usage[i], &c->replicas);
-       if (c->usage_base)
-               __replicas_table_update(new_base,               new_r,
-                                       c->usage_base,          &c->replicas);
        if (c->usage_gc)
                __replicas_table_update_pcpu(new_gc,            new_r,
                                             c->usage_gc,       &c->replicas);
 
-       for (i = 0; i < ARRAY_SIZE(new_usage); i++)
-               swap(c->usage[i],       new_usage[i]);
-       swap(c->usage_base,     new_base);
        swap(c->usage_gc,       new_gc);
        swap(c->replicas,       *new_r);
 out:
        free_percpu(new_gc);
-       for (i = 0; i < ARRAY_SIZE(new_usage); i++)
-               free_percpu(new_usage[i]);
-       kfree(new_base);
        return ret;
 err:
        bch_err(c, "error updating replicas table: memory allocation failure");
@@ -537,6 +514,8 @@ int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
  */
 int bch2_replicas_gc2(struct bch_fs *c)
 {
+       return 0;
+#if 0
        struct bch_replicas_cpu new = { 0 };
        unsigned i, nr;
        int ret = 0;
@@ -591,34 +570,7 @@ retry:
        mutex_unlock(&c->sb_lock);
 
        return ret;
-}
-
-int bch2_replicas_set_usage(struct bch_fs *c,
-                           struct bch_replicas_entry_v1 *r,
-                           u64 sectors)
-{
-       int ret, idx = bch2_replicas_entry_idx(c, r);
-
-       if (idx < 0) {
-               struct bch_replicas_cpu n;
-
-               n = cpu_replicas_add_entry(c, &c->replicas, r);
-               if (!n.entries)
-                       return -BCH_ERR_ENOMEM_cpu_replicas;
-
-               ret = replicas_table_update(c, &n);
-               if (ret)
-                       return ret;
-
-               kfree(n.entries);
-
-               idx = bch2_replicas_entry_idx(c, r);
-               BUG_ON(ret < 0);
-       }
-
-       c->usage_base->replicas[idx] = sectors;
-
-       return 0;
+#endif
 }
 
 /* Replicas tracking - superblock: */
@@ -1020,11 +972,6 @@ unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
 
 void bch2_fs_replicas_exit(struct bch_fs *c)
 {
-       unsigned i;
-
-       for (i = 0; i < ARRAY_SIZE(c->usage); i++)
-               free_percpu(c->usage[i]);
-       kfree(c->usage_base);
        kfree(c->replicas.entries);
        kfree(c->replicas_gc.entries);
 }
index 0a24ebcf71bd388af0e88da2a849a906acb8cea1..eade75ed48392f7e10b0b693317fd483782a712e 100644 (file)
@@ -53,10 +53,6 @@ int bch2_replicas_gc_end(struct bch_fs *, int);
 int bch2_replicas_gc_start(struct bch_fs *, unsigned);
 int bch2_replicas_gc2(struct bch_fs *);
 
-int bch2_replicas_set_usage(struct bch_fs *,
-                           struct bch_replicas_entry_v1 *,
-                           u64);
-
 #define for_each_cpu_replicas_entry(_r, _i)                            \
        for (_i = (_r)->entries;                                        \
             (void *) (_i) < (void *) (_r)->entries + (_r)->nr * (_r)->entry_size;\
index e7a17179f7410befa33001b7b7a06885c5e5f946..01f8c7dd45a5ff775f09e8f09fbd4a27952f9eb1 100644 (file)
@@ -573,6 +573,7 @@ static void __bch2_fs_free(struct bch_fs *c)
 
        darray_exit(&c->btree_roots_extra);
        free_percpu(c->pcpu);
+       free_percpu(c->usage);
        mempool_exit(&c->large_bkey_pool);
        mempool_exit(&c->btree_bounce_pool);
        bioset_exit(&c->btree_bio);
@@ -898,6 +899,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
                            offsetof(struct btree_write_bio, wbio.bio)),
                        BIOSET_NEED_BVECS) ||
            !(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
+           !(c->usage = alloc_percpu(struct bch_fs_usage_base)) ||
            !(c->online_reserved = alloc_percpu(u64)) ||
            mempool_init_kvmalloc_pool(&c->btree_bounce_pool, 1,
                                       c->opts.btree_node_size) ||