bcachefs: Fix race in bch2_accounting_mem_insert()
author    Kent Overstreet <kent.overstreet@linux.dev>
          Wed, 5 Jun 2024 16:35:48 +0000 (12:35 -0400)
committer Kent Overstreet <kent.overstreet@linux.dev>
          Sun, 14 Jul 2024 23:00:15 +0000 (19:00 -0400)
bch2_accounting_mem_insert() drops and retakes mark_lock; thus, once the
write lock is held we need to re-check whether the entry in question has
already been inserted by a racing thread.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/disk_accounting.c
fs/bcachefs/disk_accounting.h

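To illustrate the pattern the fix relies on, here is a minimal, self-contained
sketch in plain C. The names (struct table, table_lookup, table_insert,
table_mod) are hypothetical stand-ins and a pthread rwlock stands in for the
per-cpu mark_lock; this is not the bcachefs code itself. It shows the point of
the patch: because the slowpath must drop the read lock before taking the
write lock, two threads can both miss the lookup and both reach the insert
path, so the insert has to re-check for the entry under the write lock and
return 0 if it is already there, while the fastpath repeats the lookup in a
loop rather than reusing a stale index.

/*
 * Minimal sketch of the drop-read-lock / take-write-lock / re-check
 * pattern.  All names here are hypothetical stand-ins; a
 * pthread_rwlock_t plays the role of c->mark_lock.
 * Build with: cc -std=c99 -pthread sketch.c
 */
#include <pthread.h>
#include <stdlib.h>

struct table {
	pthread_rwlock_t	lock;	/* stand-in for c->mark_lock */
	unsigned		nr;
	int			*keys;
};

/* Lookup under either lock mode; returns nr when the key is absent. */
static unsigned table_lookup(struct table *t, int key)
{
	for (unsigned i = 0; i < t->nr; i++)
		if (t->keys[i] == key)
			return i;
	return t->nr;
}

/* Caller holds the write lock. */
static int __table_insert(struct table *t, int key)
{
	/*
	 * Re-check: the read lock was dropped before the write lock was
	 * taken, so another thread may have inserted the key already.
	 */
	if (table_lookup(t, key) < t->nr)
		return 0;

	int *new_keys = realloc(t->keys, (t->nr + 1) * sizeof(*new_keys));
	if (!new_keys)
		return -1;

	new_keys[t->nr] = key;
	t->keys = new_keys;
	t->nr++;
	return 0;
}

/* Called with the read lock held; "upgrades" by drop-and-retake. */
static int table_insert(struct table *t, int key)
{
	pthread_rwlock_unlock(&t->lock);	/* percpu_up_read()    */
	pthread_rwlock_wrlock(&t->lock);	/* percpu_down_write() */
	int ret = __table_insert(t, key);
	pthread_rwlock_unlock(&t->lock);	/* percpu_up_write()   */
	pthread_rwlock_rdlock(&t->lock);	/* percpu_down_read()  */
	return ret;
}

/*
 * Fast path, mirroring the shape of __bch2_accounting_mem_mod(): loop
 * until the key is visible under the read lock, inserting it on a miss.
 */
static int table_mod(struct table *t, int key)
{
	pthread_rwlock_rdlock(&t->lock);

	unsigned idx;
	while ((idx = table_lookup(t, key)) >= t->nr) {
		int ret = table_insert(t, key);
		if (ret) {
			pthread_rwlock_unlock(&t->lock);
			return ret;
		}
	}

	/* ... modify the entry at idx under the read lock ... */
	(void) idx;
	pthread_rwlock_unlock(&t->lock);
	return 0;
}

The while loop in the header-side change below pairs with this:
bch2_accounting_mem_insert() does not return the new index, and the table may
have changed while the lock was dropped, so the caller simply repeats the
lookup until the entry is visible under the read lock.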
diff --git a/fs/bcachefs/disk_accounting.c b/fs/bcachefs/disk_accounting.c
index 510ed683f0a0e285e0ed230aa6eb5888bb3be6e3..29bc4c816f95ec350e5cc0b164007a1fdd512d93 100644
--- a/fs/bcachefs/disk_accounting.c
+++ b/fs/bcachefs/disk_accounting.c
@@ -218,7 +218,7 @@ int bch2_accounting_update_sb(struct btree_trans *trans)
        return 0;
 }
 
-static int __bch2_accounting_mem_mod_slowpath(struct bch_fs *c, struct bkey_s_c_accounting a, bool gc)
+static int __bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accounting a, bool gc)
 {
        struct bch_replicas_padded r;
 
@@ -226,7 +226,12 @@ static int __bch2_accounting_mem_mod_slowpath(struct bch_fs *c, struct bkey_s_c_
            !bch2_replicas_marked_locked(c, &r.e))
                return -BCH_ERR_btree_insert_need_mark_replicas;
 
+       /* raced with another insert, already present: */
        struct bch_accounting_mem *acc = &c->accounting[gc];
+       if (eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
+                           accounting_pos_cmp, &a.k->p) < acc->k.nr)
+               return 0;
+
        unsigned new_nr_counters = acc->nr_counters + bch2_accounting_counters(a.k);
 
        u64 __percpu *new_counters = __alloc_percpu_gfp(new_nr_counters * sizeof(u64),
@@ -256,17 +261,14 @@ static int __bch2_accounting_mem_mod_slowpath(struct bch_fs *c, struct bkey_s_c_
        free_percpu(acc->v);
        acc->v = new_counters;
        acc->nr_counters = new_nr_counters;
-
-       for (unsigned i = 0; i < n.nr_counters; i++)
-               this_cpu_add(acc->v[n.offset + i], a.v->d[i]);
        return 0;
 }
 
-int bch2_accounting_mem_mod_slowpath(struct bch_fs *c, struct bkey_s_c_accounting a, bool gc)
+int bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accounting a, bool gc)
 {
        percpu_up_read(&c->mark_lock);
        percpu_down_write(&c->mark_lock);
-       int ret = __bch2_accounting_mem_mod_slowpath(c, a, gc);
+       int ret = __bch2_accounting_mem_insert(c, a, gc);
        percpu_up_write(&c->mark_lock);
        percpu_down_read(&c->mark_lock);
        return ret;
diff --git a/fs/bcachefs/disk_accounting.h b/fs/bcachefs/disk_accounting.h
index ab1f74cb97c72a7b8b648c8803584ba84e1f5037..4b37f0c24b4edeffbe60d4bb8d6f94350e51ede2 100644
--- a/fs/bcachefs/disk_accounting.h
+++ b/fs/bcachefs/disk_accounting.h
@@ -104,15 +104,19 @@ static inline int accounting_pos_cmp(const void *_l, const void *_r)
        return bpos_cmp(*l, *r);
 }
 
-int bch2_accounting_mem_mod_slowpath(struct bch_fs *, struct bkey_s_c_accounting, bool);
+int bch2_accounting_mem_insert(struct bch_fs *, struct bkey_s_c_accounting, bool);
 
 static inline int __bch2_accounting_mem_mod(struct bch_fs *c, struct bkey_s_c_accounting a, bool gc)
 {
        struct bch_accounting_mem *acc = &c->accounting[gc];
-       unsigned idx = eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
-                                      accounting_pos_cmp, &a.k->p);
-       if (unlikely(idx >= acc->k.nr))
-               return bch2_accounting_mem_mod_slowpath(c, a, gc);
+       unsigned idx;
+
+       while ((idx = eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
+                                     accounting_pos_cmp, &a.k->p)) >= acc->k.nr) {
+               int ret = bch2_accounting_mem_insert(c, a, gc);
+               if (ret)
+                       return ret;
+       }
 
        unsigned offset = acc->k.data[idx].offset;