return ret;
 }
 
-static inline int is_unavailable_bucket(struct bucket_mark m)
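+/* open-coded !is_available_bucket(): in use if it holds dirty data or a stripe */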
+static inline int is_unavailable_bucket(struct bch_alloc_v4 a)
 {
-       return !is_available_bucket(m);
+       return a.dirty_sectors || a.stripe;
 }
 
 static inline int bucket_sectors_fragmented(struct bch_dev *ca,
-                                           struct bucket_mark m)
+                                           struct bch_alloc_v4 a)
 {
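+       /* sectors wasted in a partly written bucket; empty buckets aren't fragmented */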
-       return m.dirty_sectors
-               ? max(0, (int) ca->mi.bucket_size - (int) m.dirty_sectors)
+       return a.dirty_sectors
+               ? max(0, (int) ca->mi.bucket_size - (int) a.dirty_sectors)
                : 0;
 }
 
-static inline enum bch_data_type bucket_type(struct bucket_mark m)
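+/* buckets holding only cached data are accounted as BCH_DATA_cached */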
+static inline enum bch_data_type bucket_type(struct bch_alloc_v4 a)
 {
-       return m.cached_sectors && !m.dirty_sectors
+       return a.cached_sectors && !a.dirty_sectors
                ? BCH_DATA_cached
-               : m.data_type;
+               : a.data_type;
 }
 
 static inline void account_bucket(struct bch_fs_usage *fs_usage,
 }
 
 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
-                                 struct bucket_mark old, struct bucket_mark new,
+                                 struct bch_alloc_v4 old,
+                                 struct bch_alloc_v4 new,
                                  u64 journal_seq, bool gc)
 {
        struct bch_fs_usage *fs_usage;
        preempt_enable();
 }
 
+static void bch2_dev_usage_update_m(struct bch_fs *c, struct bch_dev *ca,
+                                   struct bucket_mark old, struct bucket_mark new,
+                                   u64 journal_seq, bool gc)
+{
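+       /*
+        * Transitional wrapper: convert the old in-memory bucket_mark to
+        * bch_alloc_v4 for the gc paths that still carry bucket_marks.
+        */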
+       struct bch_alloc_v4 old_a = {
+               .gen            = old.gen,
+               .data_type      = old.data_type,
+               .dirty_sectors  = old.dirty_sectors,
+               .cached_sectors = old.cached_sectors,
+               .stripe         = old.stripe,
+       };
+       struct bch_alloc_v4 new_a = {
+               .gen            = new.gen,
+               .data_type      = new.data_type,
+               .dirty_sectors  = new.dirty_sectors,
+               .cached_sectors = new.cached_sectors,
+               .stripe         = new.stripe,
+       };
+
+       bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);
+}
+
 static inline int __update_replicas(struct bch_fs *c,
                                    struct bch_fs_usage *fs_usage,
                                    struct bch_replicas_entry *r,
        if (!gc && new_a.gen != old_a.gen)
                *bucket_gen(ca, new.k->p.offset) = new_a.gen;
 
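+       /* update dev usage straight from the old/new alloc keys: */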
+       bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);
+
        g = __bucket(ca, new.k->p.offset, gc);
 
        old_m = bucket_cmpxchg(g, m, ({
                m.stripe                = new_a.stripe != 0;
        }));
 
-       bch2_dev_usage_update(c, ca, old_m, m, journal_seq, gc);
-
        g->io_time[READ]        = new_a.io_time[READ];
        g->io_time[WRITE]       = new_a.io_time[WRITE];
        g->gen_valid            = 1;
                bch2_data_types[old.data_type ?: data_type],
                old.dirty_sectors, sectors);
 
-       bch2_dev_usage_update(c, ca, old, new, 0, true);
+       bch2_dev_usage_update_m(c, ca, old, new, 0, true);
        percpu_up_read(&c->mark_lock);
 }
 
        g->stripe               = k.k->p.offset;
        g->stripe_redundancy    = s->nr_redundant;
 
-       bch2_dev_usage_update(c, ca, old, new, journal_seq, true);
+       bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
 err:
        percpu_up_read(&c->mark_lock);
        printbuf_exit(&buf);
                              old.v.counter,
                              new.v.counter)) != old.v.counter);
 
-       bch2_dev_usage_update(c, ca, old, new, journal_seq, true);
+       bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
 err:
        percpu_up_read(&c->mark_lock);