* reclaimed by copy GC
                 */
                s64 fragmented = (bucket_to_sector(ca,
-                                       stats.buckets[BCH_DATA_USER] +
-                                       stats.buckets[BCH_DATA_CACHED]) -
-                                 (stats.sectors[BCH_DATA_USER] +
-                                  stats.sectors[BCH_DATA_CACHED])) << 9;
+                                       stats.buckets[BCH_DATA_user] +
+                                       stats.buckets[BCH_DATA_cached]) -
+                                 (stats.sectors[BCH_DATA_user] +
+                                  stats.sectors[BCH_DATA_cached])) << 9;
 
                fragmented = max(0LL, fragmented);
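               A quick worked example, for illustration only (the numbers are made up;
               the 512-byte sector size is what the << 9 shift implies): with 1,000
               user/cached buckets of 128 sectors each and 100,000 of those sectors in
               use, fragmented = (128,000 - 100,000) << 9 = 14,336,000 bytes, roughly
               13.7 MiB that copygc could get back by compacting partially used buckets.
               The max(0LL, ...) above only clamps the result so it never goes negative.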
 
 
                if (*nr_effective < nr_replicas &&
                    test_bit(ob->ptr.dev, devs_may_alloc->d) &&
                    (ca->mi.durability ||
-                    (wp->type == BCH_DATA_USER && !*have_cache)) &&
+                    (wp->type == BCH_DATA_user && !*have_cache)) &&
                    (ob->ec || !need_ec)) {
                        add_new_bucket(c, ptrs, devs_may_alloc,
                                       nr_effective, have_cache,
 
        wp = writepoint_find(c, write_point.v);
 
-       if (wp->type == BCH_DATA_USER)
+       if (wp->type == BCH_DATA_user)
                ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;
 
        /* metadata may not allocate on cache devices: */
-       if (wp->type != BCH_DATA_USER)
+       if (wp->type != BCH_DATA_user)
                have_cache = true;
 
        if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
 
        /* Free buckets we didn't use: */
        open_bucket_for_each(c, &wp->ptrs, ob, i)
-               open_bucket_free_unused(c, ob, wp->type == BCH_DATA_USER);
+               open_bucket_free_unused(c, ob, wp->type == BCH_DATA_user);
 
        wp->ptrs = ptrs;
 
                        ob_push(c, &ptrs, ob);
                else
                        open_bucket_free_unused(c, ob,
-                                       wp->type == BCH_DATA_USER);
+                                       wp->type == BCH_DATA_user);
        wp->ptrs = ptrs;
 
        mutex_unlock(&wp->lock);
                struct bch_extent_ptr tmp = ob->ptr;
 
                tmp.cached = !ca->mi.durability &&
-                       wp->type == BCH_DATA_USER;
+                       wp->type == BCH_DATA_user;
 
                tmp.offset += ca->mi.bucket_size - ob->sectors_free;
                bch2_bkey_append_ptr(k, tmp);
                c->open_buckets_freelist = ob - c->open_buckets;
        }
 
-       writepoint_init(&c->btree_write_point, BCH_DATA_BTREE);
-       writepoint_init(&c->rebalance_write_point, BCH_DATA_USER);
+       writepoint_init(&c->btree_write_point, BCH_DATA_btree);
+       writepoint_init(&c->rebalance_write_point, BCH_DATA_user);
 
        for (wp = c->write_points;
             wp < c->write_points + c->write_points_nr; wp++) {
-               writepoint_init(wp, BCH_DATA_USER);
+               writepoint_init(wp, BCH_DATA_user);
 
                wp->last_used   = sched_clock();
                wp->write_point = (unsigned long) wp;
 
 
 /* BCH_SB_FIELD_replicas: */
 
+#define BCH_DATA_TYPES()               \
+       x(none,         0)              \
+       x(sb,           1)              \
+       x(journal,      2)              \
+       x(btree,        3)              \
+       x(user,         4)              \
+       x(cached,       5)
+
 enum bch_data_type {
-       BCH_DATA_NONE           = 0,
-       BCH_DATA_SB             = 1,
-       BCH_DATA_JOURNAL        = 2,
-       BCH_DATA_BTREE          = 3,
-       BCH_DATA_USER           = 4,
-       BCH_DATA_CACHED         = 5,
-       BCH_DATA_NR             = 6,
+#define x(t, n) BCH_DATA_##t,
+       BCH_DATA_TYPES()
+#undef x
+       BCH_DATA_NR
 };
 
 struct bch_replicas_entry_v0 {
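For reference, not part of the diff: expanding BCH_DATA_TYPES() with the x()
definition used inside the enum reproduces the old constants, just lower-cased
and with the same implicit values, since this particular expander ignores the
numeric column (the column simply keeps each type's number written down next to
its name):

	enum bch_data_type {
		BCH_DATA_none,		/* 0 */
		BCH_DATA_sb,		/* 1 */
		BCH_DATA_journal,	/* 2 */
		BCH_DATA_btree,		/* 3 */
		BCH_DATA_user,		/* 4 */
		BCH_DATA_cached,	/* 5 */
		BCH_DATA_NR		/* 6 */
	};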
 
 
                if (offset == BCH_SB_SECTOR)
                        mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
-                                             BCH_DATA_SB, flags);
+                                             BCH_DATA_sb, flags);
 
                mark_metadata_sectors(c, ca, offset,
                                      offset + (1 << layout->sb_max_size_bits),
-                                     BCH_DATA_SB, flags);
+                                     BCH_DATA_sb, flags);
        }
 
        for (i = 0; i < ca->journal.nr; i++) {
                b = ca->journal.buckets[i];
-               bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_JOURNAL,
+               bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_journal,
                                          ca->mi.bucket_size,
                                          gc_phase(GC_PHASE_SB), flags);
        }
                        char buf[80];
 
                        if (metadata_only &&
-                           (e->data_type == BCH_DATA_USER ||
-                            e->data_type == BCH_DATA_CACHED))
+                           (e->data_type == BCH_DATA_user ||
+                            e->data_type == BCH_DATA_cached))
                                continue;
 
                        bch2_replicas_entry_to_text(&PBUF(buf), e);
                        d->gen_valid = s->gen_valid;
 
                        if (metadata_only &&
-                           (s->mark.data_type == BCH_DATA_USER ||
-                            s->mark.data_type == BCH_DATA_CACHED)) {
+                           (s->mark.data_type == BCH_DATA_user ||
+                            s->mark.data_type == BCH_DATA_cached)) {
                                d->_mark = s->mark;
                                d->_mark.owned_by_allocator = 0;
                        }
 
        set_btree_node_read_in_flight(b);
 
        if (rb->have_ioref) {
-               this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_BTREE],
+               this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
                             bio_sectors(bio));
                bio_set_dev(bio, ca->disk_sb.bdev);
 
        b->written += sectors_to_write;
 
        /* XXX: submitting IO with btree locks held: */
-       bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_BTREE, &k.key);
+       bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_btree, &k.key);
        return;
 err:
        set_btree_node_noevict(b);
 
                        cpu_replicas_entry(&c->replicas, i);
 
                switch (e->data_type) {
-               case BCH_DATA_BTREE:
+               case BCH_DATA_btree:
                        usage->btree    += usage->replicas[i];
                        break;
-               case BCH_DATA_USER:
+               case BCH_DATA_user:
                        usage->data     += usage->replicas[i];
                        break;
-               case BCH_DATA_CACHED:
+               case BCH_DATA_cached:
                        usage->cached   += usage->replicas[i];
                        break;
                }
                                       struct bch_dev *ca)
 {
        if (!m.owned_by_allocator &&
-           m.data_type == BCH_DATA_USER &&
+           m.data_type == BCH_DATA_user &&
            bucket_sectors_used(m))
                return max_t(int, 0, (int) ca->mi.bucket_size -
                             bucket_sectors_used(m));
 static inline enum bch_data_type bucket_type(struct bucket_mark m)
 {
        return m.cached_sectors && !m.dirty_sectors
-               ? BCH_DATA_CACHED
+               ? BCH_DATA_cached
                : m.data_type;
 }
 
                                  enum bch_data_type type,
                                  int nr, s64 size)
 {
-       if (type == BCH_DATA_SB || type == BCH_DATA_JOURNAL)
+       if (type == BCH_DATA_sb || type == BCH_DATA_journal)
                fs_usage->hidden        += size;
 
        dev_usage->buckets[type]        += nr;
 
        u->sectors[old.data_type] -= old.dirty_sectors;
        u->sectors[new.data_type] += new.dirty_sectors;
-       u->sectors[BCH_DATA_CACHED] +=
+       u->sectors[BCH_DATA_cached] +=
                (int) new.cached_sectors - (int) old.cached_sectors;
        u->sectors_fragmented +=
                is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca);
                return 0;
 
        switch (r->data_type) {
-       case BCH_DATA_BTREE:
+       case BCH_DATA_btree:
                fs_usage->btree         += sectors;
                break;
-       case BCH_DATA_USER:
+       case BCH_DATA_user:
                fs_usage->data          += sectors;
                break;
-       case BCH_DATA_CACHED:
+       case BCH_DATA_cached:
                fs_usage->cached        += sectors;
                break;
        }
        struct bucket_mark old, new;
        bool overflow;
 
-       BUG_ON(data_type != BCH_DATA_SB &&
-              data_type != BCH_DATA_JOURNAL);
+       BUG_ON(data_type != BCH_DATA_sb &&
+              data_type != BCH_DATA_journal);
 
        old = bucket_cmpxchg(g, new, ({
                new.data_type   = data_type;
                               unsigned sectors, struct gc_pos pos,
                               unsigned flags)
 {
-       BUG_ON(type != BCH_DATA_SB &&
-              type != BCH_DATA_JOURNAL);
+       BUG_ON(type != BCH_DATA_sb &&
+              type != BCH_DATA_journal);
 
        preempt_disable();
 
        BUG_ON(!sectors);
 
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
-               s64 disk_sectors = data_type == BCH_DATA_BTREE
+               s64 disk_sectors = data_type == BCH_DATA_btree
                        ? sectors
                        : ptr_disk_sectors_delta(p, offset, sectors, flags);
 
                        : -c->opts.btree_node_size;
 
                ret = bch2_mark_extent(c, old, new, offset, sectors,
-                               BCH_DATA_BTREE, fs_usage, journal_seq, flags);
+                               BCH_DATA_btree, fs_usage, journal_seq, flags);
                break;
        case KEY_TYPE_extent:
        case KEY_TYPE_reflink_v:
                ret = bch2_mark_extent(c, old, new, offset, sectors,
-                               BCH_DATA_USER, fs_usage, journal_seq, flags);
+                               BCH_DATA_user, fs_usage, journal_seq, flags);
                break;
        case KEY_TYPE_stripe:
                ret = bch2_mark_stripe(c, old, new, fs_usage, journal_seq, flags);
        BUG_ON(!sectors);
 
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
-               s64 disk_sectors = data_type == BCH_DATA_BTREE
+               s64 disk_sectors = data_type == BCH_DATA_btree
                        ? sectors
                        : ptr_disk_sectors_delta(p, offset, sectors, flags);
 
                        : -c->opts.btree_node_size;
 
                return bch2_trans_mark_extent(trans, k, offset, sectors,
-                                             flags, BCH_DATA_BTREE);
+                                             flags, BCH_DATA_btree);
        case KEY_TYPE_extent:
        case KEY_TYPE_reflink_v:
                return bch2_trans_mark_extent(trans, k, offset, sectors,
-                                             flags, BCH_DATA_USER);
+                                             flags, BCH_DATA_user);
        case KEY_TYPE_inode:
                d = replicas_deltas_realloc(trans, 0);
 
 
 {
        if (k->type == KEY_TYPE_btree_ptr ||
            k->type == KEY_TYPE_btree_ptr_v2)
-               return BCH_DATA_BTREE;
+               return BCH_DATA_btree;
 
-       return ptr->cached ? BCH_DATA_CACHED : BCH_DATA_USER;
+       return ptr->cached ? BCH_DATA_cached : BCH_DATA_user;
 }
 
 static inline struct bucket_mark ptr_bucket_mark(struct bch_dev *ca,
 
        h->redundancy   = redundancy;
 
        rcu_read_lock();
-       h->devs = target_rw_devs(c, BCH_DATA_USER, target);
+       h->devs = target_rw_devs(c, BCH_DATA_user, target);
 
        for_each_member_device_rcu(ca, c, i, &h->devs)
                if (!ca->mi.durability)
 
                        goto err;
 
                err = "inconsistent";
-               if (mark.data_type != BCH_DATA_BTREE ||
+               if (mark.data_type != BCH_DATA_btree ||
                    mark.dirty_sectors < c->opts.btree_node_size)
                        goto err;
        }
                        "key too stale: %i", stale);
 
                bch2_fs_inconsistent_on(!stale &&
-                       (mark.data_type != BCH_DATA_USER ||
+                       (mark.data_type != BCH_DATA_user ||
                         mark_sectors < disk_sectors), c,
                        "extent pointer not marked: %s:\n"
                        "type %u sectors %u < %u",
 
 
                        bio_set_dev(&n->bio, ca->disk_sb.bdev);
 
-                       if (type != BCH_DATA_BTREE && unlikely(c->opts.no_data_io)) {
+                       if (type != BCH_DATA_btree && unlikely(c->opts.no_data_io)) {
                                bio_endio(&n->bio);
                                continue;
                        }
                key_to_write = (void *) (op->insert_keys.keys_p +
                                         key_to_write_offset);
 
-               bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_USER,
+               bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
                                          key_to_write);
        } while (ret);
 
                        goto out;
                }
 
-               this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_USER],
+               this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_user],
                             bio_sectors(&rbio->bio));
                bio_set_dev(&rbio->bio, ca->disk_sb.bdev);
 
 
                if (pos <= ja->cur_idx)
                        ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
 
-               bch2_mark_metadata_bucket(c, ca, bucket, BCH_DATA_JOURNAL,
+               bch2_mark_metadata_bucket(c, ca, bucket, BCH_DATA_journal,
                                          ca->mi.bucket_size,
                                          gc_phase(GC_PHASE_SB),
                                          0);
               test_bit(JOURNAL_REPLAY_DONE,    &j->flags));
 
        for_each_member_device_rcu(ca, c, iter,
-                                  &c->rw_devs[BCH_DATA_JOURNAL]) {
+                                  &c->rw_devs[BCH_DATA_journal]) {
                struct journal_device *ja = &ca->journal;
 
                if (!ja->nr)
 
 
        for_each_member_device(ca, c, iter) {
                if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
-                   !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_JOURNAL)))
+                   !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
                        continue;
 
                if ((ca->mi.state == BCH_MEMBER_STATE_RW ||
                 * the devices - this is wrong:
                 */
 
-               bch2_devlist_to_replicas(&replicas.e, BCH_DATA_JOURNAL, i->devs);
+               bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal, i->devs);
 
                if (!degraded &&
                    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
        rcu_read_lock();
 
        devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe,
-                                         &c->rw_devs[BCH_DATA_JOURNAL]);
+                                         &c->rw_devs[BCH_DATA_journal]);
 
        __journal_write_alloc(j, w, &devs_sorted,
                              sectors, &replicas, replicas_want);
                goto err;
        }
 
-       bch2_devlist_to_replicas(&replicas.e, BCH_DATA_JOURNAL, devs);
+       bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal, devs);
 
        if (bch2_mark_replicas(c, &replicas.e))
                goto err;
                        continue;
                }
 
-               this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_JOURNAL],
+               this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
                             sectors);
 
                bio = ca->journal.bio;
 
 
        rcu_read_lock();
        for_each_member_device_rcu(ca, c, i,
-                                  &c->rw_devs[BCH_DATA_JOURNAL]) {
+                                  &c->rw_devs[BCH_DATA_journal]) {
                struct journal_device *ja = &ca->journal;
                unsigned buckets_this_device, sectors_this_device;
 
 
        rcu_read_lock();
        for_each_member_device_rcu(ca, c, i,
-                                  &c->rw_devs[BCH_DATA_JOURNAL]) {
+                                  &c->rw_devs[BCH_DATA_journal]) {
                struct journal_device *ja = &ca->journal;
 
                if (!ja->nr)
                return ret;
 
        mutex_lock(&c->replicas_gc_lock);
-       bch2_replicas_gc_start(c, 1 << BCH_DATA_JOURNAL);
+       bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);
 
        seq = 0;
 
                struct bch_replicas_padded replicas;
 
                seq = max(seq, journal_last_seq(j));
-               bch2_devlist_to_replicas(&replicas.e, BCH_DATA_JOURNAL,
+               bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
                                         journal_seq_pin(j, seq)->devs);
                seq++;
 
 
        bkey_on_stack_init(&sk);
        bch2_trans_init(&trans, c, 0, 0);
 
-       stats->data_type = BCH_DATA_USER;
+       stats->data_type = BCH_DATA_user;
        stats->btree_id = btree_id;
        stats->pos      = POS_MIN;
 
        INIT_LIST_HEAD(&ctxt.reads);
        init_waitqueue_head(&ctxt.wait);
 
-       stats->data_type = BCH_DATA_USER;
+       stats->data_type = BCH_DATA_user;
 
        ret =   __bch2_move_data(c, &ctxt, rate, wp, start, end,
                                 pred, arg, stats, BTREE_ID_EXTENTS) ?:
 
        bch2_trans_init(&trans, c, 0, 0);
 
-       stats->data_type = BCH_DATA_BTREE;
+       stats->data_type = BCH_DATA_btree;
 
        for (id = 0; id < BTREE_ID_NR; id++) {
                stats->btree_id = id;
 
        switch (op.op) {
        case BCH_DATA_OP_REREPLICATE:
-               stats->data_type = BCH_DATA_JOURNAL;
+               stats->data_type = BCH_DATA_journal;
                ret = bch2_journal_flush_device_pins(&c->journal, -1);
 
                ret = bch2_move_btree(c, rereplicate_pred, c, stats) ?: ret;
                if (op.migrate.dev >= c->sb.nr_devices)
                        return -EINVAL;
 
-               stats->data_type = BCH_DATA_JOURNAL;
+               stats->data_type = BCH_DATA_journal;
                ret = bch2_journal_flush_device_pins(&c->journal, op.migrate.dev);
 
                ret = bch2_move_btree(c, migrate_pred, &op, stats) ?: ret;
 
                struct copygc_heap_entry e;
 
                if (m.owned_by_allocator ||
-                   m.data_type != BCH_DATA_USER ||
+                   m.data_type != BCH_DATA_user ||
                    !bucket_sectors_used(m) ||
                    bucket_sectors_used(m) >= ca->mi.bucket_size)
                        continue;
 
 };
 
 const char * const bch2_data_types[] = {
-       "none",
-       "sb",
-       "journal",
-       "btree",
-       "data",
-       "cached",
+#define x(t, n) #t,
+       BCH_DATA_TYPES()
+#undef x
        NULL
 };
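An observation on the hunk above, for illustration only: the same x-macro now
generates the name table, so it stays in sync with the enum by construction,
expanding to:

	const char * const bch2_data_types[] = {
		"none", "sb", "journal", "btree", "user", "cached",
		NULL
	};

Note that the old hand-written table spelled the BCH_DATA_user slot as "data",
so anything printed via bch2_data_types[] will now show "user" for that type.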
 
 
        switch (k.k->type) {
        case KEY_TYPE_btree_ptr:
        case KEY_TYPE_btree_ptr_v2:
-               e->data_type = BCH_DATA_BTREE;
+               e->data_type = BCH_DATA_btree;
                extent_to_replicas(k, e);
                break;
        case KEY_TYPE_extent:
        case KEY_TYPE_reflink_v:
-               e->data_type = BCH_DATA_USER;
+               e->data_type = BCH_DATA_user;
                extent_to_replicas(k, e);
                break;
        case KEY_TYPE_stripe:
-               e->data_type = BCH_DATA_USER;
+               e->data_type = BCH_DATA_user;
                stripe_to_replicas(k, e);
                break;
        }
        unsigned i;
 
        BUG_ON(!data_type ||
-              data_type == BCH_DATA_SB ||
+              data_type == BCH_DATA_sb ||
               data_type >= BCH_DATA_NR);
 
        e->data_type    = data_type;
                struct bch_replicas_entry *e =
                        cpu_replicas_entry(&c->replicas, i);
 
-               if (e->data_type == BCH_DATA_JOURNAL ||
+               if (e->data_type == BCH_DATA_journal ||
                    c->usage_base->replicas[i] ||
                    percpu_u64_get(&c->usage[0]->replicas[i]) ||
                    percpu_u64_get(&c->usage[1]->replicas[i]))
 
 bool bch2_have_enough_devs(struct replicas_status s, unsigned flags)
 {
-       return (have_enough_devs(s, BCH_DATA_JOURNAL,
+       return (have_enough_devs(s, BCH_DATA_journal,
                                 flags & BCH_FORCE_IF_METADATA_DEGRADED,
                                 flags & BCH_FORCE_IF_METADATA_LOST) &&
-               have_enough_devs(s, BCH_DATA_BTREE,
+               have_enough_devs(s, BCH_DATA_btree,
                                 flags & BCH_FORCE_IF_METADATA_DEGRADED,
                                 flags & BCH_FORCE_IF_METADATA_LOST) &&
-               have_enough_devs(s, BCH_DATA_USER,
+               have_enough_devs(s, BCH_DATA_user,
                                 flags & BCH_FORCE_IF_DATA_DEGRADED,
                                 flags & BCH_FORCE_IF_DATA_LOST));
 }
        struct replicas_status s = bch2_replicas_status(c);
 
        return (meta
-               ? min(s.replicas[BCH_DATA_JOURNAL].redundancy,
-                     s.replicas[BCH_DATA_BTREE].redundancy)
-               : s.replicas[BCH_DATA_USER].redundancy) + 1;
+               ? min(s.replicas[BCH_DATA_journal].redundancy,
+                     s.replicas[BCH_DATA_btree].redundancy)
+               : s.replicas[BCH_DATA_user].redundancy) + 1;
 }
 
 unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
 
 static inline void bch2_replicas_entry_cached(struct bch_replicas_entry *e,
                                              unsigned dev)
 {
-       e->data_type    = BCH_DATA_CACHED;
+       e->data_type    = BCH_DATA_cached;
        e->nr_devs      = 1;
        e->nr_required  = 1;
        e->devs[0]      = dev;
 
        bio->bi_private         = ca;
        bch2_bio_map(bio, ca->sb_read_scratch, PAGE_SIZE);
 
-       this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_SB],
+       this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_sb],
                     bio_sectors(bio));
 
        percpu_ref_get(&ca->io_ref);
                     roundup((size_t) vstruct_bytes(sb),
                             bdev_logical_block_size(ca->disk_sb.bdev)));
 
-       this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_SB],
+       this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_sb],
                     bio_sectors(bio));
 
        percpu_ref_get(&ca->io_ref);
 
 
        init_rwsem(&ca->bucket_lock);
 
-       writepoint_init(&ca->copygc_write_point, BCH_DATA_USER);
+       writepoint_init(&ca->copygc_write_point, BCH_DATA_user);
 
        bch2_dev_copygc_init(ca);
 
                return ret;
 
        if (test_bit(BCH_FS_ALLOC_READ_DONE, &c->flags) &&
-           !percpu_u64_get(&ca->usage[0]->buckets[BCH_DATA_SB])) {
+           !percpu_u64_get(&ca->usage[0]->buckets[BCH_DATA_sb])) {
                mutex_lock(&c->sb_lock);
                bch2_mark_dev_superblock(ca->fs, ca, 0);
                mutex_unlock(&c->sb_lock);
 
                fifo_used(&ca->free[RESERVE_MOVINGGC]), ca->free[RESERVE_MOVINGGC].size,
                fifo_used(&ca->free[RESERVE_NONE]),     ca->free[RESERVE_NONE].size,
                ca->mi.nbuckets - ca->mi.first_bucket,
-               stats.buckets[BCH_DATA_SB],
-               stats.buckets[BCH_DATA_JOURNAL],
-               stats.buckets[BCH_DATA_BTREE],
-               stats.buckets[BCH_DATA_USER],
-               stats.buckets[BCH_DATA_CACHED],
+               stats.buckets[BCH_DATA_sb],
+               stats.buckets[BCH_DATA_journal],
+               stats.buckets[BCH_DATA_btree],
+               stats.buckets[BCH_DATA_user],
+               stats.buckets[BCH_DATA_cached],
                stats.buckets_ec,
                ca->mi.nbuckets - ca->mi.first_bucket - stats.buckets_unavailable,
-               stats.sectors[BCH_DATA_SB],
-               stats.sectors[BCH_DATA_JOURNAL],
-               stats.sectors[BCH_DATA_BTREE],
-               stats.sectors[BCH_DATA_USER],
-               stats.sectors[BCH_DATA_CACHED],
+               stats.sectors[BCH_DATA_sb],
+               stats.sectors[BCH_DATA_journal],
+               stats.sectors[BCH_DATA_btree],
+               stats.sectors[BCH_DATA_user],
+               stats.sectors[BCH_DATA_cached],
                stats.sectors_ec,
                stats.sectors_fragmented,
                ca->copygc_threshold,
                c->open_buckets_nr_free, OPEN_BUCKETS_COUNT,
                BTREE_NODE_OPEN_BUCKET_RESERVE,
                c->open_buckets_wait.list.first         ? "waiting" : "empty",
-               nr[BCH_DATA_BTREE],
-               nr[BCH_DATA_USER],
+               nr[BCH_DATA_btree],
+               nr[BCH_DATA_user],
                c->btree_reserve_cache_nr);
 }