struct mutex            fsck_error_lock;
        bool                    fsck_alloc_err;
 
-       /* FILESYSTEM */
-       atomic_long_t           nr_inodes;
-
        /* QUOTAS */
        struct bch_memquota_type quotas[QTYP_NR];
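
This removes the standalone atomic_long_t inode counter from struct bch_fs: nr_inodes now lives in struct bch_fs_usage (see below), so it is updated by the same transactional key-marking path as the bucket and sector counts, tracked per-cpu, and cross-checked by mark-and-sweep gc.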
 
 
                for (b = 0; b < BCH_DATA_NR; b++)
                        copy_fs_field(buckets[b],
                                      "buckets[%s]", bch2_data_types[b]);
+               copy_fs_field(nr_inodes, "nr_inodes");
 
                for_each_possible_cpu(cpu) {
                        p = per_cpu_ptr(c->usage[0], cpu);
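
For readers without the tree handy: copy_fs_field() is gc's reconciliation helper. It compares the running counter against the value gc just recomputed, complains via fsck_err() on a mismatch, and adopts the recomputed value. A sketch of its shape (paraphrased from this era of the tree, not the exact macro):

	#define copy_fs_field(_f, _msg, ...)				\
	do {								\
		if (dst->_f != src->_f) {				\
			fsck_err(c, "fs has wrong " _msg		\
				 ": got %llu, should be %llu",		\
				 ##__VA_ARGS__,				\
				 (u64) dst->_f, (u64) src->_f);		\
			dst->_f = src->_f;				\
		}							\
	} while (0)

With nr_inodes copied here, a drifting inode count surfaces as a repairable fsck error instead of going unnoticed.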
 
        switch (type) {
        case BKEY_TYPE_BTREE:
        case BKEY_TYPE_EXTENTS:
+       case BKEY_TYPE_INODES:
        case BKEY_TYPE_EC:
                return true;
        default:
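
Adding BKEY_TYPE_INODES means the inodes btree is now walked and marked by gc, which is what keeps the gc-side nr_inodes in sync. The predicate being patched presumably has this overall shape (helper name and default arm reconstructed, so treat as illustrative):

	static inline bool btree_node_type_needs_gc(enum bkey_type type)
	{
		switch (type) {
		case BKEY_TYPE_BTREE:
		case BKEY_TYPE_EXTENTS:
		case BKEY_TYPE_INODES:
		case BKEY_TYPE_EC:
			return true;
		default:
			return false;
		}
	}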
 
                ret = bch2_mark_stripe(c, k, inserting,
                                       stats, journal_seq, flags, gc);
                break;
+       case KEY_TYPE_inode:
+               if (inserting)
+                       stats->nr_inodes++;
+               else
+                       stats->nr_inodes--;
+               break;
        case KEY_TYPE_reservation: {
                unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
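
Inode accounting is now driven entirely by key marking: inserting an inode key bumps stats->nr_inodes, deleting or overwriting one drops it, and the same code runs during gc so the two counts can be compared. A hedged sketch of the two directions as seen from the btree update path (parameter list approximated from this era of the tree; names are placeholders):

	/* new key going in: accounts the inode */
	bch2_mark_key(c, new, true,  sectors, pos, stats, journal_seq, flags);

	/* old key on delete/overwrite: un-accounts it */
	bch2_mark_key(c, old, false, sectors, pos, stats, journal_seq, flags);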
 
 
 
        u64                     buckets[BCH_DATA_NR];
 
+       u64                     nr_inodes;
+
        /* fields starting here aren't touched by gc: */
        u64                     online_reserved;
        u64                     available_cache;
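
Placement within the struct matters: gc only resets and reconciles the fields above the "aren't touched by gc" marker, so nr_inodes has to sit above online_reserved. Something along these lines, assuming gc zeroes the tracked prefix before the walk (illustrative, not the exact source):

	memset(usage, 0, offsetof(struct bch_fs_usage, online_reserved));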
 
        if (unlikely(ret))
                goto err_trans;
 
-       atomic_long_inc(&c->nr_inodes);
-
        if (!tmpfile) {
                bch2_inode_update_after_write(c, dir, &dir_u,
                                              ATTR_MTIME|ATTR_CTIME);
                bch2_quota_acct(c, inode->ei_qid, Q_INO, -1,
                                KEY_TYPE_QUOTA_WARN);
                bch2_inode_rm(c, inode->v.i_ino);
-
-               WARN_ONCE(atomic_long_dec_return(&c->nr_inodes) < 0,
-                         "nr_inodes < 0");
        }
 }
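
With marking doing the bookkeeping, the VFS paths no longer touch the counter by hand: create drops its atomic_long_inc() and evict drops the decrement-and-WARN_ONCE(). An undercount or overcount now shows up as an fsck error in the gc comparison above rather than a one-shot kernel warning.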
 
        buf->f_blocks   = (c->capacity - hidden_metadata) >> shift;
        buf->f_bfree    = (c->capacity - bch2_fs_sectors_used(c, usage)) >> shift;
        buf->f_bavail   = buf->f_bfree;
-       buf->f_files    = atomic_long_read(&c->nr_inodes);
+       buf->f_files    = usage.nr_inodes;
        buf->f_ffree    = U64_MAX;
 
        fsid = le64_to_cpup((void *) c->sb.user_uuid.b) ^
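
f_files now comes from the same struct bch_fs_usage snapshot already used for f_bfree, so the inode and block counts are mutually consistent. usage here is the per-cpu counters summed into one struct, along the lines of (helper name as used elsewhere in the tree; treat as a sketch):

	struct bch_fs_usage usage = bch2_fs_usage_read(c);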
 
                        BUG_ON(ret == -EINTR);
                        if (ret)
                                break;
-
-                       if (link->count)
-                               atomic_long_inc(&c->nr_inodes);
                } else {
                        /* Should have been caught by dirents pass: */
                        need_fsck_err_on(link->count, c,
        struct btree_iter iter;
        struct bkey_s_c k;
        struct bkey_s_c_inode inode;
-       unsigned long nr_inodes = 0;
        int ret = 0;
 
        for_each_btree_key(&iter, c, BTREE_ID_INODES, POS_MIN, 0, k) {
 
                inode = bkey_s_c_to_inode(k);
 
-               if (!(inode.v->bi_flags & BCH_INODE_UNLINKED))
-                       nr_inodes++;
-
                if (inode.v->bi_flags &
                    (BCH_INODE_I_SIZE_DIRTY|
                     BCH_INODE_I_SECTORS_DIRTY|
                                break;
                }
        }
-       atomic_long_set(&c->nr_inodes, nr_inodes);
 fsck_err:
        return bch2_btree_iter_unlock(&iter) ?: ret;
 }
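
The fsck walk over BTREE_ID_INODES no longer rebuilds the counter: it only validates per-inode state (link counts, dirty flags), since gc's copy_fs_field(nr_inodes, ...) pass is now the authority on the count.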
 
        if (ret)
                goto err;
 
-       atomic_long_set(&c->nr_inodes, 2);
-
        if (enabled_qtypes(c)) {
                ret = bch2_fs_quota_read(c);
                if (ret)
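
Filesystem initialization likewise stops seeding the counter with a hardcoded 2 (presumably the root and lost+found inodes): those inodes are accounted automatically when their keys are first marked.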