return ob;
 }
 
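+/*
+ * Before BCH_RECOVERY_PASS_trans_mark_dev_sbs has run, superblock and journal
+ * buckets aren't yet marked in the allocation btree, so the allocator has to
+ * check for them explicitly; after that pass the check is unnecessary:
+ */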
+static inline bool is_superblock_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
+{
+       if (c->curr_recovery_pass > BCH_RECOVERY_PASS_trans_mark_dev_sbs)
+               return false;
+
+       return bch2_is_superblock_bucket(ca, b);
+}
+
 static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob)
 {
        BUG_ON(c->open_buckets_partial_nr >=
        closure_wake_up(&c->freelist_wait);
 }
 
-/* _only_ for allocating the journal on a new device: */
-long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
-{
-       while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
-               u64 b = ca->new_fs_bucket_idx++;
-
-               if (!is_superblock_bucket(ca, b) &&
-                   (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
-                       return b;
-       }
-
-       return -1;
-}
-
 static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
 {
        switch (watermark) {
 {
        struct open_bucket *ob;
 
+       if (unlikely(is_superblock_bucket(c, ca, bucket)))
+               return NULL;
+
        if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
                s->skipped_nouse++;
                return NULL;
 
 /*
- * This path is for before the freespace btree is initialized:
- *
- * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
- * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
+ * This path is used before the freespace btree is initialized.
  */
 static noinline struct open_bucket *
 bch2_bucket_alloc_early(struct btree_trans *trans,
        struct btree_iter iter, citer;
        struct bkey_s_c k, ck;
        struct open_bucket *ob = NULL;
-       u64 first_bucket = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
+       u64 first_bucket = ca->mi.first_bucket;
        u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap];
        u64 alloc_start = max(first_bucket, *dev_alloc_cursor);
        u64 alloc_cursor = alloc_start;
                if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
                        break;
 
-               if (ca->new_fs_bucket_idx &&
-                   is_superblock_bucket(ca, k.k->p.offset))
-                       continue;
-
                if (s->btree_bitmap != BTREE_BITMAP_ANY &&
                    s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
                                bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
        u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(*dev_alloc_cursor));
        u64 alloc_cursor = alloc_start;
        int ret;
-
-       BUG_ON(ca->new_fs_bucket_idx);
 again:
        for_each_btree_key_max_norestart(trans, iter, BTREE_ID_freespace,
                                         POS(ca->dev_idx, alloc_cursor),
                bch2_dev_do_invalidates(ca);
 
        if (!avail) {
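+               /*
+                * Usage counters may not be accurate yet during early
+                * recovery or filesystem initialization; don't block
+                * internal (above normal watermark) allocations on them:
+                */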
+               if (watermark > BCH_WATERMARK_normal &&
+                   c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_allocations)
+                       goto alloc;
+
                if (cl && !waiting) {
                        closure_wait(&c->freelist_wait, cl);
                        waiting = true;
 
                                          struct bch_devs_mask *);
 void bch2_dev_stripe_increment(struct bch_dev *, struct dev_stripe_state *);
 
-long bch2_bucket_alloc_new_fs(struct bch_dev *);
-
 static inline struct bch_dev *ob_dev(struct bch_fs *c, struct open_bucket *ob)
 {
        return bch2_dev_have_ref(c, ob->dev);
 
        struct bch_dev_usage __percpu   *usage;
 
        /* Allocator: */
-       u64                     new_fs_bucket_idx;
        u64                     alloc_cursor[3];
 
        unsigned                nr_open_buckets;
 
        return bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_transactional);
 }
 
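+/*
+ * Returns true if bucket @b on @ca is reserved: bucket 0, any bucket
+ * overlapping a superblock copy, or a journal bucket.
+ */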
+bool bch2_is_superblock_bucket(struct bch_dev *ca, u64 b)
+{
+       struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
+       u64 b_offset    = bucket_to_sector(ca, b);
+       u64 b_end       = bucket_to_sector(ca, b + 1);
+       unsigned i;
+
+       if (!b)
+               return true;
+
+       for (i = 0; i < layout->nr_superblocks; i++) {
+               u64 offset = le64_to_cpu(layout->sb_offset[i]);
+               u64 end = offset + (1 << layout->sb_max_size_bits);
+
+               if (!(offset >= b_end || end <= b_offset))
+                       return true;
+       }
+
+       for (i = 0; i < ca->journal.nr; i++)
+               if (b == ca->journal.buckets[i])
+                       return true;
+
+       return false;
+}
+
 /* Disk reservations: */
 
 #define SECTORS_CACHE  1024
 
                                    enum btree_iter_update_trigger_flags);
 int bch2_trans_mark_dev_sbs(struct bch_fs *);
 
-static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
-{
-       struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
-       u64 b_offset    = bucket_to_sector(ca, b);
-       u64 b_end       = bucket_to_sector(ca, b + 1);
-       unsigned i;
-
-       if (!b)
-               return true;
-
-       for (i = 0; i < layout->nr_superblocks; i++) {
-               u64 offset = le64_to_cpu(layout->sb_offset[i]);
-               u64 end = offset + (1 << layout->sb_max_size_bits);
-
-               if (!(offset >= b_end || end <= b_offset))
-                       return true;
-       }
-
-       return false;
-}
+bool bch2_is_superblock_bucket(struct bch_dev *, u64);
 
 static inline const char *bch2_data_type_str(enum bch_data_type type)
 {
 
        }
 
        for (nr_got = 0; nr_got < nr_want; nr_got++) {
-               if (new_fs) {
-                       bu[nr_got] = bch2_bucket_alloc_new_fs(ca);
-                       if (bu[nr_got] < 0) {
-                               ret = -BCH_ERR_ENOSPC_bucket_alloc;
-                               break;
-                       }
-               } else {
-                       ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal,
-                                                      BCH_DATA_journal, cl);
-                       ret = PTR_ERR_OR_ZERO(ob[nr_got]);
-                       if (ret)
-                               break;
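+               /*
+                * On a brand new filesystem the usage counters aren't
+                * initialized yet, so allocate with a watermark above
+                * normal so we aren't blocked waiting on the freelist:
+                */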
+               enum bch_watermark watermark = new_fs
+                       ? BCH_WATERMARK_btree
+                       : BCH_WATERMARK_normal;
 
+               ob[nr_got] = bch2_bucket_alloc(c, ca, watermark,
+                                              BCH_DATA_journal, cl);
+               ret = PTR_ERR_OR_ZERO(ob[nr_got]);
+               if (ret)
+                       break;
+
+               if (!new_fs) {
                        ret = bch2_trans_run(c,
                                bch2_trans_mark_metadata_bucket(trans, ca,
                                                ob[nr_got]->bucket, BCH_DATA_journal,
                                bch_err_msg(c, ret, "marking new journal buckets");
                                break;
                        }
-
-                       bu[nr_got] = ob[nr_got]->bucket;
                }
+
+               bu[nr_got] = ob[nr_got]->bucket;
        }
 
        if (!nr_got)
        if (ret)
                goto err_unblock;
 
-       if (!new_fs)
-               bch2_write_super(c);
+       bch2_write_super(c);
 
        /* Commit: */
        if (c)
                                                bu[i], BCH_DATA_free, 0,
                                                BTREE_TRIGGER_transactional));
 err_free:
-       if (!new_fs)
-               for (i = 0; i < nr_got; i++)
-                       bch2_open_bucket_put(c, ob[i]);
+       for (i = 0; i < nr_got; i++)
+               bch2_open_bucket_put(c, ob[i]);
 
        kfree(new_bucket_seq);
        kfree(new_buckets);
 
                                            struct journal_device *ja,
                                            enum journal_space_from from)
 {
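+       /* the device may not have a journal allocated yet (ja->nr == 0): */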
+       if (!ja->nr)
+               return 0;
+
        unsigned available = (journal_space_from(ja, from) -
                              ja->cur_idx - 1 + ja->nr) % ja->nr;
 
 
        bch2_write_super(c);
        mutex_unlock(&c->sb_lock);
 
-       c->curr_recovery_pass = BCH_RECOVERY_PASS_NR;
        set_bit(BCH_FS_btree_running, &c->flags);
        set_bit(BCH_FS_may_go_rw, &c->flags);
 
        if (ret)
                goto err;
 
-       for_each_online_member(c, ca)
-               ca->new_fs_bucket_idx = 0;
-
        ret = bch2_fs_freespace_init(c);
        if (ret)
                goto err;
        bch2_write_super(c);
        mutex_unlock(&c->sb_lock);
 
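+       /* initialization is complete; mark all recovery passes as done: */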
+       c->curr_recovery_pass = BCH_RECOVERY_PASS_NR;
        return 0;
 err:
        bch_err_fn(c, ret);
 
        if (ret)
                goto err;
 
-       ret = bch2_dev_journal_alloc(ca, true);
-       bch_err_msg(c, ret, "allocating journal");
-       if (ret)
-               goto err;
-
        down_write(&c->state_lock);
        mutex_lock(&c->sb_lock);
 
        if (ret)
                goto err_late;
 
-       ca->new_fs_bucket_idx = 0;
-
        if (ca->mi.state == BCH_MEMBER_STATE_rw)
                __bch2_dev_read_write(c, ca);
 
+       ret = bch2_dev_journal_alloc(ca, false);
+       bch_err_msg(c, ret, "allocating journal");
+       if (ret)
+               goto err_late;
+
        up_write(&c->state_lock);
        return 0;