enum {
        /* startup: */
-       BCH_FS_ALLOC_CLEAN,
+       BCH_FS_CLEAN_SHUTDOWN,
        BCH_FS_INITIAL_GC_DONE,
        BCH_FS_INITIAL_GC_UNFIXED,
        BCH_FS_TOPOLOGY_REPAIR_DONE,
 
 
        down_write(&c->gc_lock);
 
-       /* flush interior btree updates: */
-       closure_wait_event(&c->btree_interior_update_wait,
-                          !bch2_btree_interior_updates_nr_pending(c));
+       bch2_btree_interior_updates_flush(c);
 
        ret   = bch2_gc_start(c, metadata_only) ?:
                bch2_gc_alloc_start(c, metadata_only) ?:
 
        }
 }
 
-static void __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
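+/*
+ * Wait for any in flight reads or writes (per @flag) on cached btree nodes to
+ * complete; returns true if we had to wait on anything:
+ */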
+static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
 {
        struct bucket_table *tbl;
        struct rhash_head *pos;
        struct btree *b;
        unsigned i;
+       bool ret = false;
 restart:
        rcu_read_lock();
        for_each_cached_btree(b, c, tbl, i, pos)
                if (test_bit(flag, &b->flags)) {
                        rcu_read_unlock();
                        wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
+                       ret = true;
                        goto restart;
                }
        rcu_read_unlock();
+
+       return ret;
 }
 
-void bch2_btree_flush_all_reads(struct bch_fs *c)
+bool bch2_btree_flush_all_reads(struct bch_fs *c)
 {
-       __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
+       return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
 }
 
-void bch2_btree_flush_all_writes(struct bch_fs *c)
+bool bch2_btree_flush_all_writes(struct bch_fs *c)
 {
-       __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
+       return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
 }
 
        bch2_btree_node_write(c, b, lock_held, BTREE_WRITE_ONLY_IF_NEED);
 }
 
-void bch2_btree_flush_all_reads(struct bch_fs *);
-void bch2_btree_flush_all_writes(struct bch_fs *);
+bool bch2_btree_flush_all_reads(struct bch_fs *);
+bool bch2_btree_flush_all_writes(struct bch_fs *);
 
 static inline void compat_bformat(unsigned level, enum btree_id btree_id,
                                  unsigned version, unsigned big_endian,
 
        mutex_unlock(&c->btree_interior_update_lock);
 }
 
-size_t bch2_btree_interior_updates_nr_pending(struct bch_fs *c)
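+/* Returns true if any interior btree updates are still in flight: */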
+static bool bch2_btree_interior_updates_pending(struct bch_fs *c)
 {
-       size_t ret = 0;
-       struct list_head *i;
+       bool ret;
 
        mutex_lock(&c->btree_interior_update_lock);
-       list_for_each(i, &c->btree_interior_update_list)
-               ret++;
+       ret = !list_empty(&c->btree_interior_update_list);
        mutex_unlock(&c->btree_interior_update_lock);
 
        return ret;
 }
 
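+/*
+ * Wait for in flight interior btree updates to complete; returns true if
+ * there were any to wait on, i.e. if more work may have been queued up:
+ */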
+bool bch2_btree_interior_updates_flush(struct bch_fs *c)
+{
+       bool ret = bch2_btree_interior_updates_pending(c);
+
+       if (ret)
+               closure_wait_event(&c->btree_interior_update_wait,
+                                  !bch2_btree_interior_updates_pending(c));
+       return ret;
+}
+
 void bch2_journal_entries_to_btree_roots(struct bch_fs *c, struct jset *jset)
 {
        struct btree_root *r;
 
 
 void bch2_btree_updates_to_text(struct printbuf *, struct bch_fs *);
 
-size_t bch2_btree_interior_updates_nr_pending(struct bch_fs *);
+bool bch2_btree_interior_updates_flush(struct bch_fs *);
 
 void bch2_journal_entries_to_btree_roots(struct bch_fs *, struct jset *);
 struct jset_entry *bch2_btree_roots_to_journal_entries(struct bch_fs *,
 
                        goto out_reset;
        }
 
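+       /* No transactions may be committed after a clean shutdown has begun: */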
+       EBUG_ON(test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags));
+
        memset(&trans->journal_preres, 0, sizeof(trans->journal_preres));
 
        trans->journal_u64s             = trans->extra_journal_entries.nr;
 
                        goto err;
        }
 
-       /* flush relevant btree updates */
-       closure_wait_event(&c->btree_interior_update_wait,
-                          !bch2_btree_interior_updates_nr_pending(c));
-
+       bch2_btree_interior_updates_flush(c);
        ret = 0;
 err:
        bch2_trans_exit(&trans);
 
        if (ret)
                bch_err(c, "error %i in bch2_move_btree", ret);
 
-       /* flush relevant btree updates */
-       closure_wait_event(&c->btree_interior_update_wait,
-                          !bch2_btree_interior_updates_nr_pending(c));
+       bch2_btree_interior_updates_flush(c);
 
        progress_list_del(c, stats);
        return ret;
 
 {
        struct bch_dev *ca;
        unsigned i, clean_passes = 0;
+       u64 seq = 0;    /* most recently seen journal sequence number */
 
        bch2_rebalance_stop(c);
        bch2_copygc_stop(c);
        bch2_gc_thread_stop(c);
 
-       /*
-        * Flush journal before stopping allocators, because flushing journal
-        * blacklist entries involves allocating new btree nodes:
-        */
-       bch2_journal_flush_all_pins(&c->journal);
-
        bch_verbose(c, "flushing journal and stopping allocators");
 
-       bch2_journal_flush_all_pins(&c->journal);
-
        do {
                clean_passes++;
 
-               if (bch2_journal_flush_all_pins(&c->journal))
-                       clean_passes = 0;
-
-               /*
-                * In flight interior btree updates will generate more journal
-                * updates and btree updates (alloc btree):
-                */
-               if (bch2_btree_interior_updates_nr_pending(c)) {
-                       closure_wait_event(&c->btree_interior_update_wait,
-                                          !bch2_btree_interior_updates_nr_pending(c));
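+               /*
+                * Flushing interior btree updates, journal pins and btree
+                * node writes can each generate more work for the others, and
+                * any activity that advances the journal sequence number
+                * means we aren't done yet - require two full passes with
+                * nothing to do before shutting down:
+                */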
+               if (bch2_btree_interior_updates_flush(c) ||
+                   bch2_journal_flush_all_pins(&c->journal) ||
+                   bch2_btree_flush_all_writes(c) ||
+                   seq != atomic64_read(&c->journal.seq)) {
+                       seq = atomic64_read(&c->journal.seq);
                        clean_passes = 0;
                }
-               flush_work(&c->btree_interior_update_work);
-
-               if (bch2_journal_flush_all_pins(&c->journal))
-                       clean_passes = 0;
        } while (clean_passes < 2);
-       bch_verbose(c, "flushing journal and stopping allocators complete");
 
-       set_bit(BCH_FS_ALLOC_CLEAN, &c->flags);
-
-       closure_wait_event(&c->btree_interior_update_wait,
-                          !bch2_btree_interior_updates_nr_pending(c));
-       flush_work(&c->btree_interior_update_work);
+       bch_verbose(c, "flushing journal and stopping allocators complete");
 
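+       /*
+        * Only record a clean shutdown if journal replay finished and we
+        * didn't go emergency read-only:
+        */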
+       if (test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags) &&
+           !test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
+               set_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags);
        bch2_fs_journal_stop(&c->journal);
 
-       /*
-        * the journal kicks off btree writes via reclaim - wait for in flight
-        * writes after stopping journal:
-        */
-       bch2_btree_flush_all_writes(c);
-
        /*
         * After stopping journal:
         */
            !test_bit(BCH_FS_ERROR, &c->flags) &&
            !test_bit(BCH_FS_EMERGENCY_RO, &c->flags) &&
            test_bit(BCH_FS_STARTED, &c->flags) &&
-           test_bit(BCH_FS_ALLOC_CLEAN, &c->flags) &&
+           test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags) &&
            !c->opts.norecovery) {
                bch_verbose(c, "marking filesystem clean");
                bch2_fs_mark_clean(c);
        if (ret)
                goto err;
 
-       clear_bit(BCH_FS_ALLOC_CLEAN, &c->flags);
+       clear_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags);
 
        for_each_rw_member(ca, c, i)
                bch2_dev_allocator_add(c, ca);