set_freezable();
 
+       /*
+        * Data move operations can't run until after check_snapshots has
+        * completed, and bch2_snapshot_is_ancestor() is available.
+        */
+       kthread_wait_freezable(c->recovery_pass_done > BCH_RECOVERY_PASS_check_snapshots ||
+                              kthread_should_stop());
+
        bch2_move_stats_init(&move_stats, "copygc");
        bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
                              writepoint_ptr(&c->copygc_write_point),
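
Note: kthread_wait_freezable() is bcachefs's own wait helper (fs/bcachefs/util.h),
not a core kthread API. A rough sketch of its shape, for review context; the
in-tree macro may differ in detail:

    /*
     * Re-test the condition only after setting TASK_INTERRUPTIBLE so a
     * wakeup arriving between the test and schedule() can't be lost;
     * bail out early if the thread is being stopped.
     */
    #define kthread_wait_freezable(cond)                    \
    ({                                                      \
            int _ret = 0;                                   \
            while (1) {                                     \
                    set_current_state(TASK_INTERRUPTIBLE);  \
                    if (kthread_should_stop()) {            \
                            _ret = -1;                      \
                            break;                          \
                    }                                       \
                    if (cond)                               \
                            break;                          \
                    schedule();                             \
                    try_to_freeze();                        \
            }                                               \
            set_current_state(TASK_RUNNING);                \
            _ret;                                           \
    })

If the helper already tests kthread_should_stop() internally, the explicit
"|| kthread_should_stop()" in the condition above is belt and braces.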
 
 
        set_freezable();
 
+       /*
+        * Data move operations can't run until after check_snapshots has
+        * completed, and bch2_snapshot_is_ancestor() is available.
+        */
+       kthread_wait_freezable(c->recovery_pass_done > BCH_RECOVERY_PASS_check_snapshots ||
+                              kthread_should_stop());
+
        bch2_moving_ctxt_init(&ctxt, c, NULL, &r->work_stats,
                              writepoint_ptr(&c->rebalance_write_point),
                              true);
 
 #include "journal_seq_blacklist.h"
 #include "logged_ops.h"
 #include "move.h"
+#include "movinggc.h"
 #include "namei.h"
 #include "quota.h"
 #include "rebalance.h"
 
        c->recovery_pass_done = BCH_RECOVERY_PASS_NR - 1;
 
+       bch2_copygc_wakeup(c);
+       bch2_rebalance_wakeup(c);
+
        if (enabled_qtypes(c)) {
                ret = bch2_fs_quota_read(c);
                if (ret)
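
These unconditional wakeups cover the path that sets recovery_pass_done
straight to its final value without stepping through the pass loop, so the
data-move threads can't be left parked forever. The wakeup side is small; a
hedged sketch of bch2_copygc_wakeup() (the real one lives in movinggc.c and
may differ, e.g. in how the thread pointer's lifetime is protected):

    /*
     * Kick the copygc thread, if any, so it re-tests its
     * kthread_wait_freezable() condition. This sketch assumes the
     * thread pointer is RCU-protected against a concurrent stop.
     */
    void bch2_copygc_wakeup(struct bch_fs *c)
    {
            rcu_read_lock();
            struct task_struct *p = rcu_dereference(c->copygc_thread);
            if (p)
                    wake_up_process(p);
            rcu_read_unlock();
    }

bch2_rebalance_wakeup() is the analogous kick for the rebalance thread.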
 
        spin_lock_irq(&c->recovery_pass_lock);
 
        while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns) && !ret) {
+               unsigned prev_done = c->recovery_pass_done;
                unsigned pass = c->curr_recovery_pass;
 
                c->next_recovery_pass = pass + 1;
                }
 
                c->curr_recovery_pass = c->next_recovery_pass;
+
+               if (prev_done <= BCH_RECOVERY_PASS_check_snapshots &&
+                   c->recovery_pass_done > BCH_RECOVERY_PASS_check_snapshots) {
+                       bch2_copygc_wakeup(c);
+                       bch2_rebalance_wakeup(c);
+               }
        }
 
        spin_unlock_irq(&c->recovery_pass_lock);
 
        return ret;
 }
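
The prev_done bookkeeping makes the wakeup edge-triggered: the threads are
kicked once, on the iteration where recovery_pass_done first crosses
check_snapshots, rather than once per pass. Distilled into a standalone
helper (name invented for illustration; the logic is exactly the hunk above):

    /* Wake the data-move threads only on the transition past
     * check_snapshots; later passes don't re-trigger it. */
    static void wake_data_move_on_transition(struct bch_fs *c,
                                             unsigned prev_done)
    {
            if (prev_done <= BCH_RECOVERY_PASS_check_snapshots &&
                c->recovery_pass_done > BCH_RECOVERY_PASS_check_snapshots) {
                    bch2_copygc_wakeup(c);
                    bch2_rebalance_wakeup(c);
            }
    }

The waiters read recovery_pass_done without taking recovery_pass_lock; that
is safe because they re-test the condition after setting TASK_INTERRUPTIBLE,
and the wakeup here is issued after the counter has been updated.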
 
-static int bch2_fs_read_write_late(struct bch_fs *c)
-{
-       int ret;
-
-       /*
-        * Data move operations can't run until after check_snapshots has
-        * completed, and bch2_snapshot_is_ancestor() is available.
-        *
-        * Ideally we'd start copygc/rebalance earlier instead of waiting for
-        * all of recovery/fsck to complete:
-        */
-       ret = bch2_copygc_start(c);
-       if (ret) {
-               bch_err(c, "error starting copygc thread");
-               return ret;
-       }
-
-       ret = bch2_rebalance_start(c);
-       if (ret) {
-               bch_err(c, "error starting rebalance thread");
-               return ret;
-       }
-
-       return 0;
-}
-
 static int __bch2_fs_read_write(struct bch_fs *c, bool early)
 {
        int ret;
                atomic_long_inc(&c->writes[i]);
        }
 #endif
-       if (!early) {
-               ret = bch2_fs_read_write_late(c);
-               if (ret)
-                       goto err;
+
+       ret = bch2_copygc_start(c);
+       if (ret) {
+               bch_err_msg(c, ret, "error starting copygc thread");
+               goto err;
+       }
+
+       ret = bch2_rebalance_start(c);
+       if (ret) {
+               bch_err_msg(c, ret, "error starting rebalance thread");
+               goto err;
        }
 
        bch2_do_discards(c);
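
With the threads gating themselves on recovery_pass_done, the early/late
read-write split is no longer needed: copygc and rebalance can be started
unconditionally, even on an early read-write transition during recovery,
because a freshly created thread simply parks in kthread_wait_freezable()
until woken. For review context, the start side (unchanged by this patch) is
shaped roughly like the following; a sketch, not quoted from the tree:

    /* Starting the thread early is safe: bch2_copygc_thread() blocks
     * in kthread_wait_freezable() before doing any data moves. */
    int bch2_copygc_start(struct bch_fs *c)
    {
            struct task_struct *t;
            int ret;

            if (c->copygc_thread)
                    return 0;

            t = kthread_create(bch2_copygc_thread, c,
                               "bch-copygc/%s", c->name);
            ret = PTR_ERR_OR_ZERO(t);
            if (ret)
                    return ret;

            get_task_struct(t);
            c->copygc_thread = t;
            wake_up_process(t);
            return 0;
    }
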
        wake_up(&c->ro_ref_wait);
 
        down_write(&c->state_lock);
-       if (c->opts.read_only) {
+       if (c->opts.read_only)
                bch2_fs_read_only(c);
-       } else {
-               ret = !test_bit(BCH_FS_rw, &c->flags)
-                       ? bch2_fs_read_write(c)
-                       : bch2_fs_read_write_late(c);
-       }
+       else if (!test_bit(BCH_FS_rw, &c->flags))
+               ret = bch2_fs_read_write(c);
        up_write(&c->state_lock);
 
 err: