timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
        atomic_set(&mddev->active, 1);
        atomic_set(&mddev->openers, 0);
+       atomic_set(&mddev->sync_seq, 0);
        spin_lock_init(&mddev->lock);
        atomic_set(&mddev->flush_pending, 0);
        init_waitqueue_head(&mddev->sb_wait);
        if (work_pending(&mddev->del_work))
                flush_workqueue(md_misc_wq);
 
-       if (mddev->sync_thread) {
-               set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-               md_reap_sync_thread(mddev);
-       }
+       set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+       /*
+        * Thread might be blocked waiting for metadata update which will now
+        * never happen
+        */
+       md_wakeup_thread_directly(mddev->sync_thread);
 
        mddev_unlock(mddev);
 }
 
 static void idle_sync_thread(struct mddev *mddev)
 {
+       /*
+        * Snapshot sync_seq before asking the sync thread to stop.
+        * md_reap_sync_thread() increments it after unregistering the
+        * thread, so a changed value below proves the old sync thread
+        * has been fully reaped, not merely signalled.
+        */
+       int sync_seq = atomic_read(&mddev->sync_seq);
+
         mutex_lock(&mddev->sync_mutex);
         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
         stop_sync_thread(mddev);
+
+       /*
+        * Wait outside stop_sync_thread() until either the running sync
+        * thread was reaped (sync_seq advanced) or no recovery is running
+        * at all; waiting here rather than under mddev_lock avoids the
+        * deadlock with the sync thread's own metadata-update wait.
+        */
+       wait_event(resync_wait, sync_seq != atomic_read(&mddev->sync_seq) ||
+                       !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
+
         mutex_unlock(&mddev->sync_mutex);
 }
 
        mutex_lock(&mddev->sync_mutex);
        set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
        stop_sync_thread(mddev);
+
+       wait_event(resync_wait, mddev->sync_thread == NULL &&
+                       !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
+
        mutex_unlock(&mddev->sync_mutex);
 }
 
 
        /* resync has finished, collect result */
        md_unregister_thread(&mddev->sync_thread);
+       atomic_inc(&mddev->sync_seq);
+
        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
            !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
            mddev->degraded != mddev->raid_disks) {