if (kstrtoull(buf, 10, &new_offset) < 0)
                return -EINVAL;
 
-       if (mddev->sync_thread)
+       if (mddev->sync_thread ||
+           test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
                return -EBUSY;
        if (new_offset == rdev->data_offset)
                /* reset is always permitted */
                ;
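These hunks are all from drivers/md/md.c; the hunk headers and some surrounding context have been trimmed, so the fragments jump between functions. The recurring change is the same everywhere: every test of mddev->sync_thread that means "a resync or recovery is in progress" now also checks the MD_RECOVERY_RUNNING flag. The sync thread is started asynchronously (apparently via a work item on md_misc_wq, which is why flush_workqueue() calls appear below), so there is a window where MD_RECOVERY_RUNNING is already set while mddev->sync_thread is still NULL; code that checks only the pointer can slip through that window. The hunk above applies this to the rdev new_offset handler; the next appears to be level_store(), the personality-change path.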
 
        if (mddev->sync_thread ||
+           test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
            mddev->reshape_position != MaxSector ||
            mddev->sysfs_active)
                return -EBUSY;
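Applied, the busy test in that path reads:

	if (mddev->sync_thread ||
	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    mddev->reshape_position != MaxSector ||
	    mddev->sysfs_active)
		return -EBUSY;

The next fragment is the sync_action store handler; the clear_bit(MD_RECOVERY_FROZEN) line below is context from just above the changed lines.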
                clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 
        if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
+               flush_workqueue(md_misc_wq);
                if (mddev->sync_thread) {
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                        md_reap_sync_thread(mddev);
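The flush_workqueue(md_misc_wq) added for "idle" and "frozen" presumably forces any already-queued work that would register mddev->sync_thread to run first, so the test below either sees the thread and interrupts it, or can trust that it was never started.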
 static void __md_stop_writes(struct mddev *mddev)
 {
        set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+       flush_workqueue(md_misc_wq);
        if (mddev->sync_thread) {
                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                md_reap_sync_thread(mddev);
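__md_stop_writes() gains the same flush ahead of the same reap sequence. The fragments that follow appear to be from md_set_readonly(): first its did_freeze prologue as context, then the interrupt-and-wait sequence.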
                set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                md_wakeup_thread(mddev->thread);
        }
-       if (mddev->sync_thread) {
+       if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+       if (mddev->sync_thread)
                /* Thread might be blocked waiting for metadata update
                 * which will now never happen */
                wake_up_process(mddev->sync_thread->tsk);
-       }
+
        mddev_unlock(mddev);
-       wait_event(resync_wait, mddev->sync_thread == NULL);
+       wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
+                                         &mddev->recovery));
        mddev_lock_nointr(mddev);
 
        mutex_lock(&mddev->open_mutex);
        if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
            mddev->sync_thread ||
+           test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
            (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
                printk("md: %s still in use.\n",mdname(mddev));
                if (did_freeze) {
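Reconstructed by applying the hunks above, md_set_readonly() now interrupts a recovery by flag rather than by thread pointer, and waits for MD_RECOVERY_RUNNING to clear instead of for the thread pointer to become NULL:

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	if (mddev->sync_thread)
		/* Thread might be blocked waiting for metadata update
		 * which will now never happen */
		wake_up_process(mddev->sync_thread->tsk);

	mddev_unlock(mddev);
	wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
					  &mddev->recovery));
	mddev_lock_nointr(mddev);

The "if (did_freeze) {" line above opens this function's error path; the excerpt then jumps to do_md_stop(), whose freeze prologue supplies the next three lines of context.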
                set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                md_wakeup_thread(mddev->thread);
        }
-       if (mddev->sync_thread) {
+       if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+       if (mddev->sync_thread)
                /* Thread might be blocked waiting for metadata update
                 * which will now never happen */
                wake_up_process(mddev->sync_thread->tsk);
-       }
+
        mddev_unlock(mddev);
-       wait_event(resync_wait, mddev->sync_thread == NULL);
+       wait_event(resync_wait, (mddev->sync_thread == NULL &&
+                                !test_bit(MD_RECOVERY_RUNNING,
+                                          &mddev->recovery)));
        mddev_lock_nointr(mddev);
 
        mutex_lock(&mddev->open_mutex);
        if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
            mddev->sysfs_active ||
            mddev->sync_thread ||
+           test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
            (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
                printk("md: %s still in use.\n",mdname(mddev));
                mutex_unlock(&mddev->open_mutex);
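do_md_stop() gets the same treatment, with one difference: since the array is about to be torn down, its wait_event() insists both that the thread pointer is NULL and that MD_RECOVERY_RUNNING is clear, and its still-in-use test likewise gains the RUNNING check.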
         * of each device.  If num_sectors is zero, we find the largest size
         * that fits.
         */
-       if (mddev->sync_thread)
+       if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+           mddev->sync_thread)
                return -EBUSY;
        if (mddev->ro)
                return -EROFS;
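The same pattern covers resizing (the comment fragment above appears to be from update_size()): refuse while a resync might be starting, not only while the thread already exists.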
        if (raid_disks <= 0 ||
            (mddev->max_disks && raid_disks >= mddev->max_disks))
                return -EINVAL;
-       if (mddev->sync_thread || mddev->reshape_position != MaxSector)
+       if (mddev->sync_thread ||
+           test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+           mddev->reshape_position != MaxSector)
                return -EBUSY;
 
        rdev_for_each(rdev, mddev) {
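And likewise for changing the number of devices, apparently update_raid_disks(); the rdev_for_each() line is the start of the unchanged loop that follows the check.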
                clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
                clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
                clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+               wake_up(&resync_wait);
                if (test_and_clear_bit(MD_RECOVERY_RECOVER,
                                       &mddev->recovery))
                        if (mddev->sysfs_action)
        not_running:
                if (!mddev->sync_thread) {
                        clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+                       wake_up(&resync_wait);
                        if (test_and_clear_bit(MD_RECOVERY_RECOVER,
                                               &mddev->recovery))
                                if (mddev->sysfs_action)
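The new wait conditions are only safe if every place that clears MD_RECOVERY_RUNNING also wakes resync_wait. These two hunks, in what appears to be the recovery kick-off logic of md_check_recovery(), add exactly that, including the not_running: path taken when the sync thread fails to start.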
 
        /* resync has finished, collect result */
        md_unregister_thread(&mddev->sync_thread);
-       wake_up(&resync_wait);
        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
            !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
                /* success...*/
        clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
        clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+       wake_up(&resync_wait);
        /* flag recovery needed just to double check */
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        sysfs_notify_dirent_safe(mddev->sysfs_action);
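Finally, in md_reap_sync_thread() the wake_up(&resync_wait) moves from immediately after md_unregister_thread() down to after the MD_RECOVERY_* bits are cleared, so a woken waiter re-evaluates its condition against the final flag state. The overall protocol is the classic flag-plus-waitqueue pattern. A minimal userspace sketch of that pattern, with pthreads standing in for the kernel wait queue (all names illustrative; this is not md code):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t resync_wait = PTHREAD_COND_INITIALIZER;
	static bool recovery_running = true;	/* models MD_RECOVERY_RUNNING */

	static void *sync_thread_fn(void *arg)
	{
		(void)arg;
		/* ... resync work would happen here ... */
		pthread_mutex_lock(&lock);
		recovery_running = false;		/* clear the flag first... */
		pthread_cond_broadcast(&resync_wait);	/* ...then wake the waiters */
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, sync_thread_fn, NULL);

		/* like wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, ...)):
		 * sleep until the flag is observed clear, re-checking after each wakeup */
		pthread_mutex_lock(&lock);
		while (recovery_running)
			pthread_cond_wait(&resync_wait, &lock);
		pthread_mutex_unlock(&lock);

		pthread_join(t, NULL);
		printf("recovery finished\n");
		return 0;
	}

The ordering mirrors the patch: clear the flag, then wake, and the waiter always re-checks the flag after waking.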