                md_error(mddev, rdev);
                if (!test_bit(Faulty, &rdev->flags)
                    && (bio->bi_opf & MD_FAILFAST)) {
-                       set_bit(MD_NEED_REWRITE, &mddev->flags);
+                       set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
                        set_bit(LastDev, &rdev->flags);
                }
        } else

 int md_super_wait(struct mddev *mddev)
 {
        /* wait for all superblock writes that were scheduled to complete */
        wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
-       if (test_and_clear_bit(MD_NEED_REWRITE, &mddev->flags))
+       if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
                return -EAGAIN;
        return 0;
 }
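
Taken together, the two hunks above wire the failfast retry handshake onto the new word: when a failfast superblock write fails on a device that is not (yet) Faulty, the completion handler records MD_SB_NEED_REWRITE in mddev->sb_flags, and md_super_wait() reports it as -EAGAIN so that md_update_sb() can retry the write (the "goto rewrite" path in a later hunk).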
 
        if (mddev->ro) {
                if (force_change)
-                       set_bit(MD_CHANGE_DEVS, &mddev->flags);
+                       set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
                return;
        }
 
 repeat:
        if (mddev_is_clustered(mddev)) {
-               if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
+               if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
                        force_change = 1;
-               if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
+               if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
                        nospares = 1;
                ret = md_cluster_ops->metadata_update_start(mddev);
                /* Has someone else updated the sb */
                if (!does_sb_need_changing(mddev)) {
                        if (ret == 0)
                                md_cluster_ops->metadata_update_cancel(mddev);
-                       bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
-                                                        BIT(MD_CHANGE_DEVS) |
-                                                        BIT(MD_CHANGE_CLEAN));
+                       bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
+                                                        BIT(MD_SB_CHANGE_DEVS) |
+                                                        BIT(MD_SB_CHANGE_CLEAN));
                        return;
                }
        }
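
bit_clear_unless() carries the correctness argument here: MD_SB_CHANGE_PENDING may only be dropped if no racing writer has set a new change bit, and the return value tells the caller whether that held. Roughly, and assuming the kernel's READ_ONCE()/cmpxchg() primitives (the real helper is a type-generic macro, not this hypothetical function), it behaves like:

/* Sketch of bit_clear_unless(ptr, clear, unless): atomically clear the
 * "clear" bits, but only while none of the "unless" bits are set; the
 * return value reports whether the clear actually happened. */
static inline bool bit_clear_unless_sketch(unsigned long *ptr,
                                           unsigned long clear,
                                           unsigned long unless)
{
        unsigned long old, new;

        do {
                old = READ_ONCE(*ptr);
                new = old & ~clear;
        } while (!(old & unless) && cmpxchg(ptr, old, new) != old);

        return !(old & unless); /* false: an "unless" bit blocked the clear */
}

When it returns false, md_update_sb() leaves PENDING set and takes the "goto repeat" path near the end of the function to write the superblock again.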
 
        }
        if (!mddev->persistent) {
-               clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
-               clear_bit(MD_CHANGE_DEVS, &mddev->flags);
+               clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+               clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
                if (!mddev->external) {
-                       clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+                       clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
                        rdev_for_each(rdev, mddev) {
                                if (rdev->badblocks.changed) {
                                        rdev->badblocks.changed = 0;
 
        mddev->utime = ktime_get_real_seconds();
 
-       if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
+       if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
                force_change = 1;
-       if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
+       if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
                /* just a clean <-> dirty transition, possibly leave spares alone,
                 * though if the events count isn't the right even/odd, we will
                 * have to do spares after all
                 */
                nospares = 1;
        }
        if (md_super_wait(mddev) < 0)
                goto rewrite;
-       /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
+       /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
 
        if (mddev_is_clustered(mddev) && ret == 0)
                md_cluster_ops->metadata_update_finish(mddev);
 
        if (mddev->in_sync != sync_req ||
-           !bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
-                              BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_CLEAN)))
+           !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
+                              BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
                /* have to write it out again */
                goto repeat;
        wake_up(&mddev->sb_wait);
        }
        sysfs_notify_dirent_safe(rdev->sysfs_state);
 
-       set_bit(MD_CHANGE_DEVS, &mddev->flags);
+       set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
        if (mddev->degraded)
                set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                        if (err == 0) {
                                md_kick_rdev_from_array(rdev);
                                if (mddev->pers) {
-                                       set_bit(MD_CHANGE_DEVS, &mddev->flags);
+                                       set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
                                        md_wakeup_thread(mddev->thread);
                                }
                                md_new_event(mddev);
        }
        blk_set_stacking_limits(&mddev->queue->limits);
        pers->run(mddev);
-       set_bit(MD_CHANGE_DEVS, &mddev->flags);
+       set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
        mddev_resume(mddev);
        if (!mddev->thread)
                md_update_sb(mddev, 1);
        if (!err) {
                mddev->recovery_cp = n;
                if (mddev->pers)
-                       set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+                       set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
        }
        mddev_unlock(mddev);
        return err ?: len;
                        st = read_auto;
                        break;
                case 0:
-                       if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
+                       if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
                                st = write_pending;
                        else if (mddev->in_sync)
                                st = clean;
                spin_lock(&mddev->lock);
                if (st == active) {
                        restart_array(mddev);
-                       clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+                       clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
                        md_wakeup_thread(mddev->thread);
                        wake_up(&mddev->sb_wait);
                        err = 0;
                                        mddev->in_sync = 1;
                                        if (mddev->safemode == 1)
                                                mddev->safemode = 0;
-                                       set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+                                       set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
                                }
                                err = 0;
                        } else
                        err = restart_array(mddev);
                        if (err)
                                break;
-                       clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+                       clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
                        wake_up(&mddev->sb_wait);
                        err = 0;
                } else {
                set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 
-       if (mddev->flags & MD_UPDATE_SB_FLAGS)
+       if (mddev->sb_flags)
                md_update_sb(mddev, 0);
 
        md_new_event(mddev);
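
With the superblock bits segregated into mddev->sb_flags, the old "mddev->flags & MD_UPDATE_SB_FLAGS" test collapses into a plain non-zero check and the MD_UPDATE_SB_FLAGS mask can be retired; the same simplification recurs in the shutdown and md_check_recovery() hunks below.
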
        mddev->level = LEVEL_NONE;
        mddev->clevel[0] = 0;
        mddev->flags = 0;
+       mddev->sb_flags = 0;
        mddev->ro = 0;
        mddev->metadata_type[0] = 0;
        mddev->chunk_sectors = 0;
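
Resetting the array now means clearing two words: the general-purpose mddev->flags and the new mddev->sb_flags are zeroed independently.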
 
        if (mddev->ro == 0 &&
            ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
-            (mddev->flags & MD_UPDATE_SB_FLAGS))) {
+            mddev->sb_flags)) {
                /* mark array as shutdown cleanly */
                if (!mddev_is_clustered(mddev))
                        mddev->in_sync = 1;
                 * which will now never happen */
                wake_up_process(mddev->sync_thread->tsk);
 
-       if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags))
+       if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
                return -EBUSY;
        mddev_unlock(mddev);
        wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
                                          &mddev->recovery));
        wait_event(mddev->sb_wait,
-                  !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+                  !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
        mddev_lock_nointr(mddev);
 
        mutex_lock(&mddev->open_mutex);
                md_cluster_ops->remove_disk(mddev, rdev);
 
        md_kick_rdev_from_array(rdev);
-       set_bit(MD_CHANGE_DEVS, &mddev->flags);
+       set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
        if (mddev->thread)
                md_wakeup_thread(mddev->thread);
        else
 
        rdev->raid_disk = -1;
 
-       set_bit(MD_CHANGE_DEVS, &mddev->flags);
+       set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
        if (!mddev->thread)
                md_update_sb(mddev, 1);
        /*
 
        mddev->max_disks     = MD_SB_DISKS;
 
-       if (mddev->persistent)
+       if (mddev->persistent) {
                mddev->flags         = 0;
-       set_bit(MD_CHANGE_DEVS, &mddev->flags);
+               mddev->sb_flags      = 0;
+       }
+       set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 
        mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
        mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
                        /* If a device failed while we were read-only, we
                         * need to make sure the metadata is updated now.
                         */
-                       if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
+                       if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
                                mddev_unlock(mddev);
                                wait_event(mddev->sb_wait,
-                                          !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
-                                          !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+                                          !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
+                                          !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
                                mddev_lock_nointr(mddev);
                        }
                } else {
                spin_lock(&mddev->lock);
                if (mddev->in_sync) {
                        mddev->in_sync = 0;
-                       set_bit(MD_CHANGE_CLEAN, &mddev->flags);
-                       set_bit(MD_CHANGE_PENDING, &mddev->flags);
+                       set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+                       set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
                        md_wakeup_thread(mddev->thread);
                        did_change = 1;
                }
        if (did_change)
                sysfs_notify_dirent_safe(mddev->sysfs_state);
        wait_event(mddev->sb_wait,
-                  !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+                  !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
 }
 EXPORT_SYMBOL(md_write_start);
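
md_write_start() is the writer-side half of the protocol: the first write to a clean array clears in_sync, raises MD_SB_CHANGE_CLEAN and MD_SB_CHANGE_PENDING, wakes the md thread, and then sleeps on sb_wait until md_update_sb() has pushed out the dirty superblock and cleared PENDING.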
 
  * attempting a GFP_KERNEL allocation while holding the mddev lock.
  * Must be called with mddev_lock held.
  *
- * In the ->external case MD_CHANGE_PENDING can not be cleared until mddev->lock
+ * In the ->external case MD_SB_CHANGE_PENDING can not be cleared until mddev->lock
  * is dropped, so return -EAGAIN after notifying userspace.
  */
 int md_allow_write(struct mddev *mddev)
        spin_lock(&mddev->lock);
        if (mddev->in_sync) {
                mddev->in_sync = 0;
-               set_bit(MD_CHANGE_CLEAN, &mddev->flags);
-               set_bit(MD_CHANGE_PENDING, &mddev->flags);
+               set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+               set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
                if (mddev->safemode_delay &&
                    mddev->safemode == 0)
                        mddev->safemode = 1;
        } else
                spin_unlock(&mddev->lock);
 
-       if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
+       if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
                return -EAGAIN;
        else
                return 0;
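
With externally managed metadata the kernel never writes the superblock itself, so MD_SB_CHANGE_PENDING stays set until userspace reacts; md_allow_write() surfaces that as -EAGAIN, exactly as the comment above describes.
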
                            j > mddev->recovery_cp)
                                mddev->recovery_cp = j;
                        update_time = jiffies;
-                       set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+                       set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
                        sysfs_notify(&mddev->kobj, NULL, "sync_completed");
                }
 
        /* set CHANGE_PENDING here since maybe another update is needed,
         * so other nodes are informed. It should be harmless for normal
         * raid */
-       set_mask_bits(&mddev->flags, 0,
-                     BIT(MD_CHANGE_PENDING) | BIT(MD_CHANGE_DEVS));
+       set_mask_bits(&mddev->sb_flags, 0,
+                     BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
 
        spin_lock(&mddev->lock);
        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
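
set_mask_bits() is the other atomic helper the patch leans on; with an empty mask, as in the call above, it reduces to setting the given bits in a single atomic read-modify-write. A rough sketch of its semantics (again a hypothetical stand-in for the type-generic kernel macro):

/* Sketch of set_mask_bits(ptr, mask, bits): atomically install
 * (*ptr & ~mask) | bits, retrying until the cmpxchg() wins; the md
 * call sites ignore the resulting value. */
static inline unsigned long set_mask_bits_sketch(unsigned long *ptr,
                                                 unsigned long mask,
                                                 unsigned long bits)
{
        unsigned long old, new;

        do {
                old = READ_ONCE(*ptr);
                new = (old & ~mask) | bits;
        } while (cmpxchg(ptr, old, new) != old);

        return new;
}
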
                        if (!test_bit(Journal, &rdev->flags))
                                spares++;
                        md_new_event(mddev);
-                       set_bit(MD_CHANGE_DEVS, &mddev->flags);
+                       set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
                }
        }
 no_add:
        if (removed)
-               set_bit(MD_CHANGE_DEVS, &mddev->flags);
+               set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
        return spares;
 }
 
        if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
                return;
        if ( ! (
-               (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
+               (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
                test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
                test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
                test_bit(MD_RELOAD_SB, &mddev->flags) ||
                        md_reap_sync_thread(mddev);
                        clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                        clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-                       clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+                       clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
                        goto unlock;
                }
 
                            mddev->recovery_cp == MaxSector) {
                                mddev->in_sync = 1;
                                did_change = 1;
-                               set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+                               set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
                        }
                        if (mddev->safemode == 1)
                                mddev->safemode = 0;
                                sysfs_notify_dirent_safe(mddev->sysfs_state);
                }
 
-               if (mddev->flags & MD_UPDATE_SB_FLAGS)
+               if (mddev->sb_flags)
                        md_update_sb(mddev, 0);
 
                if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
                if (mddev->pers->spare_active(mddev)) {
                        sysfs_notify(&mddev->kobj, NULL,
                                     "degraded");
-                       set_bit(MD_CHANGE_DEVS, &mddev->flags);
+                       set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
                }
        }
        if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
                        rdev->saved_raid_disk = -1;
 
        md_update_sb(mddev, 1);
-       /* MD_CHANGE_PENDING should be cleared by md_update_sb, so we can
+       /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
         * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
         * clustered raid */
        if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
                        sysfs_notify(&rdev->kobj, NULL,
                                     "unacknowledged_bad_blocks");
                sysfs_notify_dirent_safe(rdev->sysfs_state);
-               set_mask_bits(&mddev->flags, 0,
-                             BIT(MD_CHANGE_CLEAN) | BIT(MD_CHANGE_PENDING));
+               set_mask_bits(&mddev->sb_flags, 0,
+                             BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
                md_wakeup_thread(rdev->mddev->thread);
                return 1;
        } else
 
                bio->bi_iter.bi_sector < conf->reshape_progress))) {
                /* Need to update reshape_position in metadata */
                mddev->reshape_position = conf->reshape_progress;
-               set_mask_bits(&mddev->flags, 0,
-                             BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+               set_mask_bits(&mddev->sb_flags, 0,
+                             BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
                md_wakeup_thread(mddev->thread);
                raid10_log(conf->mddev, "wait reshape metadata");
                wait_event(mddev->sb_wait,
-                          !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+                          !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
 
                conf->reshape_safe = mddev->reshape_position;
        }
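
The raid10 hunk above shows the pattern writers must follow inside the reshape window: publish the new reshape_position by setting DEVS and PENDING, then sleep on sb_wait until PENDING clears, so the on-disk metadata can never lag behind the data being relocated.
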
        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
        set_bit(Blocked, &rdev->flags);
        set_bit(Faulty, &rdev->flags);
-       set_mask_bits(&mddev->flags, 0,
-                     BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+       set_mask_bits(&mddev->sb_flags, 0,
+                     BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
        spin_unlock_irqrestore(&conf->device_lock, flags);
        pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n"
                "md/raid10:%s: Operation continuing on %d devices.\n",
        md_check_recovery(mddev);
 
        if (!list_empty_careful(&conf->bio_end_io_list) &&
-           !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+           !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
                LIST_HEAD(tmp);
                spin_lock_irqsave(&conf->device_lock, flags);
-               if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+               if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
                        while (!list_empty(&conf->bio_end_io_list)) {
                                list_move(conf->bio_end_io_list.prev, &tmp);
                                conf->nr_queued--;
                }
 
                cond_resched();
-               if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+               if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
                        md_check_recovery(mddev);
        }
        blk_finish_plug(&plug);
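
In the personality daemon threads, MD_SB_CHANGE_PENDING on its own is not a trigger: PENDING is only ever cleared by md_update_sb(), so the daemons call md_check_recovery() only when some other bit indicates that a superblock write is actually wanted.
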
        spin_unlock_irq(&conf->device_lock);
        mddev->raid_disks = conf->geo.raid_disks;
        mddev->reshape_position = conf->reshape_progress;
-       set_bit(MD_CHANGE_DEVS, &mddev->flags);
+       set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 
        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
                else
                        mddev->curr_resync_completed = conf->reshape_progress;
                conf->reshape_checkpoint = jiffies;
-               set_bit(MD_CHANGE_DEVS, &mddev->flags);
+               set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
                md_wakeup_thread(mddev->thread);
-               wait_event(mddev->sb_wait, mddev->flags == 0 ||
+               wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
                           test_bit(MD_RECOVERY_INTR, &mddev->recovery));
                if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
                        allow_barrier(conf);
 
                        if (bad < 0) {
                                set_bit(BlockedBadBlocks, &rdev->flags);
                                if (!conf->mddev->external &&
-                                   conf->mddev->flags) {
+                                   conf->mddev->sb_flags) {
                                        /* It is very unlikely, but we might
                                         * still need to write out the
                                         * bad block log - better give it
 
        set_bit(Blocked, &rdev->flags);
        set_bit(Faulty, &rdev->flags);
-       set_mask_bits(&mddev->flags, 0,
-                     BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+       set_mask_bits(&mddev->sb_flags, 0,
+                     BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
        pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n"
                "md/raid:%s: Operation continuing on %d devices.\n",
                mdname(mddev),
        }
 
        if (!bio_list_empty(&s.return_bi)) {
-               if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags)) {
+               if (test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) {
                        spin_lock_irq(&conf->device_lock);
                        bio_list_merge(&conf->return_bi, &s.return_bi);
                        spin_unlock_irq(&conf->device_lock);
                mddev->reshape_position = conf->reshape_progress;
                mddev->curr_resync_completed = sector_nr;
                conf->reshape_checkpoint = jiffies;
-               set_bit(MD_CHANGE_DEVS, &mddev->flags);
+               set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
                md_wakeup_thread(mddev->thread);
-               wait_event(mddev->sb_wait, mddev->flags == 0 ||
+               wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
                           test_bit(MD_RECOVERY_INTR, &mddev->recovery));
                if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                        return 0;
                mddev->reshape_position = conf->reshape_progress;
                mddev->curr_resync_completed = sector_nr;
                conf->reshape_checkpoint = jiffies;
-               set_bit(MD_CHANGE_DEVS, &mddev->flags);
+               set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
                md_wakeup_thread(mddev->thread);
                wait_event(mddev->sb_wait,
-                          !test_bit(MD_CHANGE_DEVS, &mddev->flags)
+                          !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)
                           || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
                if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                        goto ret;
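
Note the two styles of reshape checkpoint: the raid10 path and the first raid5 checkpoint wait for mddev->sb_flags to drain to zero, while this final raid5 checkpoint waits only for MD_SB_CHANGE_DEVS to clear; every variant bails out early when MD_RECOVERY_INTR is raised.
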
        md_check_recovery(mddev);
 
        if (!bio_list_empty(&conf->return_bi) &&
-           !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+           !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
                struct bio_list tmp = BIO_EMPTY_LIST;
                spin_lock_irq(&conf->device_lock);
-               if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+               if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
                        bio_list_merge(&tmp, &conf->return_bi);
                        bio_list_init(&conf->return_bi);
                }
                        break;
                handled += batch_size;
 
-               if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) {
+               if (mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) {
                        spin_unlock_irq(&conf->device_lock);
                        md_check_recovery(mddev);
                        spin_lock_irq(&conf->device_lock);
        }
        mddev->raid_disks = conf->raid_disks;
        mddev->reshape_position = conf->reshape_progress;
-       set_bit(MD_CHANGE_DEVS, &mddev->flags);
+       set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 
        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
                        conf->chunk_sectors = new_chunk;
                        mddev->chunk_sectors = new_chunk;
                }
-               set_bit(MD_CHANGE_DEVS, &mddev->flags);
+               set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
                md_wakeup_thread(mddev->thread);
        }
        return check_reshape(mddev);