INIT_WORK(&mddev->flush_work, md_submit_flush_data);
        atomic_set(&mddev->flush_pending, 1);
        rcu_read_lock();
-       list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
+       rdev_for_each_rcu(rdev, mddev)
                if (rdev->raid_disk >= 0 &&
                    !test_bit(Faulty, &rdev->flags)) {
                        /* Take two references, one is dropped
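
This first hunk is representative of the whole patch: open-coded
list_for_each_entry*() walks of mddev->disks become md-specific
wrappers.  The flush path iterates under rcu_read_lock() (taken just
above), so it gets rdev_for_each_rcu(); the comment opening at the end
of the hunk goes on to explain the two references taken per rdev, one
dropped when the flush request finishes and one after the RCU read
lock is re-acquired.  For reference, the wrapper family in
drivers/md/md.h looks roughly like the sketch below (paraphrased, not
the verbatim header):

	/* Iterate over all rdevs attached to an mddev. */
	#define rdev_for_each(rdev, mddev)				\
		list_for_each_entry(rdev, &((mddev)->disks), same_set)

	/* Safe against removal of the current entry; needs a spare
	 * cursor 'tmp' to cache the next node.
	 */
	#define rdev_for_each_safe(rdev, tmp, mddev)			\
		list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

	/* Lockless traversal; the caller must hold rcu_read_lock(). */
	#define rdev_for_each_rcu(rdev, mddev)				\
		list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
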
 {
        struct md_rdev *rdev;
 
-       list_for_each_entry(rdev, &mddev->disks, same_set)
+       rdev_for_each(rdev, mddev)
                if (rdev->desc_nr == nr)
                        return rdev;
 
 {
        struct md_rdev *rdev;
 
-       list_for_each_entry(rdev, &mddev->disks, same_set)
+       rdev_for_each(rdev, mddev)
                if (rdev->bdev->bd_dev == dev)
                        return rdev;
 
                sb->state |= (1<<MD_SB_BITMAP_PRESENT);
 
        sb->disks[0].state = (1<<MD_DISK_REMOVED);
-       list_for_each_entry(rdev2, &mddev->disks, same_set) {
+       rdev_for_each(rdev2, mddev) {
                mdp_disk_t *d;
                int desc_nr;
                int is_active = test_bit(In_sync, &rdev2->flags);
        }
 
        max_dev = 0;
-       list_for_each_entry(rdev2, &mddev->disks, same_set)
+       rdev_for_each(rdev2, mddev)
                if (rdev2->desc_nr+1 > max_dev)
                        max_dev = rdev2->desc_nr+1;
 
        for (i=0; i<max_dev;i++)
                sb->dev_roles[i] = cpu_to_le16(0xfffe);
        
-       list_for_each_entry(rdev2, &mddev->disks, same_set) {
+       rdev_for_each(rdev2, mddev) {
                i = rdev2->desc_nr;
                if (test_bit(Faulty, &rdev2->flags))
                        sb->dev_roles[i] = cpu_to_le16(0xfffe);
                return 0; /* nothing to do */
        if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
                return 0; /* shouldn't register, or already is */
-       list_for_each_entry(rdev, &mddev->disks, same_set) {
+       rdev_for_each(rdev, mddev) {
                /* skip spares and non-functional disks */
                if (test_bit(Faulty, &rdev->flags))
                        continue;
 {
        struct md_rdev *rdev, *tmp;
 
-       rdev_for_each(rdev, tmp, mddev) {
+       rdev_for_each_safe(rdev, tmp, mddev) {
                if (!rdev->mddev) {
                        MD_BUG();
                        continue;
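
This is the first hunk where the old three-argument
rdev_for_each(rdev, tmp, mddev), which had always been the
list_for_each_entry_safe() form, becomes the explicitly named
rdev_for_each_safe(); the rename frees the short name for the common
read-only walk.  The safe variant is needed here because this function
(export_array) unlinks entries as it goes, via kick_rdev_from_array().
A minimal sketch of the pattern, assuming that elided loop body:

	/* 'tmp' caches the next node before the body runs, so
	 * unlinking rdev from mddev->disks does not break the walk.
	 */
	rdev_for_each_safe(rdev, tmp, mddev)
		kick_rdev_from_array(rdev);
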
                        bitmap_print_sb(mddev->bitmap);
                else
                        printk("%s: ", mdname(mddev));
-               list_for_each_entry(rdev, &mddev->disks, same_set)
+               rdev_for_each(rdev, mddev)
                        printk("<%s>", bdevname(rdev->bdev,b));
                printk("\n");
 
-               list_for_each_entry(rdev, &mddev->disks, same_set)
+               rdev_for_each(rdev, mddev)
                        print_rdev(rdev, mddev->major_version);
        }
        printk("md:     **********************************\n");
         * with the rest of the array)
         */
        struct md_rdev *rdev;
-       list_for_each_entry(rdev, &mddev->disks, same_set) {
+       rdev_for_each(rdev, mddev) {
                if (rdev->sb_events == mddev->events ||
                    (nospares &&
                     rdev->raid_disk < 0 &&
 
 repeat:
        /* First make sure individual recovery_offsets are correct */
-       list_for_each_entry(rdev, &mddev->disks, same_set) {
+       rdev_for_each(rdev, mddev) {
                if (rdev->raid_disk >= 0 &&
                    mddev->delta_disks >= 0 &&
                    !test_bit(In_sync, &rdev->flags) &&
                clear_bit(MD_CHANGE_DEVS, &mddev->flags);
                if (!mddev->external) {
                        clear_bit(MD_CHANGE_PENDING, &mddev->flags);
-                       list_for_each_entry(rdev, &mddev->disks, same_set) {
+                       rdev_for_each(rdev, mddev) {
                                if (rdev->badblocks.changed) {
                                        md_ack_all_badblocks(&rdev->badblocks);
                                        md_error(mddev, rdev);
                mddev->events --;
        }
 
-       list_for_each_entry(rdev, &mddev->disks, same_set) {
+       rdev_for_each(rdev, mddev) {
                if (rdev->badblocks.changed)
                        any_badblocks_changed++;
                if (test_bit(Faulty, &rdev->flags))
                 mdname(mddev), mddev->in_sync);
 
        bitmap_update_sb(mddev->bitmap);
-       list_for_each_entry(rdev, &mddev->disks, same_set) {
+       rdev_for_each(rdev, mddev) {
                char b[BDEVNAME_SIZE];
 
                if (rdev->sb_loaded != 1)
        if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
                sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 
-       list_for_each_entry(rdev, &mddev->disks, same_set) {
+       rdev_for_each(rdev, mddev) {
                if (test_and_clear_bit(FaultRecorded, &rdev->flags))
                        clear_bit(Blocked, &rdev->flags);
 
                        struct md_rdev *rdev2;
 
                        mddev_lock(mddev);
-                       list_for_each_entry(rdev2, &mddev->disks, same_set)
+                       rdev_for_each(rdev2, mddev)
                                if (rdev->bdev == rdev2->bdev &&
                                    rdev != rdev2 &&
                                    overlaps(rdev->data_offset, rdev->sectors,
        char b[BDEVNAME_SIZE];
 
        freshest = NULL;
-       rdev_for_each(rdev, tmp, mddev)
+       rdev_for_each_safe(rdev, tmp, mddev)
                switch (super_types[mddev->major_version].
                        load_super(rdev, freshest, mddev->minor_version)) {
                case 1:
                validate_super(mddev, freshest);
 
        i = 0;
-       rdev_for_each(rdev, tmp, mddev) {
+       rdev_for_each_safe(rdev, tmp, mddev) {
                if (mddev->max_disks &&
                    (rdev->desc_nr >= mddev->max_disks ||
                     i > mddev->max_disks)) {
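
The two analyze_sbs() loops above keep the safe iterator under its new
name for the same reason: an rdev whose superblock fails load_super()
or validate_super() is kicked out of the array in the middle of the
walk.
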
                return -EINVAL;
        }
 
-       list_for_each_entry(rdev, &mddev->disks, same_set)
+       rdev_for_each(rdev, mddev)
                rdev->new_raid_disk = rdev->raid_disk;
 
        /* ->takeover must set new_* and/or delta_disks
                mddev->safemode = 0;
        }
 
-       list_for_each_entry(rdev, &mddev->disks, same_set) {
+       rdev_for_each(rdev, mddev) {
                if (rdev->raid_disk < 0)
                        continue;
                if (rdev->new_raid_disk >= mddev->raid_disks)
                        continue;
                sysfs_unlink_rdev(mddev, rdev);
        }
-       list_for_each_entry(rdev, &mddev->disks, same_set) {
+       rdev_for_each(rdev, mddev) {
                if (rdev->raid_disk < 0)
                        continue;
                if (rdev->new_raid_disk == rdev->raid_disk)
         * the only valid external interface is through the md
         * device.
         */
-       list_for_each_entry(rdev, &mddev->disks, same_set) {
+       rdev_for_each(rdev, mddev) {
                if (test_bit(Faulty, &rdev->flags))
                        continue;
                sync_blockdev(rdev->bdev);
                struct md_rdev *rdev2;
                int warned = 0;
 
-               list_for_each_entry(rdev, &mddev->disks, same_set)
-                       list_for_each_entry(rdev2, &mddev->disks, same_set) {
+               rdev_for_each(rdev, mddev)
+                       rdev_for_each(rdev2, mddev) {
                                if (rdev < rdev2 &&
                                    rdev->bdev->bd_contains ==
                                    rdev2->bdev->bd_contains) {
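
This nested pair is the one place where two cursors are live at once,
so each loop gets its own variable (rdev and rdev2); the rdev < rdev2
pointer comparison ensures each unordered pair of rdevs sharing a
bd_contains is considered only once.
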
        mddev->in_sync = 1;
        smp_wmb();
        mddev->ready = 1;
-       list_for_each_entry(rdev, &mddev->disks, same_set)
+       rdev_for_each(rdev, mddev)
                if (rdev->raid_disk >= 0)
                        if (sysfs_link_rdev(mddev, rdev))
                                /* failure here is OK */;
                /* tell userspace to handle 'inactive' */
                sysfs_notify_dirent_safe(mddev->sysfs_state);
 
-               list_for_each_entry(rdev, &mddev->disks, same_set)
+               rdev_for_each(rdev, mddev)
                        if (rdev->raid_disk >= 0)
                                sysfs_unlink_rdev(mddev, rdev);
 
 
        printk(KERN_INFO "md: running: ");
 
-       list_for_each_entry(rdev, &mddev->disks, same_set) {
+       rdev_for_each(rdev, mddev) {
                char b[BDEVNAME_SIZE];
                printk("<%s>", bdevname(rdev->bdev,b));
        }
        struct md_rdev *rdev;
 
        nr=working=insync=failed=spare=0;
-       list_for_each_entry(rdev, &mddev->disks, same_set) {
+       rdev_for_each(rdev, mddev) {
                nr++;
                if (test_bit(Faulty, &rdev->flags))
                        failed++;
                 * grow, and re-add.
                 */
                return -EBUSY;
-       list_for_each_entry(rdev, &mddev->disks, same_set) {
+       rdev_for_each(rdev, mddev) {
                sector_t avail = rdev->sectors;
 
                if (fit && (num_sectors == 0 || num_sectors > avail))
                }
 
                sectors = 0;
-               list_for_each_entry(rdev, &mddev->disks, same_set) {
+               rdev_for_each(rdev, mddev) {
                        char b[BDEVNAME_SIZE];
                        seq_printf(seq, " %s[%d]",
                                bdevname(rdev->bdev,b), rdev->desc_nr);
                max_sectors = mddev->dev_sectors;
                j = MaxSector;
                rcu_read_lock();
-               list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
+               rdev_for_each_rcu(rdev, mddev)
                        if (rdev->raid_disk >= 0 &&
                            !test_bit(Faulty, &rdev->flags) &&
                            !test_bit(In_sync, &rdev->flags) &&
                        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                                mddev->curr_resync = MaxSector;
                        rcu_read_lock();
-                       list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
+                       rdev_for_each_rcu(rdev, mddev)
                                if (rdev->raid_disk >= 0 &&
                                    mddev->delta_disks >= 0 &&
                                    !test_bit(Faulty, &rdev->flags) &&
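
Both md_do_sync() hunks above switch to rdev_for_each_rcu(); note the
rcu_read_lock() immediately before each loop, which is what makes the
lockless list walk legal.  A hypothetical usage sketch (this helper is
not in the patch), counting members still being recovered without
taking the reconfig mutex:

	/* Hypothetical helper: count members not yet in sync.  The RCU
	 * read lock keeps the list nodes alive across the walk.
	 */
	static int count_recovering(struct mddev *mddev)
	{
		struct md_rdev *rdev;
		int recovering = 0;

		rcu_read_lock();
		rdev_for_each_rcu(rdev, mddev)
			if (rdev->raid_disk >= 0 &&
			    !test_bit(Faulty, &rdev->flags) &&
			    !test_bit(In_sync, &rdev->flags))
				recovering++;
		rcu_read_unlock();
		return recovering;
	}
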
 
        mddev->curr_resync_completed = 0;
 
-       list_for_each_entry(rdev, &mddev->disks, same_set)
+       rdev_for_each(rdev, mddev)
                if (rdev->raid_disk >= 0 &&
                    !test_bit(Blocked, &rdev->flags) &&
                    (test_bit(Faulty, &rdev->flags) ||
                             "degraded");
 
 
-       list_for_each_entry(rdev, &mddev->disks, same_set) {
+       rdev_for_each(rdev, mddev) {
                if (rdev->raid_disk >= 0 &&
                    !test_bit(In_sync, &rdev->flags) &&
                    !test_bit(Faulty, &rdev->flags))
         * do the superblock for an incrementally recovered device
         * written out.
         */
-       list_for_each_entry(rdev, &mddev->disks, same_set)
+       rdev_for_each(rdev, mddev)
                if (!mddev->degraded ||
                    test_bit(In_sync, &rdev->flags))
                        rdev->saved_raid_disk = -1;
                         * failed devices.
                         */
                        struct md_rdev *rdev;
-                       list_for_each_entry(rdev, &mddev->disks, same_set)
+                       rdev_for_each(rdev, mddev)
                                if (rdev->raid_disk >= 0 &&
                                    !test_bit(Blocked, &rdev->flags) &&
                                    test_bit(Faulty, &rdev->flags) &&