 static LIST_HEAD(all_mddevs);
 static DEFINE_SPINLOCK(all_mddevs_lock);
 
-/*
- * iterates through all used mddevs in the system.
- * We take care to grab the all_mddevs_lock whenever navigating
- * the list, and to always hold a refcount when unlocked.
- * Any code which breaks out of this loop while own
- * a reference to the current mddev and must mddev_put it.
- */
-#define for_each_mddev(_mddev,_tmp)                                    \
-                                                                       \
-       for (({ spin_lock(&all_mddevs_lock);                            \
-               _tmp = all_mddevs.next;                                 \
-               _mddev = NULL;});                                       \
-            ({ if (_tmp != &all_mddevs)                                \
-                       mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
-               spin_unlock(&all_mddevs_lock);                          \
-               if (_mddev) mddev_put(_mddev);                          \
-               _mddev = list_entry(_tmp, struct mddev, all_mddevs);    \
-               _tmp != &all_mddevs;});                                 \
-            ({ spin_lock(&all_mddevs_lock);                            \
-               _tmp = _tmp->next;})                                    \
-               )
-
 /* Rather than calling directly into the personality make_request function,
  * IO requests come here first so that we can check if the device is
  * being suspended pending a reconfiguration.
 
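For context, the contract in the removed comment meant that any caller leaving the loop early still owned a reference to the current mddev. A minimal sketch of such a caller of the old macro is below; find_interesting_mddev() and the mddev_is_interesting() predicate are made up purely for illustration.

/* Sketch of a caller of the old macro; mddev_is_interesting() is hypothetical. */
static struct mddev *find_interesting_mddev(void)
{
        struct mddev *mddev;
        struct list_head *tmp;

        for_each_mddev(mddev, tmp) {
                if (mddev_is_interesting(mddev)) {
                        /*
                         * Breaking out early: the macro already took a
                         * reference via mddev_get(), so ownership passes
                         * to the caller, who must call mddev_put() later.
                         */
                        return mddev;
                }
        }
        /* Normal termination: the macro has already dropped every reference. */
        return NULL;
}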
 static __exit void md_exit(void)
 {
-       struct mddev *mddev;
-       struct list_head *tmp;
+       struct mddev *mddev, *n;
        int delay = 1;
 
        unregister_blkdev(MD_MAJOR,"md");
        }
        remove_proc_entry("mdstat", NULL);
 
-       for_each_mddev(mddev, tmp) {
+       spin_lock(&all_mddevs_lock);
+       list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
+               mddev_get(mddev);
+               spin_unlock(&all_mddevs_lock);
                export_array(mddev);
                mddev->ctime = 0;
                mddev->hold_active = 0;
                /*
-                * for_each_mddev() will call mddev_put() at the end of each
-                * iteration.  As the mddev is now fully clear, this will
-                * schedule the mddev for destruction by a workqueue, and the
+                * As the mddev is now fully clear, mddev_put will schedule
+                * the mddev for destruction by a workqueue, and the
                 * destroy_workqueue() below will wait for that to complete.
                 */
+               mddev_put(mddev);
+               spin_lock(&all_mddevs_lock);
        }
+       spin_unlock(&all_mddevs_lock);
+
        destroy_workqueue(md_rdev_misc_wq);
        destroy_workqueue(md_misc_wq);
        destroy_workqueue(md_wq);
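The new comment leans on md's deferred-free scheme: dropping the last reference to a fully cleared mddev hands destruction off to a workqueue, which the destroy_workqueue() calls above then drain. Below is a simplified sketch of that mechanism, assuming the usual struct mddev fields (active, del_work, hold_active) and the md_misc_wq workqueue; it is an approximation for illustration, not the exact mddev_put() from md.c.

/* Approximate sketch only; see mddev_put()/mddev_delayed_delete() in md.c. */
static void mddev_delayed_delete_sketch(struct work_struct *ws)
{
        struct mddev *mddev = container_of(ws, struct mddev, del_work);

        kobject_put(&mddev->kobj);      /* final teardown of the device */
}

static void mddev_put_sketch(struct mddev *mddev)
{
        /* Drop one reference; take all_mddevs_lock only if it was the last. */
        if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
                return;

        if (!mddev->raid_disks && list_empty(&mddev->disks) &&
            mddev->ctime == 0 && !mddev->hold_active) {
                /* Fully cleared and not held active: defer destruction. */
                list_del_init(&mddev->all_mddevs);
                INIT_WORK(&mddev->del_work, mddev_delayed_delete_sketch);
                queue_work(md_misc_wq, &mddev->del_work);
        }
        spin_unlock(&all_mddevs_lock);
}

Because the deferred work is queued before md_exit() reaches destroy_workqueue(md_misc_wq), the drain performed there ensures the delayed deletions have completed before the module goes away.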