static void mddev_delayed_delete(struct work_struct *ws);
 
+static void __mddev_put(struct mddev *mddev)
+{
+       if (mddev->raid_disks || !list_empty(&mddev->disks) ||
+           mddev->ctime || mddev->hold_active)
+               return;
+
+       /* Array is not configured at all, and not held active, so destroy it */
+       set_bit(MD_DELETED, &mddev->flags);
+
+       /*
+        * Called under all_mddevs_lock. Call queue_work inside the spinlock
+        * so that flush_workqueue() after mddev_find will succeed in waiting
+        * for the work to be done.
+        */
+       queue_work(md_misc_wq, &mddev->del_work);
+}
+
 void mddev_put(struct mddev *mddev)
 {
        if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
                return;
-       if (!mddev->raid_disks && list_empty(&mddev->disks) &&
-           mddev->ctime == 0 && !mddev->hold_active) {
-               /* Array is not configured at all, and not held active,
-                * so destroy it */
-               set_bit(MD_DELETED, &mddev->flags);
 
-               /*
-                * Call queue_work inside the spinlock so that
-                * flush_workqueue() after mddev_find will succeed in waiting
-                * for the work to be done.
-                */
-               queue_work(md_misc_wq, &mddev->del_work);
-       }
+       __mddev_put(mddev);     /* queues del_work if mddev is unconfigured and unheld */
        spin_unlock(&all_mddevs_lock);
 }