}
 EXPORT_SYMBOL(dev_open);
 
-static int __dev_close(struct net_device *dev)
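+/*
+ * Close every device on @head (linked via dev->unreg_list).  The caller
+ * must hold RTNL.  All devices are first marked !netif_running(), their
+ * qdiscs are then deactivated together, and finally ndo_stop() is called
+ * on each of them.
+ */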
+static int __dev_close_many(struct list_head *head)
 {
-       const struct net_device_ops *ops = dev->netdev_ops;
+       struct net_device *dev;
 
        ASSERT_RTNL();
        might_sleep();
 
-       /*
-        *      Tell people we are going down, so that they can
-        *      prepare to death, when device is still operating.
-        */
-       call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
+       list_for_each_entry(dev, head, unreg_list) {
+               /*
+                *      Tell people we are going down, so that they can
+                *      prepare for death while the device is still operating.
+                */
+               call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
 
-       clear_bit(__LINK_STATE_START, &dev->state);
+               clear_bit(__LINK_STATE_START, &dev->state);
 
-       /* Synchronize to scheduled poll. We cannot touch poll list,
-        * it can be even on different cpu. So just clear netif_running().
-        *
-        * dev->stop() will invoke napi_disable() on all of it's
-        * napi_struct instances on this device.
-        */
-       smp_mb__after_clear_bit(); /* Commit netif_running(). */
+               /* Synchronize to scheduled poll. We cannot touch the poll list;
+                * it may even be on a different cpu. So just clear netif_running().
+                *
+                * dev->stop() will invoke napi_disable() on all of its
+                * napi_struct instances on this device.
+                */
+               smp_mb__after_clear_bit(); /* Commit netif_running(). */
+       }
 
-       dev_deactivate(dev);
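+       /* Deactivate every device's qdiscs in one call so that the
+        * synchronize_rcu() in dev_deactivate_many() is paid once for the
+        * whole list rather than once per device.
+        */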
+       dev_deactivate_many(head);
 
-       /*
-        *      Call the device specific close. This cannot fail.
-        *      Only if device is UP
-        *
-        *      We allow it to be called even after a DETACH hot-plug
-        *      event.
-        */
-       if (ops->ndo_stop)
-               ops->ndo_stop(dev);
+       list_for_each_entry(dev, head, unreg_list) {
+               const struct net_device_ops *ops = dev->netdev_ops;
 
-       /*
-        *      Device is now down.
-        */
+               /*
+                *      Call the device specific close. This cannot fail.
+                *      It is only called if the device is UP.
+                *
+                *      We allow it to be called even after a DETACH hot-plug
+                *      event.
+                */
+               if (ops->ndo_stop)
+                       ops->ndo_stop(dev);
+
+               /*
+                *      Device is now down.
+                */
+
+               dev->flags &= ~IFF_UP;
+
+               /*
+                *      Shutdown NET_DMA
+                */
+               net_dmaengine_put();
+       }
 
-       dev->flags &= ~IFF_UP;
+       return 0;
+}
+
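+/* Close a single device by putting it on a temporary one-entry list. */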
+static int __dev_close(struct net_device *dev)
+{
+       LIST_HEAD(single);
+
+       list_add(&dev->unreg_list, &single);
+       return __dev_close_many(&single);
+}
+
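+/*
+ * Close all devices on @head that are currently UP.  Devices that are
+ * already down are set aside and spliced back before returning, so
+ * callers such as rollback_registered_many() still see the complete
+ * original list.
+ */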
+int dev_close_many(struct list_head *head)
+{
+       struct net_device *dev, *tmp;
+       LIST_HEAD(tmp_list);
+
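+       /* Set aside the devices that are already down; we only close and
+        * notify for the devices that were actually UP.
+        */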
+       list_for_each_entry_safe(dev, tmp, head, unreg_list)
+               if (!(dev->flags & IFF_UP))
+                       list_move(&dev->unreg_list, &tmp_list);
+
+       __dev_close_many(head);
 
        /*
-        *      Shutdown NET_DMA
+        * Tell people we are down
         */
-       net_dmaengine_put();
+       list_for_each_entry(dev, head, unreg_list) {
+               rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
+               call_netdevice_notifiers(NETDEV_DOWN, dev);
+       }
 
+       /* rollback_registered_many needs the complete original list */
+       list_splice(&tmp_list, head);
        return 0;
 }
 
  */
 int dev_close(struct net_device *dev)
 {
-       if (!(dev->flags & IFF_UP))
-               return 0;
-
-       __dev_close(dev);
+       LIST_HEAD(single);
 
-       /*
-        * Tell people we are down
-        */
-       rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
-       call_netdevice_notifiers(NETDEV_DOWN, dev);
+       list_add(&dev->unreg_list, &single);
+       dev_close_many(&single);
 
        return 0;
 }
                }
 
                BUG_ON(dev->reg_state != NETREG_REGISTERED);
+       }
 
-               /* If device is running, close it first. */
-               dev_close(dev);
+       /* If device is running, close it first. */
+       dev_close_many(head);
 
+       list_for_each_entry(dev, head, unreg_list) {
                /* And unlink it from device chain. */
                unlist_netdevice(dev);
 
 
        return false;
 }
 
-void dev_deactivate(struct net_device *dev)
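+/*
+ * Deactivate the qdiscs of every device on @head.  A single
+ * synchronize_rcu() then covers the outstanding qdisc-less dev_queue_xmit
+ * calls for all of them, after which we wait for each device's qdiscs to
+ * go idle.
+ */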
+void dev_deactivate_many(struct list_head *head)
 {
-       netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
-       if (dev_ingress_queue(dev))
-               dev_deactivate_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
+       struct net_device *dev;
 
-       dev_watchdog_down(dev);
+       list_for_each_entry(dev, head, unreg_list) {
+               netdev_for_each_tx_queue(dev, dev_deactivate_queue,
+                                        &noop_qdisc);
+               if (dev_ingress_queue(dev))
+                       dev_deactivate_queue(dev, dev_ingress_queue(dev),
+                                            &noop_qdisc);
+
+               dev_watchdog_down(dev);
+       }
 
        /* Wait for outstanding qdisc-less dev_queue_xmit calls. */
        synchronize_rcu();
 
        /* Wait for outstanding qdisc_run calls. */
-       while (some_qdisc_is_busy(dev))
-               yield();
+       list_for_each_entry(dev, head, unreg_list)
+               while (some_qdisc_is_busy(dev))
+                       yield();
+}
+
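+/* Deactivate a single device via a temporary one-entry list. */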
+void dev_deactivate(struct net_device *dev)
+{
+       LIST_HEAD(single);
+
+       list_add(&dev->unreg_list, &single);
+       dev_deactivate_many(&single);
 }
 
 static void dev_init_scheduler_queue(struct net_device *dev,