#endif
}
-static void rtnl_drop_if_cleanup_net(void)
-{
- if (from_cleanup_net())
- __rtnl_unlock();
-}
-
-static void rtnl_acquire_if_cleanup_net(void)
-{
- if (from_cleanup_net())
- rtnl_lock();
-}
-
/* Delayed registration/unregistration */
LIST_HEAD(net_todo_list);
-static LIST_HEAD(net_todo_list_for_cleanup_net);
-
-/* TODO: net_todo_list/net_todo_list_for_cleanup_net should probably
- * be provided by callers, instead of being static, rtnl protected.
- */
-static struct list_head *todo_list(void)
-{
- return from_cleanup_net() ? &net_todo_list_for_cleanup_net :
- &net_todo_list;
-}
-
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
atomic_t dev_unreg_count = ATOMIC_INIT(0);
static void net_set_todo(struct net_device *dev)
{
- list_add_tail(&dev->todo_list, todo_list());
+ list_add_tail(&dev->todo_list, &net_todo_list);
}
static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
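
For readers without the tree at hand, from_cleanup_net() is defined outside this diff (in net/core/net_namespace.c) and is unchanged by it. A minimal sketch of its presumed shape, assuming cleanup_net() records its task in a cleanup_net_task pointer before it starts tearing namespaces down:

/* Sketch only; this helper is not part of the diff above, and
 * cleanup_net_task is an assumption about how cleanup_net()
 * identifies itself to other contexts.
 */
bool from_cleanup_net(void)
{
#ifdef CONFIG_NET_NS
	return current == cleanup_net_task;
#else
	return false;
#endif
}
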
[...]
#endif
/* Snapshot list, allow later requests */
- list_replace_init(todo_list(), &list);
+ list_replace_init(&net_todo_list, &list);
__rtnl_unlock();
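
With todo_list() gone, netdev_run_todo() drains the one remaining list: it snapshots net_todo_list under RTNL, drops the lock, then walks the snapshot. A condensed sketch of that consumer side (the real function also waits for outstanding references before releasing each device):

/* Condensed sketch, not the verbatim kernel function. */
void netdev_run_todo(void)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(list);

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);
	__rtnl_unlock();

	list_for_each_entry_safe(dev, tmp, &list, todo_list) {
		list_del(&dev->todo_list);
		/* ... wait for refs to drop, then release the device ... */
	}
}
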
[...]
WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERING);
netdev_unlock(dev);
}
-
- rtnl_drop_if_cleanup_net();
flush_all_backlogs();
+
synchronize_net();
- rtnl_acquire_if_cleanup_net();
list_for_each_entry(dev, head, unreg_list) {
struct sk_buff *skb = NULL;
[...]
#endif
}
- rtnl_drop_if_cleanup_net();
synchronize_net();
- rtnl_acquire_if_cleanup_net();
list_for_each_entry(dev, head, unreg_list) {
netdev_put(dev, &dev->dev_registered_tracker);
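
The deleted TODO note described these lists as "static, rtnl protected"; with the cleanup_net() copy gone, that RTNL invariant is once again the only thing serializing producers. A hypothetical, assertion-annotated net_set_todo() for illustration; the ASSERT_RTNL() is added here for clarity only and is not part of the patch:

/* Hypothetical illustration: every producer is expected to hold RTNL,
 * which is why a bare list_add_tail() needs no further locking.
 */
static void net_set_todo(struct net_device *dev)
{
	ASSERT_RTNL();	/* not in the patch; documents the invariant */
	list_add_tail(&dev->todo_list, &net_todo_list);
}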