So we delay the allocation until a priority is actually set through
the cgroup, which makes skb_update_priority() faster when no priority
is set. This also eliminates an off-by-one bug similar to the one
fixed in the previous patch.
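
For illustration, a minimal sketch of the fast path this enables; the
rcu_dereference() accessor and the sk_cgrp_prioidx/priomap_len fields
are assumptions about the surrounding code, not the exact kernel
source:

	static void skb_update_priority(struct sk_buff *skb)
	{
		struct netprio_map *map = rcu_dereference(skb->dev->priomap);

		/* Fast path: no priority was ever set through the cgroup,
		 * so the table was never allocated and map is NULL.
		 */
		if (!map)
			return;

		/* Slow path: look up the sender's cgroup index, bounds-checked
		 * against the table length to avoid an off-by-one access.
		 */
		if (skb->sk && skb->sk->sk_cgrp_prioidx < map->priomap_len)
			skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
	}
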
Originally-authored-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: "David S. Miller" <davem@davemloft.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
 {
        struct net_device *dev = ptr;
        struct netprio_map *old;
-       u32 max_len = atomic_read(&max_prioidx);
 
        /*
         * Note this is called with rtnl_lock held so we have update side
         * protection on our rcu assignments
         */
 
        switch (event) {
-
-       case NETDEV_REGISTER:
-               if (max_len)
-                       extend_netdev_table(dev, max_len);
-               break;
        case NETDEV_UNREGISTER:
                old = rtnl_dereference(dev->priomap);
                RCU_INIT_POINTER(dev->priomap, NULL);