--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ ... @@ static bool remove_xps_queue_cpu(struct net_device *dev,
                                 struct xps_dev_maps *dev_maps,
                                 int cpu, u16 offset, u16 count)
 {
-       int num_tc = dev->num_tc ? : 1;
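+       /* Read the tc count from the map itself: dev->num_tc may have
+        * changed since this map was allocated.
+        */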
+       int num_tc = dev_maps->num_tc;
        bool active = false;
        int tci;
 
@@ ... @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
 {
        const unsigned long *online_mask = NULL, *possible_mask = NULL;
        struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
+       bool active = false, copy = false;
        int i, j, tci, numa_node_id = -2;
        int maps_sz, num_tc = 1, tc = 0;
        struct xps_map *map, *new_map;
-       bool active = false;
        unsigned int nr_ids;
 
        if (dev->num_tc) {
@@ ... @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
        if (maps_sz < L1_CACHE_BYTES)
                maps_sz = L1_CACHE_BYTES;
 
+       /* The old dev_maps could be larger or smaller than the one we're
+        * setting up now, as dev->num_tc could have been updated in between. We
+        * could try to be smart, but let's be safe instead and only copy
+        * foreign traffic classes if the two map sizes match.
+        */
+       if (dev_maps && dev_maps->num_tc == num_tc)
+               copy = true;
+
        /* allocate memory for queue storage */
        for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
             j < nr_ids;) {
-               if (!new_dev_maps)
-                       new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
                if (!new_dev_maps) {
-                       mutex_unlock(&xps_map_mutex);
-                       return -ENOMEM;
+                       new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
+                       if (!new_dev_maps) {
+                               mutex_unlock(&xps_map_mutex);
+                               return -ENOMEM;
+                       }
+
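+                       /* Remember how many tcs this map was sized for. */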
+                       new_dev_maps->num_tc = num_tc;
                }
 
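+               /* Entry for (attribute j, traffic class tc) in the flat map. */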
                tci = j * num_tc + tc;
-               map = dev_maps ? xmap_dereference(dev_maps->attr_map[tci]) :
-                                NULL;
+               map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL;
 
                map = expand_xps_map(map, j, index, is_rxqs_map);
                if (!map)
@@ ... @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
        for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
             j < nr_ids;) {
                /* copy maps belonging to foreign traffic classes */
-               for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) {
+               for (i = tc, tci = j * num_tc; copy && i--; tci++) {
                        /* fill in the new device map from the old device map */
                        map = xmap_dereference(dev_maps->attr_map[tci]);
                        RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
@@ ... @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
                                        numa_node_id = -1;
                        }
 #endif
-               } else if (dev_maps) {
+               } else if (copy) {
                        /* fill in the new device map from the old device map */
                        map = xmap_dereference(dev_maps->attr_map[tci]);
                        RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
                }
 
                /* copy maps belonging to foreign traffic classes */
-               for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
+               for (i = num_tc - tc, tci++; copy && --i; tci++) {
                        /* fill in the new device map from the old device map */
                        map = xmap_dereference(dev_maps->attr_map[tci]);
                        RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
 
@@ ... @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
        for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
             j < nr_ids;) {
-               for (i = num_tc, tci = j * num_tc; i--; tci++) {
-                       new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
+               for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) {
                        map = xmap_dereference(dev_maps->attr_map[tci]);
-                       if (map && map != new_map)
-                               kfree_rcu(map, rcu);
+                       if (!map)
+                               continue;
+
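+                       /* Entries may be shared with the new map only when
+                        * the sizes matched and they were copied over.
+                        */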
+                       if (copy) {
+                               new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
+                               if (map == new_map)
+                                       continue;
+                       }
+
+                       kfree_rcu(map, rcu);
                }
        }
 
@@ ... @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
        /* removes tx-queue from unused CPUs/rx-queues */
        for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
             j < nr_ids;) {
-               for (i = tc, tci = j * num_tc; i--; tci++)
+               for (i = tc, tci = j * dev_maps->num_tc; i--; tci++)
                        active |= remove_xps_queue(dev_maps, tci, index);
                if (!netif_attr_test_mask(j, mask, nr_ids) ||
                    !netif_attr_test_online(j, online_mask, nr_ids))
                        active |= remove_xps_queue(dev_maps, tci, index);
-               for (i = num_tc - tc, tci++; --i; tci++)
+               for (i = dev_maps->num_tc - tc, tci++; --i; tci++)
                        active |= remove_xps_queue(dev_maps, tci, index);
        }
 
@@ ... @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
        for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
             j < nr_ids;) {
                for (i = num_tc, tci = j * num_tc; i--; tci++) {
                        new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
-                       map = dev_maps ?
+                       map = copy ?
                              xmap_dereference(dev_maps->attr_map[tci]) :
                              NULL;
                        if (new_map && new_map != map)
@@ ... @@
 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
                               struct xps_dev_maps *dev_maps, unsigned int tci)
 {
+       int tc = netdev_get_prio_tc_map(dev, skb->priority);
        struct xps_map *map;
        int queue_index = -1;
 
-       if (dev->num_tc) {
-               tci *= dev->num_tc;
-               tci += netdev_get_prio_tc_map(dev, skb->priority);
-       }
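+       /* A map built for fewer tcs than are now configured cannot
+        * resolve this skb; let the caller fall back.
+        */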
+       if (tc >= dev_maps->num_tc)
+               return queue_index;
+
+       tci *= dev_maps->num_tc;
+       tci += tc;
 
        map = rcu_dereference(dev_maps->attr_map[tci]);
        if (map) {
 
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ ... @@
 static ssize_t xps_cpus_show(struct netdev_queue *queue,
                             char *buf)
 {
-       int j, len, ret, num_tc = 1, tc = 0;
        struct net_device *dev = queue->dev;
        struct xps_dev_maps *dev_maps;
+       int j, len, ret, tc = 0;
        unsigned long *mask;
        unsigned int index;
 
@@ ... @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
        if (!rtnl_trylock())
                return restart_syscall();
 
-       if (dev->num_tc) {
-               /* Do not allow XPS on subordinate device directly */
-               num_tc = dev->num_tc;
-               if (num_tc < 0) {
-                       ret = -EINVAL;
-                       goto err_rtnl_unlock;
-               }
-
-               /* If queue belongs to subordinate dev use its map */
-               dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
+       /* If queue belongs to subordinate dev use its map */
+       dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
 
-               tc = netdev_txq_to_tc(dev, index);
-               if (tc < 0) {
-                       ret = -EINVAL;
-                       goto err_rtnl_unlock;
-               }
+       tc = netdev_txq_to_tc(dev, index);
+       if (tc < 0) {
+               ret = -EINVAL;
+               goto err_rtnl_unlock;
        }
 
        mask = bitmap_zalloc(nr_cpu_ids, GFP_KERNEL);
 
@@ ... @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
        rcu_read_lock();
        dev_maps = rcu_dereference(dev->xps_cpus_map);
-       if (!dev_maps)
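+       /* Ignore maps that don't cover this queue's traffic class. */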
+       if (!dev_maps || tc >= dev_maps->num_tc)
                goto out_no_maps;
 
        for (j = -1; j = netif_attrmask_next(j, NULL, nr_cpu_ids),
             j < nr_cpu_ids;) {
-               int i, tci = j * num_tc + tc;
+               int i, tci = j * dev_maps->num_tc + tc;
                struct xps_map *map;
 
                map = rcu_dereference(dev_maps->attr_map[tci]);
 
@@ ... @@
 static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
 {
-       int j, len, ret, num_tc = 1, tc = 0;
        struct net_device *dev = queue->dev;
        struct xps_dev_maps *dev_maps;
+       int j, len, ret, tc = 0;
        unsigned long *mask;
        unsigned int index;
 
@@ ... @@ static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
        if (!rtnl_trylock())
                return restart_syscall();
 
-       if (dev->num_tc) {
-               num_tc = dev->num_tc;
-               tc = netdev_txq_to_tc(dev, index);
-               if (tc < 0) {
-                       ret = -EINVAL;
-                       goto err_rtnl_unlock;
-               }
+       tc = netdev_txq_to_tc(dev, index);
+       if (tc < 0) {
+               ret = -EINVAL;
+               goto err_rtnl_unlock;
        }
+
        mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
        if (!mask) {
                ret = -ENOMEM;
 
@@ ... @@ static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
        rcu_read_lock();
        dev_maps = rcu_dereference(dev->xps_rxqs_map);
-       if (!dev_maps)
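+       /* Same guard as in xps_cpus_show: the map must cover this tc. */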
+       if (!dev_maps || tc >= dev_maps->num_tc)
                goto out_no_maps;
 
        for (j = -1; j = netif_attrmask_next(j, NULL, dev->num_rx_queues),
             j < dev->num_rx_queues;) {
-               int i, tci = j * num_tc + tc;
+               int i, tci = j * dev_maps->num_tc + tc;
                struct xps_map *map;
 
                map = rcu_dereference(dev_maps->attr_map[tci]);