return active;
 }
 
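+/* Clear the device's map pointer, release the static key reference(s)
+ * taken when the map was first installed, and free the map once the
+ * RCU grace period has elapsed.
+ */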
+static void reset_xps_maps(struct net_device *dev,
+                          struct xps_dev_maps *dev_maps,
+                          bool is_rxqs_map)
+{
+       if (is_rxqs_map) {
+               static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
+               RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
+       } else {
+               RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
+       }
+       static_key_slow_dec_cpuslocked(&xps_needed);
+       kfree_rcu(dev_maps, rcu);
+}
+
 static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
                           struct xps_dev_maps *dev_maps, unsigned int nr_ids,
                           u16 offset, u16 count, bool is_rxqs_map)
             j < nr_ids;)
                active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
                                               count);
-       if (!active) {
-               if (is_rxqs_map)
-                       RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
-               else
-                       RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
-               kfree_rcu(dev_maps, rcu);
-       }
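+       /* no queue is mapped any more: drop the map and its static keys */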
+       if (!active)
+               reset_xps_maps(dev, dev_maps, is_rxqs_map);
 
        if (!is_rxqs_map) {
                for (i = offset + (count - 1); count--; i--) {
        clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count,
                        false);
 
 out_no_maps:
-       if (static_key_enabled(&xps_rxqs_needed))
-               static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
-
-       static_key_slow_dec_cpuslocked(&xps_needed);
        mutex_unlock(&xps_map_mutex);
        cpus_read_unlock();
 }
        if (!new_dev_maps)
                goto out_no_new_maps;
 
-       static_key_slow_inc_cpuslocked(&xps_needed);
-       if (is_rxqs_map)
-               static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
+       if (!dev_maps) {
+               /* Increment static keys at most once per type */
+               static_key_slow_inc_cpuslocked(&xps_needed);
+               if (is_rxqs_map)
+                       static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
+       }
 
        for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
             j < nr_ids;) {
        }
 
        /* free map if not active */
-       if (!active) {
-               if (is_rxqs_map)
-                       RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
-               else
-                       RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
-               kfree_rcu(dev_maps, rcu);
-       }
+       if (!active)
+               reset_xps_maps(dev, dev_maps, is_rxqs_map);
 
 out_no_maps:
        mutex_unlock(&xps_map_mutex);