hlist_add_head_rcu(&dev->index_hlist,
                           dev_index_hash(net, dev->ifindex));
        write_unlock(&dev_base_lock);
+       /* We reserved the ifindex, this can't fail */
+       WARN_ON(xa_store(&net->dev_by_index, dev->ifindex, dev, GFP_KERNEL));
 
        dev_base_seq_inc(net);
 }
  */
 static void unlist_netdevice(struct net_device *dev, bool lock)
 {
+       struct net *net = dev_net(dev);
+
        ASSERT_RTNL();
 
+       xa_erase(&net->dev_by_index, dev->ifindex);
+
        /* Unlink dev from the device chain */
        if (lock)
                write_lock(&dev_base_lock);
 }
 
 /**
- *     dev_new_index   -       allocate an ifindex
- *     @net: the applicable net namespace
+ * dev_index_reserve() - allocate an ifindex in a namespace
+ * @net: the applicable net namespace
+ * @ifindex: requested ifindex, pass %0 to get one allocated
+ *
+ * Allocate an ifindex for a new device. Caller must either use the ifindex
+ * to store the device (via list_netdevice()) or call dev_index_release()
+ * to give the index up.
  *
- *     Returns a suitable unique value for a new device interface
- *     number.  The caller must hold the rtnl semaphore or the
- *     dev_base_lock to be sure it remains unique.
+ * Return: a suitable unique value for a new device interface number or -errno.
  */
-static int dev_new_index(struct net *net)
+static int dev_index_reserve(struct net *net, u32 ifindex)
 {
-       int ifindex = net->ifindex;
+       int err;
 
-       for (;;) {
-               if (++ifindex <= 0)
-                       ifindex = 1;
-               if (!__dev_get_by_index(net, ifindex))
-                       return net->ifindex = ifindex;
-       }
+       if (!ifindex)
+               err = xa_alloc_cyclic(&net->dev_by_index, &ifindex, NULL,
+                                     xa_limit_31b, &net->ifindex, GFP_KERNEL);
+       else
+               err = xa_insert(&net->dev_by_index, ifindex, NULL, GFP_KERNEL);
+       if (err < 0)
+               return err;
+
+       return ifindex;
+}
+
+static void dev_index_release(struct net *net, int ifindex)
+{
+       /* Expect only unused indexes, unlist_netdevice() removes the used */
+       WARN_ON(xa_erase(&net->dev_by_index, ifindex));
 }
 
 /* Delayed registration/unregisteration */
                goto err_uninit;
        }
 
-       ret = -EBUSY;
-       if (!dev->ifindex)
-               dev->ifindex = dev_new_index(net);
-       else if (__dev_get_by_index(net, dev->ifindex))
+       ret = dev_index_reserve(net, dev->ifindex);
+       if (ret < 0)
                goto err_uninit;
+       dev->ifindex = ret;
 
        /* Transfer changeable features to wanted_features and enable
         * software offloads (GSO and GRO).
        ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
        ret = notifier_to_errno(ret);
        if (ret)
-               goto err_uninit;
+               goto err_ifindex_release;
 
        ret = netdev_register_kobject(dev);
        write_lock(&dev_base_lock);
 
 err_uninit_notify:
        call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
+err_ifindex_release:
+       dev_index_release(net, dev->ifindex);
 err_uninit:
        if (dev->netdev_ops->ndo_uninit)
                dev->netdev_ops->ndo_uninit(dev);
        }
 
        /* Check that new_ifindex isn't used yet. */
-       err = -EBUSY;
-       if (new_ifindex && __dev_get_by_index(net, new_ifindex))
-               goto out;
+       if (new_ifindex) {
+               err = dev_index_reserve(net, new_ifindex);
+               if (err < 0)
+                       goto out;
+       } else {
+               /* If there is an ifindex conflict, assign a new one */
+               err = dev_index_reserve(net, dev->ifindex);
+               if (err == -EBUSY)
+                       err = dev_index_reserve(net, 0);
+               if (err < 0)
+                       goto out;
+               new_ifindex = err;
+       }
 
        /*
         * And now a mini version of register_netdevice unregister_netdevice.
        rcu_barrier();
 
        new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
-       /* If there is an ifindex conflict assign a new one */
-       if (!new_ifindex) {
-               if (__dev_get_by_index(net, dev->ifindex))
-                       new_ifindex = dev_new_index(net);
-               else
-                       new_ifindex = dev->ifindex;
-       }
 
        rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
                            new_ifindex);
        if (net->dev_index_head == NULL)
                goto err_idx;
 
+       net->ifindex = 1;
+       xa_init_flags(&net->dev_by_index, XA_FLAGS_ALLOC);
+
        RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);
 
        return 0;
 {
        kfree(net->dev_name_head);
        kfree(net->dev_index_head);
+       xa_destroy(&net->dev_by_index);
        if (net != &init_net)
                WARN_ON_ONCE(!list_empty(&net->dev_base_head));
 }