{
        struct delayed_work *delay = to_delayed_work(work);
        struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
-       struct id_map_entry *db_ent, *found_ent;
+       struct id_map_entry *found_ent;
        struct mlx4_ib_dev *dev = ent->dev;
        struct mlx4_ib_sriov *sriov = &dev->sriov;
        struct rb_root *sl_id_map = &sriov->sl_id_map;
-       int pv_id = (int) ent->pv_cm_id;
 
        spin_lock(&sriov->id_map_lock);
-       db_ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_id);
-       if (!db_ent)
+       if (!xa_erase(&sriov->pv_id_table, ent->pv_cm_id))
                goto out;
        found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
        if (found_ent && found_ent == ent)
                rb_erase(&found_ent->node, sl_id_map);
-       idr_remove(&sriov->pv_id_table, pv_id);
 
 out:
        list_del(&ent->list);
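Both this hunk (in id_map_ent_timeout()) and the next (in id_map_find_del()) collapse the old idr_find()-then-idr_remove() pair into one xa_erase() call: xa_erase() removes whatever is stored at the index and returns the old pointer, or NULL if the slot was empty, so lookup, existence check, and removal happen in a single step. A minimal standalone sketch of the pattern, using a hypothetical table rather than this driver's code:

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(example_table);      /* hypothetical stand-in */

/* Look up and delete in one step; hands back the old entry (or NULL)
 * so the caller can test or free it, as the hunks here do. */
static void *example_find_del(unsigned long index)
{
        return xa_erase(&example_table, index);
}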
        struct id_map_entry *ent, *found_ent;
 
        spin_lock(&sriov->id_map_lock);
-       ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_cm_id);
+       ent = xa_erase(&sriov->pv_id_table, pv_cm_id);
        if (!ent)
                goto out;
        found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
        if (found_ent && found_ent == ent)
                rb_erase(&found_ent->node, sl_id_map);
-       idr_remove(&sriov->pv_id_table, pv_cm_id);
 out:
        spin_unlock(&sriov->id_map_lock);
 }
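A locking nuance: xa_erase() takes the XArray's internal xa_lock by itself, so sriov->id_map_lock is left guarding only the rb-tree and list manipulation around it. Where an erase must stay atomic with other updates, the locked __xa_* variants can be used under an explicitly held xa_lock; a hedged sketch of that idiom (hypothetical names, not something this patch needs):

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(example_table);      /* hypothetical */

/* Hold the XArray's own spinlock across the erase, e.g. to pair it
 * atomically with other bookkeeping done under the same lock. */
static void *example_erase_atomically(unsigned long index)
{
        void *entry;

        xa_lock(&example_table);
        entry = __xa_erase(&example_table, index);
        xa_unlock(&example_table);
        return entry;
}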
        ent->dev = to_mdev(ibdev);
        INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);
 
-       idr_preload(GFP_KERNEL);
-       spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
-
-       ret = idr_alloc_cyclic(&sriov->pv_id_table, ent, 0, 0, GFP_NOWAIT);
+       ret = xa_alloc_cyclic(&sriov->pv_id_table, &ent->pv_cm_id, ent,
+                       xa_limit_32b, &sriov->pv_id_next, GFP_KERNEL);
        if (ret >= 0) {
-               ent->pv_cm_id = (u32)ret;
+               spin_lock(&sriov->id_map_lock);
                sl_id_map_add(ibdev, ent);
                list_add_tail(&ent->list, &sriov->cm_list);
-       }
-
-       spin_unlock(&sriov->id_map_lock);
-       idr_preload_end();
-
-       if (ret >= 0)
+               spin_unlock(&sriov->id_map_lock);
                return ent;
+       }
 
        /*error flow*/
        kfree(ent);
-       mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret);
+       mlx4_ib_warn(ibdev, "Allocation failed (err:0x%x)\n", ret);
        return ERR_PTR(-ENOMEM);
 }
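xa_alloc_cyclic() returns the new ID through its second argument and advances a per-table cursor (pv_id_next), which is why the explicit ent->pv_cm_id = (u32)ret assignment disappears. It returns 0 on success, 1 if the ID space wrapped, and a negative errno on failure, hence the ret >= 0 test. Because the XArray does its own locking, a sleeping GFP_KERNEL allocation is fine here and the idr_preload()/GFP_NOWAIT dance under the spinlock goes away. A minimal sketch with hypothetical names:

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(example_table);      /* hypothetical */
static u32 example_next;        /* cyclic cursor, analogous to pv_id_next */

static int example_store(void *entry, u32 *out_id)
{
        /* 0 = success, 1 = success after the ID counter wrapped,
         * -EBUSY/-ENOMEM = failure; the new ID comes back via out_id. */
        int ret = xa_alloc_cyclic(&example_table, out_id, entry,
                                  xa_limit_32b, &example_next, GFP_KERNEL);

        return ret < 0 ? ret : 0;
}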
 
                if (ent)
                        *pv_cm_id = (int) ent->pv_cm_id;
        } else
-               ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, *pv_cm_id);
+               ent = xa_load(&sriov->pv_id_table, *pv_cm_id);
        spin_unlock(&sriov->id_map_lock);
 
        return ent;
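For a plain lookup the patch uses xa_load(), which returns the stored pointer or NULL. On its own it only needs the RCU read lock; here it simply runs under the existing id_map_lock that the neighbouring rb-tree lookup requires anyway. A sketch against a hypothetical table:

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(example_table);      /* hypothetical */

static void *example_lookup(unsigned long index)
{
        /* No caller-side locking needed: xa_load() provides its own
         * RCU protection and returns NULL for an empty slot. */
        return xa_load(&example_table, index);
}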
        spin_lock_init(&dev->sriov.id_map_lock);
        INIT_LIST_HEAD(&dev->sriov.cm_list);
        dev->sriov.sl_id_map = RB_ROOT;
-       idr_init(&dev->sriov.pv_id_table);
+       xa_init_flags(&dev->sriov.pv_id_table, XA_FLAGS_ALLOC);
 }
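Initialising with XA_FLAGS_ALLOC is what enables the ID-allocating API; the xa_alloc_cyclic() call above refuses to work without it. A file-scope table can use the static definition instead; an embedded one, like pv_id_table inside mlx4_ib_sriov, is set up at runtime as this hunk does. A sketch with hypothetical names:

#include <linux/xarray.h>

/* File-scope equivalent of xa_init_flags(..., XA_FLAGS_ALLOC): */
static DEFINE_XARRAY_ALLOC(example_static_table);

/* A table embedded in a device structure is initialised at runtime: */
struct example_dev {
        struct xarray id_table;
};

static void example_dev_init(struct example_dev *dev)
{
        /* XA_FLAGS_ALLOC must be set before any xa_alloc*() call */
        xa_init_flags(&dev->id_table, XA_FLAGS_ALLOC);
}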
 
 /* slave = -1 ==> all slaves */
                                         struct id_map_entry, node);
 
                        rb_erase(&ent->node, sl_id_map);
-                       idr_remove(&sriov->pv_id_table, (int) ent->pv_cm_id);
+                       xa_erase(&sriov->pv_id_table, ent->pv_cm_id);
                }
                list_splice_init(&dev->sriov.cm_list, &lh);
        } else {
                /* remove those nodes from databases */
                list_for_each_entry_safe(map, tmp_map, &lh, list) {
                        rb_erase(&map->node, sl_id_map);
-                       idr_remove(&sriov->pv_id_table, (int) map->pv_cm_id);
+                       xa_erase(&sriov->pv_id_table, map->pv_cm_id);
                }
 
                /* add remaining nodes from cm_list */
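These last two hunks are the bulk-removal sites in mlx4_ib_cm_paravirt_clean(), which decides what to drop by walking the driver's rb-tree and cm_list. As an aside, a table can also be drained directly with xa_for_each(); a hedged sketch of that alternative (hypothetical names, not what this function does):

#include <linux/slab.h>
#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(example_table);      /* hypothetical */

static void example_drain(void)
{
        unsigned long index;
        void *entry;

        /* Visit every populated slot; erasing during the walk is safe. */
        xa_for_each(&example_table, index, entry) {
                xa_erase(&example_table, index);
                kfree(entry);
        }
}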