 static LIST_HEAD(dev_list);
 static DEFINE_MUTEX(dev_mutex);
 
-static int disable_qp_db(int id, void *p, void *data)
-{
-       struct iwch_qp *qhp = p;
-
-       cxio_disable_wq_db(&qhp->wq);
-       return 0;
-}
-
-static int enable_qp_db(int id, void *p, void *data)
-{
-       struct iwch_qp *qhp = p;
-
-       if (data)
-               ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell, qhp->wq.qpid);
-       cxio_enable_wq_db(&qhp->wq);
-       return 0;
-}
-
 static void disable_dbs(struct iwch_dev *rnicp)
 {
-       spin_lock_irq(&rnicp->lock);
-       idr_for_each(&rnicp->qpidr, disable_qp_db, NULL);
-       spin_unlock_irq(&rnicp->lock);
+       unsigned long index;
+       struct iwch_qp *qhp;
+
+       xa_lock_irq(&rnicp->qps);
+       xa_for_each(&rnicp->qps, index, qhp)
+               cxio_disable_wq_db(&qhp->wq);
+       xa_unlock_irq(&rnicp->qps);
 }
 
 static void enable_dbs(struct iwch_dev *rnicp, int ring_db)
 {
-       spin_lock_irq(&rnicp->lock);
-       idr_for_each(&rnicp->qpidr, enable_qp_db,
-                    (void *)(unsigned long)ring_db);
-       spin_unlock_irq(&rnicp->lock);
+       unsigned long index;
+       struct iwch_qp *qhp;
+
+       xa_lock_irq(&rnicp->qps);
+       xa_for_each(&rnicp->qps, index, qhp) {
+               if (ring_db)
+                       ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell,
+                                       qhp->wq.qpid);
+               cxio_enable_wq_db(&qhp->wq);
+       }
+       xa_unlock_irq(&rnicp->qps);
 }
 
 static void iwch_db_drop_task(struct work_struct *work)
 {
        pr_debug("%s iwch_dev %p\n", __func__,  rnicp);
        xa_init_flags(&rnicp->cqs, XA_FLAGS_LOCK_IRQ);
-       idr_init(&rnicp->qpidr);
+       xa_init_flags(&rnicp->qps, XA_FLAGS_LOCK_IRQ);
        idr_init(&rnicp->mmidr);
        spin_lock_init(&rnicp->lock);
        INIT_DELAYED_WORK(&rnicp->db_drop_task, iwch_db_drop_task);
                        iwch_unregister_device(dev);
                        cxio_rdev_close(&dev->rdev);
                        WARN_ON(!xa_empty(&dev->cqs));
-                       idr_destroy(&dev->qpidr);
+                       WARN_ON(!xa_empty(&dev->qps));
                        idr_destroy(&dev->mmidr);
                        ib_dealloc_device(&dev->ibdev);
                        break;
 
        u32 device_cap_flags;
        struct iwch_rnic_attributes attr;
        struct xarray cqs;
-       struct idr qpidr;
+       struct xarray qps;
        struct idr mmidr;
        spinlock_t lock;
        struct list_head entry;
 
 static inline struct iwch_qp *get_qhp(struct iwch_dev *rhp, u32 qpid)
 {
-       return idr_find(&rhp->qpidr, qpid);
+       return xa_load(&rhp->qps, qpid);
 }
 
 static inline struct iwch_mr *get_mhp(struct iwch_dev *rhp, u32 mmid)
 
        struct iwch_qp *qhp;
        unsigned long flag;
 
-       spin_lock(&rnicp->lock);
-       qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
+       xa_lock(&rnicp->qps);
+       qhp = xa_load(&rnicp->qps, CQE_QPID(rsp_msg->cqe));
 
        if (!qhp) {
                pr_err("%s unaffiliated error 0x%x qpid 0x%x\n",
                       __func__, CQE_STATUS(rsp_msg->cqe),
                       CQE_QPID(rsp_msg->cqe));
-               spin_unlock(&rnicp->lock);
+               xa_unlock(&rnicp->qps);
                return;
        }
 
                         __func__,
                         qhp->attr.state, qhp->wq.qpid,
                         CQE_STATUS(rsp_msg->cqe));
-               spin_unlock(&rnicp->lock);
+               xa_unlock(&rnicp->qps);
                return;
        }
 
               CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
 
        atomic_inc(&qhp->refcnt);
-       spin_unlock(&rnicp->lock);
+       xa_unlock(&rnicp->qps);
 
        if (qhp->attr.state == IWCH_QP_STATE_RTS) {
                attrs.next_state = IWCH_QP_STATE_TERMINATE;
        unsigned long flag;
 
        rnicp = (struct iwch_dev *) rdev_p->ulp;
-       spin_lock(&rnicp->lock);
+       xa_lock(&rnicp->qps);
        chp = get_chp(rnicp, cqid);
-       qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
+       qhp = xa_load(&rnicp->qps, CQE_QPID(rsp_msg->cqe));
        if (!chp || !qhp) {
                pr_err("BAD AE cqid 0x%x qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
                       cqid, CQE_QPID(rsp_msg->cqe),
                       CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
                       CQE_TYPE(rsp_msg->cqe), CQE_WRID_HI(rsp_msg->cqe),
                       CQE_WRID_LOW(rsp_msg->cqe));
-               spin_unlock(&rnicp->lock);
+               xa_unlock(&rnicp->qps);
                goto out;
        }
        iwch_qp_add_ref(&qhp->ibqp);
        atomic_inc(&chp->refcnt);
-       spin_unlock(&rnicp->lock);
+       xa_unlock(&rnicp->qps);
 
        /*
         * 1) completion of our sending a TERMINATE.
 
        iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
        wait_event(qhp->wait, !qhp->ep);
 
-       remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);
+       xa_erase_irq(&rhp->qps, qhp->wq.qpid);
 
        atomic_dec(&qhp->refcnt);
        wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
        init_waitqueue_head(&qhp->wait);
        atomic_set(&qhp->refcnt, 1);
 
-       if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
+       if (xa_store_irq(&rhp->qps, qhp->wq.qpid, qhp, GFP_KERNEL)) {
                cxio_destroy_qp(&rhp->rdev, &qhp->wq,
                        ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
                kfree(qhp);