        struct mlx5_srq_table *table = &dev->srq_table;
        struct mlx5_core_srq *srq;
 
-       spin_lock(&table->lock);
-
-       srq = radix_tree_lookup(&table->tree, srqn);
+       xa_lock(&table->array);
+       srq = xa_load(&table->array, srqn);
        if (srq)
                atomic_inc(&srq->common.refcount);
-
-       spin_unlock(&table->lock);
+       xa_unlock(&table->array);
 
        return srq;
 }
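
The get path holds the xa_lock across both the lookup and the refcount increment; dropping the lock between the two would let a concurrent xa_erase() and free race with the caller. A minimal sketch of the pattern outside the driver, with illustrative names (struct obj, obj_array and obj_get() are not mlx5 identifiers):

#include <linux/xarray.h>
#include <linux/atomic.h>
#include <linux/completion.h>

struct obj {
	atomic_t refcount;
	struct completion free;	/* signalled on last put, see below */
};

static DEFINE_XARRAY_FLAGS(obj_array, XA_FLAGS_LOCK_IRQ);

static struct obj *obj_get(unsigned long index)
{
	struct obj *obj;

	/* The lock covers lookup + refcount bump, so a concurrent
	 * erase cannot free the object between the two steps. */
	xa_lock(&obj_array);
	obj = xa_load(&obj_array, index);
	if (obj)
		atomic_inc(&obj->refcount);
	xa_unlock(&obj_array);

	return obj;
}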
        atomic_set(&srq->common.refcount, 1);
        init_completion(&srq->common.free);
 
-       spin_lock_irq(&table->lock);
-       err = radix_tree_insert(&table->tree, srq->srqn, srq);
-       spin_unlock_irq(&table->lock);
+       err = xa_err(xa_store_irq(&table->array, srq->srqn, srq, GFP_KERNEL));
        if (err)
                goto err_destroy_srq_split;
 
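xa_store_irq() takes the array's lock with interrupts disabled and returns the previous entry, or an XA_ERROR() entry if allocation failed; xa_err() folds that into a plain errno, so the lock/insert/unlock triple collapses into one call. One semantic difference worth noting: radix_tree_insert() failed on an occupied slot, while xa_store() silently replaces; SRQ numbers come from the device and are presumably unique, and xa_insert_irq() is available where duplicate detection matters. A sketch, continuing the illustrative obj_array above:

static int obj_add(unsigned long index, struct obj *obj)
{
	/* xa_err() extracts -ENOMEM etc. from an XA_ERROR() return;
	 * a successful store yields 0. */
	return xa_err(xa_store_irq(&obj_array, index, obj, GFP_KERNEL));
}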
        struct mlx5_core_srq *tmp;
        int err;
 
-       spin_lock_irq(&table->lock);
-       tmp = radix_tree_delete(&table->tree, srq->srqn);
-       spin_unlock_irq(&table->lock);
+       tmp = xa_erase_irq(&table->array, srq->srqn);
        if (!tmp || tmp != srq)
                return -EINVAL;
 
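xa_erase_irq() likewise folds lock, delete, and unlock into one call and hands back whatever pointer was stored at the index, which the caller checks against the srq it expected to remove. Sketch, same illustrative names:

static int obj_remove(unsigned long index, struct obj *obj)
{
	struct obj *tmp;

	/* Returns the old entry (NULL if the index was vacant). */
	tmp = xa_erase_irq(&obj_array, index);
	if (tmp != obj)
		return -EINVAL;
	return 0;
}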
        eqe = data;
        srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
 
-       spin_lock(&table->lock);
-
-       srq = radix_tree_lookup(&table->tree, srqn);
+       xa_lock(&table->array);
+       srq = xa_load(&table->array, srqn);
        if (srq)
                atomic_inc(&srq->common.refcount);
-
-       spin_unlock(&table->lock);
+       xa_unlock(&table->array);
 
        if (!srq)
                return NOTIFY_OK;
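
The event-notifier path repeats the lookup-and-get pattern; plain xa_lock() rather than the _irq variant presumably suffices here because the handler is already invoked with interrupts off, while the process-context store and erase paths above use the _irq variants against exactly this handler. The matching put side is not part of these hunks; the usual refcount-plus-completion idiom, again with illustrative names, would look like:

static void obj_put(struct obj *obj)
{
	/* The last reference wakes a destroy path blocked in
	 * wait_for_completion(&obj->free). */
	if (atomic_dec_and_test(&obj->refcount))
		complete(&obj->free);
}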
        struct mlx5_srq_table *table = &dev->srq_table;
 
        memset(table, 0, sizeof(*table));
-       spin_lock_init(&table->lock);
-       INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
+       xa_init_flags(&table->array, XA_FLAGS_LOCK_IRQ);
 
        table->nb.notifier_call = srq_event_notifier;
        mlx5_notifier_register(dev->mdev, &table->nb);
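
xa_init_flags() with XA_FLAGS_LOCK_IRQ replaces both spin_lock_init() and INIT_RADIX_TREE(): the flag tells the XArray that its lock is taken from interrupt context, so internal operations use the IRQ-safe lock variants, and the allocation gfp moves from a fixed GFP_ATOMIC at init time to a per-call argument (GFP_KERNEL in the store above). In the sketch, initialisation of an embedded array reduces to:

static void obj_table_init(struct xarray *array)
{
	/* Per-entry gfp is supplied by each xa_store_irq() call
	 * instead of being fixed here as with INIT_RADIX_TREE(). */
	xa_init_flags(array, XA_FLAGS_LOCK_IRQ);
}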