 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
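+/* per-QP MR pool helpers: ib_mr_pool_init/get/put/destroy */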
+#include <rdma/mr_pool.h>
 #include <linux/err.h>
 #include <linux/string.h>
 #include <linux/atomic.h>
        return ret;
 }
 
-static int nvme_rdma_reinit_request(void *data, struct request *rq)
-{
-       struct nvme_rdma_ctrl *ctrl = data;
-       struct nvme_rdma_device *dev = ctrl->device;
-       struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
-       int ret = 0;
-
-       if (WARN_ON_ONCE(!req->mr))
-               return 0;
-
-       ib_dereg_mr(req->mr);
-
-       req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
-                       ctrl->max_fr_pages);
-       if (IS_ERR(req->mr)) {
-               ret = PTR_ERR(req->mr);
-               req->mr = NULL;
-               goto out;
-       }
-
-       req->mr->need_inval = false;
-
-out:
-       return ret;
-}
-
 static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
                struct request *rq, unsigned int hctx_idx)
 {
        struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
        struct nvme_rdma_device *dev = queue->device;
 
-       if (req->mr)
-               ib_dereg_mr(req->mr);
-
        nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
                        DMA_TO_DEVICE);
 }
        if (ret)
                return ret;
 
-       req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
-                       ctrl->max_fr_pages);
-       if (IS_ERR(req->mr)) {
-               ret = PTR_ERR(req->mr);
-               goto out_free_qe;
-       }
-
        req->queue = queue;
 
        return 0;
-
-out_free_qe:
-       nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
-                       DMA_TO_DEVICE);
-       return -ENOMEM;
 }
 
 static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
        struct nvme_rdma_device *dev = queue->device;
        struct ib_device *ibdev = dev->dev;
 
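+       /* Deregister all MRs left in the per-QP pool before destroying the QP */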
+       ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
+
        rdma_destroy_qp(queue->cm_id);
        ib_free_cq(queue->ib_cq);
 
        nvme_rdma_dev_put(dev);
 }
 
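+/*
+ * Fast-register page list length: capped by both the device's
+ * max_fast_reg_page_list_len and the driver's NVME_RDMA_MAX_SEGMENTS.
+ */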
+static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev)
+{
+       return min_t(u32, NVME_RDMA_MAX_SEGMENTS,
+                    ibdev->attrs.max_fast_reg_page_list_len);
+}
+
 static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
 {
        struct ib_device *ibdev;
                goto out_destroy_qp;
        }
 
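+       /* One MR per queue entry, so the I/O path never has to allocate MRs */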
+       ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
+                             queue->queue_size,
+                             IB_MR_TYPE_MEM_REG,
+                             nvme_rdma_get_max_fr_pages(ibdev));
+       if (ret) {
+               dev_err(queue->ctrl->ctrl.device,
+                       "failed to initialize MR pool sized %d for QID %d\n",
+                       queue->queue_size, idx);
+               goto out_destroy_ring;
+       }
+
        return 0;
 
+out_destroy_ring:
+       nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
+                           sizeof(struct nvme_completion), DMA_FROM_DEVICE);
 out_destroy_qp:
        rdma_destroy_qp(queue->cm_id);
 out_destroy_ib_cq:
 
        ctrl->device = ctrl->queues[0].device;
 
-       ctrl->max_fr_pages = min_t(u32, NVME_RDMA_MAX_SEGMENTS,
-               ctrl->device->dev->attrs.max_fast_reg_page_list_len);
+       ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
 
        if (new) {
                ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
                        error = PTR_ERR(ctrl->ctrl.admin_q);
                        goto out_free_tagset;
                }
-       } else {
-               error = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
-               if (error)
-                       goto out_free_queue;
        }
 
        error = nvme_rdma_start_queue(ctrl, 0);
                        goto out_free_tag_set;
                }
        } else {
-               ret = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
-               if (ret)
-                       goto out_free_io_queues;
-
                blk_mq_update_nr_hw_queues(&ctrl->tag_set,
                        ctrl->ctrl.queue_count - 1);
        }
        if (!blk_rq_bytes(rq))
                return;
 
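+       /* Return the MR to the QP pool; req->mr is only set when one was used */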
+       if (req->mr) {
+               ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
+               req->mr = NULL;
+       }
+
        ib_dma_unmap_sg(ibdev, req->sg_table.sgl,
                        req->nents, rq_data_dir(rq) ==
                                    WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
        struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
        int nr;
 
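+       /* The pool is sized to the queue depth, so running dry indicates a bug */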
+       req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs);
+       if (WARN_ON_ONCE(!req->mr))
+               return -EAGAIN;
+
        /*
         * Align the MR to a 4K page size to match the ctrl page size and
         * the block virtual boundary.
         */
        nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
        if (unlikely(nr < count)) {
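+               /* Mapping failed or was partial; put the MR back before returning an error */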
+               ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
+               req->mr = NULL;
                if (nr < 0)
                        return nr;
                return -EINVAL;
                             IB_ACCESS_REMOTE_READ |
                             IB_ACCESS_REMOTE_WRITE;
 
-       req->mr->need_inval = true;
-
        sg->addr = cpu_to_le64(req->mr->iova);
        put_unaligned_le24(req->mr->length, sg->length);
        put_unaligned_le32(req->mr->rkey, sg->key);
 
        req->num_sge = 1;
        req->inline_data = false;
-       req->mr->need_inval = false;
        refcount_set(&req->ref, 2); /* send and recv completions */
 
        c->common.flags |= NVME_CMD_SGL_METABUF;
                                req->mr->rkey);
                        nvme_rdma_error_recovery(queue->ctrl);
                }
-               req->mr->need_inval = false;
-       } else if (req->mr->need_inval) {
+       } else if (req->mr) {
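+               /* No remote invalidation was seen, so invalidate the rkey locally */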
                ret = nvme_rdma_inv_rkey(queue, req);
                if (unlikely(ret < 0)) {
                        dev_err(queue->ctrl->ctrl.device,
                        sizeof(struct nvme_command), DMA_TO_DEVICE);
 
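+       /* Chain the MR registration WR in front of the send when an MR is in use */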
        err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
-                       req->mr->need_inval ? &req->reg_wr.wr : NULL);
+                       req->mr ? &req->reg_wr.wr : NULL);
        if (unlikely(err)) {
                nvme_rdma_unmap_data(queue, rq);
                goto err;
        .submit_async_event     = nvme_rdma_submit_async_event,
        .delete_ctrl            = nvme_rdma_delete_ctrl,
        .get_address            = nvmf_get_address,
-       .reinit_request         = nvme_rdma_reinit_request,
 };
 
 static inline bool