return ret;
  }
  
- static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
+ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
  {
        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 +      struct ib_device *ibdev = ctrl->device->dev;
        unsigned int nr_io_queues;
        int i, ret;
  
- static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
+ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
+               bool remove)
  {
        nvme_rdma_free_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe,
                        sizeof(struct nvme_command), DMA_TO_DEVICE);
-       nvme_rdma_stop_and_free_queue(&ctrl->queues[0]);
-       blk_cleanup_queue(ctrl->ctrl.admin_q);
-       blk_mq_free_tag_set(&ctrl->admin_tag_set);
-       nvme_rdma_dev_put(ctrl->device);
+       nvme_rdma_stop_queue(&ctrl->queues[0]);
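+       /*
+        * The admin request queue and tagset are released only when the
+        * controller is being removed; a reset keeps them for reuse.
+        */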
+       if (remove) {
+               blk_cleanup_queue(ctrl->ctrl.admin_q);
+               nvme_rdma_free_tagset(&ctrl->ctrl, true);
+       }
+       nvme_rdma_free_queue(&ctrl->queues[0]);
+ }
+ 
+ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
+               bool new)
+ {
+       int error;
+ 
+       error = nvme_rdma_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
+       if (error)
+               return error;
+ 
+       ctrl->device = ctrl->queues[0].device;
+ 
+       ctrl->max_fr_pages = min_t(u32, NVME_RDMA_MAX_SEGMENTS,
+               ctrl->device->dev->attrs.max_fast_reg_page_list_len);
+ 
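+       /*
+        * On the initial connect (new) the admin tagset and request queue
+        * are allocated; on a reset they already exist and the tagset is
+        * only re-initialized.
+        */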
+       if (new) {
+               ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
+               if (IS_ERR(ctrl->ctrl.admin_tagset))
+                       goto out_free_queue;
+ 
+               ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
+               if (IS_ERR(ctrl->ctrl.admin_q)) {
+                       error = PTR_ERR(ctrl->ctrl.admin_q);
+                       goto out_free_tagset;
+               }
+       } else {
+               error = blk_mq_reinit_tagset(&ctrl->admin_tag_set,
+                                            nvme_rdma_reinit_request);
+               if (error)
+                       goto out_free_queue;
+       }
+ 
+       error = nvme_rdma_start_queue(ctrl, 0);
+       if (error)
+               goto out_cleanup_queue;
+ 
+       error = ctrl->ctrl.ops->reg_read64(&ctrl->ctrl, NVME_REG_CAP,
+                       &ctrl->ctrl.cap);
+       if (error) {
+               dev_err(ctrl->ctrl.device,
+                       "prop_get NVME_REG_CAP failed\n");
+               goto out_cleanup_queue;
+       }
+ 
+       ctrl->ctrl.sqsize =
+               min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
+ 
+       error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
+       if (error)
+               goto out_cleanup_queue;
+ 
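+       /*
+        * max_hw_sectors is derived from the fixed 4K MR page size used
+        * for fast registration rather than the CPU PAGE_SIZE.
+        */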
+       ctrl->ctrl.max_hw_sectors =
 -              (ctrl->max_fr_pages - 1) << (PAGE_SHIFT - 9);
++              (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);
+ 
+       error = nvme_init_identify(&ctrl->ctrl);
+       if (error)
+               goto out_cleanup_queue;
+ 
+       error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev,
+                       &ctrl->async_event_sqe, sizeof(struct nvme_command),
+                       DMA_TO_DEVICE);
+       if (error)
+               goto out_cleanup_queue;
+ 
+       return 0;
+ 
+ out_cleanup_queue:
+       if (new)
+               blk_cleanup_queue(ctrl->ctrl.admin_q);
+ out_free_tagset:
+       if (new)
+               nvme_rdma_free_tagset(&ctrl->ctrl, true);
+ out_free_queue:
+       nvme_rdma_free_queue(&ctrl->queues[0]);
+       return error;
+ }
+ 
+ static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
+               bool remove)
+ {
+       nvme_rdma_stop_io_queues(ctrl);
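+       /* as for the admin queue, connect_q and the tagset survive resets */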
+       if (remove) {
+               blk_cleanup_queue(ctrl->ctrl.connect_q);
+               nvme_rdma_free_tagset(&ctrl->ctrl, false);
+       }
+       nvme_rdma_free_io_queues(ctrl);
+ }
+ 
+ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
+ {
+       int ret;
+ 
+       ret = nvme_rdma_alloc_io_queues(ctrl);
+       if (ret)
+               return ret;
+ 
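+       /*
+        * Mirror the admin path: allocate the I/O tagset and connect_q only
+        * for a new controller; on a reset re-init the existing tagset and
+        * update the number of hardware queues.
+        */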
+       if (new) {
+               ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false);
+               if (IS_ERR(ctrl->ctrl.tagset))
+                       goto out_free_io_queues;
+ 
+               ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
+               if (IS_ERR(ctrl->ctrl.connect_q)) {
+                       ret = PTR_ERR(ctrl->ctrl.connect_q);
+                       goto out_free_tag_set;
+               }
+       } else {
+               ret = blk_mq_reinit_tagset(&ctrl->tag_set,
+                                          nvme_rdma_reinit_request);
+               if (ret)
+                       goto out_free_io_queues;
+ 
+               blk_mq_update_nr_hw_queues(&ctrl->tag_set,
+                       ctrl->ctrl.queue_count - 1);
+       }
+ 
+       ret = nvme_rdma_start_io_queues(ctrl);
+       if (ret)
+               goto out_cleanup_connect_q;
+ 
+       return 0;
+ 
+ out_cleanup_connect_q:
+       if (new)
+               blk_cleanup_queue(ctrl->ctrl.connect_q);
+ out_free_tag_set:
+       if (new)
+               nvme_rdma_free_tagset(&ctrl->ctrl, false);
+ out_free_io_queues:
+       nvme_rdma_free_io_queues(ctrl);
+       return ret;
  }
  
  static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
  static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
                struct nvme_rdma_request *req, struct nvme_command *c,
                int count)
  {
        struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
        int nr;
  
 -      nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, PAGE_SIZE);
 +      /*
 +       * Align the MR to a 4K page size to match the ctrl page size and
 +       * the block virtual boundary.
 +       */
 +      nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
-       if (nr < count) {
+       if (unlikely(nr < count)) {
                if (nr < 0)
                        return nr;
                return -EINVAL;