ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev,
pi_capable);
 
+	ctrl->ctrl.max_segments = ctrl->max_fr_pages;
+	ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
+	if (pi_capable)
+		ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages;
+	else
+		ctrl->ctrl.max_integrity_segments = 0;
+
 	/*
 	 * Bind the async event SQE DMA mapping to the admin queue lifetime.
 	 * It's safe, since any change in the underlying RDMA device will issue
 	 * error recovery and queue re-creation.
 	 */
[...]
 	if (error)
 		goto out_stop_queue;
 
-	ctrl->ctrl.max_segments = ctrl->max_fr_pages;
-	ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
-	if (pi_capable)
-		ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages;
-	else
-		ctrl->ctrl.max_integrity_segments = 0;
-
nvme_unquiesce_admin_queue(&ctrl->ctrl);
error = nvme_init_ctrl_finish(&ctrl->ctrl, false);