struct request_queue *q)
{
if (ctrl->max_hw_sectors) {
+ u32 max_segments =
+ (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
+
blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
- blk_queue_max_segments(q,
- (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1);
+ blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
}
if (ctrl->stripe_size)
blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
	blk_queue_virt_boundary(q, ctrl->page_size - 1);
}
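
The clamp matters because blk_queue_max_segments() takes an unsigned short, so the computed segment count was silently truncated whenever it exceeded USHRT_MAX; min_t(u32, ...) bounds the u32 value before the narrowing conversion. A minimal userspace sketch of the failure mode, using made-up controller values (1 GiB max transfer, 4 KiB controller pages; set_max_segments() is a stand-in for the block layer helper):

	#include <stdio.h>
	#include <limits.h>

	/* Stand-in for blk_queue_max_segments(), whose parameter is an
	 * unsigned short in this kernel version. */
	static void set_max_segments(unsigned short max_segments)
	{
		printf("queue max_segments = %u\n", max_segments);
	}

	int main(void)
	{
		/* Hypothetical controller: 1 GiB max transfer in 512-byte
		 * sectors, 4 KiB controller pages (8 sectors each). */
		unsigned int max_hw_sectors = 2097152;
		unsigned int page_sectors = 8;
		unsigned int max_segments = max_hw_sectors / page_sectors + 1;

		/* Before the fix: implicit narrowing, 262145 becomes 1. */
		set_max_segments(max_segments);

		/* After the fix: clamp first, as min_t(u32, ...) does. */
		set_max_segments(max_segments < USHRT_MAX
					? max_segments : USHRT_MAX);
		return 0;
	}
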
/*