        struct request *req, *n;
        int rc;
        struct bio *bio;
-       unsigned int segs;
        struct blkfront_ring_info *rinfo;
 
        blkfront_gather_backend_features(info);
-       /* Reset limits changed by blk_mq_update_nr_hw_queues(). */
        blkif_set_queue_limits(info);
-       segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
-       blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
 
        for_each_rinfo(info, rinfo, r_index) {
                rc = blkfront_setup_indirect(rinfo);
@@ ... @@ static int blkif_recover(struct blkfront_info *info)
        list_for_each_entry_safe(req, n, &info->requests, queuelist) {
                /* Requeue pending requests (flush or discard) */
                list_del_init(&req->queuelist);
-               BUG_ON(req->nr_phys_segments > segs);
+               BUG_ON(req->nr_phys_segments >
+                      (info->max_indirect_segments ? :
+                       BLKIF_MAX_SEGMENTS_PER_REQUEST));
                blk_mq_requeue_request(req, false);
        }
        blk_mq_start_stopped_hw_queues(info->rq, true);
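
Note on the new BUG_ON expression: it relies on the GNU C conditional with an
omitted middle operand, where "a ? : b" evaluates to a when a is non-zero and
to b otherwise, with a evaluated only once. Below is a minimal standalone
sketch of that equivalence; max_segs() and FALLBACK_SEGS are illustrative
stand-ins, not kernel code (BLKIF_MAX_SEGMENTS_PER_REQUEST is 11 in the Xen
block interface headers).

#include <assert.h>

/* Stand-in for BLKIF_MAX_SEGMENTS_PER_REQUEST (11 in the Xen headers). */
#define FALLBACK_SEGS 11

/*
 * GNU C extension: "a ? : b" is shorthand for "a ? a : b",
 * except that "a" is evaluated only once.
 */
static unsigned int max_segs(unsigned int max_indirect_segments)
{
        return max_indirect_segments ? : FALLBACK_SEGS;
}

int main(void)
{
        assert(max_segs(0) == FALLBACK_SEGS); /* feature unset: fall back */
        assert(max_segs(256) == 256);         /* feature set: use it */
        return 0;
}

With segs gone, the requeue loop recomputes this bound once per pending
request; both operands are plain loads, which is acceptable in this
slow recovery path.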