nvme_tcp_stop_queue(ctrl, i);
 }
 
-static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
+static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
+                                   int first, int last)
 {
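+       /*
+        * Start the I/O queues in the half-open range [first, last).
+        * Queue 0 is the admin queue, so callers pass 1-based indices.
+        */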
        int i, ret;
 
-       for (i = 1; i < ctrl->queue_count; i++) {
+       for (i = first; i < last; i++) {
                ret = nvme_tcp_start_queue(ctrl, i);
                if (ret)
                        goto out_stop_queues;
        }

        return 0;
 
 out_stop_queues:
-       for (i--; i >= 1; i--)
+       for (i--; i >= first; i--)
                nvme_tcp_stop_queue(ctrl, i);
        return ret;
 }
 
 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
 {
-       int ret;
+       int ret, nr_queues;
 
        ret = nvme_tcp_alloc_io_queues(ctrl);
        if (ret)
                        goto out_free_tag_set;
        }
 
-       ret = nvme_tcp_start_io_queues(ctrl);
+       /*
+        * Only start IO queues for which we have allocated the tagset
+        * and limited it to the available queues. On reconnects, the
+        * number of queues might have changed.
+        */
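+       /*
+        * Illustration (hypothetical numbers): if the previous connect
+        * negotiated 4 I/O queues (tagset->nr_hw_queues == 4) and this
+        * reconnect negotiated 8 (queue_count == 9, counting the admin
+        * queue), nr_queues is 5 and only queues 1..4 are started here;
+        * queues 5..8 are started further below, once the tagset has
+        * been updated to the new count.
+        */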
+       nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count);
+       ret = nvme_tcp_start_io_queues(ctrl, 1, nr_queues);
        if (ret)
                goto out_cleanup_connect_q;
 
                nvme_unfreeze(ctrl);
        }
 
+       /*
+        * If the number of queues has increased (reconnect case),
+        * start all new queues now.
+        */
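+       /*
+        * When the tagset already covers every allocated queue (fresh
+        * connects, or reconnects where the count shrank or stayed the
+        * same), this range is empty and the call below is a no-op.
+        */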
+       ret = nvme_tcp_start_io_queues(ctrl, nr_queues,
+                                      ctrl->tagset->nr_hw_queues + 1);
+       if (ret)
+               goto out_wait_freeze_timed_out;
+
        return 0;
 
 out_wait_freeze_timed_out: