return ret;
 }
 
+/* The admin queue is always qid 0; all I/O queues have qid >= 1. */
+static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
+{
+       return nvme_tcp_queue_id(queue) == 0;
+}
+
+/*
+ * True for an I/O queue in the HCTX_TYPE_DEFAULT range: qids
+ * 1 .. io_queues[HCTX_TYPE_DEFAULT].  Queue types are laid out
+ * contiguously by qid in default, read, poll order.
+ */
+static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
+{
+       struct nvme_tcp_ctrl *ctrl = queue->ctrl;
+       int qid = nvme_tcp_queue_id(queue);
+
+       return !nvme_tcp_admin_queue(queue) &&
+               qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
+}
+
+/*
+ * True for an I/O queue in the HCTX_TYPE_READ range, which starts
+ * immediately after the default range: qids
+ * io_queues[DEFAULT]+1 .. io_queues[DEFAULT]+io_queues[READ].
+ */
+static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
+{
+       struct nvme_tcp_ctrl *ctrl = queue->ctrl;
+       int qid = nvme_tcp_queue_id(queue);
+
+       return !nvme_tcp_admin_queue(queue) &&
+               !nvme_tcp_default_queue(queue) &&
+               qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
+                         ctrl->io_queues[HCTX_TYPE_READ];
+}
+
+/*
+ * True for an I/O queue in the HCTX_TYPE_POLL range, which follows
+ * the default and read ranges and runs to the last allocated qid.
+ */
+static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
+{
+       struct nvme_tcp_ctrl *ctrl = queue->ctrl;
+       int qid = nvme_tcp_queue_id(queue);
+
+       return !nvme_tcp_admin_queue(queue) &&
+               !nvme_tcp_default_queue(queue) &&
+               !nvme_tcp_read_queue(queue) &&
+               qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
+                         ctrl->io_queues[HCTX_TYPE_READ] +
+                         ctrl->io_queues[HCTX_TYPE_POLL];
+}
+
+/*
+ * Assign queue->io_cpu by renumbering each queue type from 0 within
+ * its own range (n = qid minus the start of the type's qid range) and
+ * wrapping that per-type index onto cpu_online_mask.  This way the
+ * queues of each type are spread over the online CPUs independently,
+ * instead of all types sharing one global qid-based mapping (the
+ * previous scheme stacked default/read/poll queues with adjacent qids
+ * onto neighboring CPUs).  The admin queue falls through with n == 0.
+ */
+static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
+{
+       struct nvme_tcp_ctrl *ctrl = queue->ctrl;
+       int qid = nvme_tcp_queue_id(queue);
+       int n = 0;
+
+       if (nvme_tcp_default_queue(queue))
+               n = qid - 1;
+       else if (nvme_tcp_read_queue(queue))
+               n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
+       else if (nvme_tcp_poll_queue(queue))
+               n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
+                               ctrl->io_queues[HCTX_TYPE_READ] - 1;
+       /* Pick the n'th online CPU, wrapping when n >= num_online_cpus(). */
+       queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+}
+
 static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
                int qid, size_t queue_size)
 {
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
        struct nvme_tcp_queue *queue = &ctrl->queues[qid];
        struct linger sol = { .l_onoff = 1, .l_linger = 0 };
-       int ret, opt, rcv_pdu_size, n;
+       int ret, opt, rcv_pdu_size;
 
        queue->ctrl = ctrl;
        INIT_LIST_HEAD(&queue->send_list);
        }
 
        queue->sock->sk->sk_allocation = GFP_ATOMIC;
-       if (!qid)
-               n = 0;
-       else
-               n = (qid - 1) % num_online_cpus();
-       queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+       nvme_tcp_set_queue_io_cpu(queue);
        queue->request = NULL;
        queue->data_remaining = 0;
        queue->ddgst_remaining = 0;