        struct socket           *sock;
        struct nvmet_tcp_port   *port;
        struct work_struct      io_work;
-       int                     cpu;
        struct nvmet_cq         nvme_cq;
        struct nvmet_sq         nvme_sq;
 
        struct work_struct      accept_work;
        struct nvmet_port       *nport;
        struct sockaddr_storage addr;
-       int                     last_cpu;
        void (*data_ready)(struct sock *);
 };
 
        list_add_tail(&cmd->entry, &cmd->queue->free_list);
 }
 
+static inline int queue_cpu(struct nvmet_tcp_queue *queue)
+{
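+	/* sk_incoming_cpu tracks the CPU that last processed RX for this socket */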
+       return queue->sock->sk->sk_incoming_cpu;
+}
+
 static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
 {
        return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
        struct nvmet_tcp_queue  *queue = cmd->queue;
 
        llist_add(&cmd->lentry, &queue->resp_list);
-       queue_work_on(cmd->queue->cpu, nvmet_tcp_wq, &cmd->queue->io_work);
+       queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
 }
 
 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
         * We exhausted our budget, requeue ourselves
         */
        if (pending)
-               queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+               queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
 }
 
 static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
        read_lock_bh(&sk->sk_callback_lock);
        queue = sk->sk_user_data;
        if (likely(queue))
-               queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+               queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
        read_unlock_bh(&sk->sk_callback_lock);
 }
 
 
        if (sk_stream_is_writeable(sk)) {
                clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-               queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+               queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
        }
 out:
        read_unlock_bh(&sk->sk_callback_lock);
        if (ret)
                goto out_free_connect;
 
-       port->last_cpu = cpumask_next_wrap(port->last_cpu,
-                               cpu_online_mask, -1, false);
-       queue->cpu = port->last_cpu;
        nvmet_prepare_receive_pdu(queue);
 
        mutex_lock(&nvmet_tcp_queue_mutex);
        if (ret)
                goto out_destroy_sq;
 
-       queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+       queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
 
        return 0;
 out_destroy_sq:
        }
 
        port->nport = nport;
-       port->last_cpu = -1;
        INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
        if (port->nport->inline_data_size < 0)
                port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;