static const struct nvmet_fabrics_ops nvmet_tcp_ops;
 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
 static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
+static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
+static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd);
 
 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
                struct nvmet_tcp_cmd *cmd)
        return 0;
 }
 
+/*
+ * Free a command's data buffers: the iovec array and the scatter-gather
+ * list backing cmd->req.  Both pointers are reset to NULL afterwards so
+ * that a later call on the same cmd is a harmless no-op (kfree(NULL) is
+ * a no-op; presumably sgl_free(NULL) is too — matches how the callers in
+ * this patch invoke the helper from multiple teardown paths).
+ *
+ * The PDU iovec must already have been unmapped by the caller
+ * (cmd->nr_mapped == 0); warn loudly if pages are still mapped, since
+ * freeing the SG list underneath a live mapping would be a bug.
+ */
+static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
+{
+       WARN_ON(unlikely(cmd->nr_mapped > 0));
+
+       kfree(cmd->iov);
+       sgl_free(cmd->req.sg);
+       cmd->iov = NULL;
+       cmd->req.sg = NULL;
+}
+
+/*
+ * Undo nvmet_tcp_map_pdu_iovec(): kunmap() every page that was mapped
+ * for the PDU data transfer.
+ *
+ * NOTE(review): this excerpt elides the declaration of `i` and the
+ * initialization of `sg` (presumably from cmd->req.sg / cmd->sg_idx in
+ * the full source) — confirm against the unabridged file.
+ */
 static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
 {
        struct scatterlist *sg;
 
        for (i = 0; i < cmd->nr_mapped; i++)
                kunmap(sg_page(&sg[i]));
+
+       /* Record that nothing is mapped any more, so the WARN_ON in
+        * nvmet_tcp_free_cmd_buffers() stays quiet and calling this
+        * function again is a no-op. */
+       cmd->nr_mapped = 0;
 }
 
 static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
 
        return 0;
 err:
-       sgl_free(cmd->req.sg);
+       nvmet_tcp_free_cmd_buffers(cmd);
        return NVME_SC_INTERNAL;
 }
 
                }
        }
 
-       if (queue->nvme_sq.sqhd_disabled) {
-               kfree(cmd->iov);
-               sgl_free(cmd->req.sg);
-       }
+       if (queue->nvme_sq.sqhd_disabled)
+               nvmet_tcp_free_cmd_buffers(cmd);
 
        return 1;
 
        if (left)
                return -EAGAIN;
 
-       kfree(cmd->iov);
-       sgl_free(cmd->req.sg);
+       nvmet_tcp_free_cmd_buffers(cmd);
        cmd->queue->snd_cmd = NULL;
        nvmet_tcp_put_cmd(cmd);
        return 1;
 {
        nvmet_req_uninit(&cmd->req);
        nvmet_tcp_unmap_pdu_iovec(cmd);
-       kfree(cmd->iov);
-       sgl_free(cmd->req.sg);
+       nvmet_tcp_free_cmd_buffers(cmd);
 }
 
 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)