@@ ... @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
 		struct nvmet_rdma_cmd *cmd)
 {
        struct ib_recv_wr *bad_wr;
+       int ret;
 
        ib_dma_sync_single_for_device(ndev->device,
                cmd->sge[0].addr, cmd->sge[0].length,
                DMA_FROM_DEVICE);
 
        if (ndev->srq)
-               return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
-       return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
+               ret = ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
+       else
+               ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
+
+       if (unlikely(ret))
+               pr_err("post_recv cmd failed\n");
+
+       return ret;
 }
 
 static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
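With the hunk above applied, the receive-posting helper reads as follows. This is reconstructed directly from the diff; the opening line of the signature, cut off in the fragment, is assumed to match the upstream driver:

static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
{
	struct ib_recv_wr *bad_wr;
	int ret;

	/* make the receive buffer visible to the device before posting */
	ib_dma_sync_single_for_device(ndev->device,
		cmd->sge[0].addr, cmd->sge[0].length,
		DMA_FROM_DEVICE);

	if (ndev->srq)
		ret = ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
	else
		ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);

	/* posting recvs is fast-path work; failure is the unlikely case */
	if (unlikely(ret))
		pr_err("post_recv cmd failed\n");

	return ret;
}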
@@ ... @@ static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
        ndev->srq = srq;
        ndev->srq_size = srq_size;
 
-       for (i = 0; i < srq_size; i++)
-               nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
+       for (i = 0; i < srq_size; i++) {
+               ret = nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
+               if (ret)
+                       goto out_free_cmds;
+       }
 
        return 0;
 
+out_free_cmds:
+       nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
 out_destroy_srq:
        ib_destroy_srq(srq);
        return ret;
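The unwind labels above release resources in reverse order of acquisition, each falling through into the teardown of the next-older resource: the commands allocated and posted after the SRQ are freed first, then the SRQ itself is destroyed. A minimal userspace sketch of the same goto-unwind idiom; all names here (setup_example, fail_post, and the malloc stand-ins) are illustrative, not driver code:

#include <stdlib.h>

static int setup_example(int fail_post)
{
	void *srq, *cmds;
	int ret = 0;

	srq = malloc(64);		/* stands in for ib_create_srq() */
	if (!srq)
		return -1;

	cmds = malloc(64);		/* stands in for nvmet_rdma_alloc_cmds() */
	if (!cmds) {
		ret = -1;
		goto out_destroy_srq;
	}

	if (fail_post) {		/* stands in for a failing post_recv loop */
		ret = -1;
		goto out_free_cmds;
	}

	free(cmds);
	free(srq);
	return 0;

out_free_cmds:
	free(cmds);			/* newest resource torn down first ... */
out_destroy_srq:
	free(srq);			/* ... falling through to the older one */
	return ret;
}

int main(void)
{
	return setup_example(1) == 0 ? 0 : 1;
}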
@@ ... @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
        if (!ndev->srq) {
                for (i = 0; i < queue->recv_queue_size; i++) {
                        queue->cmds[i].queue = queue;
-                       nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
+                       ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
+                       if (ret)
+                               goto err_destroy_qp;
                }
        }
 
 out:
        return ret;
 
+err_destroy_qp:
+       rdma_destroy_qp(queue->cm_id);
 err_destroy_cq:
        ib_free_cq(queue->cq);
        goto out;
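For completeness, here is the error path of nvmet_rdma_create_queue_ib() after this change, reconstructed from the hunk above; the earlier QP/CQ setup and the declarations of ret and i are assumed from the upstream driver. The QP is destroyed before the CQ is freed because a CQ cannot be released while a QP still references it, and both error labels fall back to the common out: exit with ret still holding the error code:

	if (!ndev->srq) {
		for (i = 0; i < queue->recv_queue_size; i++) {
			queue->cmds[i].queue = queue;
			ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
			if (ret)
				goto err_destroy_qp;
		}
	}

out:
	return ret;

err_destroy_qp:
	rdma_destroy_qp(queue->cm_id);	/* the QP must go before its CQ */
err_destroy_cq:
	ib_free_cq(queue->cq);
	goto out;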