     nvme_update_sq_tail(sq);
 }
-    while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
-        addr = sq->dma_addr + sq->head * n->sqe_size;
-        if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) {
-            trace_pci_nvme_err_addr_read(addr);
-            trace_pci_nvme_err_cfs();
-            stl_le_p(&n->bar.csts, NVME_CSTS_FAILED);
-            break;
-        }
-        nvme_inc_sq_head(sq);
-
-        req = QTAILQ_FIRST(&sq->req_list);
-        QTAILQ_REMOVE(&sq->req_list, req, entry);
-        QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
-        nvme_req_clear(req);
-        req->cqe.cid = cmd.cid;
-        memcpy(&req->cmd, &cmd, sizeof(NvmeCmd));
-
-        status = sq->sqid ? nvme_io_cmd(n, req) :
-            nvme_admin_cmd(n, req);
-        if (status != NVME_NO_COMPLETE) {
-            req->status = status;
-            nvme_enqueue_req_completion(cq, req);
+    do {
+        while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
+            addr = sq->dma_addr + sq->head * n->sqe_size;
+            if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) {
+                trace_pci_nvme_err_addr_read(addr);
+                trace_pci_nvme_err_cfs();
+                stl_le_p(&n->bar.csts, NVME_CSTS_FAILED);
+                break;
+            }
+            nvme_inc_sq_head(sq);
+
+            req = QTAILQ_FIRST(&sq->req_list);
+            QTAILQ_REMOVE(&sq->req_list, req, entry);
+            QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
+            nvme_req_clear(req);
+            req->cqe.cid = cmd.cid;
+            memcpy(&req->cmd, &cmd, sizeof(NvmeCmd));
+
+            status = sq->sqid ? nvme_io_cmd(n, req) :
+                nvme_admin_cmd(n, req);
+            if (status != NVME_NO_COMPLETE) {
+                req->status = status;
+                nvme_enqueue_req_completion(cq, req);
+            }
         }
         if (n->dbbuf_enabled) {
             nvme_update_sq_eventidx(sq);
             nvme_update_sq_tail(sq);
         }
-    }
+    } while (!nvme_sq_empty(sq));
 }
static void nvme_update_msixcap_ts(PCIDevice *pci_dev, uint32_t table_size)