[NVME_ADM_CMD_GET_FEATURES] = NVME_CMD_EFF_CSUPP,
[NVME_ADM_CMD_ASYNC_EV_REQ] = NVME_CMD_EFF_CSUPP,
[NVME_ADM_CMD_NS_ATTACHMENT] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC,
+ [NVME_ADM_CMD_DBBUF_CONFIG] = NVME_CMD_EFF_CSUPP,
[NVME_ADM_CMD_FORMAT_NVM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
};
}
}
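+
+/*
+ * Shadow doorbell / EventIdx support (Doorbell Buffer Config, NVMe 1.3):
+ * the driver publishes doorbell updates in guest memory and only falls
+ * back to an MMIO write when its new value passes the EventIdx we
+ * advertise here. All values in the shadow buffers are 32-bit
+ * little-endian.
+ */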
+static void nvme_update_cq_eventidx(const NvmeCQueue *cq)
+{
+ uint32_t v = cpu_to_le32(cq->head);
+
+ pci_dma_write(&cq->ctrl->parent_obj, cq->ei_addr, &v, sizeof(v));
+ trace_pci_nvme_eventidx_cq(cq->cqid, cq->head);
+}
+
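+/* Read back the CQ head the driver last published in its shadow doorbell. */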
+static void nvme_update_cq_head(NvmeCQueue *cq)
+{
+ uint32_t v;
+
+ pci_dma_read(&cq->ctrl->parent_obj, cq->db_addr, &v, sizeof(v));
+ cq->head = le32_to_cpu(v);
+}
+
static void nvme_post_cqes(void *opaque)
{
NvmeCQueue *cq = opaque;
NvmeSQueue *sq;
hwaddr addr;
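+ /*
+ * Advertise our progress and re-read the shadow CQ head before the
+ * full-queue check; the driver may have consumed entries without an
+ * MMIO doorbell write.
+ */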
+ if (n->dbbuf_enabled) {
+ nvme_update_cq_eventidx(cq);
+ nvme_update_cq_head(cq);
+ }
+
if (nvme_cq_full(cq)) {
break;
}
static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
uint16_t sqid, uint16_t cqid, uint16_t size)
{
+ uint32_t stride = 4 << NVME_CAP_DSTRD(n->bar.cap);
int i;
NvmeCQueue *cq;
}
sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq);
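+ /*
+ * If shadow buffers are configured, point this SQ at its slot; the
+ * layout mirrors the doorbell registers, tail at 2 * sqid * stride.
+ */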
+ if (n->dbbuf_dbs && n->dbbuf_eis) {
+ sq->db_addr = n->dbbuf_dbs + 2 * sqid * stride;
+ sq->ei_addr = n->dbbuf_eis + 2 * sqid * stride;
+ }
+
assert(n->cq[cqid]);
cq = n->cq[cqid];
QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry);
uint16_t cqid, uint16_t vector, uint16_t size,
uint16_t irq_enabled)
{
+ uint32_t stride = 4 << NVME_CAP_DSTRD(n->bar.cap);
int ret;
if (msix_enabled(&n->parent_obj)) {
cq->head = cq->tail = 0;
QTAILQ_INIT(&cq->req_list);
QTAILQ_INIT(&cq->sq_list);
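+ /* CQ head slots sit at the odd offsets, (2 * cqid + 1) * stride */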
+ if (n->dbbuf_dbs && n->dbbuf_eis) {
+ cq->db_addr = n->dbbuf_dbs + (2 * cqid + 1) * stride;
+ cq->ei_addr = n->dbbuf_eis + (2 * cqid + 1) * stride;
+ }
n->cq[cqid] = cq;
cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
}
return status;
}
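+
+/*
+ * Doorbell Buffer Config: PRP1 carries the shadow doorbell buffer address,
+ * PRP2 the EventIdx buffer address. Queues created later pick the
+ * addresses up in nvme_init_sq()/nvme_init_cq(); queues that already
+ * exist are retrofitted here, with their current tail/head values seeded
+ * into the shadow buffer.
+ */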
+static uint16_t nvme_dbbuf_config(NvmeCtrl *n, const NvmeRequest *req)
+{
+ uint32_t stride = 4 << NVME_CAP_DSTRD(n->bar.cap);
+ uint64_t dbs_addr = le64_to_cpu(req->cmd.dptr.prp1);
+ uint64_t eis_addr = le64_to_cpu(req->cmd.dptr.prp2);
+ int i;
+
+ /* Both buffer addresses must be page aligned */
+ if (dbs_addr & (n->page_size - 1) || eis_addr & (n->page_size - 1)) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ /* Save shadow buffer base addresses for use during queue creation */
+ n->dbbuf_dbs = dbs_addr;
+ n->dbbuf_eis = eis_addr;
+ n->dbbuf_enabled = true;
+
+ for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
+ NvmeSQueue *sq = n->sq[i];
+ NvmeCQueue *cq = n->cq[i];
+
+ if (sq) {
+ uint32_t v = cpu_to_le32(sq->tail);
+
+ /* Submission queue tail pointer location, 2 * QID * stride */
+ sq->db_addr = dbs_addr + 2 * i * stride;
+ sq->ei_addr = eis_addr + 2 * i * stride;
+ pci_dma_write(&n->parent_obj, sq->db_addr, &v, sizeof(v));
+ }
+
+ if (cq) {
+ uint32_t v = cpu_to_le32(cq->head);
+
+ /* Completion queue head pointer location, (2 * QID + 1) * stride */
+ cq->db_addr = dbs_addr + (2 * i + 1) * stride;
+ cq->ei_addr = eis_addr + (2 * i + 1) * stride;
+ pci_dma_write(&n->parent_obj, cq->db_addr, &v, sizeof(v));
+ }
+ }
+
+ return NVME_SUCCESS;
+}
+
static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
{
trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode,
return nvme_aer(n, req);
case NVME_ADM_CMD_NS_ATTACHMENT:
return nvme_ns_attachment(n, req);
+ case NVME_ADM_CMD_DBBUF_CONFIG:
+ return nvme_dbbuf_config(n, req);
case NVME_ADM_CMD_FORMAT_NVM:
return nvme_format(n, req);
default:
return NVME_INVALID_OPCODE | NVME_DNR;
}
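+
+/*
+ * Publish the SQ tail we have consumed up to; the driver compares its new
+ * shadow tail against this EventIdx to decide if an MMIO write is needed.
+ */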
+static void nvme_update_sq_eventidx(const NvmeSQueue *sq)
+{
+ uint32_t v = cpu_to_le32(sq->tail);
+
+ pci_dma_write(&sq->ctrl->parent_obj, sq->ei_addr, &v, sizeof(v));
+}
+
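+/* Fetch the SQ tail the driver last published in its shadow doorbell. */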
+static void nvme_update_sq_tail(NvmeSQueue *sq)
+{
+ uint32_t v;
+
+ pci_dma_read(&sq->ctrl->parent_obj, sq->db_addr, &v, sizeof(v));
+ sq->tail = le32_to_cpu(v);
+}
+
static void nvme_process_sq(void *opaque)
{
NvmeSQueue *sq = opaque;
NvmeCmd cmd;
NvmeRequest *req;
+ if (sq->sqid && n->dbbuf_enabled) {
+ nvme_update_sq_tail(sq);
+ }
+
while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
addr = sq->dma_addr + sq->head * n->sqe_size;
if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) {
req->status = status;
nvme_enqueue_req_completion(cq, req);
}
+
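+ /*
+ * Publish the new eventidx and re-read the shadow tail in case the
+ * driver queued more commands without ringing the MMIO doorbell.
+ */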
+ if (n->dbbuf_enabled) {
+ nvme_update_sq_eventidx(sq);
+ nvme_update_sq_tail(sq);
+ }
}
}
n->aer_queued = 0;
n->outstanding_aers = 0;
n->qs_created = false;
+ n->dbbuf_dbs = 0;
+ n->dbbuf_eis = 0;
+ n->dbbuf_enabled = false;
}
static void nvme_ctrl_shutdown(NvmeCtrl *n)
start_sqs = nvme_cq_full(cq) ? 1 : 0;
cq->head = new_head;
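+ /* Keep the shadow doorbell entry in sync with this MMIO update */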
+ if (n->dbbuf_enabled) {
+ uint32_t v = cpu_to_le32(cq->head);
+
+ pci_dma_write(&n->parent_obj, cq->db_addr, &v, sizeof(v));
+ }
if (start_sqs) {
NvmeSQueue *sq;
QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
trace_pci_nvme_mmio_doorbell_sq(sq->sqid, new_tail);
sq->tail = new_tail;
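+ /* Mirror the MMIO tail write into the shadow doorbell entry */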
+ if (n->dbbuf_enabled) {
+ uint32_t v = cpu_to_le32(sq->tail);
+
+ pci_dma_write(&n->parent_obj, sq->db_addr, &v, sizeof(v));
+ }
timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
}
}
id->mdts = n->params.mdts;
id->ver = cpu_to_le32(NVME_SPEC_VER);
- id->oacs = cpu_to_le16(NVME_OACS_NS_MGMT | NVME_OACS_FORMAT);
+ id->oacs = cpu_to_le16(NVME_OACS_NS_MGMT | NVME_OACS_FORMAT |
+ NVME_OACS_DBBUF);
id->cntrltype = 0x1;
/*