}
}
-static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
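+/* do the actual assertion: MSI-X message or pin-based INTx level */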
+static void nvme_irq_do_assert(NvmeCtrl *n, NvmeCQueue *cq)
{
PCIDevice *pci = PCI_DEVICE(n);
- assert(cq->irq_enabled);
+ trace_pci_nvme_irq_do_assert(cq->cqid);
if (msix_enabled(pci)) {
trace_pci_nvme_irq_msix(cq->vector);
nvme_irq_check_intx(n);
}
-static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
+static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
+{
+ trace_pci_nvme_irq_assert(cq->cqid);
+
+ assert(cq->irq_enabled);
+
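+ /*
+ * If the irq-eventfd notifier is set up, kick the eventfd; the actual
+ * assertion is then performed by nvme_irq_assert_notify() in the main
+ * loop.
+ */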
+ if (cq->assert_notifier.initialized) {
+ event_notifier_set(&cq->assert_notifier);
+ return;
+ }
+
+ nvme_irq_do_assert(n, cq);
+}
+
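+/* do the actual deassertion; MSI-X needs none, INTx drops the pending count */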
+static void nvme_irq_do_deassert(NvmeCtrl *n, NvmeCQueue *cq)
{
PCIDevice *pci = PCI_DEVICE(n);
+ trace_pci_nvme_irq_do_deassert(cq->cqid);
+
assert(cq->irq_enabled);
if (msix_enabled(pci)) {
return;
}
-
+
assert(cq->vector < 32);
if (qatomic_dec_fetch(&n->cq_pending) == 0) {
nvme_irq_check_intx(n);
}
+static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
+{
+ PCIDevice *pci = PCI_DEVICE(n);
+
+ assert(cq->irq_enabled);
+
+ if (msix_enabled(pci)) {
+ return;
+ }
+
+ if (cq->deassert_notifier.initialized) {
+ /*
+ * The deassert notifier is only initialized when MSI-X is NOT in
+ * use, so only pin-based interrupts incur the extra eventfd
+ * syscall.
+ */
+ event_notifier_set(&cq->deassert_notifier);
+ return;
+ }
+
+ nvme_irq_do_deassert(n, cq);
+}
+
static void nvme_req_clear(NvmeRequest *req)
{
req->ns = NULL;
trace_pci_nvme_update_cq_head(cq->cqid, cq->head);
}
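+
+/* main loop handler for the per-queue assert eventfd */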
+static void nvme_irq_assert_notify(EventNotifier *e)
+{
+ NvmeCQueue *cq = container_of(e, NvmeCQueue, assert_notifier);
+
+ trace_pci_nvme_irq_assert_notify(cq->cqid);
+
+ if (event_notifier_test_and_clear(e)) {
+ nvme_irq_do_assert(cq->ctrl, cq);
+ }
+}
+
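+/* main loop handler for the per-queue deassert eventfd */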
+static void nvme_irq_deassert_notify(EventNotifier *e)
+{
+ NvmeCQueue *cq = container_of(e, NvmeCQueue, deassert_notifier);
+
+ trace_pci_nvme_irq_deassert_notify(cq->cqid);
+
+ if (event_notifier_test_and_clear(e)) {
+ nvme_irq_do_deassert(cq->ctrl, cq);
+ }
+}
+
+static void nvme_init_irq_notifier(NvmeCtrl *n, NvmeCQueue *cq)
+{
+ int ret;
+
+ ret = event_notifier_init(&cq->assert_notifier, 0);
+ if (ret < 0) {
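+ /* fall back to the direct irq path; .initialized remains false */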
+ return;
+ }
+
+ event_notifier_set_handler(&cq->assert_notifier, nvme_irq_assert_notify);
+
+ if (!msix_enabled(PCI_DEVICE(n))) {
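+ /*
+ * Pin-based interrupts also need explicit deassertion, so set up a
+ * second notifier to defer deasserts to the main loop as well.
+ */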
+ ret = event_notifier_init(&cq->deassert_notifier, 0);
+ if (ret < 0) {
+ event_notifier_set_handler(&cq->assert_notifier, NULL);
+ event_notifier_cleanup(&cq->assert_notifier);
+
+ return;
+ }
+
+ event_notifier_set_handler(&cq->deassert_notifier,
+ nvme_irq_deassert_notify);
+ }
+}
+
static void nvme_post_cqe(NvmeCQueue *cq, NvmeRequest *req)
{
NvmeCtrl *n = cq->ctrl;
event_notifier_set_handler(&cq->notifier, NULL);
event_notifier_cleanup(&cq->notifier);
}
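+ /* tear down the irq eventfd notifiers if they were set up */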
+ if (cq->assert_notifier.initialized) {
+ event_notifier_set_handler(&cq->assert_notifier, NULL);
+ event_notifier_cleanup(&cq->assert_notifier);
+ }
+ if (cq->deassert_notifier.initialized) {
+ event_notifier_set_handler(&cq->deassert_notifier, NULL);
+ event_notifier_cleanup(&cq->deassert_notifier);
+ }
if (msix_enabled(pci)) {
msix_vector_unuse(pci, cq->vector);
}
}
if (cq->irq_enabled) {
- nvme_irq_deassert(n, cq);
+ nvme_irq_do_deassert(n, cq);
}
trace_pci_nvme_del_cq(qid);
cq->do_irq = qemu_bh_new_guarded(nvme_do_irq, cq, guard);
cq->post_queued_cqes = qemu_bh_new_guarded(nvme_post_cqes, cq, guard);
+
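+ /* the admin completion queue (cqid 0) keeps the direct irq path */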
+ if (cqid && n->params.irq_eventfd) {
+ nvme_init_irq_notifier(n, cq);
+ }
}
static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)
DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false),
DEFINE_PROP_BOOL("legacy-cmb", NvmeCtrl, params.legacy_cmb, false),
DEFINE_PROP_BOOL("ioeventfd", NvmeCtrl, params.ioeventfd, false),
+ DEFINE_PROP_BOOL("x-irq-eventfd", NvmeCtrl, params.irq_eventfd, false),
DEFINE_PROP_UINT8("zoned.zasl", NvmeCtrl, params.zasl, 0),
DEFINE_PROP_BOOL("zoned.auto_transition", NvmeCtrl,
params.auto_transition_zones, true),