www.infradead.org Git - users/dwmw2/linux.git/commitdiff
xen/netback: use lateeoi irq binding
author: Juergen Gross <jgross@suse.com>
Mon, 7 Sep 2020 13:47:28 +0000 (15:47 +0200)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 5 Nov 2020 10:08:36 +0000 (11:08 +0100)
commit 23025393dbeb3b8b3b60ebfa724cdae384992e27 upstream.

In order to reduce the chance for the system becoming unresponsive due
to event storms triggered by a misbehaving netfront use the lateeoi
irq binding for netback and unmask the event channel only just before
going to sleep waiting for new events.

Make sure not to issue an EOI when none is pending by introducing an
eoi_pending element to struct xenvif_queue.

When no request has been consumed set the spurious flag when sending
the EOI for an interrupt.

This is part of XSA-332.

Cc: stable@vger.kernel.org
Reported-by: Julien Grall <julien@xen.org>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Wei Liu <wl@xen.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netback/rx.c

index 936c0b3e0ba28ec1f6586a5bab86d403a86d0dd6..86d23d0f563c49b17affac24a1a189829dfabcbc 100644 (file)
@@ -140,6 +140,20 @@ struct xenvif_queue { /* Per-queue data for xenvif */
        char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
        struct xenvif *vif; /* Parent VIF */
 
+       /*
+        * TX/RX common EOI handling.
+        * When feature-split-event-channels = 0, interrupt handler sets
+        * NETBK_COMMON_EOI, otherwise NETBK_RX_EOI and NETBK_TX_EOI are set
+        * by the RX and TX interrupt handlers.
+        * RX and TX handler threads will issue an EOI when either
+        * NETBK_COMMON_EOI or their specific bits (NETBK_RX_EOI or
+        * NETBK_TX_EOI) are set and they will reset those bits.
+        */
+       atomic_t eoi_pending;
+#define NETBK_RX_EOI           0x01
+#define NETBK_TX_EOI           0x02
+#define NETBK_COMMON_EOI       0x04
+
        /* Use NAPI for guest TX */
        struct napi_struct napi;
        /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
@@ -357,6 +371,7 @@ int xenvif_dealloc_kthread(void *data);
 
 irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
 
+bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
 void xenvif_rx_action(struct xenvif_queue *queue);
 void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
 
index 4cafc31b98b7c6f095d0d3f92134d03d812f5b83..c960cb7e3251f67c240f130d7793bdb9586da0f4 100644 (file)
@@ -77,12 +77,28 @@ int xenvif_schedulable(struct xenvif *vif)
                !vif->disabled;
 }
 
+static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
+{
+       bool rc;
+
+       rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
+       if (rc)
+               napi_schedule(&queue->napi);
+       return rc;
+}
+
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
 {
        struct xenvif_queue *queue = dev_id;
+       int old;
 
-       if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
-               napi_schedule(&queue->napi);
+       old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
+       WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n");
+
+       if (!xenvif_handle_tx_interrupt(queue)) {
+               atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending);
+               xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+       }
 
        return IRQ_HANDLED;
 }
@@ -116,19 +132,46 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
        return work_done;
 }
 
+static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue)
+{
+       bool rc;
+
+       rc = xenvif_have_rx_work(queue, false);
+       if (rc)
+               xenvif_kick_thread(queue);
+       return rc;
+}
+
 static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
        struct xenvif_queue *queue = dev_id;
+       int old;
 
-       xenvif_kick_thread(queue);
+       old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending);
+       WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n");
+
+       if (!xenvif_handle_rx_interrupt(queue)) {
+               atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending);
+               xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+       }
 
        return IRQ_HANDLED;
 }
 
 irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 {
-       xenvif_tx_interrupt(irq, dev_id);
-       xenvif_rx_interrupt(irq, dev_id);
+       struct xenvif_queue *queue = dev_id;
+       int old;
+
+       old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
+       WARN(old, "Interrupt while EOI pending\n");
+
+       /* Use bitwise or as we need to call both functions. */
+       if ((!xenvif_handle_tx_interrupt(queue) |
+            !xenvif_handle_rx_interrupt(queue))) {
+               atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
+               xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+       }
 
        return IRQ_HANDLED;
 }
@@ -595,7 +638,7 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
        shared = (struct xen_netif_ctrl_sring *)addr;
        BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);
 
-       err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn);
+       err = bind_interdomain_evtchn_to_irq_lateeoi(vif->domid, evtchn);
        if (err < 0)
                goto err_unmap;
 
@@ -653,7 +696,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
 
        if (tx_evtchn == rx_evtchn) {
                /* feature-split-event-channels == 0 */
-               err = bind_interdomain_evtchn_to_irqhandler(
+               err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
                        queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
                        queue->name, queue);
                if (err < 0)
@@ -664,7 +707,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
                /* feature-split-event-channels == 1 */
                snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
                         "%s-tx", queue->name);
-               err = bind_interdomain_evtchn_to_irqhandler(
+               err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
                        queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
                        queue->tx_irq_name, queue);
                if (err < 0)
@@ -674,7 +717,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
 
                snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
                         "%s-rx", queue->name);
-               err = bind_interdomain_evtchn_to_irqhandler(
+               err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
                        queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
                        queue->rx_irq_name, queue);
                if (err < 0)
index 1c849106b7935274e79390fb7d2f748166b7d12d..f228298c3bd0890f31a471eacd70ba8029c3f546 100644 (file)
@@ -162,6 +162,10 @@ void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
 
        if (more_to_do)
                napi_schedule(&queue->napi);
+       else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI,
+                                    &queue->eoi_pending) &
+                (NETBK_TX_EOI | NETBK_COMMON_EOI))
+               xen_irq_lateeoi(queue->tx_irq, 0);
 }
 
 static void tx_add_credit(struct xenvif_queue *queue)
@@ -1613,9 +1617,14 @@ static bool xenvif_ctrl_work_todo(struct xenvif *vif)
 irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
 {
        struct xenvif *vif = data;
+       unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS;
 
-       while (xenvif_ctrl_work_todo(vif))
+       while (xenvif_ctrl_work_todo(vif)) {
                xenvif_ctrl_action(vif);
+               eoi_flag = 0;
+       }
+
+       xen_irq_lateeoi(irq, eoi_flag);
 
        return IRQ_HANDLED;
 }
index ef5887037b225251cfa77ccac587f79ad338dd9d..9b62f65b630e4acad0868d923a9380e6e1117c2f 100644 (file)
@@ -490,13 +490,13 @@ static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
        return queue->stalled && prod - cons >= 1;
 }
 
-static bool xenvif_have_rx_work(struct xenvif_queue *queue)
+bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
 {
        return xenvif_rx_ring_slots_available(queue) ||
                (queue->vif->stall_timeout &&
                 (xenvif_rx_queue_stalled(queue) ||
                  xenvif_rx_queue_ready(queue))) ||
-               kthread_should_stop() ||
+               (test_kthread && kthread_should_stop()) ||
                queue->vif->disabled;
 }
 
@@ -527,15 +527,20 @@ static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
 {
        DEFINE_WAIT(wait);
 
-       if (xenvif_have_rx_work(queue))
+       if (xenvif_have_rx_work(queue, true))
                return;
 
        for (;;) {
                long ret;
 
                prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
-               if (xenvif_have_rx_work(queue))
+               if (xenvif_have_rx_work(queue, true))
                        break;
+               if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
+                                       &queue->eoi_pending) &
+                   (NETBK_RX_EOI | NETBK_COMMON_EOI))
+                       xen_irq_lateeoi(queue->rx_irq, 0);
+
                ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
                if (!ret)
                        break;