xen-netback: batch copies for multiple to-guest rx packets
author David Vrabel <david.vrabel@citrix.com>
Fri, 12 May 2017 08:46:33 +0000 (09:46 +0100)
committer Joao Martins <joao.m.martins@oracle.com>
Wed, 31 May 2017 21:51:54 +0000 (22:51 +0100)
Instead of flushing the copy ops when a packet is complete, complete
packets when their copy ops are done.  This improves performance by
reducing the number of grant copy hypercalls.

Latency is still limited by the relatively small size of the copy
batch.
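
For illustration only, here is a small stand-alone sketch (plain C, not
netback code; every identifier and the sizes below are invented stand-ins
for COPY_BATCH_SIZE and RX_BATCH_SIZE) counting how many batch flushes,
i.e. grant copy hypercalls, are issued when flushing per packet versus
once per batch of packets:

    /* Toy model only: one "hypercall" is counted per flush of the op array. */
    #include <stdio.h>

    #define COPY_OPS_PER_PACKET  4   /* assumed: a few grant copies per skb */
    #define OP_ARRAY_SIZE       64   /* stand-in for COPY_BATCH_SIZE        */
    #define PACKETS_PER_BATCH   64   /* stand-in for RX_BATCH_SIZE          */

    static unsigned int hypercalls;
    static unsigned int pending_ops;

    static void flush(void)
    {
            if (pending_ops) {
                    hypercalls++;      /* models one gnttab_batch_copy() call */
                    pending_ops = 0;
            }
    }

    static void queue_ops_for_packet(void)
    {
            for (int i = 0; i < COPY_OPS_PER_PACKET; i++) {
                    if (pending_ops == OP_ARRAY_SIZE)
                            flush();   /* op array full: forced early flush */
                    pending_ops++;
            }
    }

    int main(void)
    {
            /* Old behaviour: flush as soon as each packet's ops are queued. */
            for (int p = 0; p < PACKETS_PER_BATCH; p++) {
                    queue_ops_for_packet();
                    flush();
            }
            printf("per-packet flush: %u hypercalls\n", hypercalls);

            /* New behaviour: flush when the op array fills or the batch ends. */
            hypercalls = 0;
            for (int p = 0; p < PACKETS_PER_BATCH; p++)
                    queue_ops_for_packet();
            flush();
            printf("per-batch flush:  %u hypercalls\n", hypercalls);

            return 0;
    }

With these toy numbers the old scheme issues one hypercall per packet (64
here), while the new scheme issues only as many as the op array forces
(4 here), which is the reduction this patch is after.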

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
[re-based]
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit a37f12298c251a48bc74d4012e07bf0d78175f46)
Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Reviewed-by: Shannon Nelson <shannon.nelson@oracle.com>
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
drivers/net/xen-netback/common.h
drivers/net/xen-netback/rx.c

diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 19e35e0d8638a2bb34486ede5f284c1cc181e85a..7252fa9c83827cf346c0eecf2f2f35822634c725 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -132,6 +132,7 @@ struct xenvif_copy_state {
        struct gnttab_copy op[COPY_BATCH_SIZE];
        RING_IDX idx[COPY_BATCH_SIZE];
        unsigned int num;
+       struct sk_buff_head *completed;
 };
 
 struct xenvif_queue { /* Per-queue data for xenvif */
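
For reference (the field comments here are this edit's reading of the code,
not comments from the file), the copy-state structure after this hunk is:

    struct xenvif_copy_state {
            struct gnttab_copy op[COPY_BATCH_SIZE]; /* queued grant copy ops          */
            RING_IDX idx[COPY_BATCH_SIZE];          /* ring index recorded per op     */
            unsigned int num;                       /* how many ops are queued so far */
            struct sk_buff_head *completed;         /* skbs whose ops are all queued; */
                                                    /* freed once the batch is flushed */
    };

The new "completed" pointer refers to a caller-owned list: as the rx.c hunks
below show, xenvif_rx_action() points it at a list on its own stack, so it is
only meaningful while that function is running.
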
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index d1862dbf474ac6e0a9f1caacdd7d17706ed63dbd..e592d9ad5a5338c574910026ef95284ae1e24d3e 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -133,6 +133,7 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
 static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
 {
        unsigned int i;
+       int notify;
 
        gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);
 
@@ -154,6 +155,13 @@ static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
        }
 
        queue->rx_copy.num = 0;
+
+       /* Push responses for all completed packets. */
+       RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
+       if (notify)
+               notify_remote_via_irq(queue->rx_irq);
+
+       __skb_queue_purge(queue->rx_copy.completed);
 }
 
 static void xenvif_rx_copy_add(struct xenvif_queue *queue,
@@ -252,18 +260,10 @@ static void xenvif_rx_next_skb(struct xenvif_queue *queue,
 static void xenvif_rx_complete(struct xenvif_queue *queue,
                               struct xenvif_pkt_state *pkt)
 {
-       int notify;
-
-       /* Complete any outstanding copy ops for this skb. */
-       xenvif_rx_copy_flush(queue);
-
-       /* Push responses and notify. */
+       /* All responses are ready to be pushed. */
        queue->rx.rsp_prod_pvt = queue->rx.req_cons;
-       RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
-       if (notify)
-               notify_remote_via_irq(queue->rx_irq);
 
-       dev_kfree_skb(pkt->skb);
+       __skb_queue_tail(queue->rx_copy.completed, pkt->skb);
 }
 
 static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
@@ -402,13 +402,20 @@ void xenvif_rx_skb(struct xenvif_queue *queue)
 
 void xenvif_rx_action(struct xenvif_queue *queue)
 {
+       struct sk_buff_head completed_skbs;
        unsigned int work_done = 0;
 
+       __skb_queue_head_init(&completed_skbs);
+       queue->rx_copy.completed = &completed_skbs;
+
        while (xenvif_rx_ring_slots_available(queue) &&
               work_done < RX_BATCH_SIZE) {
                xenvif_rx_skb(queue);
                work_done++;
        }
+
+       /* Flush any pending copies and complete all skbs. */
+       xenvif_rx_copy_flush(queue);
 }
 
 static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
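
Taken together, the hunks reorder the per-batch flow roughly as sketched
below (a paraphrase assembled from the hunks above, not a quote of the file;
call sites and the unchanged loop in xenvif_rx_copy_flush() that turns copy
results into ring responses are only summarised). The key change is that
xenvif_rx_complete() no longer frees the skb; it queues it on the on-stack
completed list, and the skb is released only after the grant copies have
actually been performed by the flush. An early flush can still happen when
the op array fills (not shown in these hunks), which is why the commit
message notes that latency remains bounded by the copy batch size.

    /*
     * xenvif_rx_action()
     *     __skb_queue_head_init(&completed_skbs);
     *     queue->rx_copy.completed = &completed_skbs;
     *     while (ring slots available && work_done < RX_BATCH_SIZE)
     *         xenvif_rx_skb()
     *             ... queue grant copy ops for the packet's slots ...
     *             xenvif_rx_complete()
     *                 queue->rx.rsp_prod_pvt = queue->rx.req_cons;
     *                 __skb_queue_tail(queue->rx_copy.completed, pkt->skb);
     *     xenvif_rx_copy_flush()
     *         gnttab_batch_copy(...);                      one hypercall per flush
     *         ... turn copy results into ring responses ...
     *         RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(); notify_remote_via_irq() if set
     *         __skb_queue_purge(queue->rx_copy.completed);  free the batch's skbs
     */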