www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
xen-netback: slightly rework xenvif_rx_skb
authorJoao Martins <joao.m.martins@oracle.com>
Fri, 12 May 2017 08:46:49 +0000 (09:46 +0100)
committerJoao Martins <joao.m.martins@oracle.com>
Wed, 31 May 2017 21:52:05 +0000 (22:52 +0100)
This way we can reuse xenvif_rx_skb when transmitting
an skb that is not taken from the internal guestrx queue.
We therefore isolate the dequeue and its use of the
completed queue in xenvif_rx_action, in guestrx context.

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Reviewed-by: Shannon Nelson <shannon.nelson@oracle.com>
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Orabug: 26107942

drivers/net/xen-netback/rx.c

index f458a23f7c4393b79f1b67880cd4b40de7e9b79b..7faa28203456477c83a2bbc0d7cf9e2267b8ea64 100644 (file)
@@ -231,13 +231,11 @@ struct xenvif_pkt_state {
 };
 
 static void xenvif_rx_next_skb(struct xenvif_queue *queue,
-                              struct xenvif_pkt_state *pkt)
+                              struct xenvif_pkt_state *pkt,
+                              struct sk_buff *skb)
 {
-       struct sk_buff *skb;
        unsigned int gso_type;
 
-       skb = xenvif_rx_dequeue(queue);
-
        queue->stats.tx_bytes += skb->len;
        queue->stats.tx_packets++;
 
@@ -266,15 +264,6 @@ static void xenvif_rx_next_skb(struct xenvif_queue *queue,
        }
 }
 
-static void xenvif_rx_complete(struct xenvif_queue *queue,
-                              struct xenvif_pkt_state *pkt)
-{
-       /* All responses are ready to be pushed. */
-       queue->rx.rsp_prod_pvt = queue->rx.req_cons;
-
-       __skb_queue_tail(queue->rx_copy.completed, pkt->skb);
-}
-
 static void xenvif_rx_next_frag(struct xenvif_pkt_state *pkt)
 {
        struct sk_buff *frag_iter = pkt->frag_iter;
@@ -405,11 +394,11 @@ static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
        BUG();
 }
 
-void xenvif_rx_skb(struct xenvif_queue *queue)
+void xenvif_rx_skb(struct xenvif_queue *queue, struct sk_buff *skb)
 {
        struct xenvif_pkt_state pkt;
 
-       xenvif_rx_next_skb(queue, &pkt);
+       xenvif_rx_next_skb(queue, &pkt, skb);
 
        queue->last_rx_time = jiffies;
 
@@ -430,7 +419,8 @@ void xenvif_rx_skb(struct xenvif_queue *queue)
                pkt.slot++;
        } while (pkt.remaining_len > 0 || pkt.extra_count != 0);
 
-       xenvif_rx_complete(queue, &pkt);
+       /* All responses are ready to be pushed. */
+       queue->rx.rsp_prod_pvt = queue->rx.req_cons;
 }
 
 #define RX_BATCH_SIZE 64
@@ -439,13 +429,16 @@ void xenvif_rx_action(struct xenvif_queue *queue)
 {
        struct sk_buff_head completed_skbs;
        unsigned int work_done = 0;
+       struct sk_buff *skb;
 
        __skb_queue_head_init(&completed_skbs);
        queue->rx_copy.completed = &completed_skbs;
 
        while (xenvif_rx_ring_slots_available(queue) &&
               work_done < RX_BATCH_SIZE) {
-               xenvif_rx_skb(queue);
+               skb = xenvif_rx_dequeue(queue);
+               xenvif_rx_skb(queue, skb);
+               __skb_queue_tail(queue->rx_copy.completed, skb);
                work_done++;
        }