xen-netfront: introduce rx copy mode
author Joao Martins <joao.m.martins@oracle.com>
Fri, 12 May 2017 08:46:48 +0000 (09:46 +0100)
committer Joao Martins <joao.m.martins@oracle.com>
Wed, 31 May 2017 21:52:04 +0000 (22:52 +0100)
This allows us not to rely on recycling opportunities in cases
where page recycling isn't effective and/or workloads prove to
be faster when copying onto new pages.

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Reviewed-by: Shannon Nelson <shannon.nelson@oracle.com>
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Orabug: 26107942
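
The copy itself is not implemented in the driver; it leans on the core
skb_orphan_frags()/skb_copy_ubufs() helpers: tagging the skb with
SKBTX_DEV_ZEROCOPY and a valid ubuf_info makes skb_orphan_frags() copy
every frag into freshly allocated pages before the skb is handed to the
stack. Roughly, for kernels of this era, the helper behaves as in the
sketch below (illustrative only, not part of this patch):

    /* Simplified view of skb_orphan_frags(), not part of this patch */
    static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
    {
            /* Nothing to do unless the skb advertises zerocopy frags */
            if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
                    return 0;

            /* skb_copy_ubufs() allocates new pages, copies every frag into
             * them, invokes destructor_arg->callback() and finally clears
             * SKBTX_DEV_ZEROCOPY.
             */
            return skb_copy_ubufs(skb, gfp_mask);
    }

This is also why xennet_orphan_done() below must exist even though it is
empty: skb_copy_ubufs() dereferences destructor_arg and invokes its
callback once the frags have been copied.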

drivers/net/xen-netfront.c

index f36fc8f92968dde8462d721c9d7b9ecc5aa0a294..fb73ff74487cf6ddd9570c89a8701414bfc70bef 100644 (file)
@@ -67,6 +67,11 @@ module_param_named(staging_grants, xennet_staging_grants, bool, 0644);
 MODULE_PARM_DESC(staging_grants,
                 "Staging grants support (0=off, 1=on [default]");
 
+static bool xennet_rx_copy_mode;
+module_param_named(rx_copy_mode, xennet_rx_copy_mode, bool, 0644);
+MODULE_PARM_DESC(rx_copy_mode,
+                "Always copy data from Rx grants into new pages");
+
 static const struct ethtool_ops xennet_ethtool_ops;
 
 struct netfront_cb {
@@ -1140,6 +1145,24 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
        return cons;
 }
 
+static void xennet_orphan_done(struct ubuf_info *ubuf, bool success)
+{
+       /* Purposely empty as SKBTX_DEV_ZEROCOPY requires a valid
+        * destructor context and callback.
+        */
+}
+
+static int xennet_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
+{
+       struct ubuf_info ctx;
+
+       ctx.callback = xennet_orphan_done;
+       skb_shinfo(skb)->destructor_arg = &ctx;
+       skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+
+       return skb_orphan_frags(skb, gfp_mask);
+}
+
 static int checksum_setup(struct netfront_info *info, struct sk_buff *skb)
 {
        bool recalculate_partial_csum = false;
@@ -1178,6 +1201,15 @@ static int handle_incoming_queue(struct netfront_queue *queue,
                if (pull_to > skb_headlen(skb))
                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 
+               /* Rx copy mode means copying skb frags into new pages */
+               if (xennet_rx_copy_mode &&
+                   unlikely(xennet_orphan_frags(skb, GFP_ATOMIC))) {
+                       kfree_skb(skb);
+                       packets_dropped++;
+                       queue->info->netdev->stats.rx_errors++;
+                       continue;
+               }
+
                /* Ethernet work: Delayed to here as it peeks the header. */
                skb->protocol = eth_type_trans(skb, queue->info->netdev);
                skb_reset_network_header(skb);
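
Since rx_copy_mode is registered through module_param_named() with mode
0644, it can be set at module load time (rx_copy_mode=1) and should also
be flippable at runtime via /sys/module/xen_netfront/parameters/rx_copy_mode;
because the flag is checked per skb in handle_incoming_queue(), only
packets processed after the change pick up the new behaviour.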