xen/netback: don't do grant copy across page boundary
author    Juergen Gross <jgross@suse.com>
          Mon, 27 Mar 2023 08:36:45 +0000 (10:36 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 6 Apr 2023 10:10:52 +0000 (12:10 +0200)
commit 05310f31ca74673a96567fb14637b7d5d6c82ea5 upstream.

Fix xenvif_get_requests() not to do grant copy operations across local
page boundaries. This requires doubling the maximum number of copy
operations per queue, as each copy could now be split into two.

Make sure that struct xenvif_tx_cb doesn't grow too large.

Cc: stable@vger.kernel.org
Fixes: ad7f402ae4f4 ("xen/netback: Ensure protocol headers don't fall in the non-linear area")
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Paul Durrant <paul@xen.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
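
For illustration, the splitting the message describes can be sketched
outside the kernel. The following is a minimal, hedged example of the
clamping arithmetic only: apart from XEN_PAGE_SIZE, every name is
hypothetical, and the real netback code fills in grant references and
pending slots around this loop.

/* Hedged sketch, not the kernel code: clamp each copy so that no single
 * operation crosses a XEN_PAGE_SIZE boundary in the destination page.
 */
#include <stdio.h>

#define XEN_PAGE_SIZE 4096u

int main(void)
{
        unsigned int dest_offset = 4000;        /* offset into the destination page */
        unsigned int data_len = 300;            /* bytes still to copy */

        while (data_len > 0) {
                unsigned int amount = data_len;

                /* Don't cross the local page boundary, as in the patch. */
                if (dest_offset + amount > XEN_PAGE_SIZE)
                        amount = XEN_PAGE_SIZE - dest_offset;

                printf("copy op: %u bytes at dest offset %u\n",
                       amount, dest_offset);

                dest_offset = (dest_offset + amount) % XEN_PAGE_SIZE;
                data_len -= amount;
        }
        return 0;
}

With these numbers a single 300-byte request becomes two operations (96
bytes up to the boundary, then 204 bytes at offset 0), which is why the
patch doubles tx_copy_ops: each of the MAX_PENDING_REQS copies can now
split into at most two.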
drivers/net/xen-netback/common.h
drivers/net/xen-netback/netback.c
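
The message also asks to make sure that struct xenvif_tx_cb doesn't grow
too large: the control block lives in skb->cb, a 48-byte scratch area, and
the patch adds a BUILD_BUG_ON for that (visible in the netback.c hunk
below). A hedged userspace sketch of the same guard, assuming the in-tree
value XEN_NETBK_LEGACY_SLOTS_MAX == 18:

/* Hedged sketch: static_assert stands in for the kernel's BUILD_BUG_ON;
 * 48 is sizeof(skb->cb) and 18 is assumed to match the in-tree constant.
 */
#include <assert.h>
#include <stdint.h>

#define XEN_NETBK_LEGACY_SLOTS_MAX 18

struct xenvif_tx_cb {
        uint16_t copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
        uint8_t copy_count;
        uint32_t split_mask;    /* the field the patch adds */
};

/* 19 * 2 + 1 + 1 (padding) + 4 = 44 bytes on common ABIs, within 48. */
static_assert(sizeof(struct xenvif_tx_cb) <= 48,
              "xenvif_tx_cb must fit in skb->cb");

int main(void) { return 0; }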

diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 3dbfc8a6924ed33bb1700293d56fb11914bdc043..1fcbd83f7ff2e3e46e0c230da0ea60f0665e7e78 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -166,7 +166,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
        struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
        grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
 
-       struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
+       struct gnttab_copy tx_copy_ops[2 * MAX_PENDING_REQS];
        struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
        struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
        /* passed to gnttab_[un]map_refs with pages under (un)mapping */
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index bf627af723bf96c22c60922af6f1af14489115ed..5c266062c08f04d9f7a99c1bd507ab612f694960 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -334,6 +334,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
 struct xenvif_tx_cb {
        u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
        u8 copy_count;
+       u32 split_mask;
 };
 
 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
@@ -361,6 +362,8 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
        struct sk_buff *skb =
                alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
                          GFP_ATOMIC | __GFP_NOWARN);
+
+       BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
        if (unlikely(skb == NULL))
                return NULL;
 
@@ -396,11 +399,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
        nr_slots = shinfo->nr_frags + 1;
 
        copy_count(skb) = 0;
+       XENVIF_TX_CB(skb)->split_mask = 0;
 
        /* Create copy ops for exactly data_len bytes into the skb head. */
        __skb_put(skb, data_len);
        while (data_len > 0) {
                int amount = data_len > txp->size ? txp->size : data_len;
+               bool split = false;
 
                cop->source.u.ref = txp->gref;
                cop->source.domid = queue->vif->domid;
@@ -413,6 +418,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
                cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
                                               - data_len);
 
+               /* Don't cross local page boundary! */
+               if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
+                       amount = XEN_PAGE_SIZE - cop->dest.offset;
+                       XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
+                       split = true;
+               }
+
                cop->len = amount;
                cop->flags = GNTCOPY_source_gref;
 
@@ -420,7 +432,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
                pending_idx = queue->pending_ring[index];
                callback_param(queue, pending_idx).ctx = NULL;
                copy_pending_idx(skb, copy_count(skb)) = pending_idx;
-               copy_count(skb)++;
+               if (!split)
+                       copy_count(skb)++;
 
                cop++;
                data_len -= amount;
@@ -441,7 +454,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
                        nr_slots--;
                } else {
                        /* The copy op partially covered the tx_request.
-                        * The remainder will be mapped.
+                        * The remainder will be mapped or copied in the next
+                        * iteration.
                         */
                        txp->offset += amount;
                        txp->size -= amount;
@@ -539,6 +553,13 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
                pending_idx = copy_pending_idx(skb, i);
 
                newerr = (*gopp_copy)->status;
+
+               /* Split copies need to be handled together. */
+               if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
+                       (*gopp_copy)++;
+                       if (!newerr)
+                               newerr = (*gopp_copy)->status;
+               }
                if (likely(!newerr)) {
                        /* The first frag might still have this slot mapped */
                        if (i < copy_count(skb) - 1 || !sharedslot)
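
To see how xenvif_tx_check_gop() pairs the two halves of a split copy back
together, here is a hedged standalone sketch of just the status
aggregation: only the split_mask idea is taken from the patch, and all
struct and function names are hypothetical.

/* Hedged sketch, not the kernel code: slot i consumes one copy op, plus a
 * second one when bit i of split_mask is set; a failure in either half
 * fails the slot, and the first non-zero status wins.
 */
#include <stdint.h>
#include <stdio.h>

struct copy_op { int status; };

static int slot_status(const struct copy_op **gop, uint32_t split_mask,
                       unsigned int i)
{
        int err = (*gop)->status;

        if (split_mask & (1U << i)) {
                (*gop)++;               /* second half of the split copy */
                if (!err)
                        err = (*gop)->status;
        }
        (*gop)++;                       /* advance to the next slot's op */
        return err;
}

int main(void)
{
        /* Three slots; slot 1 was split, so four ops in total. */
        struct copy_op ops[] = { {0}, {0}, {0}, {-1} };
        const struct copy_op *gop = ops;
        uint32_t split_mask = 1U << 1;

        for (unsigned int i = 0; i < 3; i++)
                printf("slot %u status %d\n",
                       i, slot_status(&gop, split_mask, i));
        return 0;
}

Here slots 0 and 1 report status 0 (both halves of the split succeeded)
and slot 2 reports -1, mirroring how the hunk above consumes an extra
*gopp_copy entry for split copies before judging the slot.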