gve: Fix an edge case for TSO skb validity check
author     Bailey Forrest <bcf@google.com>
           Wed, 24 Jul 2024 14:34:31 +0000 (07:34 -0700)
committer  Jakub Kicinski <kuba@kernel.org>
           Thu, 25 Jul 2024 14:58:07 +0000 (07:58 -0700)
The NIC requires each TSO segment to not span more than 10
descriptors. The NIC further requires each descriptor to not exceed
16KB - 1 (GVE_TX_MAX_BUF_SIZE_DQO).
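
For reference, the two constraints expressed as constants. The 16KB - 1 value
for GVE_TX_MAX_BUF_SIZE_DQO is taken from the text above; the name used below
for the 10-descriptor limit is only illustrative, not the driver's:

/* Largest payload one DQO TX data descriptor may carry: 16KB - 1. */
#define GVE_TX_MAX_BUF_SIZE_DQO ((16 * 1024) - 1)

/* Illustrative name for the NIC requirement above: a single TSO segment
 * must not span more than this many data descriptors.
 */
#define GVE_TSO_SEG_MAX_DESCS 10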

The descriptors for an skb are generated by
gve_tx_add_skb_no_copy_dqo() for DQO RDA queue format.
gve_tx_add_skb_no_copy_dqo() loops through each skb frag and
generates a descriptor for the entire frag if the frag size is
not greater than GVE_TX_MAX_BUF_SIZE_DQO. If the frag size is
greater than GVE_TX_MAX_BUF_SIZE_DQO, the frag is split into one or
more descriptors of size GVE_TX_MAX_BUF_SIZE_DQO, plus a final
descriptor for the remainder (frag size % GVE_TX_MAX_BUF_SIZE_DQO).
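
As an illustration, a minimal sketch of that splitting, using the
GVE_TX_MAX_BUF_SIZE_DQO constant sketched above. The real
gve_tx_add_skb_no_copy_dqo() also maps the frag for DMA and fills in the
actual descriptors; this sketch only counts the pieces:

/* Hypothetical helper: how many data descriptors a frag of the given
 * size is split into, one per full GVE_TX_MAX_BUF_SIZE_DQO-sized piece
 * plus one for any remainder.
 */
static int sketch_descs_for_frag(int frag_size)
{
        int num_descs = 0;

        while (frag_size > 0) {
                int piece = frag_size > GVE_TX_MAX_BUF_SIZE_DQO ?
                            GVE_TX_MAX_BUF_SIZE_DQO : frag_size;

                /* emit_data_descriptor(addr, piece);  (omitted here) */
                frag_size -= piece;
                num_descs++;
        }

        return num_descs;
}

For example, a 20000-byte frag yields two descriptors: one of 16383 bytes and
one of 3617 bytes (the remainder).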

gve_can_send_tso() checks if the descriptors thus generated for an
skb would meet the requirement that each TSO segment not span more
than 10 descriptors. However, the current code misses an edge case
when a TSO segment spans multiple descriptors within a large frag.
This change fixes the edge case.

gve_can_send_tso() relies on the assumption that max gso size (9728)
is less than GVE_TX_MAX_BUF_SIZE_DQO and therefore within an skb
fragment a TSO segment can never span more than 2 descriptors.
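
To make the edge case concrete, here is a hypothetical helper (not part of the
driver) that counts how many of those split descriptors a single TSO segment
touches within one over-sized frag, followed by the numbers for a 20000-byte
frag:

/* Hypothetical illustration: with a frag split into
 * GVE_TX_MAX_BUF_SIZE_DQO-sized pieces, count how many pieces the
 * segment occupying frag bytes [seg_start, seg_start + seg_len) touches.
 */
static int segment_descs_in_frag(int seg_start, int seg_len)
{
        int first = seg_start / GVE_TX_MAX_BUF_SIZE_DQO;
        int last = (seg_start + seg_len - 1) / GVE_TX_MAX_BUF_SIZE_DQO;

        return last - first + 1;
}

/* With gso_size = 9728 and a 20000-byte frag (descriptors of 16383 and
 * 3617 bytes):
 *   segment 0: bytes    0.. 9727 -> 1 descriptor
 *   segment 1: bytes 9728..19455 -> 2 descriptors (crosses the split)
 * The pre-fix check counted at most one buffer per frag for a segment,
 * so it under-counted segment 1 and could accept an skb whose segment
 * actually spans more descriptors than the NIC allows.
 */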

Fixes: a57e5de476be ("gve: DQO: Add TX path")
Signed-off-by: Praveen Kaligineedi <pkaligineedi@google.com>
Signed-off-by: Bailey Forrest <bcf@google.com>
Reviewed-by: Jeroen de Borst <jeroendb@google.com>
Cc: stable@vger.kernel.org
Reviewed-by: Willem de Bruijn <willemb@google.com>
Link: https://patch.msgid.link/20240724143431.3343722-1-pkaligineedi@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/google/gve/gve_tx_dqo.c

index 0b3cca3fc7921574f6e85f87469a54c4f7430395..f879426cb5523a7e150f363b5e57b9d472b5817c 100644
@@ -866,22 +866,42 @@ static bool gve_can_send_tso(const struct sk_buff *skb)
        const int header_len = skb_tcp_all_headers(skb);
        const int gso_size = shinfo->gso_size;
        int cur_seg_num_bufs;
+       int prev_frag_size;
        int cur_seg_size;
        int i;
 
        cur_seg_size = skb_headlen(skb) - header_len;
+       prev_frag_size = skb_headlen(skb);
        cur_seg_num_bufs = cur_seg_size > 0;
 
        for (i = 0; i < shinfo->nr_frags; i++) {
                if (cur_seg_size >= gso_size) {
                        cur_seg_size %= gso_size;
                        cur_seg_num_bufs = cur_seg_size > 0;
+
+                       if (prev_frag_size > GVE_TX_MAX_BUF_SIZE_DQO) {
+                               int prev_frag_remain = prev_frag_size %
+                                       GVE_TX_MAX_BUF_SIZE_DQO;
+
+                               /* If the last descriptor of the previous frag
+                                * is less than cur_seg_size, the segment will
+                                * span two descriptors in the previous frag.
+                                * Since max gso size (9728) is less than
+                                * GVE_TX_MAX_BUF_SIZE_DQO, it is impossible
+                                * for the segment to span more than two
+                                * descriptors.
+                                */
+                               if (prev_frag_remain &&
+                                   cur_seg_size > prev_frag_remain)
+                                       cur_seg_num_bufs++;
+                       }
                }
 
                if (unlikely(++cur_seg_num_bufs > max_bufs_per_seg))
                        return false;
 
-               cur_seg_size += skb_frag_size(&shinfo->frags[i]);
+               prev_frag_size = skb_frag_size(&shinfo->frags[i]);
+               cur_seg_size += prev_frag_size;
        }
 
        return true;