www.infradead.org Git - users/willy/pagecache.git/commitdiff
net: ethernet: ti: am65-cpsw: fix memleak in certain XDP cases
author Roger Quadros <rogerq@kernel.org>
Mon, 10 Feb 2025 14:52:15 +0000 (16:52 +0200)
committer Jakub Kicinski <kuba@kernel.org>
Thu, 13 Feb 2025 04:12:38 +0000 (20:12 -0800)
If the XDP program doesn't result in XDP_PASS then we leak the
memory allocated by am65_cpsw_build_skb().

It is pointless to allocate SKB memory before running the XDP
program as we would be wasting CPU cycles for cases other than XDP_PASS.
Move the SKB allocation after evaluating the XDP program result.

This fixes the memleak. A performance boost is seen for XDP_DROP test.

XDP_DROP test:
Before: 460256 rx/s                  0 err/s
After:  784130 rx/s                  0 err/s

Fixes: 8acacc40f733 ("net: ethernet: ti: am65-cpsw: Add minimal XDP support")
Signed-off-by: Roger Quadros <rogerq@kernel.org>
Link: https://patch.msgid.link/20250210-am65-cpsw-xdp-fixes-v1-1-ec6b1f7f1aca@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/ti/am65-cpsw-nuss.c

index b663271e79f7247e6fc3ed6b45fd336a16e351ef..021c5a4df8fd456067b9ef9d5c040a30501b77d0 100644 (file)
@@ -842,7 +842,8 @@ static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
 
 static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
                                           struct net_device *ndev,
-                                          unsigned int len)
+                                          unsigned int len,
+                                          unsigned int headroom)
 {
        struct sk_buff *skb;
 
@@ -852,7 +853,7 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
        if (unlikely(!skb))
                return NULL;
 
-       skb_reserve(skb, AM65_CPSW_HEADROOM);
+       skb_reserve(skb, headroom);
        skb->dev = ndev;
 
        return skb;
@@ -1315,16 +1316,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
        dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
 
        dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
-
        k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
 
-       skb = am65_cpsw_build_skb(page_addr, ndev,
-                                 AM65_CPSW_MAX_PACKET_SIZE);
-       if (unlikely(!skb)) {
-               new_page = page;
-               goto requeue;
-       }
-
        if (port->xdp_prog) {
                xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
                xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
@@ -1334,9 +1327,16 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
                if (*xdp_state != AM65_CPSW_XDP_PASS)
                        goto allocate;
 
-               /* Compute additional headroom to be reserved */
-               headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb);
-               skb_reserve(skb, headroom);
+               headroom = xdp.data - xdp.data_hard_start;
+       } else {
+               headroom = AM65_CPSW_HEADROOM;
+       }
+
+       skb = am65_cpsw_build_skb(page_addr, ndev,
+                                 AM65_CPSW_MAX_PACKET_SIZE, headroom);
+       if (unlikely(!skb)) {
+               new_page = page;
+               goto requeue;
        }
 
        ndev_priv = netdev_priv(ndev);