www.infradead.org Git - users/dwmw2/linux.git/commitdiff
net: rswitch: Use build_skb() for RX
author: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Fri, 8 Dec 2023 04:10:24 +0000 (13:10 +0900)
committer: David S. Miller <davem@davemloft.net>
Sun, 10 Dec 2023 19:31:41 +0000 (19:31 +0000)
If this hardware receives a jumbo frame of 2 KiB or more, it will be
split into multiple queues. In the near future, to support this, use
build_skb() instead of netdev_alloc_skb_ip_align().

Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/renesas/rswitch.c
drivers/net/ethernet/renesas/rswitch.h

index 0928003ad245b0465c0faae557979ff0f1722a51..c6a96abe9dbaf6a5650990c0ab3352a88882bde7 100644 (file)
@@ -235,19 +235,18 @@ static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
        return false;
 }
 
-static int rswitch_gwca_queue_alloc_skb(struct rswitch_gwca_queue *gq,
-                                       unsigned int start_index,
-                                       unsigned int num)
+static int rswitch_gwca_queue_alloc_rx_buf(struct rswitch_gwca_queue *gq,
+                                          unsigned int start_index,
+                                          unsigned int num)
 {
        unsigned int i, index;
 
        for (i = 0; i < num; i++) {
                index = (i + start_index) % gq->ring_size;
-               if (gq->skbs[index])
+               if (gq->rx_bufs[index])
                        continue;
-               gq->skbs[index] = netdev_alloc_skb_ip_align(gq->ndev,
-                                                           PKT_BUF_SZ + RSWITCH_ALIGN - 1);
-               if (!gq->skbs[index])
+               gq->rx_bufs[index] = netdev_alloc_frag(RSWITCH_BUF_SIZE);
+               if (!gq->rx_bufs[index])
                        goto err;
        }
 
@@ -256,8 +255,8 @@ static int rswitch_gwca_queue_alloc_skb(struct rswitch_gwca_queue *gq,
 err:
        for (; i-- > 0; ) {
                index = (i + start_index) % gq->ring_size;
-               dev_kfree_skb(gq->skbs[index]);
-               gq->skbs[index] = NULL;
+               skb_free_frag(gq->rx_bufs[index]);
+               gq->rx_bufs[index] = NULL;
        }
 
        return -ENOMEM;
@@ -275,16 +274,17 @@ static void rswitch_gwca_queue_free(struct net_device *ndev,
                gq->rx_ring = NULL;
 
                for (i = 0; i < gq->ring_size; i++)
-                       dev_kfree_skb(gq->skbs[i]);
+                       skb_free_frag(gq->rx_bufs[i]);
+               kfree(gq->rx_bufs);
+               gq->rx_bufs = NULL;
        } else {
                dma_free_coherent(ndev->dev.parent,
                                  sizeof(struct rswitch_ext_desc) *
                                  (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
                gq->tx_ring = NULL;
+               kfree(gq->skbs);
+               gq->skbs = NULL;
        }
-
-       kfree(gq->skbs);
-       gq->skbs = NULL;
 }
 
 static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
@@ -308,17 +308,20 @@ static int rswitch_gwca_queue_alloc(struct net_device *ndev,
        gq->ring_size = ring_size;
        gq->ndev = ndev;
 
-       gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
-       if (!gq->skbs)
-               return -ENOMEM;
-
        if (!dir_tx) {
-               rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size);
+               gq->rx_bufs = kcalloc(gq->ring_size, sizeof(*gq->rx_bufs), GFP_KERNEL);
+               if (!gq->rx_bufs)
+                       return -ENOMEM;
+               if (rswitch_gwca_queue_alloc_rx_buf(gq, 0, gq->ring_size) < 0)
+                       goto out;
 
                gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
                                                 sizeof(struct rswitch_ext_ts_desc) *
                                                 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
        } else {
+               gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
+               if (!gq->skbs)
+                       return -ENOMEM;
                gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
                                                 sizeof(struct rswitch_ext_desc) *
                                                 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
@@ -367,12 +370,13 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
        for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
                if (!gq->dir_tx) {
                        dma_addr = dma_map_single(ndev->dev.parent,
-                                                 gq->skbs[i]->data, PKT_BUF_SZ,
+                                                 gq->rx_bufs[i] + RSWITCH_HEADROOM,
+                                                 RSWITCH_MAP_BUF_SIZE,
                                                  DMA_FROM_DEVICE);
                        if (dma_mapping_error(ndev->dev.parent, dma_addr))
                                goto err;
 
-                       desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
+                       desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
                        rswitch_desc_set_dptr(&desc->desc, dma_addr);
                        desc->desc.die_dt = DT_FEMPTY | DIE;
                } else {
@@ -395,8 +399,8 @@ err:
        if (!gq->dir_tx) {
                for (desc = gq->tx_ring; i-- > 0; desc++) {
                        dma_addr = rswitch_desc_get_dptr(&desc->desc);
-                       dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
-                                        DMA_FROM_DEVICE);
+                       dma_unmap_single(ndev->dev.parent, dma_addr,
+                                        RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
                }
        }
 
@@ -433,12 +437,13 @@ static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
                desc = &gq->rx_ring[index];
                if (!gq->dir_tx) {
                        dma_addr = dma_map_single(ndev->dev.parent,
-                                                 gq->skbs[index]->data, PKT_BUF_SZ,
+                                                 gq->rx_bufs[index] + RSWITCH_HEADROOM,
+                                                 RSWITCH_MAP_BUF_SIZE,
                                                  DMA_FROM_DEVICE);
                        if (dma_mapping_error(ndev->dev.parent, dma_addr))
                                goto err;
 
-                       desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
+                       desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
                        rswitch_desc_set_dptr(&desc->desc, dma_addr);
                        dma_wmb();
                        desc->desc.die_dt = DT_FEMPTY | DIE;
@@ -456,8 +461,8 @@ err:
                        index = (i + start_index) % gq->ring_size;
                        desc = &gq->rx_ring[index];
                        dma_addr = rswitch_desc_get_dptr(&desc->desc);
-                       dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
-                                        DMA_FROM_DEVICE);
+                       dma_unmap_single(ndev->dev.parent, dma_addr,
+                                        RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
                }
        }
 
@@ -724,10 +729,15 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
        while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
                dma_rmb();
                pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
-               skb = gq->skbs[gq->cur];
-               gq->skbs[gq->cur] = NULL;
                dma_addr = rswitch_desc_get_dptr(&desc->desc);
-               dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ, DMA_FROM_DEVICE);
+               dma_unmap_single(ndev->dev.parent, dma_addr,
+                                RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
+               skb = build_skb(gq->rx_bufs[gq->cur], RSWITCH_BUF_SIZE);
+               if (!skb)
+                       goto out;
+               skb_reserve(skb, RSWITCH_HEADROOM);
+               skb_put(skb, pkt_len);
+
                get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
                if (get_ts) {
                        struct skb_shared_hwtstamps *shhwtstamps;
@@ -739,12 +749,13 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
                        ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
                        shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
                }
-               skb_put(skb, pkt_len);
                skb->protocol = eth_type_trans(skb, ndev);
                napi_gro_receive(&rdev->napi, skb);
                rdev->ndev->stats.rx_packets++;
                rdev->ndev->stats.rx_bytes += pkt_len;
 
+out:
+               gq->rx_bufs[gq->cur] = NULL;
                gq->cur = rswitch_next_queue_index(gq, true, 1);
                desc = &gq->rx_ring[gq->cur];
 
@@ -753,7 +764,7 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
        }
 
        num = rswitch_get_num_cur_queues(gq);
-       ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
+       ret = rswitch_gwca_queue_alloc_rx_buf(gq, gq->dirty, num);
        if (ret < 0)
                goto err;
        ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
index f1c7f7cdefc2918ab8b2f12843f404159d6fe9c4..a407d85dcfadbce1023dcf798a4c2eadaf43abeb 100644 (file)
 #define RX_RING_SIZE           1024
 #define TS_RING_SIZE           (TX_RING_SIZE * RSWITCH_NUM_PORTS)
 
-#define PKT_BUF_SZ             1584
+#define RSWITCH_HEADROOM       (NET_SKB_PAD + NET_IP_ALIGN)
+#define RSWITCH_DESC_BUF_SIZE  2048
+#define RSWITCH_TAILROOM       SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 #define RSWITCH_ALIGN          128
+#define RSWITCH_BUF_SIZE       (RSWITCH_HEADROOM + RSWITCH_DESC_BUF_SIZE + \
+                                RSWITCH_TAILROOM + RSWITCH_ALIGN)
+#define RSWITCH_MAP_BUF_SIZE   (RSWITCH_BUF_SIZE - RSWITCH_HEADROOM)
 #define RSWITCH_MAX_CTAG_PCP   7
 
 #define RSWITCH_TIMEOUT_US     100000
@@ -945,8 +950,18 @@ struct rswitch_gwca_queue {
        /* For [rt]x_ring */
        unsigned int index;
        bool dir_tx;
-       struct sk_buff **skbs;
        struct net_device *ndev;        /* queue to ndev for irq */
+
+       union {
+               /* For TX */
+               struct {
+                       struct sk_buff **skbs;
+               };
+               /* For RX */
+               struct {
+                       void **rx_bufs;
+               };
+       };
 };
 
 struct rswitch_gwca_ts_info {