eth: fbnic: support devmem Tx
author    Jakub Kicinski <kuba@kernel.org>
          Tue, 16 Sep 2025 14:54:01 +0000 (07:54 -0700)
committer Paolo Abeni <pabeni@redhat.com>
          Thu, 18 Sep 2025 08:12:05 +0000 (10:12 +0200)
Support devmem Tx. We already use skb_frag_dma_map(); we just need
to make sure we don't try to unmap the frags. Check whether a frag is
unreadable and mark the corresponding ring entry.

  # ./tools/testing/selftests/drivers/net/hw/devmem.py
  TAP version 13
  1..3
  ok 1 devmem.check_rx
  ok 2 devmem.check_tx
  ok 3 devmem.check_tx_chunks
  # Totals: pass:3 fail:0 xfail:0 xpass:0 skip:0 error:0
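
Purely as an illustration of the pattern the diff below applies (not fbnic
code): tx_buf slots that back unreadable devmem frags are tagged with a
sentinel pointer at map time, and the completion and error-unwind paths clear
the tag instead of calling the DMA unmap helper (fbnic_unmap_page_twd in the
real driver). The names in this minimal user-space sketch (struct toy_ring,
toy_unmap(), TOY_NOUNMAP) are made-up stand-ins:

  #include <stdio.h>

  #define TOY_NOUNMAP ((void *)1)     /* sentinel: no DMA mapping to release */
  #define TOY_RING_SIZE 8

  struct toy_ring {
          void *tx_buf[TOY_RING_SIZE]; /* per-descriptor cookie, like ring->tx_buf */
  };

  /* Stand-in for the real unmap helper. */
  static void toy_unmap(unsigned int idx)
  {
          printf("unmap descriptor %u\n", idx);
  }

  /* Completion path: skip unmapping entries tagged as unreadable at map time. */
  static void toy_clean(struct toy_ring *ring)
  {
          for (unsigned int i = 0; i < TOY_RING_SIZE; i++) {
                  if (ring->tx_buf[i] == TOY_NOUNMAP)
                          ring->tx_buf[i] = NULL;  /* just drop the marker */
                  else
                          toy_unmap(i);            /* normal unmap path */
          }
  }

  int main(void)
  {
          struct toy_ring ring = { { NULL } };

          /* Pretend descriptor 3 carried an unreadable (devmem) frag. */
          ring.tx_buf[3] = TOY_NOUNMAP;
          toy_clean(&ring);
          return 0;
  }

The (void *)1 sentinel works because no valid pointer ever stored in a tx_buf
slot can equal 1, so the marker cannot be confused with a real entry.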

Acked-by: Mina Almasry <almasrymina@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Link: https://patch.msgid.link/20250916145401.1464550-1-kuba@kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
drivers/net/ethernet/meta/fbnic/fbnic_txrx.c

diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index dd35de30187084881845668cc1ce6c1d1ca0dc88..d12b4cad84a57c006750fdaa904dd9b5aba0c8fe 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -712,6 +712,7 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
        netdev->netdev_ops = &fbnic_netdev_ops;
        netdev->stat_ops = &fbnic_stat_ops;
        netdev->queue_mgmt_ops = &fbnic_queue_mgmt_ops;
+       netdev->netmem_tx = true;
 
        fbnic_set_ethtool_ops(netdev);
 
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index ac555e045e349027ab115bcea75a23b57a2ae187..cf773cc78e40451bbc4e36212f7faa85709b2c5c 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -37,6 +37,8 @@ struct fbnic_xmit_cb {
 
 #define FBNIC_XMIT_CB(__skb) ((struct fbnic_xmit_cb *)((__skb)->cb))
 
+#define FBNIC_XMIT_NOUNMAP     ((void *)1)
+
 static u32 __iomem *fbnic_ring_csr_base(const struct fbnic_ring *ring)
 {
        unsigned long csr_base = (unsigned long)ring->doorbell;
@@ -315,6 +317,7 @@ fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
        unsigned int tail = ring->tail, first;
        unsigned int size, data_len;
        skb_frag_t *frag;
+       bool is_net_iov;
        dma_addr_t dma;
        __le64 *twd;
 
@@ -330,6 +333,7 @@ fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
        if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK))
                goto dma_error;
 
+       is_net_iov = false;
        dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
 
        for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
@@ -342,6 +346,8 @@ fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
                                   FIELD_PREP(FBNIC_TWD_LEN_MASK, size) |
                                   FIELD_PREP(FBNIC_TWD_TYPE_MASK,
                                              FBNIC_TWD_TYPE_AL));
+               if (is_net_iov)
+                       ring->tx_buf[tail] = FBNIC_XMIT_NOUNMAP;
 
                tail++;
                tail &= ring->size_mask;
@@ -355,6 +361,7 @@ fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
                if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK))
                        goto dma_error;
 
+               is_net_iov = skb_frag_is_net_iov(frag);
                dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
        }
 
@@ -390,6 +397,8 @@ dma_error:
                twd = &ring->desc[tail];
                if (tail == first)
                        fbnic_unmap_single_twd(dev, twd);
+               else if (ring->tx_buf[tail] == FBNIC_XMIT_NOUNMAP)
+                       ring->tx_buf[tail] = NULL;
                else
                        fbnic_unmap_page_twd(dev, twd);
        }
@@ -574,7 +583,11 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
                desc_cnt--;
 
                while (desc_cnt--) {
-                       fbnic_unmap_page_twd(nv->dev, &ring->desc[head]);
+                       if (ring->tx_buf[head] != FBNIC_XMIT_NOUNMAP)
+                               fbnic_unmap_page_twd(nv->dev,
+                                                    &ring->desc[head]);
+                       else
+                               ring->tx_buf[head] = NULL;
                        head++;
                        head &= ring->size_mask;
                }