mlxsw: pci: Do not store SKB for RDQ elements
author    Amit Cohen <amcohen@nvidia.com>
          Tue, 18 Jun 2024 11:34:45 +0000 (13:34 +0200)
committer Jakub Kicinski <kuba@kernel.org>
          Thu, 20 Jun 2024 00:38:11 +0000 (17:38 -0700)
The previous patch used a page pool to allocate buffers for the RDQ. With
that change, 'elem_info->u.rdq.skb' is no longer used: we do not allocate
an SKB before the packet arrives; instead, we hold a page pointer and
build the SKB around the page once the packet is received.
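For illustration, a minimal sketch of building an SKB around a page pool
page on receive follows. The helper name, the RDQ_HEADROOM value, and the
use of build_skb()/skb_mark_for_recycle() are assumptions for this sketch,
not the driver's actual code:

    #include <linux/mm.h>
    #include <linux/skbuff.h>

    #define RDQ_HEADROOM 64 /* assumed headroom; illustrative only */

    /* Wrap a received page pool page in an SKB instead of allocating the
     * SKB up front, so no SKB pointer needs to be stored per RDQ element.
     */
    static struct sk_buff *rdq_build_skb(struct page *page, u16 byte_count)
    {
            struct sk_buff *skb;

            /* build_skb() constructs an SKB around an existing buffer */
            skb = build_skb(page_address(page), PAGE_SIZE);
            if (!skb)
                    return NULL;

            skb_reserve(skb, RDQ_HEADROOM);
            skb_put(skb, byte_count);
            /* return the page to its page pool when the SKB is freed */
            skb_mark_for_recycle(skb);
            return skb;
    }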

Remove the union and store the SKB pointer for the SDQ only.

Signed-off-by: Amit Cohen <amcohen@nvidia.com>
Reviewed-by: Petr Machata <petrm@nvidia.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com>
Signed-off-by: Petr Machata <petrm@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
Link: https://lore.kernel.org/r/23a531008936dc9a1a298643fb1e4f9a7b8e6eb3.1718709196.git.petrm@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlxsw/pci.c

index 6f41747e8a763226453343c8cde9ed239d3c78fd..b530b6a80ab7458677f9793ed52b4059763252f0 100644
@@ -64,14 +64,9 @@ struct mlxsw_pci_mem_item {
 struct mlxsw_pci_queue_elem_info {
        struct page *page;
        char *elem; /* pointer to actual dma mapped element mem chunk */
-       union {
-               struct {
-                       struct sk_buff *skb;
-               } sdq;
-               struct {
-                       struct sk_buff *skb;
-               } rdq;
-       } u;
+       struct {
+               struct sk_buff *skb;
+       } sdq;
 };
 
 struct mlxsw_pci_queue {
@@ -557,8 +552,8 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
 
        spin_lock(&q->lock);
        elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
-       tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info;
-       skb = elem_info->u.sdq.skb;
+       tx_info = mlxsw_skb_cb(elem_info->sdq.skb)->tx_info;
+       skb = elem_info->sdq.skb;
        wqe = elem_info->elem;
        for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
                mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
@@ -573,7 +568,7 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
 
        if (skb)
                dev_kfree_skb_any(skb);
-       elem_info->u.sdq.skb = NULL;
+       elem_info->sdq.skb = NULL;
 
        if (q->consumer_counter++ != consumer_counter_limit)
                dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
@@ -2007,7 +2002,7 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
                goto unlock;
        }
        mlxsw_skb_cb(skb)->tx_info = *tx_info;
-       elem_info->u.sdq.skb = skb;
+       elem_info->sdq.skb = skb;
 
        wqe = elem_info->elem;
        mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */