www.infradead.org Git - linux.git/commitdiff
mlxsw: pci: Sync Rx buffers for device
Author: Amit Cohen <amcohen@nvidia.com>
Fri, 25 Oct 2024 14:26:27 +0000 (16:26 +0200)
Committer: Jakub Kicinski <kuba@kernel.org>
Thu, 31 Oct 2024 01:24:39 +0000 (18:24 -0700)
Non-coherent architectures, like ARM, may require invalidating caches
before the device can use the DMA mapped memory, which means that before
posting pages to device, drivers should sync the memory for device.

Sync for device can be configured as page pool responsibility. Set the
relevant flag and define max_len for sync.

Cc: Jiri Pirko <jiri@resnulli.us>
Fixes: b5b60bb491b2 ("mlxsw: pci: Use page pool for Rx buffers allocation")
Signed-off-by: Amit Cohen <amcohen@nvidia.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com>
Signed-off-by: Petr Machata <petrm@nvidia.com>
Link: https://patch.msgid.link/92e01f05c4f506a4f0a9b39c10175dcc01994910.1729866134.git.petrm@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlxsw/pci.c

index 2320a5f323b45c9d6c90e3c5175030e4a06f2ac5..d6f37456fb317d9cf7fba07bab261baaa09ba49c 100644 (file)
@@ -996,12 +996,13 @@ static int mlxsw_pci_cq_page_pool_init(struct mlxsw_pci_queue *q,
        if (cq_type != MLXSW_PCI_CQ_RDQ)
                return 0;
 
-       pp_params.flags = PP_FLAG_DMA_MAP;
+       pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
        pp_params.pool_size = MLXSW_PCI_WQE_COUNT * mlxsw_pci->num_sg_entries;
        pp_params.nid = dev_to_node(&mlxsw_pci->pdev->dev);
        pp_params.dev = &mlxsw_pci->pdev->dev;
        pp_params.napi = &q->u.cq.napi;
        pp_params.dma_dir = DMA_FROM_DEVICE;
+       pp_params.max_len = PAGE_SIZE;
 
        page_pool = page_pool_create(&pp_params);
        if (IS_ERR(page_pool))