gve: support unreadable netmem
author Mina Almasry <almasrymina@google.com>
Mon, 18 Aug 2025 21:05:07 +0000 (21:05 +0000)
committer Jakub Kicinski <kuba@kernel.org>
Fri, 22 Aug 2025 00:42:43 +0000 (17:42 -0700)
Declare PP_FLAG_ALLOW_UNREADABLE_NETMEM to turn on unreadable netmem
support in GVE.
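
For context, a minimal sketch of what opting a page pool into unreadable
netmem looks like, assuming a simplified driver; the function name and the
pool-size/NUMA values are illustrative, and the real change is in
gve_rx_create_page_pool() in the diff below.

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <net/page_pool/types.h>

/* Illustrative only: opt an RX queue's page pool into net_iov-backed
 * (potentially unreadable) buffers. queue_idx, together with the pool's
 * netdev, tells the page pool core which RX queue's memory-provider
 * binding to use.
 */
static struct page_pool *example_create_rx_pool(struct net_device *netdev,
						struct device *dev, int qid,
						bool hdr_split)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP,
		.order		= 0,
		.pool_size	= 1024,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.netdev		= netdev,
		.dma_dir	= DMA_FROM_DEVICE,
	};

	if (hdr_split) {
		pp.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
		pp.queue_idx = qid;
	}

	return page_pool_create(&pp);
}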

We also drop any net_iov packets when header split is not enabled, since
we're unable to process packets whose header landed in unreadable netmem.
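
The test itself is small; a hedged sketch of the idea, with a made-up helper
name (the real check is added to gve_rx_dqo() below): a net_iov buffer has no
kernel mapping the stack could parse a header from, so if header split did not
place the header in a host-readable buffer the descriptor has to be dropped.

#include <net/netmem.h>

/* Hypothetical helper: true when the buffer that would hold the packet
 * header cannot be read by the CPU (net_iov, e.g. device memory) and
 * header split did not deliver the header elsewhere.
 */
static bool example_header_unreadable(netmem_ref netmem, bool hdr_split)
{
	return !hdr_split && netmem_is_net_iov(netmem);
}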

Use page_pool_dma_sync_netmem_for_cpu in lieu of
dma_sync_single_range_for_cpu to correctly handle unreadable netmem
that should not be dma-sync'd.
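
A hedged sketch of the substitution (the driver's actual wrapper,
gve_dma_sync(), is added in the diff below): the netmem-aware helper takes the
pool and the netmem handle instead of a raw DMA address, so the pool can skip
the CPU sync for buffers that are not host memory.

#include <net/page_pool/helpers.h>

/* Illustrative only: sync a pool-managed RX buffer for CPU access. For
 * unreadable netmem the pool knows no CPU sync is needed, which a bare
 * dma_sync_single_range_for_cpu() call cannot know.
 */
static void example_sync_for_cpu(struct page_pool *pool, netmem_ref netmem,
				 u32 offset, u32 len)
{
	page_pool_dma_sync_netmem_for_cpu(pool, netmem, offset, len);
}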

Disable the rx_copybreak optimization if the payload is unreadable netmem,
as the copy needs access to the payload.
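
A hedged sketch of that guard, with an illustrative helper name (the real
condition is in the last hunk of the diff): rx_copybreak copies small payloads
into a freshly allocated skb, which is only possible when the payload sits in
host-readable memory.

#include <net/netmem.h>
#include <net/page_pool/types.h>

/* Hypothetical helper: copybreak applies only to small, host-readable
 * payloads; a net_iov payload cannot be memcpy'd into an skb.
 */
static bool example_use_copybreak(struct page_pool *pool, netmem_ref netmem,
				  u16 buf_len, u16 copybreak)
{
	return buf_len <= copybreak &&
	       !(pool && netmem_is_net_iov(netmem));
}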

Signed-off-by: Mina Almasry <almasrymina@google.com>
Signed-off-by: Ziwei Xiao <ziweixiao@google.com>
Signed-off-by: Harshitha Ramamurthy <hramamurthy@google.com>
Link: https://patch.msgid.link/20250818210507.3781705-1-hramamurthy@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
drivers/net/ethernet/google/gve/gve_rx_dqo.c

diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
index 8f5021e59e0a9cb5ab5523a8091606711e7c8f0a..0e2b703c673ac2071618e81802382bca06e5479b 100644
@@ -260,6 +260,11 @@ struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
                .offset = xdp ? XDP_PACKET_HEADROOM : 0,
        };
 
+       if (priv->header_split_enabled) {
+               pp.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+               pp.queue_idx = rx->q_num;
+       }
+
        return page_pool_create(&pp);
 }
 
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 7380c2b7a2d85a6ca428ad64d0f95b1b9afe4a6e..55393b784317ffada646dc25e20157ae542fe38d 100644
@@ -718,6 +718,24 @@ static int gve_rx_xsk_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
        return 0;
 }
 
+static void gve_dma_sync(struct gve_priv *priv, struct gve_rx_ring *rx,
+                        struct gve_rx_buf_state_dqo *buf_state, u16 buf_len)
+{
+       struct gve_rx_slot_page_info *page_info = &buf_state->page_info;
+
+       if (rx->dqo.page_pool) {
+               page_pool_dma_sync_netmem_for_cpu(rx->dqo.page_pool,
+                                                 page_info->netmem,
+                                                 page_info->page_offset,
+                                                 buf_len);
+       } else {
+               dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
+                                             page_info->page_offset +
+                                             page_info->pad,
+                                             buf_len, DMA_FROM_DEVICE);
+       }
+}
+
 /* Returns 0 if descriptor is completed successfully.
  * Returns -EINVAL if descriptor is invalid.
  * Returns -ENOMEM if data cannot be copied to skb.
@@ -793,13 +811,18 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
                rx->rx_hsplit_unsplit_pkt += unsplit;
                rx->rx_hsplit_bytes += hdr_len;
                u64_stats_update_end(&rx->statss);
+       } else if (!rx->ctx.skb_head && rx->dqo.page_pool &&
+                  netmem_is_net_iov(buf_state->page_info.netmem)) {
+               /* when header split is disabled, the header went to the packet
+                * buffer. If the packet buffer is a net_iov, those can't be
+                * easily mapped into the kernel space to access the header
+                * required to process the packet.
+                */
+               goto error;
        }
 
        /* Sync the portion of dma buffer for CPU to read. */
-       dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
-                                     buf_state->page_info.page_offset +
-                                     buf_state->page_info.pad,
-                                     buf_len, DMA_FROM_DEVICE);
+       gve_dma_sync(priv, rx, buf_state, buf_len);
 
        /* Append to current skb if one exists. */
        if (rx->ctx.skb_head) {
@@ -837,7 +860,9 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
                u64_stats_update_end(&rx->statss);
        }
 
-       if (eop && buf_len <= priv->rx_copybreak) {
+       if (eop && buf_len <= priv->rx_copybreak &&
+           !(rx->dqo.page_pool &&
+             netmem_is_net_iov(buf_state->page_info.netmem))) {
                rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
                                               &buf_state->page_info, buf_len);
                if (unlikely(!rx->ctx.skb_head))