gve: add XDP DROP and PASS support for DQ
author Joshua Washington <joshwash@google.com>
Fri, 21 Mar 2025 00:29:10 +0000 (00:29 +0000)
committer Jakub Kicinski <kuba@kernel.org>
Tue, 25 Mar 2025 20:51:15 +0000 (13:51 -0700)
This patch adds support for running XDP programs on DQ, along with
rudimentary processing for XDP_DROP and XDP_PASS. These actions require
only minimal driver work to process an XDP buffer, so for now any
action other than XDP_PASS results in the packet being dropped and the
stats being updated.

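As context for the verdict handling below, a minimal XDP program that
exercises the two supported actions might look like the following
sketch (illustrative only, not part of this patch; the program name and
the 64-byte drop threshold are invented):

// SPDX-License-Identifier: GPL-2.0
/* Sketch of an XDP program exercising the two verdicts handled here:
 * drop undersized frames, pass everything else. Illustrative only.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_drop_small(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* Drop frames shorter than 64 bytes; the driver counts the
	 * verdict in rx->xdp_actions[XDP_DROP] and frees the buffer.
	 */
	if (data + 64 > data_end)
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
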
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Praveen Kaligineedi <pkaligineedi@google.com>
Signed-off-by: Joshua Washington <joshwash@google.com>
Signed-off-by: Harshitha Ramamurthy <hramamurthy@google.com>
Link: https://patch.msgid.link/20250321002910.1343422-7-hramamurthy@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/google/gve/gve_rx_dqo.c

index 2edf3c632cbddf612a5feb045dff88e8eace60d7..deb869eb38e0ec94c72ffdcf837bc36fe3b92514 100644
@@ -546,6 +546,29 @@ static int gve_rx_append_frags(struct napi_struct *napi,
        return 0;
 }
 
+static void gve_xdp_done_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+                            struct xdp_buff *xdp, struct bpf_prog *xprog,
+                            int xdp_act,
+                            struct gve_rx_buf_state_dqo *buf_state)
+{
+       u64_stats_update_begin(&rx->statss);
+       switch (xdp_act) {
+       case XDP_ABORTED:
+       case XDP_DROP:
+       default:
+               rx->xdp_actions[xdp_act]++;
+               break;
+       case XDP_TX:
+               rx->xdp_tx_errors++;
+               break;
+       case XDP_REDIRECT:
+               rx->xdp_redirect_errors++;
+               break;
+       }
+       u64_stats_update_end(&rx->statss);
+       gve_free_buffer(rx, buf_state);
+}
+
 /* Returns 0 if descriptor is completed successfully.
  * Returns -EINVAL if descriptor is invalid.
  * Returns -ENOMEM if data cannot be copied to skb.
@@ -560,6 +583,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
        const bool hsplit = compl_desc->split_header;
        struct gve_rx_buf_state_dqo *buf_state;
        struct gve_priv *priv = rx->gve;
+       struct bpf_prog *xprog;
        u16 buf_len;
        u16 hdr_len;
 
@@ -633,6 +657,34 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
                return 0;
        }
 
+       xprog = READ_ONCE(priv->xdp_prog);
+       if (xprog) {
+               struct xdp_buff xdp;
+               void *old_data;
+               int xdp_act;
+
+               xdp_init_buff(&xdp, buf_state->page_info.buf_size,
+                             &rx->xdp_rxq);
+               xdp_prepare_buff(&xdp,
+                                buf_state->page_info.page_address +
+                                buf_state->page_info.page_offset,
+                                buf_state->page_info.pad,
+                                buf_len, false);
+               old_data = xdp.data;
+               xdp_act = bpf_prog_run_xdp(xprog, &xdp);
+               buf_state->page_info.pad += xdp.data - old_data;
+               buf_len = xdp.data_end - xdp.data;
+               if (xdp_act != XDP_PASS) {
+                       gve_xdp_done_dqo(priv, rx, &xdp, xprog, xdp_act,
+                                        buf_state);
+                       return 0;
+               }
+
+               u64_stats_update_begin(&rx->statss);
+               rx->xdp_actions[XDP_PASS]++;
+               u64_stats_update_end(&rx->statss);
+       }
+
        if (eop && buf_len <= priv->rx_copybreak) {
                rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
                                               &buf_state->page_info, buf_len);
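
To exercise this path end to end, a program such as the sketch above
can be attached in native (driver) mode via libbpf. A rough sketch,
assuming a compiled object "xdp_prog.o" and interface "eth0" (both
placeholders), with error handling trimmed:

#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>

int main(void)
{
	struct bpf_object *obj = bpf_object__open_file("xdp_prog.o", NULL);
	struct bpf_program *prog;
	int ifindex, prog_fd;

	if (!obj || bpf_object__load(obj))
		return 1;

	prog = bpf_object__find_program_by_name(obj, "xdp_drop_small");
	if (!prog)
		return 1;
	prog_fd = bpf_program__fd(prog);
	ifindex = if_nametoindex("eth0");

	/* XDP_FLAGS_DRV_MODE requests the native driver hook that this
	 * patch implements for DQ.
	 */
	return bpf_xdp_attach(ifindex, prog_fd, XDP_FLAGS_DRV_MODE, NULL);
}

Once attached, the per-action counters updated in gve_xdp_done_dqo()
and in the XDP_PASS branch above should be reflected in the driver's
per-ring statistics.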