diff mbox series

[net-next,6/6] gve: add XDP DROP and PASS support for DQ

Message ID 20250321002910.1343422-7-hramamurthy@google.com (mailing list archive)
State New
Delegated to: Netdev Maintainers
Headers show
Series Basic XDP Support for DQO RDA Queue Format | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 15 of 15 maintainers
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 70 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
netdev/contest warning net-next-2025-03-21--18-00 (tests: 896)

Commit Message

Harshitha Ramamurthy March 21, 2025, 12:29 a.m. UTC
From: Joshua Washington <joshwash@google.com>

This patch adds support for running XDP programs on DQ, along with
rudimentary processing for XDP_DROP and XDP_PASS. These actions require
very little driver-side handling of the XDP buffer, so for now any XDP
action other than XDP_PASS results in the packet being dropped and the
corresponding stats being updated.

Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Praveen Kaligineedi <pkaligineedi@google.com>
Signed-off-by: Joshua Washington <joshwash@google.com>
Signed-off-by: Harshitha Ramamurthy <hramamurthy@google.com>
---
 drivers/net/ethernet/google/gve/gve_rx_dqo.c | 52 ++++++++++++++++++++
 1 file changed, 52 insertions(+)
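
For readers less familiar with XDP verdict handling, the sketch below is a
small standalone userspace model of the accounting pattern the commit message
describes: only XDP_PASS lets the packet continue up the stack, while XDP_TX
and XDP_REDIRECT (not yet wired up on DQ) are counted as errors and dropped
like everything else. The enum values mirror enum xdp_action from
include/uapi/linux/bpf.h; the struct rx_stats fields and the handle_verdict()
helper are hypothetical names used for illustration only, not the driver's
actual interfaces.

	#include <stdbool.h>
	#include <stdio.h>

	/* Values mirror enum xdp_action in include/uapi/linux/bpf.h. */
	enum xdp_action {
		XDP_ABORTED = 0,
		XDP_DROP,
		XDP_PASS,
		XDP_TX,
		XDP_REDIRECT,
	};

	/* Hypothetical per-ring counters, modeled loosely on the patch. */
	struct rx_stats {
		unsigned long xdp_actions[XDP_REDIRECT + 1];
		unsigned long xdp_tx_errors;
		unsigned long xdp_redirect_errors;
	};

	/* Returns true if the packet should continue up the stack. */
	static bool handle_verdict(struct rx_stats *stats, int act)
	{
		switch (act) {
		case XDP_PASS:
			stats->xdp_actions[XDP_PASS]++;
			return true;
		case XDP_TX:		/* not yet supported on DQ: count and drop */
			stats->xdp_tx_errors++;
			return false;
		case XDP_REDIRECT:	/* not yet supported on DQ: count and drop */
			stats->xdp_redirect_errors++;
			return false;
		case XDP_ABORTED:
		case XDP_DROP:
			stats->xdp_actions[act]++;
			return false;
		default:		/* unknown verdicts treated as aborted here */
			stats->xdp_actions[XDP_ABORTED]++;
			return false;
		}
	}

	int main(void)
	{
		struct rx_stats stats = {};
		int verdicts[] = { XDP_PASS, XDP_DROP, XDP_TX, XDP_REDIRECT };

		for (unsigned int i = 0; i < sizeof(verdicts) / sizeof(verdicts[0]); i++)
			printf("verdict %d -> %s\n", verdicts[i],
			       handle_verdict(&stats, verdicts[i]) ? "pass" : "drop");
		return 0;
	}

In the actual patch this pattern appears in gve_xdp_done_dqo(), which
additionally wraps the counter updates in u64_stats_update_begin()/end() and
frees the receive buffer before returning.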

Patch

diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 2edf3c632cbd..deb869eb38e0 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -546,6 +546,29 @@  static int gve_rx_append_frags(struct napi_struct *napi,
 	return 0;
 }
 
+static void gve_xdp_done_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+			     struct xdp_buff *xdp, struct bpf_prog *xprog,
+			     int xdp_act,
+			     struct gve_rx_buf_state_dqo *buf_state)
+{
+	u64_stats_update_begin(&rx->statss);
+	switch (xdp_act) {
+	case XDP_ABORTED:
+	case XDP_DROP:
+	default:
+		rx->xdp_actions[xdp_act]++;
+		break;
+	case XDP_TX:
+		rx->xdp_tx_errors++;
+		break;
+	case XDP_REDIRECT:
+		rx->xdp_redirect_errors++;
+		break;
+	}
+	u64_stats_update_end(&rx->statss);
+	gve_free_buffer(rx, buf_state);
+}
+
 /* Returns 0 if descriptor is completed successfully.
  * Returns -EINVAL if descriptor is invalid.
  * Returns -ENOMEM if data cannot be copied to skb.
@@ -560,6 +583,7 @@  static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 	const bool hsplit = compl_desc->split_header;
 	struct gve_rx_buf_state_dqo *buf_state;
 	struct gve_priv *priv = rx->gve;
+	struct bpf_prog *xprog;
 	u16 buf_len;
 	u16 hdr_len;
 
@@ -633,6 +657,34 @@  static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 		return 0;
 	}
 
+	xprog = READ_ONCE(priv->xdp_prog);
+	if (xprog) {
+		struct xdp_buff xdp;
+		void *old_data;
+		int xdp_act;
+
+		xdp_init_buff(&xdp, buf_state->page_info.buf_size,
+			      &rx->xdp_rxq);
+		xdp_prepare_buff(&xdp,
+				 buf_state->page_info.page_address +
+				 buf_state->page_info.page_offset,
+				 buf_state->page_info.pad,
+				 buf_len, false);
+		old_data = xdp.data;
+		xdp_act = bpf_prog_run_xdp(xprog, &xdp);
+		buf_state->page_info.pad += xdp.data - old_data;
+		buf_len = xdp.data_end - xdp.data;
+		if (xdp_act != XDP_PASS) {
+			gve_xdp_done_dqo(priv, rx, &xdp, xprog, xdp_act,
+					 buf_state);
+			return 0;
+		}
+
+		u64_stats_update_begin(&rx->statss);
+		rx->xdp_actions[XDP_PASS]++;
+		u64_stats_update_end(&rx->statss);
+	}
+
 	if (eop && buf_len <= priv->rx_copybreak) {
 		rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
 					       &buf_state->page_info, buf_len);