
[net-next,3/6] gve: update GQ RX to use buf_size

Message ID 20250321002910.1343422-4-hramamurthy@google.com
State New
Delegated to: Netdev Maintainers
Series Basic XDP Support for DQO RDA Queue Format

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 15 of 15 maintainers
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 88 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
netdev/contest warning net-next-2025-03-21--18-00 (tests: 896)

Commit Message

Harshitha Ramamurthy March 21, 2025, 12:29 a.m. UTC
From: Joshua Washington <joshwash@google.com>

Commit ebdfae0d377b ("gve: adopt page pool for DQ RDA mode") introduced
a buf_size field to the gve_rx_slot_page_info struct, which can be used
in the datapath in place of the packet_buffer_size field, as the struct
will already be hot in the cache due to its extensive use. Using the
buf_size field in the datapath frees up the packet_buffer_size field in
the GQ-specific RX cacheline to be generalized for GQ and DQ (in the
next patch), as there is currently no common packet buffer size field
shared between the two queue formats.

Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Joshua Washington <joshwash@google.com>
Signed-off-by: Harshitha Ramamurthy <hramamurthy@google.com>
---
 drivers/net/ethernet/google/gve/gve_rx.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)
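
The pattern at the heart of this patch, restated: the setup path stamps
the ring-wide packet_buffer_size into each slot's page_info once, and the
datapath then reads the size from page_info, whose cacheline is already
hot from the surrounding offset/address accesses. A minimal standalone
sketch of that pattern follows; slot_page_info, rx_ring, setup_slot and
rx_frag_size are simplified stand-ins for illustration, not the driver's
real definitions:

#include <stdio.h>

/* Simplified stand-ins for gve_rx_slot_page_info and gve_rx_ring;
 * only the fields relevant to this patch are shown.
 */
struct slot_page_info {
	unsigned int page_offset;
	unsigned int buf_size;	/* stamped from the ring at setup time */
};

struct rx_ring {
	unsigned int packet_buffer_size;
	/* ... other ring state, typically on other cachelines ... */
};

/* Setup path (cold): copy the ring-wide size into the per-slot struct. */
static void setup_slot(const struct rx_ring *rx, struct slot_page_info *pi)
{
	pi->buf_size = rx->packet_buffer_size;
}

/* Datapath (hot): page_info is already in cache from the surrounding
 * reads, so taking buf_size from it avoids pulling in the ring
 * struct's cacheline.
 */
static unsigned int rx_frag_size(const struct slot_page_info *pi)
{
	return pi->buf_size;
}

int main(void)
{
	struct rx_ring rx = { .packet_buffer_size = 2048 };
	struct slot_page_info pi = { 0 };

	setup_slot(&rx, &pi);
	printf("frag size: %u\n", rx_frag_size(&pi));
	return 0;
}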

Patch

diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 7b774cc510cc..9d444e723fcd 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -141,12 +141,15 @@ void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
 	netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
 }
 
-static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
-			     dma_addr_t addr, struct page *page, __be64 *slot_addr)
+static void gve_setup_rx_buffer(struct gve_rx_ring *rx,
+				struct gve_rx_slot_page_info *page_info,
+				dma_addr_t addr, struct page *page,
+				__be64 *slot_addr)
 {
 	page_info->page = page;
 	page_info->page_offset = 0;
 	page_info->page_address = page_address(page);
+	page_info->buf_size = rx->packet_buffer_size;
 	*slot_addr = cpu_to_be64(addr);
 	/* The page already has 1 ref */
 	page_ref_add(page, INT_MAX - 1);
@@ -171,7 +174,7 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
 		return err;
 	}
 
-	gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr);
+	gve_setup_rx_buffer(rx, page_info, dma, page, &data_slot->addr);
 	return 0;
 }
 
@@ -199,7 +202,8 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
 			struct page *page = rx->data.qpl->pages[i];
 			dma_addr_t addr = i * PAGE_SIZE;
 
-			gve_setup_rx_buffer(&rx->data.page_info[i], addr, page,
+			gve_setup_rx_buffer(rx, &rx->data.page_info[i], addr,
+					    page,
 					    &rx->data.data_ring[i].qpl_offset);
 			continue;
 		}
@@ -222,6 +226,7 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
 			rx->qpl_copy_pool[j].page = page;
 			rx->qpl_copy_pool[j].page_offset = 0;
 			rx->qpl_copy_pool[j].page_address = page_address(page);
+			rx->qpl_copy_pool[j].buf_size = rx->packet_buffer_size;
 
 			/* The page already has 1 ref. */
 			page_ref_add(page, INT_MAX - 1);
@@ -283,6 +288,7 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
 
 	rx->gve = priv;
 	rx->q_num = idx;
+	rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
 
 	rx->mask = slots - 1;
 	rx->data.raw_addressing = cfg->raw_addressing;
@@ -351,7 +357,6 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
 	rx->db_threshold = slots / 2;
 	gve_rx_init_ring_state_gqi(rx);
 
-	rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
 	gve_rx_ctx_clear(&rx->ctx);
 
 	return 0;
@@ -590,7 +595,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
 	copy_page_info->pad = page_info->pad;
 
 	skb = gve_rx_add_frags(napi, copy_page_info,
-			       rx->packet_buffer_size, len, ctx);
+			       copy_page_info->buf_size, len, ctx);
 	if (unlikely(!skb))
 		return NULL;
 
@@ -630,7 +635,8 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
 	 * device.
 	 */
 	if (page_info->can_flip) {
-		skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx);
+		skb = gve_rx_add_frags(napi, page_info, page_info->buf_size,
+				       len, ctx);
 		/* No point in recycling if we didn't get the skb */
 		if (skb) {
 			/* Make sure that the page isn't freed. */
@@ -680,7 +686,7 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
 			skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev,
 						    page_info, len, napi,
 						    data_slot,
-						    rx->packet_buffer_size, ctx);
+						    page_info->buf_size, ctx);
 		} else {
 			skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx,
 					 page_info, len, napi, data_slot);
@@ -855,7 +861,7 @@ static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
 		void *old_data;
 		int xdp_act;
 
-		xdp_init_buff(&xdp, rx->packet_buffer_size, &rx->xdp_rxq);
+		xdp_init_buff(&xdp, page_info->buf_size, &rx->xdp_rxq);
 		xdp_prepare_buff(&xdp, page_info->page_address +
 				 page_info->page_offset, GVE_RX_PAD,
 				 len, false);