
[RFC,net-next,13/34] xdp: add generic xdp_build_skb_from_buff()

Message ID 20231223025554.2316836-14-aleksander.lobakin@intel.com (mailing list archive)
State RFC
Delegated to: Netdev Maintainers
Series Christmas 3-serie XDP for idpf (+generic stuff)

Checks

Context Check Description
netdev/tree_selection success Clearly marked for net-next, async
netdev/apply fail Patch does not apply to net-next

Commit Message

Alexander Lobakin Dec. 23, 2023, 2:55 a.m. UTC
The code which builds an skb from an &xdp_buff keeps getting duplicated
across drivers with almost no changes. Let's try to stop that by adding
a generic function.
There's __xdp_build_skb_from_frame() already, so just convert it to take
an &xdp_buff instead, making the original one a wrapper around it. The
original always took an already allocated skb; allow both variants here --
if no skb is passed, which is the expected case when calling from a
driver, allocate one via napi_build_skb().
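
As an illustration (not part of the patch itself), a minimal sketch of
how a driver's Rx path could use the new helper; the wrapper function
name below is hypothetical:

	/* Hypothetical driver Rx path, called after the XDP verdict is
	 * XDP_PASS. No preallocated skb is passed, so the helper
	 * allocates one itself via napi_build_skb().
	 */
	static struct sk_buff *my_drv_xdp_to_skb(const struct xdp_buff *xdp)
	{
		struct sk_buff *skb;

		skb = xdp_build_skb_from_buff(xdp);
		if (unlikely(!skb))
			return NULL;

		/* skb->protocol, the metadata area, the Rx queue mapping
		 * and page_pool recycling are already handled by the
		 * helper.
		 */
		return skb;
	}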

Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
---
 include/net/xdp.h |  4 +++
 net/core/xdp.c    | 89 +++++++++++++++++++++++++++++++----------------
 2 files changed, 63 insertions(+), 30 deletions(-)

Patch

diff --git a/include/net/xdp.h b/include/net/xdp.h
index a3dc0f39b437..4fcf0ac48345 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -276,6 +276,10 @@  xdp_update_skb_shared_info(struct sk_buff *skb, u8 nr_frags,
 void xdp_warn(const char *msg, const char *func, const int line);
 #define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__)
 
+struct sk_buff *__xdp_build_skb_from_buff(struct sk_buff *skb,
+					  const struct xdp_buff *xdp);
+#define xdp_build_skb_from_buff(xdp) __xdp_build_skb_from_buff(NULL, xdp)
+
 struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);
 struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
 					   struct sk_buff *skb,
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 03ebdb21ea62..ed73b97472b4 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -609,45 +609,77 @@  int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
 }
 EXPORT_SYMBOL_GPL(xdp_alloc_skb_bulk);
 
-struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
-					   struct sk_buff *skb,
-					   struct net_device *dev)
+struct sk_buff *__xdp_build_skb_from_buff(struct sk_buff *skb,
+					  const struct xdp_buff *xdp)
 {
-	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
-	unsigned int headroom, frame_size;
-	void *hard_start;
-	u8 nr_frags;
+	const struct xdp_rxq_info *rxq = xdp->rxq;
+	const struct skb_shared_info *sinfo;
+	u32 nr_frags = 0;
 
 	/* xdp frags frame */
-	if (unlikely(xdp_frame_has_frags(xdpf)))
+	if (unlikely(xdp_buff_has_frags(xdp))) {
+		sinfo = xdp_get_shared_info_from_buff(xdp);
 		nr_frags = sinfo->nr_frags;
+	}
 
-	/* Part of headroom was reserved to xdpf */
-	headroom = sizeof(*xdpf) + xdpf->headroom;
+	net_prefetch(xdp->data_meta);
 
-	/* Memory size backing xdp_frame data already have reserved
-	 * room for build_skb to place skb_shared_info in tailroom.
-	 */
-	frame_size = xdpf->frame_sz;
+	if (!skb) {
+		skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
+		if (unlikely(!skb))
+			return NULL;
+	} else {
+		/* build_skb_around() can return NULL only when !skb, which
+		 * is impossible here.
+		 */
+		build_skb_around(skb, xdp->data_hard_start, xdp->frame_sz);
+	}
 
-	hard_start = xdpf->data - headroom;
-	skb = build_skb_around(skb, hard_start, frame_size);
-	if (unlikely(!skb))
-		return NULL;
+	skb_reserve(skb, xdp->data - xdp->data_hard_start);
+	__skb_put(skb, xdp->data_end - xdp->data);
+	if (xdp->data > xdp->data_meta)
+		skb_metadata_set(skb, xdp->data - xdp->data_meta);
+
+	if (rxq->mem.type == MEM_TYPE_PAGE_POOL)
+		skb_mark_for_recycle(skb);
 
-	skb_reserve(skb, headroom);
-	__skb_put(skb, xdpf->len);
-	if (xdpf->metasize)
-		skb_metadata_set(skb, xdpf->metasize);
+	/* __xdp_rxq_info_reg() sets these two together */
+	if (rxq->reg_state == REG_STATE_REGISTERED)
+		skb_record_rx_queue(skb, rxq->queue_index);
+
+	if (unlikely(nr_frags)) {
+		u32 truesize = sinfo->xdp_frags_truesize ? :
+			       nr_frags * xdp->frame_sz;
 
-	if (unlikely(xdp_frame_has_frags(xdpf)))
 		xdp_update_skb_shared_info(skb, nr_frags,
-					   sinfo->xdp_frags_size,
-					   nr_frags * xdpf->frame_sz,
-					   xdp_frame_is_frag_pfmemalloc(xdpf));
+					   sinfo->xdp_frags_size, truesize,
+					   xdp_buff_is_frag_pfmemalloc(xdp));
+	}
 
 	/* Essential SKB info: protocol and skb->dev */
-	skb->protocol = eth_type_trans(skb, dev);
+	skb->protocol = eth_type_trans(skb, rxq->dev);
+
+	return skb;
+}
+EXPORT_SYMBOL_GPL(__xdp_build_skb_from_buff);
+
+struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
+					   struct sk_buff *skb,
+					   struct net_device *dev)
+{
+	struct xdp_rxq_info rxq = {
+		.dev		= dev,
+		.mem		= xdpf->mem,
+	};
+	struct xdp_buff xdp;
+
+	/* Check early instead of delegating it to build_skb_around() */
+	if (unlikely(!skb))
+		return NULL;
+
+	xdp.rxq = &rxq;
+	xdp_convert_frame_to_buff(xdpf, &xdp);
+	__xdp_build_skb_from_buff(skb, &xdp);
 
 	/* Optional SKB info, currently missing:
 	 * - HW checksum info		(skb->ip_summed)
@@ -655,9 +687,6 @@  struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
 	 * - RX ring dev queue index	(skb_record_rx_queue)
 	 */
 
-	if (xdpf->mem.type == MEM_TYPE_PAGE_POOL)
-		skb_mark_for_recycle(skb);
-
 	/* Allow SKB to reuse area used by xdp_frame */
 	xdp_scrub_frame(xdpf);
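
For the existing frame path, nothing changes for callers: skbs
preallocated in bulk (as cpumap does via xdp_alloc_skb_bulk()) are still
finished around the frame's data, only now by converting the frame to an
on-stack &xdp_buff first. A rough sketch of that calling pattern, with
error handling trimmed (assumed for illustration, not lifted verbatim
from any caller):

	struct sk_buff *skb;
	void *skbs[8];

	if (unlikely(xdp_alloc_skb_bulk(skbs, 8, GFP_ATOMIC)))
		return;

	/* Converts xdpf to an on-stack xdp_buff via
	 * xdp_convert_frame_to_buff(), then delegates the rest to
	 * __xdp_build_skb_from_buff().
	 */
	skb = __xdp_build_skb_from_frame(xdpf, skbs[0], dev);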