[RFCv2,bpf-next,13/18] mvneta: add XDP-hints support

Message ID 166256556634.1434226.5193053845415014583.stgit@firesoul (mailing list archive)
State RFC
Delegated to: BPF
Series XDP-hints: XDP gaining access to HW offload hints via BTF

Checks

Context Check Description
bpf/vmtest-bpf-next-PR fail PR summary
bpf/vmtest-bpf-next-VM_Test-1 pending Logs for ${{ matrix.test }} on ${{ matrix.arch }} with ${{ matrix.toolchain }}
bpf/vmtest-bpf-next-VM_Test-2 fail Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-3 fail Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-4 fail Logs for build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-5 success Logs for llvm-toolchain
bpf/vmtest-bpf-next-VM_Test-6 success Logs for set-matrix
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count fail Series longer than 15 patches (and no cover letter)
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 2 this patch: 2
netdev/cc_maintainers warning 8 maintainers not CCed: edumazet@google.com john.fastabend@gmail.com pabeni@redhat.com daniel@iogearbox.net kuba@kernel.org thomas.petazzoni@bootlin.com hawk@kernel.org davem@davemloft.net
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 2 this patch: 2
netdev/checkpatch warning CHECK: Please don't use multiple blank lines
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Jesper Dangaard Brouer Sept. 7, 2022, 3:46 p.m. UTC
From: Lorenzo Bianconi <lorenzo@kernel.org>

In the mvneta_rx_swbm() code path this driver already builds the SKB
based on the xdp_buff. The natural next step is to use XDP-hints to
populate the SKB fields, even when sending packets to the normal
netstack.

The hardware/driver only supports RX checksum offloading, which is
stored as an XDP-hint. Still, the generic function xdp_hint2skb(),
which applies all common hints, is called. This makes sense, as an
XDP bpf_prog has the opportunity to add some of these common hints
prior to SKB creation.
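
As a rough illustration of that flow: the driver reserves the hints
area in front of the packet and points data_meta at it, so an XDP
program can fill in or adjust the common hints before the SKB is
built. A minimal sketch follows, assuming a simplified mirror of the
struct layout; the struct body and the flag bit are illustrative,
not copied from the series' headers:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Illustrative mirror only; the real struct xdp_hints_common in this
 * series is larger, and btf_full_id is its last member so that it
 * sits directly in front of the packet data.
 */
struct xdp_hints_common {
	__u32 xdp_hints_flags;
	__u32 rx_hash32;
	__u64 btf_full_id;
};

SEC("xdp")
int xdp_add_hash_hint(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_meta = (void *)(long)ctx->data_meta;
	struct xdp_hints_common *hints = data_meta;

	/* Bounds-check the metadata area to satisfy the verifier */
	if ((void *)(hints + 1) > data)
		return XDP_PASS;

	/* Supply an RX hash the hardware did not provide */
	hints->rx_hash32 = 0xc0ffee;
	hints->xdp_hints_flags |= 0x2;	/* hypothetical RX-hash flag bit */

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

A program that needs more metadata room than the driver reserved
would first call bpf_xdp_adjust_meta().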

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
---
 drivers/net/ethernet/marvell/mvneta.c |   59 ++++++++++++++++++++++++++++-----
 1 file changed, 50 insertions(+), 9 deletions(-)

Patch

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 0caa2df87c04..7d0055488a86 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -40,6 +40,7 @@ 
 #include <net/page_pool.h>
 #include <net/pkt_cls.h>
 #include <linux/bpf_trace.h>
+#include <linux/btf.h>
 
 /* Registers */
 #define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
@@ -371,6 +372,9 @@ 
 #define MVNETA_RX_GET_BM_POOL_ID(rxd) \
 	(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
 
+static struct btf *mvneta_btf;
+static u64 btf_id_xdp_hints;
+
 enum {
 	ETHTOOL_STAT_EEE_WAKEUP,
 	ETHTOOL_STAT_SKB_ALLOC_ERR,
@@ -2308,12 +2312,15 @@  mvneta_swbm_rx_frame(struct mvneta_port *pp,
 		     struct mvneta_rx_desc *rx_desc,
 		     struct mvneta_rx_queue *rxq,
 		     struct xdp_buff *xdp, int *size,
-		     struct page *page)
+		     struct page *page, u32 status)
 {
 	unsigned char *data = page_address(page);
 	int data_len = -MVNETA_MH_SIZE, len;
+	struct xdp_hints_common *xdp_hints;
 	struct net_device *dev = pp->dev;
 	enum dma_data_direction dma_dir;
+	u32 xdp_hints_flags;
+	u16 cksum;
 
 	if (*size > MVNETA_MAX_RX_BUF_SIZE) {
 		len = MVNETA_MAX_RX_BUF_SIZE;
@@ -2336,6 +2343,20 @@  mvneta_swbm_rx_frame(struct mvneta_port *pp,
 	xdp_buff_clear_frags_flag(xdp);
 	xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
 			 data_len, false);
+
+	if (unlikely(!(pp->dev->features & NETIF_F_XDP_HINTS))) {
+		xdp_buff_clear_hints_flags(xdp);
+		return;
+	}
+
+	xdp_hints = xdp->data - sizeof(*xdp_hints);
+	cksum = mvneta_rx_csum(pp, status);
+	xdp_hints_flags = xdp_hints_set_rx_csum(xdp_hints, cksum, 0);
+	xdp_hints_set_flags(xdp_hints, xdp_hints_flags);
+	xdp_hints->btf_full_id = btf_id_xdp_hints;
+	xdp->data_meta = xdp->data - sizeof(*xdp_hints);
+
+	xdp_buff_set_hints_flags(xdp, true);
 }
 
 static void
@@ -2385,9 +2406,24 @@  mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
 	*size -= len;
 }
 
+static void
+mvneta_set_skb_hints_from_xdp(struct xdp_buff *xdp, struct sk_buff *skb)
+{
+	struct xdp_hints_common *xdp_hints;
+
+	if (!(xdp_buff_has_hints_compat(xdp)))
+		return;
+
+	if (xdp->data - xdp->data_meta < sizeof(*xdp_hints))
+		return;
+
+	xdp_hints = xdp->data - sizeof(*xdp_hints);
+	xdp_hint2skb(skb, xdp_hints);
+}
+
 static struct sk_buff *
 mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
-		      struct xdp_buff *xdp, u32 desc_status)
+		      struct xdp_buff *xdp)
 {
 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
 	struct sk_buff *skb;
@@ -2404,7 +2441,7 @@  mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
 
 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
 	skb_put(skb, xdp->data_end - xdp->data);
-	skb->ip_summed = mvneta_rx_csum(pp, desc_status);
+	mvneta_set_skb_hints_from_xdp(xdp, skb);
 
 	if (unlikely(xdp_buff_has_frags(xdp)))
 		xdp_update_skb_shared_info(skb, num_frags,
@@ -2424,8 +2461,8 @@  static int mvneta_rx_swbm(struct napi_struct *napi,
 	struct net_device *dev = pp->dev;
 	struct mvneta_stats ps = {};
 	struct bpf_prog *xdp_prog;
-	u32 desc_status, frame_sz;
 	struct xdp_buff xdp_buf;
+	u32 frame_sz;
 
 	xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq);
 	xdp_buf.data_hard_start = NULL;
@@ -2458,10 +2495,8 @@  static int mvneta_rx_swbm(struct napi_struct *napi,
 
 			size = rx_desc->data_size;
 			frame_sz = size - ETH_FCS_LEN;
-			desc_status = rx_status;
-
 			mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
-					     &size, page);
+					     &size, page, rx_status);
 		} else {
 			if (unlikely(!xdp_buf.data_hard_start)) {
 				rx_desc->buf_phys_addr = 0;
@@ -2487,7 +2522,7 @@  static int mvneta_rx_swbm(struct napi_struct *napi,
 		    mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
 			goto next;
 
-		skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status);
+		skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf);
 		if (IS_ERR(skb)) {
 			struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 
@@ -5613,7 +5648,7 @@  static int mvneta_probe(struct platform_device *pdev)
 	}
 
 	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-			NETIF_F_TSO | NETIF_F_RXCSUM;
+			NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_XDP_HINTS;
 	dev->hw_features |= dev->features;
 	dev->vlan_features |= dev->features;
 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
@@ -5817,6 +5852,11 @@  static int __init mvneta_driver_init(void)
 {
 	int ret;
 
+	mvneta_btf = btf_get_module_btf(THIS_MODULE);
+	if (mvneta_btf)
+		btf_id_xdp_hints = btf_get_module_btf_full_id(mvneta_btf,
+							      "xdp_hints_common");
+
 	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
 				      mvneta_cpu_online,
 				      mvneta_cpu_down_prepare);
@@ -5844,6 +5884,7 @@  module_init(mvneta_driver_init);
 
 static void __exit mvneta_driver_exit(void)
 {
+	btf_put_module_btf(mvneta_btf);
 	platform_driver_unregister(&mvneta_driver);
 	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
 	cpuhp_remove_multi_state(online_hpstate);