[v2,net-next,2/2] xdp: add multi-buff support for xdp running in generic mode

Message ID ce8cc5ce6e25d5e455704aa42fbf633be206ce85.1701334869.git.lorenzo@kernel.org (mailing list archive)
State Superseded
Delegated to: BPF
Series add multi-buff support for xdp running in generic mode

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next, async
netdev/ynl success Generated files up to date; no warnings/errors;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 1123 this patch: 1123
netdev/cc_maintainers warning 3 maintainers not CCed: daniel@iogearbox.net ast@kernel.org john.fastabend@gmail.com
netdev/build_clang success Errors and warnings before: 1143 this patch: 1143
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 1150 this patch: 1150
netdev/checkpatch warning WARNING: line length of 81 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-3 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-8 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-7 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-15 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-14 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-16 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-17 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-19 success Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-18 success Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-21 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-22 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for x86_64-llvm-16 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-llvm-16 / build / build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-23 success Logs for x86_64-gcc / veristat / veristat on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for x86_64-llvm-16 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-28 success Logs for x86_64-llvm-16 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-27 success Logs for x86_64-llvm-16 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-29 success Logs for x86_64-llvm-16 / veristat
bpf/vmtest-bpf-next-VM_Test-13 success Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-12 success Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for s390x-gcc / test (test_maps, false, 360) / test_maps on s390x with gcc

Commit Message

Lorenzo Bianconi Nov. 30, 2023, 9:11 a.m. UTC
Similar to native XDP, do not always linearize the skb in the
netif_receive_generic_xdp routine but create a non-linear xdp_buff to be
processed by the eBPF program. This allows adding multi-buffer support
for XDP running in generic mode.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
 net/core/dev.c | 144 ++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 119 insertions(+), 25 deletions(-)
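
As an illustration (not part of the patch): the non-linearizing path added
here is only taken for programs loaded with the frags flag, i.e. when
prog->aux->xdp_has_frags is set. A minimal frags-aware program, using
libbpf's "xdp.frags" section name to set that flag, could look like the
sketch below (the program name is made up for the example):

// SPDX-License-Identifier: GPL-2.0
/* Sketch of a frags-aware XDP program. Loading it via libbpf with the
 * "xdp.frags" section name sets BPF_F_XDP_HAS_FRAGS, so with this patch
 * generic mode can hand it non-linear skbs without linearizing first.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp.frags")
int xdp_mb_pass(struct xdp_md *ctx)
{
	/* Full packet length, fragments included. */
	if (bpf_xdp_get_buff_len(ctx) == 0)
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";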

Comments

Jesper Dangaard Brouer Nov. 30, 2023, 10:36 a.m. UTC | #1
On 11/30/23 10:11, Lorenzo Bianconi wrote:
> Similar to native XDP, do not always linearize the skb in the
> netif_receive_generic_xdp routine but create a non-linear xdp_buff to be
> processed by the eBPF program. This allows adding multi-buffer support
> for XDP running in generic mode.
> 
> Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> ---
>   net/core/dev.c | 144 ++++++++++++++++++++++++++++++++++++++++---------
>   1 file changed, 119 insertions(+), 25 deletions(-)
> 
> diff --git a/net/core/dev.c b/net/core/dev.c
> index 4df68d7f04a2..0d08e755bb7f 100644
> --- a/net/core/dev.c
> +++ b/net/core/dev.c
> @@ -4853,6 +4853,12 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
>   	xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
>   	xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
>   			 skb_headlen(skb) + mac_len, true);
> +	if (skb_is_nonlinear(skb)) {
> +		skb_shinfo(skb)->xdp_frags_size = skb->data_len;
> +		xdp_buff_set_frags_flag(xdp);
> +	} else {
> +		xdp_buff_clear_frags_flag(xdp);
> +	}
>   
>   	orig_data_end = xdp->data_end;
>   	orig_data = xdp->data;
> @@ -4882,6 +4888,14 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
>   		skb->len += off; /* positive on grow, negative on shrink */
>   	}
>   
> +	/* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
> +	 * (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
> +	 */
> +	if (xdp_buff_has_frags(xdp))
> +		skb->data_len = skb_shinfo(skb)->xdp_frags_size;
> +	else
> +		skb->data_len = 0;
> +
>   	/* check if XDP changed eth hdr such SKB needs update */
>   	eth = (struct ethhdr *)xdp->data;
>   	if ((orig_eth_type != eth->h_proto) ||
> @@ -4915,54 +4929,134 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
>   	return act;
>   }
>   
> -static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
> -				     struct xdp_buff *xdp,
> -				     struct bpf_prog *xdp_prog)
> +static int netif_skb_check_for_generic_xdp(struct sk_buff **pskb,
> +					   struct bpf_prog *prog)

I like that this is split out into a check function.

>   {
>   	struct sk_buff *skb = *pskb;
> -	u32 act = XDP_DROP;
> -
> -	/* Reinjected packets coming from act_mirred or similar should
> -	 * not get XDP generic processing.
> -	 */
> -	if (skb_is_redirected(skb))
> -		return XDP_PASS;

(For other reviewers)
This reinjected check is moved further down.

> +	int err;
>   
> -	/* XDP packets must be linear and must have sufficient headroom
> -	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
> -	 * native XDP provides, thus we need to do it here as well.
> +	/* XDP does not support fraglist so we need to linearize
> +	 * the skb.
>   	 */
> -	if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
> -	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
> +	if (skb_has_frag_list(skb) || !prog->aux->xdp_has_frags) {
>   		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
>   		int troom = skb->tail + skb->data_len - skb->end;
>   
>   		/* In case we have to go down the path and also linearize,
>   		 * then lets do the pskb_expand_head() work just once here.
>   		 */
> -		if (pskb_expand_head(skb,
> -				     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
> -				     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
> -			goto do_drop;
> -		if (skb_linearize(skb))
> -			goto do_drop;
> +		err = pskb_expand_head(skb,
> +				       hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
> +				       troom > 0 ? troom + 128 : 0, GFP_ATOMIC);
> +		if (err)
> +			return err;
> +
> +		err = skb_linearize(skb);
> +		if (err)
> +			return err;
> +
> +		return 0;
> +	}
> +
> +	/* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM
> +	 * bytes. This is the guarantee that also native XDP provides,
> +	 * thus we need to do it here as well.
> +	 */
> +	if (skb_cloned(skb) || skb_shinfo(skb)->nr_frags ||

I thought we could allow an SKB with skb_shinfo(skb)->nr_frags (one that
isn't cloned or shared) to be processed by generic XDP without any reallocation?

So the check would be: (skb_cloned(skb) || skb_shared(skb) ||)

> +	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {

[Headroom trick]
For layered devices the netstack could be the one that created the
SKB.  If you noticed my veth patchset [4/4]: when I detect an XDP-prog
attach, I increase the net_device headroom (via .ndo_set_rx_headroom)
such that the netstack will allocate enough headroom to satisfy
XDP_PACKET_HEADROOM.

[4/4] 
https://lore.kernel.org/netdev/169272716651.1975370.10514711233878278884.stgit@firesoul/
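
For illustration, a minimal sketch of that headroom trick; this is not the
actual veth code (veth also has to track its peer device), so take the
names and bookkeeping here as assumptions:

#include <linux/netdevice.h>

/* Implementing this ndo lets callers (an upper device, or the driver
 * itself on XDP attach, as in the veth patch referenced above) grow the
 * headroom the stack reserves when allocating SKBs for this device, so
 * XDP_PACKET_HEADROOM can be satisfied up front.
 */
static void demo_set_rx_headroom(struct net_device *dev, int new_hr)
{
	/* A negative value means "no extra headroom requested". */
	dev->needed_headroom = new_hr < 0 ? 0 : new_hr;
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_set_rx_headroom	= demo_set_rx_headroom,
};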



> +		u32 mac_len = skb->data - skb_mac_header(skb);
> +		u32 size, len, max_head_size, off;
> +		struct sk_buff *nskb;
> +		int i, head_off;
> +
> +		__skb_push(skb, mac_len);
> +		max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE -
> +						  XDP_PACKET_HEADROOM);
> +		if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE)
> +			return -ENOMEM;
> +
> +		size = min_t(u32, skb->len, max_head_size);
> +		nskb = netdev_alloc_skb(skb->dev, size + XDP_PACKET_HEADROOM);


Would it be possible to use napi_alloc_skb() here?

napi_alloc_skb() is faster than netdev_alloc_skb(), but as the name
suggests it assumes it is called under NAPI protection/context.  That
used to be the case for generic XDP, but the code got moved around to
support layered devices, so I'm not 100% sure this is always true (NAPI
context).
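
For reference, the suggested substitution would look roughly like the
snippet below; whether a valid napi pointer is actually in reach at this
call site is exactly the open question, so treat it as a sketch only:

	/* Hypothetical: only safe if this path is guaranteed to run in
	 * NAPI (softirq) context with a struct napi_struct *napi at hand.
	 */
	nskb = napi_alloc_skb(napi, size + XDP_PACKET_HEADROOM);
	if (!nskb)
		return -ENOMEM;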


> +		if (!nskb)
> +			return -ENOMEM;
> +
> +		skb_reserve(nskb, XDP_PACKET_HEADROOM);
> +		skb_copy_header(nskb, skb);
> +
> +		err = skb_copy_bits(skb, 0, nskb->data, size);
> +		if (err) {
> +			consume_skb(nskb);
> +			return err;
> +		}
> +		skb_put(nskb, size);
> +
> +		head_off = skb_headroom(nskb) - skb_headroom(skb);
> +		skb_headers_offset_update(nskb, head_off);
> +
> +		off = size;
> +		len = skb->len - off;
> +		for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
> +			struct page *page;
> +			void *frag;
> +
> +			size = min_t(u32, len, PAGE_SIZE);
> +			frag = netdev_alloc_frag(size);

Again the slower variant.

> +			if (!frag) {
> +				consume_skb(nskb);
> +				return -ENOMEM;
> +			}
> +
> +			page = virt_to_head_page(frag);
> +			skb_add_rx_frag(nskb, i, page,
> +					frag - page_address(page), size, size);
> +			err = skb_copy_bits(skb, off, frag, size);
> +			if (err) {
> +				consume_skb(nskb);
> +				return err;
> +			}
> +
> +			len -= size;
> +			off += size;
> +		}
> +
> +		consume_skb(skb);
> +		*pskb = nskb;
> +		__skb_pull(nskb, mac_len);
>   	}
>   
> -	act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog);
> +	return 0;
> +}
> +
> +static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
> +				     struct xdp_buff *xdp,
> +				     struct bpf_prog *xdp_prog)
> +{
> +	u32 act = XDP_DROP;
> +
> +	/* Reinjected packets coming from act_mirred or similar should
> +	 * not get XDP generic processing.
> +	 */
> +	if (skb_is_redirected(*pskb))
> +		return XDP_PASS;
> +
> +	if (netif_skb_check_for_generic_xdp(pskb, xdp_prog))
> +		goto do_drop;
> +
> +	act = bpf_prog_run_generic_xdp(*pskb, xdp, xdp_prog);
>   	switch (act) {
>   	case XDP_REDIRECT:
>   	case XDP_TX:
>   	case XDP_PASS:
>   		break;
>   	default:
> -		bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act);
> +		bpf_warn_invalid_xdp_action((*pskb)->dev, xdp_prog, act);
>   		fallthrough;
>   	case XDP_ABORTED:
> -		trace_xdp_exception(skb->dev, xdp_prog, act);
> +		trace_xdp_exception((*pskb)->dev, xdp_prog, act);
>   		fallthrough;
>   	case XDP_DROP:
>   	do_drop:
> -		kfree_skb(skb);
> +		kfree_skb(*pskb);
>   		break;
>   	}
>   

Overall I like the patch :-)

Are we missing more things to allow GRO packets to be processed by
generic XDP?

--Jesper
Lorenzo Bianconi Nov. 30, 2023, 10:51 a.m. UTC | #2
> 
> 
> On 11/30/23 10:11, Lorenzo Bianconi wrote:
> > Similar to native XDP, do not always linearize the skb in the
> > netif_receive_generic_xdp routine but create a non-linear xdp_buff to be
> > processed by the eBPF program. This allows adding multi-buffer support
> > for XDP running in generic mode.
> > 
> > Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> > ---
> >   net/core/dev.c | 144 ++++++++++++++++++++++++++++++++++++++++---------
> >   1 file changed, 119 insertions(+), 25 deletions(-)
> > 
> > diff --git a/net/core/dev.c b/net/core/dev.c
> > index 4df68d7f04a2..0d08e755bb7f 100644
> > --- a/net/core/dev.c
> > +++ b/net/core/dev.c
> > @@ -4853,6 +4853,12 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
> >   	xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
> >   	xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
> >   			 skb_headlen(skb) + mac_len, true);
> > +	if (skb_is_nonlinear(skb)) {
> > +		skb_shinfo(skb)->xdp_frags_size = skb->data_len;
> > +		xdp_buff_set_frags_flag(xdp);
> > +	} else {
> > +		xdp_buff_clear_frags_flag(xdp);
> > +	}
> >   	orig_data_end = xdp->data_end;
> >   	orig_data = xdp->data;
> > @@ -4882,6 +4888,14 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
> >   		skb->len += off; /* positive on grow, negative on shrink */
> >   	}
> > +	/* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
> > +	 * (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
> > +	 */
> > +	if (xdp_buff_has_frags(xdp))
> > +		skb->data_len = skb_shinfo(skb)->xdp_frags_size;
> > +	else
> > +		skb->data_len = 0;
> > +
> >   	/* check if XDP changed eth hdr such SKB needs update */
> >   	eth = (struct ethhdr *)xdp->data;
> >   	if ((orig_eth_type != eth->h_proto) ||
> > @@ -4915,54 +4929,134 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
> >   	return act;
> >   }
> > -static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
> > -				     struct xdp_buff *xdp,
> > -				     struct bpf_prog *xdp_prog)
> > +static int netif_skb_check_for_generic_xdp(struct sk_buff **pskb,
> > +					   struct bpf_prog *prog)
> 
> I like that this is split out into a check function.
> 
> >   {
> >   	struct sk_buff *skb = *pskb;
> > -	u32 act = XDP_DROP;
> > -
> > -	/* Reinjected packets coming from act_mirred or similar should
> > -	 * not get XDP generic processing.
> > -	 */
> > -	if (skb_is_redirected(skb))
> > -		return XDP_PASS;
> 
> (For other reviewers)
> This reinjected check is moved further down.
> 
> > +	int err;
> > -	/* XDP packets must be linear and must have sufficient headroom
> > -	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
> > -	 * native XDP provides, thus we need to do it here as well.
> > +	/* XDP does not support fraglist so we need to linearize
> > +	 * the skb.
> >   	 */
> > -	if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
> > -	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
> > +	if (skb_has_frag_list(skb) || !prog->aux->xdp_has_frags) {
> >   		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
> >   		int troom = skb->tail + skb->data_len - skb->end;
> >   		/* In case we have to go down the path and also linearize,
> >   		 * then lets do the pskb_expand_head() work just once here.
> >   		 */
> > -		if (pskb_expand_head(skb,
> > -				     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
> > -				     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
> > -			goto do_drop;
> > -		if (skb_linearize(skb))
> > -			goto do_drop;
> > +		err = pskb_expand_head(skb,
> > +				       hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
> > +				       troom > 0 ? troom + 128 : 0, GFP_ATOMIC);
> > +		if (err)
> > +			return err;
> > +
> > +		err = skb_linearize(skb);
> > +		if (err)
> > +			return err;
> > +
> > +		return 0;
> > +	}
> > +
> > +	/* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM
> > +	 * bytes. This is the guarantee that also native XDP provides,
> > +	 * thus we need to do it here as well.
> > +	 */
> > +	if (skb_cloned(skb) || skb_shinfo(skb)->nr_frags ||
> 
> I thought we could allow an SKB with skb_shinfo(skb)->nr_frags (one that
> isn't cloned or shared) to be processed by generic XDP without any reallocation?

I do not think so; we discussed it with Jakub here [0]

[0] https://lore.kernel.org/netdev/20231128105145.7b39db7d@kernel.org/

> 
> So the check would be: (skb_cloned(skb) || skb_shared(skb) ||)
> 
> > +	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
> 
> [Headroom trick]
> For layered devices the netstack could be the one that created the
> SKB.  If you noticed my veth patchset [4/4]: when I detect an XDP-prog
> attach, I increase the net_device headroom (via .ndo_set_rx_headroom)
> such that the netstack will allocate enough headroom to satisfy
> XDP_PACKET_HEADROOM.
> 
> [4/4] https://lore.kernel.org/netdev/169272716651.1975370.10514711233878278884.stgit@firesoul/

Ah nice, for some reason I missed this patch.

> 
> 
> 
> > +		u32 mac_len = skb->data - skb_mac_header(skb);
> > +		u32 size, len, max_head_size, off;
> > +		struct sk_buff *nskb;
> > +		int i, head_off;
> > +
> > +		__skb_push(skb, mac_len);
> > +		max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE -
> > +						  XDP_PACKET_HEADROOM);
> > +		if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE)
> > +			return -ENOMEM;
> > +
> > +		size = min_t(u32, skb->len, max_head_size);
> > +		nskb = netdev_alloc_skb(skb->dev, size + XDP_PACKET_HEADROOM);
> 
> 
> Would it be possible to use napi_alloc_skb() here?
> 
> napi_alloc_skb() is faster than netdev_alloc_skb(), but as the name
> suggests it assumes it is called under NAPI protection/context.  That
> used to be the case for generic XDP, but the code got moved around to
> support layered devices, so I'm not 100% sure this is always true (NAPI
> context).

Actually I was thinking about it and I was not 100% sure either (so I
decided to use the non-NAPI version). Any input on this?

Regards,
Lorenzo

> 
> 
> > +		if (!nskb)
> > +			return -ENOMEM;
> > +
> > +		skb_reserve(nskb, XDP_PACKET_HEADROOM);
> > +		skb_copy_header(nskb, skb);
> > +
> > +		err = skb_copy_bits(skb, 0, nskb->data, size);
> > +		if (err) {
> > +			consume_skb(nskb);
> > +			return err;
> > +		}
> > +		skb_put(nskb, size);
> > +
> > +		head_off = skb_headroom(nskb) - skb_headroom(skb);
> > +		skb_headers_offset_update(nskb, head_off);
> > +
> > +		off = size;
> > +		len = skb->len - off;
> > +		for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
> > +			struct page *page;
> > +			void *frag;
> > +
> > +			size = min_t(u32, len, PAGE_SIZE);
> > +			frag = netdev_alloc_frag(size);
> 
> Again the slower variant.
> 
> > +			if (!frag) {
> > +				consume_skb(nskb);
> > +				return -ENOMEM;
> > +			}
> > +
> > +			page = virt_to_head_page(frag);
> > +			skb_add_rx_frag(nskb, i, page,
> > +					frag - page_address(page), size, size);
> > +			err = skb_copy_bits(skb, off, frag, size);
> > +			if (err) {
> > +				consume_skb(nskb);
> > +				return err;
> > +			}
> > +
> > +			len -= size;
> > +			off += size;
> > +		}
> > +
> > +		consume_skb(skb);
> > +		*pskb = nskb;
> > +		__skb_pull(nskb, mac_len);
> >   	}
> > -	act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog);
> > +	return 0;
> > +}
> > +
> > +static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
> > +				     struct xdp_buff *xdp,
> > +				     struct bpf_prog *xdp_prog)
> > +{
> > +	u32 act = XDP_DROP;
> > +
> > +	/* Reinjected packets coming from act_mirred or similar should
> > +	 * not get XDP generic processing.
> > +	 */
> > +	if (skb_is_redirected(*pskb))
> > +		return XDP_PASS;
> > +
> > +	if (netif_skb_check_for_generic_xdp(pskb, xdp_prog))
> > +		goto do_drop;
> > +
> > +	act = bpf_prog_run_generic_xdp(*pskb, xdp, xdp_prog);
> >   	switch (act) {
> >   	case XDP_REDIRECT:
> >   	case XDP_TX:
> >   	case XDP_PASS:
> >   		break;
> >   	default:
> > -		bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act);
> > +		bpf_warn_invalid_xdp_action((*pskb)->dev, xdp_prog, act);
> >   		fallthrough;
> >   	case XDP_ABORTED:
> > -		trace_xdp_exception(skb->dev, xdp_prog, act);
> > +		trace_xdp_exception((*pskb)->dev, xdp_prog, act);
> >   		fallthrough;
> >   	case XDP_DROP:
> >   	do_drop:
> > -		kfree_skb(skb);
> > +		kfree_skb(*pskb);
> >   		break;
> >   	}
> 
> Overall I like the patch :-)
> 
> Are we missing more things to allow GRO packets to be processed by
> generic XDP?
> 
> --Jesper
Stanislav Fomichev Nov. 30, 2023, 6:49 p.m. UTC | #3
On 11/30, Lorenzo Bianconi wrote:
> > 
> > 
> > On 11/30/23 10:11, Lorenzo Bianconi wrote:
> > > Similar to native XDP, do not always linearize the skb in the
> > > netif_receive_generic_xdp routine but create a non-linear xdp_buff to be
> > > processed by the eBPF program. This allows adding multi-buffer support
> > > for XDP running in generic mode.
> > > 
> > > Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> > > ---
> > >   net/core/dev.c | 144 ++++++++++++++++++++++++++++++++++++++++---------
> > >   1 file changed, 119 insertions(+), 25 deletions(-)
> > > 
> > > diff --git a/net/core/dev.c b/net/core/dev.c
> > > index 4df68d7f04a2..0d08e755bb7f 100644
> > > --- a/net/core/dev.c
> > > +++ b/net/core/dev.c
> > > @@ -4853,6 +4853,12 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
> > >   	xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
> > >   	xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
> > >   			 skb_headlen(skb) + mac_len, true);
> > > +	if (skb_is_nonlinear(skb)) {
> > > +		skb_shinfo(skb)->xdp_frags_size = skb->data_len;
> > > +		xdp_buff_set_frags_flag(xdp);
> > > +	} else {
> > > +		xdp_buff_clear_frags_flag(xdp);
> > > +	}
> > >   	orig_data_end = xdp->data_end;
> > >   	orig_data = xdp->data;
> > > @@ -4882,6 +4888,14 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
> > >   		skb->len += off; /* positive on grow, negative on shrink */
> > >   	}
> > > +	/* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
> > > +	 * (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
> > > +	 */
> > > +	if (xdp_buff_has_frags(xdp))
> > > +		skb->data_len = skb_shinfo(skb)->xdp_frags_size;
> > > +	else
> > > +		skb->data_len = 0;
> > > +
> > >   	/* check if XDP changed eth hdr such SKB needs update */
> > >   	eth = (struct ethhdr *)xdp->data;
> > >   	if ((orig_eth_type != eth->h_proto) ||
> > > @@ -4915,54 +4929,134 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
> > >   	return act;
> > >   }
> > > -static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
> > > -				     struct xdp_buff *xdp,
> > > -				     struct bpf_prog *xdp_prog)
> > > +static int netif_skb_check_for_generic_xdp(struct sk_buff **pskb,
> > > +					   struct bpf_prog *prog)
> > 
> > I like that this is split out into a check function.
> > 
> > >   {
> > >   	struct sk_buff *skb = *pskb;
> > > -	u32 act = XDP_DROP;
> > > -
> > > -	/* Reinjected packets coming from act_mirred or similar should
> > > -	 * not get XDP generic processing.
> > > -	 */
> > > -	if (skb_is_redirected(skb))
> > > -		return XDP_PASS;
> > 
> > (For other reviewers)
> > This reinjected check is moved further down.
> > 
> > > +	int err;
> > > -	/* XDP packets must be linear and must have sufficient headroom
> > > -	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
> > > -	 * native XDP provides, thus we need to do it here as well.
> > > +	/* XDP does not support fraglist so we need to linearize
> > > +	 * the skb.
> > >   	 */
> > > -	if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
> > > -	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
> > > +	if (skb_has_frag_list(skb) || !prog->aux->xdp_has_frags) {
> > >   		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
> > >   		int troom = skb->tail + skb->data_len - skb->end;
> > >   		/* In case we have to go down the path and also linearize,
> > >   		 * then lets do the pskb_expand_head() work just once here.
> > >   		 */
> > > -		if (pskb_expand_head(skb,
> > > -				     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
> > > -				     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
> > > -			goto do_drop;
> > > -		if (skb_linearize(skb))
> > > -			goto do_drop;
> > > +		err = pskb_expand_head(skb,
> > > +				       hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
> > > +				       troom > 0 ? troom + 128 : 0, GFP_ATOMIC);
> > > +		if (err)
> > > +			return err;
> > > +
> > > +		err = skb_linearize(skb);
> > > +		if (err)
> > > +			return err;
> > > +
> > > +		return 0;
> > > +	}
> > > +
> > > +	/* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM
> > > +	 * bytes. This is the guarantee that also native XDP provides,
> > > +	 * thus we need to do it here as well.
> > > +	 */
> > > +	if (skb_cloned(skb) || skb_shinfo(skb)->nr_frags ||
> > 
> > I thought we could allow an SKB with skb_shinfo(skb)->nr_frags (one that
> > isn't cloned or shared) to be processed by generic XDP without any reallocation?
> 
> I do not think so; we discussed it with Jakub here [0]
> 
> [0] https://lore.kernel.org/netdev/20231128105145.7b39db7d@kernel.org/

Can this be done as an optimization later on, if, from the bpf side, the
verifier can attest that the program is not calling
bpf_xdp_{load,store}_bytes on the frags, for example?
Lorenzo Bianconi Dec. 1, 2023, 9:33 a.m. UTC | #4
> On 11/30, Lorenzo Bianconi wrote:
> > > 
> > > 
> > > On 11/30/23 10:11, Lorenzo Bianconi wrote:
> > > > Similar to native XDP, do not always linearize the skb in the
> > > > netif_receive_generic_xdp routine but create a non-linear xdp_buff to be
> > > > processed by the eBPF program. This allows adding multi-buffer support
> > > > for XDP running in generic mode.
> > > > 
> > > > Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> > > > ---
> > > >   net/core/dev.c | 144 ++++++++++++++++++++++++++++++++++++++++---------
> > > >   1 file changed, 119 insertions(+), 25 deletions(-)
> > > > 
> > > > diff --git a/net/core/dev.c b/net/core/dev.c
> > > > index 4df68d7f04a2..0d08e755bb7f 100644
> > > > --- a/net/core/dev.c
> > > > +++ b/net/core/dev.c
> > > > @@ -4853,6 +4853,12 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
> > > >   	xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
> > > >   	xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
> > > >   			 skb_headlen(skb) + mac_len, true);
> > > > +	if (skb_is_nonlinear(skb)) {
> > > > +		skb_shinfo(skb)->xdp_frags_size = skb->data_len;
> > > > +		xdp_buff_set_frags_flag(xdp);
> > > > +	} else {
> > > > +		xdp_buff_clear_frags_flag(xdp);
> > > > +	}
> > > >   	orig_data_end = xdp->data_end;
> > > >   	orig_data = xdp->data;
> > > > @@ -4882,6 +4888,14 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
> > > >   		skb->len += off; /* positive on grow, negative on shrink */
> > > >   	}
> > > > +	/* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
> > > > +	 * (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
> > > > +	 */
> > > > +	if (xdp_buff_has_frags(xdp))
> > > > +		skb->data_len = skb_shinfo(skb)->xdp_frags_size;
> > > > +	else
> > > > +		skb->data_len = 0;
> > > > +
> > > >   	/* check if XDP changed eth hdr such SKB needs update */
> > > >   	eth = (struct ethhdr *)xdp->data;
> > > >   	if ((orig_eth_type != eth->h_proto) ||
> > > > @@ -4915,54 +4929,134 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
> > > >   	return act;
> > > >   }
> > > > -static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
> > > > -				     struct xdp_buff *xdp,
> > > > -				     struct bpf_prog *xdp_prog)
> > > > +static int netif_skb_check_for_generic_xdp(struct sk_buff **pskb,
> > > > +					   struct bpf_prog *prog)
> > > 
> > > I like that this is split out into a check function.
> > > 
> > > >   {
> > > >   	struct sk_buff *skb = *pskb;
> > > > -	u32 act = XDP_DROP;
> > > > -
> > > > -	/* Reinjected packets coming from act_mirred or similar should
> > > > -	 * not get XDP generic processing.
> > > > -	 */
> > > > -	if (skb_is_redirected(skb))
> > > > -		return XDP_PASS;
> > > 
> > > (For other reviewers)
> > > This reinjected check is moved further down.
> > > 
> > > > +	int err;
> > > > -	/* XDP packets must be linear and must have sufficient headroom
> > > > -	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
> > > > -	 * native XDP provides, thus we need to do it here as well.
> > > > +	/* XDP does not support fraglist so we need to linearize
> > > > +	 * the skb.
> > > >   	 */
> > > > -	if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
> > > > -	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
> > > > +	if (skb_has_frag_list(skb) || !prog->aux->xdp_has_frags) {
> > > >   		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
> > > >   		int troom = skb->tail + skb->data_len - skb->end;
> > > >   		/* In case we have to go down the path and also linearize,
> > > >   		 * then lets do the pskb_expand_head() work just once here.
> > > >   		 */
> > > > -		if (pskb_expand_head(skb,
> > > > -				     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
> > > > -				     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
> > > > -			goto do_drop;
> > > > -		if (skb_linearize(skb))
> > > > -			goto do_drop;
> > > > +		err = pskb_expand_head(skb,
> > > > +				       hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
> > > > +				       troom > 0 ? troom + 128 : 0, GFP_ATOMIC);
> > > > +		if (err)
> > > > +			return err;
> > > > +
> > > > +		err = skb_linearize(skb);
> > > > +		if (err)
> > > > +			return err;
> > > > +
> > > > +		return 0;
> > > > +	}
> > > > +
> > > > +	/* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM
> > > > +	 * bytes. This is the guarantee that also native XDP provides,
> > > > +	 * thus we need to do it here as well.
> > > > +	 */
> > > > +	if (skb_cloned(skb) || skb_shinfo(skb)->nr_frags ||
> > > 
> > > I thought we could allow an SKB with skb_shinfo(skb)->nr_frags (one that
> > > isn't cloned or shared) to be processed by generic XDP without any reallocation?
> > 
> > I do not think so; we discussed it with Jakub here [0]
> > 
> > [0] https://lore.kernel.org/netdev/20231128105145.7b39db7d@kernel.org/
> 
> Can this be done as an optimization later on, if, from the bpf side, the
> verifier can attest that the program is not calling
> bpf_xdp_{load,store}_bytes on the frags, for example?

Yes, I think so. Moreover this would be useful for veth too.

Regards,
Lorenzo

Patch

diff --git a/net/core/dev.c b/net/core/dev.c
index 4df68d7f04a2..0d08e755bb7f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4853,6 +4853,12 @@  u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
 	xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
 	xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
 			 skb_headlen(skb) + mac_len, true);
+	if (skb_is_nonlinear(skb)) {
+		skb_shinfo(skb)->xdp_frags_size = skb->data_len;
+		xdp_buff_set_frags_flag(xdp);
+	} else {
+		xdp_buff_clear_frags_flag(xdp);
+	}
 
 	orig_data_end = xdp->data_end;
 	orig_data = xdp->data;
@@ -4882,6 +4888,14 @@  u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
 		skb->len += off; /* positive on grow, negative on shrink */
 	}
 
+	/* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
+	 * (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
+	 */
+	if (xdp_buff_has_frags(xdp))
+		skb->data_len = skb_shinfo(skb)->xdp_frags_size;
+	else
+		skb->data_len = 0;
+
 	/* check if XDP changed eth hdr such SKB needs update */
 	eth = (struct ethhdr *)xdp->data;
 	if ((orig_eth_type != eth->h_proto) ||
@@ -4915,54 +4929,134 @@  u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
 	return act;
 }
 
-static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
-				     struct xdp_buff *xdp,
-				     struct bpf_prog *xdp_prog)
+static int netif_skb_check_for_generic_xdp(struct sk_buff **pskb,
+					   struct bpf_prog *prog)
 {
 	struct sk_buff *skb = *pskb;
-	u32 act = XDP_DROP;
-
-	/* Reinjected packets coming from act_mirred or similar should
-	 * not get XDP generic processing.
-	 */
-	if (skb_is_redirected(skb))
-		return XDP_PASS;
+	int err;
 
-	/* XDP packets must be linear and must have sufficient headroom
-	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
-	 * native XDP provides, thus we need to do it here as well.
+	/* XDP does not support fraglist so we need to linearize
+	 * the skb.
 	 */
-	if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
-	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
+	if (skb_has_frag_list(skb) || !prog->aux->xdp_has_frags) {
 		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
 		int troom = skb->tail + skb->data_len - skb->end;
 
 		/* In case we have to go down the path and also linearize,
 		 * then lets do the pskb_expand_head() work just once here.
 		 */
-		if (pskb_expand_head(skb,
-				     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
-				     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
-			goto do_drop;
-		if (skb_linearize(skb))
-			goto do_drop;
+		err = pskb_expand_head(skb,
+				       hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
+				       troom > 0 ? troom + 128 : 0, GFP_ATOMIC);
+		if (err)
+			return err;
+
+		err = skb_linearize(skb);
+		if (err)
+			return err;
+
+		return 0;
+	}
+
+	/* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM
+	 * bytes. This is the guarantee that also native XDP provides,
+	 * thus we need to do it here as well.
+	 */
+	if (skb_cloned(skb) || skb_shinfo(skb)->nr_frags ||
+	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
+		u32 mac_len = skb->data - skb_mac_header(skb);
+		u32 size, len, max_head_size, off;
+		struct sk_buff *nskb;
+		int i, head_off;
+
+		__skb_push(skb, mac_len);
+		max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE -
+						  XDP_PACKET_HEADROOM);
+		if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE)
+			return -ENOMEM;
+
+		size = min_t(u32, skb->len, max_head_size);
+		nskb = netdev_alloc_skb(skb->dev, size + XDP_PACKET_HEADROOM);
+		if (!nskb)
+			return -ENOMEM;
+
+		skb_reserve(nskb, XDP_PACKET_HEADROOM);
+		skb_copy_header(nskb, skb);
+
+		err = skb_copy_bits(skb, 0, nskb->data, size);
+		if (err) {
+			consume_skb(nskb);
+			return err;
+		}
+		skb_put(nskb, size);
+
+		head_off = skb_headroom(nskb) - skb_headroom(skb);
+		skb_headers_offset_update(nskb, head_off);
+
+		off = size;
+		len = skb->len - off;
+		for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
+			struct page *page;
+			void *frag;
+
+			size = min_t(u32, len, PAGE_SIZE);
+			frag = netdev_alloc_frag(size);
+			if (!frag) {
+				consume_skb(nskb);
+				return -ENOMEM;
+			}
+
+			page = virt_to_head_page(frag);
+			skb_add_rx_frag(nskb, i, page,
+					frag - page_address(page), size, size);
+			err = skb_copy_bits(skb, off, frag, size);
+			if (err) {
+				consume_skb(nskb);
+				return err;
+			}
+
+			len -= size;
+			off += size;
+		}
+
+		consume_skb(skb);
+		*pskb = nskb;
+		__skb_pull(nskb, mac_len);
 	}
 
-	act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog);
+	return 0;
+}
+
+static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
+				     struct xdp_buff *xdp,
+				     struct bpf_prog *xdp_prog)
+{
+	u32 act = XDP_DROP;
+
+	/* Reinjected packets coming from act_mirred or similar should
+	 * not get XDP generic processing.
+	 */
+	if (skb_is_redirected(*pskb))
+		return XDP_PASS;
+
+	if (netif_skb_check_for_generic_xdp(pskb, xdp_prog))
+		goto do_drop;
+
+	act = bpf_prog_run_generic_xdp(*pskb, xdp, xdp_prog);
 	switch (act) {
 	case XDP_REDIRECT:
 	case XDP_TX:
 	case XDP_PASS:
 		break;
 	default:
-		bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act);
+		bpf_warn_invalid_xdp_action((*pskb)->dev, xdp_prog, act);
 		fallthrough;
 	case XDP_ABORTED:
-		trace_xdp_exception(skb->dev, xdp_prog, act);
+		trace_xdp_exception((*pskb)->dev, xdp_prog, act);
 		fallthrough;
 	case XDP_DROP:
 	do_drop:
-		kfree_skb(skb);
+		kfree_skb(*pskb);
 		break;
 	}