
[RFC,net-next,5/9] ice: apply XDP offloading fixup when building skb

Message ID a9eba425bfd3bfac7e7be38fe86ad5dbff3ae01f.1718919473.git.yan@cloudflare.com (mailing list archive)
State RFC
Delegated to: Netdev Maintainers
Series: xdp: allow disable GRO per packet by XDP

Checks

Context                        | Check   | Description
netdev/series_format           | warning | Series does not have a cover letter
netdev/tree_selection          | success | Clearly marked for net-next, async
netdev/ynl                     | success | Generated files up to date; no warnings/errors; no diff in generated
netdev/fixes_present           | success | Fixes tag not required for -next series
netdev/header_inline           | success | No static functions without inline keyword in header files
netdev/build_32bit             | success | Errors and warnings before: 843 this patch: 843
netdev/build_tools             | success | Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers          | success | CCed 16 of 16 maintainers
netdev/build_clang             | success | Errors and warnings before: 850 this patch: 850
netdev/verify_signedoff        | success | Signed-off-by tag matches author and committer
netdev/deprecated_api          | success | None detected
netdev/check_selftest          | success | No net selftest shell script
netdev/verify_fixes            | success | No Fixes tag
netdev/build_allmodconfig_warn | success | Errors and warnings before: 883 this patch: 883
netdev/checkpatch              | success | total: 0 errors, 0 warnings, 0 checks, 47 lines checked
netdev/build_clang_rust        | success | No Rust files in patch. Skipping build
netdev/kdoc                    | success | Errors and warnings before: 48 this patch: 48
netdev/source_inline           | success | Was 0 now: 0

Commit Message

Yan Zhai June 20, 2024, 10:19 p.m. UTC
Add a common point to transfer offloading info from XDP context to skb.

Signed-off-by: Yan Zhai <yan@cloudflare.com>
Signed-off-by: Jesper Dangaard Brouer <hawk@kernel.org>
---
 drivers/net/ethernet/intel/ice/ice_txrx.c | 2 ++
 drivers/net/ethernet/intel/ice/ice_xsk.c  | 6 +++++-
 include/net/xdp_sock_drv.h                | 2 +-
 3 files changed, 8 insertions(+), 2 deletions(-)
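
The two helpers this patch calls, xdp_init_buff_minimal() and
xdp_buff_fixup_skb_offloading(), are introduced by earlier patches in this
series and are not shown here. A minimal sketch of what they could look
like, assuming the offload state is carried as a single bit in
xdp_buff::flags and mirrored into the skb (XDP_FLAGS_GRO_DISABLED and
skb->gro_disabled are placeholder names, not the series' actual
identifiers):

/* Illustrative sketch only: the real helpers are added earlier in this
 * series.  XDP_FLAGS_GRO_DISABLED and skb->gro_disabled are placeholder
 * names, not the actual identifiers from the series.
 */
static inline void xdp_init_buff_minimal(struct xdp_buff *xdp)
{
	/* reset per-packet state, including any offload hint bits */
	xdp->flags = 0;
}

static inline void xdp_buff_fixup_skb_offloading(struct xdp_buff *xdp,
						 struct sk_buff *skb)
{
	/* transfer offload hints set by the XDP program to the skb */
	if (xdp->flags & XDP_FLAGS_GRO_DISABLED)
		skb->gro_disabled = 1;
}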

Comments

Alexander Lobakin June 21, 2024, 9:20 a.m. UTC | #1
From: Yan Zhai <yan@cloudflare.com>
Date: Thu, 20 Jun 2024 15:19:22 -0700

> Add a common point to transfer offloading info from XDP context to skb.
> 
> Signed-off-by: Yan Zhai <yan@cloudflare.com>
> Signed-off-by: Jesper Dangaard Brouer <hawk@kernel.org>
> ---
>  drivers/net/ethernet/intel/ice/ice_txrx.c | 2 ++
>  drivers/net/ethernet/intel/ice/ice_xsk.c  | 6 +++++-
>  include/net/xdp_sock_drv.h                | 2 +-
>  3 files changed, 8 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
> index 8bb743f78fcb..a247306837ed 100644
> --- a/drivers/net/ethernet/intel/ice/ice_txrx.c
> +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
> @@ -1222,6 +1222,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
>  
>  			hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
>  				     offset;
> +			xdp_init_buff_minimal(xdp);

Two lines below, you have this:

	xdp_buff_clear_frags_flag(xdp);

Which clears the frags bit in xdp->flags. I.e. since you always clear the
flags here, this call becomes redundant.
But I'd say that `xdp->flags = 0` really wants to be moved from
xdp_init_buff() to xdp_prepare_buff().
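
For illustration, the suggested move would look roughly like this in
include/net/xdp.h (a sketch of the proposal, not part of the submitted
series):

/* Sketch: clear per-packet flags in xdp_prepare_buff(), which runs once
 * per packet, instead of in xdp_init_buff(), which some drivers call only
 * once per NAPI poll.
 */
static __always_inline void
xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)
{
	xdp->frame_sz = frame_sz;
	xdp->rxq = rxq;
}

static __always_inline void
xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
		 int headroom, int data_len, const bool meta_valid)
{
	unsigned char *data = hard_start + headroom;

	xdp->flags = 0;
	xdp->data_hard_start = hard_start;
	xdp->data = data;
	xdp->data_end = data + data_len;
	xdp->data_meta = meta_valid ? data : data + 1;
}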

>  			xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
>  #if (PAGE_SIZE > 4096)
>  			/* At larger PAGE_SIZE, frame_sz depend on len size */
> @@ -1287,6 +1288,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
>  
>  		/* populate checksum, VLAN, and protocol */
>  		ice_process_skb_fields(rx_ring, rx_desc, skb);
> +		xdp_buff_fixup_skb_offloading(xdp, skb);
>  
>  		ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
>  		/* send completed skb up the stack */
> diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
> index a65955eb23c0..367658acaab8 100644
> --- a/drivers/net/ethernet/intel/ice/ice_xsk.c
> +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
> @@ -845,8 +845,10 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
>  	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
>  	xdp_ring = rx_ring->xdp_ring;
>  
> -	if (ntc != rx_ring->first_desc)
> +	if (ntc != rx_ring->first_desc) {
>  		first = *ice_xdp_buf(rx_ring, rx_ring->first_desc);
> +		xdp_init_buff_minimal(first);

xsk_buff_set_size() always clears the flags, so this is redundant.

> +	}
>  
>  	while (likely(total_rx_packets < (unsigned int)budget)) {
>  		union ice_32b_rx_flex_desc *rx_desc;
> @@ -920,6 +922,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
>  			break;
>  		}
>  
> +		xdp = first;
>  		first = NULL;
>  		rx_ring->first_desc = ntc;
>  
> @@ -934,6 +937,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
>  		vlan_tci = ice_get_vlan_tci(rx_desc);
>  
>  		ice_process_skb_fields(rx_ring, rx_desc, skb);
> +		xdp_buff_fixup_skb_offloading(xdp, skb);
>  		ice_receive_skb(rx_ring, skb, vlan_tci);
>  	}
>  
> diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
> index 0a5dca2b2b3f..02243dc064c2 100644
> --- a/include/net/xdp_sock_drv.h
> +++ b/include/net/xdp_sock_drv.h
> @@ -181,7 +181,7 @@ static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
>  	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
>  	xdp->data_meta = xdp->data;
>  	xdp->data_end = xdp->data + size;
> -	xdp->flags = 0;
> +	xdp_init_buff_minimal(xdp);

Why is this done in the patch prefixed with "ice:"?

>  }
>  
>  static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,

Thanks,
Olek
Yan Zhai June 21, 2024, 4:05 p.m. UTC | #2
On Fri, Jun 21, 2024 at 4:22 AM Alexander Lobakin
<aleksander.lobakin@intel.com> wrote:
>
> From: Yan Zhai <yan@cloudflare.com>
> Date: Thu, 20 Jun 2024 15:19:22 -0700
>
> > Add a common point to transfer offloading info from XDP context to skb.
> >
> > Signed-off-by: Yan Zhai <yan@cloudflare.com>
> > Signed-off-by: Jesper Dangaard Brouer <hawk@kernel.org>
> > ---
> >  drivers/net/ethernet/intel/ice/ice_txrx.c | 2 ++
> >  drivers/net/ethernet/intel/ice/ice_xsk.c  | 6 +++++-
> >  include/net/xdp_sock_drv.h                | 2 +-
> >  3 files changed, 8 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
> > index 8bb743f78fcb..a247306837ed 100644
> > --- a/drivers/net/ethernet/intel/ice/ice_txrx.c
> > +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
> > @@ -1222,6 +1222,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
> >
> >                       hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
> >                                    offset;
> > +                     xdp_init_buff_minimal(xdp);
>
> Two lines below, you have this:
>
>         xdp_buff_clear_frags_flag(xdp);
>
> Which clears the frags bit in xdp->flags. I.e. since you always clear the
> flags here, this call becomes redundant.
> But I'd say that `xdp->flags = 0` really wants to be moved from
> xdp_init_buff() to xdp_prepare_buff().
>
You are right, there is some redundancy here. I will fix it if people
feel good about the use case in general :)


> >                       xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
> >  #if (PAGE_SIZE > 4096)
> >                       /* At larger PAGE_SIZE, frame_sz depend on len size */
> > @@ -1287,6 +1288,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
> >
> >               /* populate checksum, VLAN, and protocol */
> >               ice_process_skb_fields(rx_ring, rx_desc, skb);
> > +             xdp_buff_fixup_skb_offloading(xdp, skb);
> >
> >               ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
> >               /* send completed skb up the stack */
> > diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
> > index a65955eb23c0..367658acaab8 100644
> > --- a/drivers/net/ethernet/intel/ice/ice_xsk.c
> > +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
> > @@ -845,8 +845,10 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
> >       xdp_prog = READ_ONCE(rx_ring->xdp_prog);
> >       xdp_ring = rx_ring->xdp_ring;
> >
> > -     if (ntc != rx_ring->first_desc)
> > +     if (ntc != rx_ring->first_desc) {
> >               first = *ice_xdp_buf(rx_ring, rx_ring->first_desc);
> > +             xdp_init_buff_minimal(first);
>
> xsk_buff_set_size() always clears the flags, so this is redundant.
>
> > +     }
> >
> >       while (likely(total_rx_packets < (unsigned int)budget)) {
> >               union ice_32b_rx_flex_desc *rx_desc;
> > @@ -920,6 +922,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
> >                       break;
> >               }
> >
> > +             xdp = first;
> >               first = NULL;
> >               rx_ring->first_desc = ntc;
> >
> > @@ -934,6 +937,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
> >               vlan_tci = ice_get_vlan_tci(rx_desc);
> >
> >               ice_process_skb_fields(rx_ring, rx_desc, skb);
> > +             xdp_buff_fixup_skb_offloading(xdp, skb);
> >               ice_receive_skb(rx_ring, skb, vlan_tci);
> >       }
> >
> > diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
> > index 0a5dca2b2b3f..02243dc064c2 100644
> > --- a/include/net/xdp_sock_drv.h
> > +++ b/include/net/xdp_sock_drv.h
> > @@ -181,7 +181,7 @@ static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
> >       xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
> >       xdp->data_meta = xdp->data;
> >       xdp->data_end = xdp->data + size;
> > -     xdp->flags = 0;
> > +     xdp_init_buff_minimal(xdp);
>
> Why is this done in the patch prefixed with "ice:"?
>
Good catch, this should be moved to the previous patch.

thanks
Yan

> >  }
> >
> >  static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
>
> Thanks,
> Olek

Patch

diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 8bb743f78fcb..a247306837ed 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1222,6 +1222,7 @@  int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 
 			hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
 				     offset;
+			xdp_init_buff_minimal(xdp);
 			xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
 #if (PAGE_SIZE > 4096)
 			/* At larger PAGE_SIZE, frame_sz depend on len size */
@@ -1287,6 +1288,7 @@  int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 
 		/* populate checksum, VLAN, and protocol */
 		ice_process_skb_fields(rx_ring, rx_desc, skb);
+		xdp_buff_fixup_skb_offloading(xdp, skb);
 
 		ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
 		/* send completed skb up the stack */
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index a65955eb23c0..367658acaab8 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -845,8 +845,10 @@  int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
 	xdp_ring = rx_ring->xdp_ring;
 
-	if (ntc != rx_ring->first_desc)
+	if (ntc != rx_ring->first_desc) {
 		first = *ice_xdp_buf(rx_ring, rx_ring->first_desc);
+		xdp_init_buff_minimal(first);
+	}
 
 	while (likely(total_rx_packets < (unsigned int)budget)) {
 		union ice_32b_rx_flex_desc *rx_desc;
@@ -920,6 +922,7 @@  int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
 			break;
 		}
 
+		xdp = first;
 		first = NULL;
 		rx_ring->first_desc = ntc;
 
@@ -934,6 +937,7 @@  int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
 		vlan_tci = ice_get_vlan_tci(rx_desc);
 
 		ice_process_skb_fields(rx_ring, rx_desc, skb);
+		xdp_buff_fixup_skb_offloading(xdp, skb);
 		ice_receive_skb(rx_ring, skb, vlan_tci);
 	}
 
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index 0a5dca2b2b3f..02243dc064c2 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -181,7 +181,7 @@  static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
 	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
 	xdp->data_meta = xdp->data;
 	xdp->data_end = xdp->data + size;
-	xdp->flags = 0;
+	xdp_init_buff_minimal(xdp);
 }
 
 static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,