[bpf-next,04/10] i40e: xsk: terminate NAPI when XSK Rx queue gets full

Message ID 20220405110631.404427-5-maciej.fijalkowski@intel.com (mailing list archive)
State Changes Requested
Delegated to: BPF
Series xsk: stop softirq processing on full XSK Rx queue

Checks

Context Check Description
bpf/vmtest-bpf-next-PR pending PR summary
netdev/tree_selection success Clearly marked for bpf-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit fail Errors and warnings before: 0 this patch: 1
netdev/cc_maintainers warning 13 maintainers not CCed: songliubraving@fb.com davem@davemloft.net andrii@kernel.org kuba@kernel.org pabeni@redhat.com kafai@fb.com anthony.l.nguyen@intel.com jesse.brandeburg@intel.com intel-wired-lan@lists.osuosl.org john.fastabend@gmail.com yhs@fb.com hawk@kernel.org kpsingh@kernel.org
netdev/build_clang fail Errors and warnings before: 0 this patch: 9
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn fail Errors and warnings before: 0 this patch: 1
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 58 lines checked
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Maciej Fijalkowski April 5, 2022, 11:06 a.m. UTC
Correlate -ENOBUFS returned from xdp_do_redirect() with the XSK Rx
queue being full. In such a case, terminate softirq processing and let
user space consume descriptors from the XSK Rx queue so that there is
room the driver can use later on.

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
---
 .../ethernet/intel/i40e/i40e_txrx_common.h    |  1 +
 drivers/net/ethernet/intel/i40e/i40e_xsk.c    | 21 ++++++++++++-------
 2 files changed, 15 insertions(+), 7 deletions(-)
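
For context on the last step described above, the sketch below shows what "let user space consume descriptors from the XSK Rx queue" can look like against the libbpf xsk helper API (<bpf/xsk.h>, now maintained as libxdp's <xdp/xsk.h>). It is illustrative only and not part of this series; handle_frame() is an assumed application callback, and the bail-out on a partial fill-ring reservation is a placeholder policy.

#include <bpf/xsk.h>	/* libbpf AF_XDP helpers; libxdp ships <xdp/xsk.h> */

/* Consume up to @budget Rx descriptors and recycle their frames to the
 * fill ring, freeing XSK Rx queue slots so the driver's next NAPI poll
 * has room again.
 */
static unsigned int drain_rx(struct xsk_ring_cons *rx,
			     struct xsk_ring_prod *fq,
			     unsigned int budget)
{
	__u32 idx_rx = 0, idx_fq = 0, rcvd, i;

	rcvd = xsk_ring_cons__peek(rx, budget, &idx_rx);
	if (!rcvd)
		return 0;

	if (xsk_ring_prod__reserve(fq, rcvd, &idx_fq) != rcvd) {
		/* No fill-ring room; undo the peek and retry later. */
		xsk_ring_cons__cancel(rx, rcvd);
		return 0;
	}

	for (i = 0; i < rcvd; i++) {
		const struct xdp_desc *desc = xsk_ring_cons__rx_desc(rx, idx_rx++);

		handle_frame(desc->addr, desc->len);	/* app-specific, assumed */
		*xsk_ring_prod__fill_addr(fq, idx_fq++) = desc->addr;
	}

	xsk_ring_prod__submit(fq, rcvd);
	xsk_ring_cons__release(rx, rcvd);
	return rcvd;
}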

Comments

Jesper Dangaard Brouer April 5, 2022, 1:04 p.m. UTC | #1
On 05/04/2022 13.06, Maciej Fijalkowski wrote:
> Correlate -ENOBUFS returned from xdp_do_redirect() with the XSK Rx
> queue being full. In such a case, terminate softirq processing and let
> user space consume descriptors from the XSK Rx queue so that there is
> room the driver can use later on.
> 
> Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
> ---
>   .../ethernet/intel/i40e/i40e_txrx_common.h    |  1 +
>   drivers/net/ethernet/intel/i40e/i40e_xsk.c    | 21 ++++++++++++-------
>   2 files changed, 15 insertions(+), 7 deletions(-)
> 
[...]

I noticed you are only doing this for the zero-copy variants.
Wouldn't this also benefit normal AF_XDP?


> diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
> index c1d25b0b0ca2..9f9e4ce9a24d 100644
> --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
> +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
> @@ -161,9 +161,10 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
>   
>   	if (likely(act == XDP_REDIRECT)) {
>   		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
> -		if (err)
> -			goto out_failure;
> -		return I40E_XDP_REDIR;
> +		if (!err)
> +			return I40E_XDP_REDIR;
> +		result = (err == -ENOBUFS) ? I40E_XDP_EXIT : I40E_XDP_CONSUMED;
> +		goto out_failure;
>   	}
>   
>   	switch (act) {
> @@ -175,6 +176,9 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
>   		if (result == I40E_XDP_CONSUMED)
>   			goto out_failure;
>   		break;
> +	case XDP_DROP:
> +		result = I40E_XDP_CONSUMED;
> +		break;
>   	default:
>   		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
>   		fallthrough;
> @@ -182,9 +186,6 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
>   out_failure:
>   		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
>   		fallthrough; /* handle aborts by dropping packet */
> -	case XDP_DROP:
> -		result = I40E_XDP_CONSUMED;
> -		break;
>   	}
>   	return result;
>   }
> @@ -370,6 +371,12 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
>   		xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
>   
>   		xdp_res = i40e_run_xdp_zc(rx_ring, bi);
> +		if (xdp_res == I40E_XDP_EXIT) {
> +			failure = true;
> +			xsk_buff_free(bi);
> +			next_to_clean = (next_to_clean + 1) & count_mask;
> +			break;
> +		}
>   		i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
>   					  &rx_bytes, size, xdp_res);
>   		total_rx_packets += rx_packets;
> @@ -382,7 +389,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
>   	cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
>   
>   	if (cleaned_count >= I40E_RX_BUFFER_WRITE)
> -		failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
> +		failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
>   
>   	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
>   	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
>
Maciej Fijalkowski April 6, 2022, 4:04 p.m. UTC | #2
On Tue, Apr 05, 2022 at 03:04:17PM +0200, Jesper Dangaard Brouer wrote:
> 
> 
> On 05/04/2022 13.06, Maciej Fijalkowski wrote:
> > Correlate -ENOBUFS returned from xdp_do_redirect() with the XSK Rx
> > queue being full. In such a case, terminate softirq processing and let
> > user space consume descriptors from the XSK Rx queue so that there is
> > room the driver can use later on.
> > 
> > Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
> > ---
> >   .../ethernet/intel/i40e/i40e_txrx_common.h    |  1 +
> >   drivers/net/ethernet/intel/i40e/i40e_xsk.c    | 21 ++++++++++++-------
> >   2 files changed, 15 insertions(+), 7 deletions(-)
> > 
> [...]
> 
> I noticed you are only doing this for the zero-copy variants.
> Wouldn't this also benefit normal AF_XDP?

Sorry for the delay; indeed, this would improve AF_XDP in copy mode as
well, but only after a fix I have sent (not on lore yet :<).

I'll adjust the patches to check for -ENOBUFS in $DRIVER_txrx.c and send a v2.
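
A minimal sketch of what such a check could look like in the copy-mode i40e_run_xdp() in i40e_txrx.c, assuming it mirrors the zero-copy change in this patch; this is illustrative only, and the actual v2 may differ.

	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (!err) {
			result = I40E_XDP_REDIR;
			break;
		}
		/* -ENOBUFS means the XSK Rx queue is full: return an exit
		 * code so the caller can stop the NAPI loop early instead
		 * of spending the rest of the budget on drops.
		 */
		result = (err == -ENOBUFS) ? I40E_XDP_EXIT : I40E_XDP_CONSUMED;
		goto out_failure;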

> 
> 
> > diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
> > index c1d25b0b0ca2..9f9e4ce9a24d 100644
> > --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
> > +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
> > @@ -161,9 +161,10 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
> >   	if (likely(act == XDP_REDIRECT)) {
> >   		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
> > -		if (err)
> > -			goto out_failure;
> > -		return I40E_XDP_REDIR;
> > +		if (!err)
> > +			return I40E_XDP_REDIR;
> > +		result = (err == -ENOBUFS) ? I40E_XDP_EXIT : I40E_XDP_CONSUMED;
> > +		goto out_failure;
> >   	}
> >   	switch (act) {
> > @@ -175,6 +176,9 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
> >   		if (result == I40E_XDP_CONSUMED)
> >   			goto out_failure;
> >   		break;
> > +	case XDP_DROP:
> > +		result = I40E_XDP_CONSUMED;
> > +		break;
> >   	default:
> >   		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
> >   		fallthrough;
> > @@ -182,9 +186,6 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
> >   out_failure:
> >   		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
> >   		fallthrough; /* handle aborts by dropping packet */
> > -	case XDP_DROP:
> > -		result = I40E_XDP_CONSUMED;
> > -		break;
> >   	}
> >   	return result;
> >   }
> > @@ -370,6 +371,12 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
> >   		xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
> >   		xdp_res = i40e_run_xdp_zc(rx_ring, bi);
> > +		if (xdp_res == I40E_XDP_EXIT) {
> > +			failure = true;
> > +			xsk_buff_free(bi);
> > +			next_to_clean = (next_to_clean + 1) & count_mask;
> > +			break;
> > +		}
> >   		i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
> >   					  &rx_bytes, size, xdp_res);
> >   		total_rx_packets += rx_packets;
> > @@ -382,7 +389,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
> >   	cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
> >   	if (cleaned_count >= I40E_RX_BUFFER_WRITE)
> > -		failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
> > +		failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
> >   	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
> >   	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
> > 
>

Patch

diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
index 19da3b22160f..8c5118c8baaf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
@@ -20,6 +20,7 @@  void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val);
 #define I40E_XDP_CONSUMED	BIT(0)
 #define I40E_XDP_TX		BIT(1)
 #define I40E_XDP_REDIR		BIT(2)
+#define I40E_XDP_EXIT		BIT(3)
 
 /*
  * build_ctob - Builds the Tx descriptor (cmd, offset and type) qword
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index c1d25b0b0ca2..9f9e4ce9a24d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -161,9 +161,10 @@  static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
 
 	if (likely(act == XDP_REDIRECT)) {
 		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-		if (err)
-			goto out_failure;
-		return I40E_XDP_REDIR;
+		if (!err)
+			return I40E_XDP_REDIR;
+		result = (err == -ENOBUFS) ? I40E_XDP_EXIT : I40E_XDP_CONSUMED;
+		goto out_failure;
 	}
 
 	switch (act) {
@@ -175,6 +176,9 @@  static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
 		if (result == I40E_XDP_CONSUMED)
 			goto out_failure;
 		break;
+	case XDP_DROP:
+		result = I40E_XDP_CONSUMED;
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
 		fallthrough;
@@ -182,9 +186,6 @@  static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
 out_failure:
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
 		fallthrough; /* handle aborts by dropping packet */
-	case XDP_DROP:
-		result = I40E_XDP_CONSUMED;
-		break;
 	}
 	return result;
 }
@@ -370,6 +371,12 @@  int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 		xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
 
 		xdp_res = i40e_run_xdp_zc(rx_ring, bi);
+		if (xdp_res == I40E_XDP_EXIT) {
+			failure = true;
+			xsk_buff_free(bi);
+			next_to_clean = (next_to_clean + 1) & count_mask;
+			break;
+		}
 		i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
 					  &rx_bytes, size, xdp_res);
 		total_rx_packets += rx_packets;
@@ -382,7 +389,7 @@  int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 	cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
 
 	if (cleaned_count >= I40E_RX_BUFFER_WRITE)
-		failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
+		failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
 
 	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
 	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
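
For reference, a paraphrase of where the -ENOBUFS this patch keys on originates in the XSK core (net/xdp/xsk.c around this kernel version; simplified, so consult the tree for the authoritative code):

/* Paraphrased and simplified; not part of this patch. The XSK core
 * fails with -ENOBUFS when it cannot reserve a slot in the socket's
 * Rx ring, and xdp_do_redirect() propagates that to the driver.
 */
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err) {
		/* Rx ring full: count it and hand -ENOBUFS back up. */
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}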