diff mbox series

[net,v2,3/3] octeontx2-pf: Do xdp_do_flush() after redirects.

Message ID 20230918153611.165722-4-bigeasy@linutronix.de (mailing list archive)
State Accepted
Commit 70b2b6892645e58ed6f051dad7f8d1083f0ad553
Delegated to: Netdev Maintainers
Headers show
Series Add missing xdp_do_flush() invocations. | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net
netdev/fixes_present success Fixes tag present in non-next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 9 this patch: 9
netdev/cc_maintainers success CCed 14 of 14 maintainers
netdev/build_clang success Errors and warnings before: 1363 this patch: 1363
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success Fixes tag looks correct
netdev/build_allmodconfig_warn success Errors and warnings before: 1366 this patch: 1366
netdev/checkpatch warning WARNING: Please use correct Fixes: style 'Fixes: <12 chars of sha1> ("<title line>")' - ie: 'Fixes: 06059a1a9a4a ("octeontx2-pf: Add XDP support to netdev PF")' WARNING: line length of 92 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Sebastian Andrzej Siewior Sept. 18, 2023, 3:36 p.m. UTC
xdp_do_flush() should be invoked before leaving the NAPI poll function
if XDP-redirect has been performed.

Invoke xdp_do_flush() before leaving NAPI.

Cc: Geetha sowjanya <gakula@marvell.com>
Cc: Subbaraya Sundeep <sbhatta@marvell.com>
Cc: Sunil Goutham <sgoutham@marvell.com>
Cc: hariprasad <hkelam@marvell.com>
Fixes: 06059a1a9a4a ("octeontx2-pf: Add XDP support to netdev PF")
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Acked-by: Geethasowjanya Akula <gakula@marvell.com>
---
 .../marvell/octeontx2/nic/otx2_txrx.c         | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

Comments

Kui-Feng Lee Sept. 18, 2023, 5:58 p.m. UTC | #1
On 9/18/23 08:36, Sebastian Andrzej Siewior wrote:
> xdp_do_flush() should be invoked before leaving the NAPI poll function
> if XDP-redirect has been performed.
> 
> Invoke xdp_do_flush() before leaving NAPI.
> 
> Cc: Geetha sowjanya <gakula@marvell.com>
> Cc: Subbaraya Sundeep <sbhatta@marvell.com>
> Cc: Sunil Goutham <sgoutham@marvell.com>
> Cc: hariprasad <hkelam@marvell.com>
> Fixes: 06059a1a9a4a5 ("octeontx2-pf: Add XDP support to netdev PF")
> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
> Acked-by: Geethasowjanya Akula <gakula@marvell.com>
> ---
>   .../marvell/octeontx2/nic/otx2_txrx.c         | 19 +++++++++++++------
>   1 file changed, 13 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
> index e77d438489557..53b2a4ef52985 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
> @@ -29,7 +29,8 @@
>   static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
>   				     struct bpf_prog *prog,
>   				     struct nix_cqe_rx_s *cqe,
> -				     struct otx2_cq_queue *cq);
> +				     struct otx2_cq_queue *cq,
> +				     bool *need_xdp_flush);
>   
>   static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
>   				 struct otx2_cq_queue *cq)
> @@ -337,7 +338,7 @@ static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
>   static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
>   				 struct napi_struct *napi,
>   				 struct otx2_cq_queue *cq,
> -				 struct nix_cqe_rx_s *cqe)
> +				 struct nix_cqe_rx_s *cqe, bool *need_xdp_flush)
>   {
>   	struct nix_rx_parse_s *parse = &cqe->parse;
>   	struct nix_rx_sg_s *sg = &cqe->sg;
> @@ -353,7 +354,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
>   	}
>   
>   	if (pfvf->xdp_prog)
> -		if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq))
> +		if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush))
>   			return;
>   
>   	skb = napi_get_frags(napi);
> @@ -388,6 +389,7 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
>   				struct napi_struct *napi,
>   				struct otx2_cq_queue *cq, int budget)
>   {
> +	bool need_xdp_flush = false;
>   	struct nix_cqe_rx_s *cqe;
>   	int processed_cqe = 0;
>   
> @@ -409,13 +411,15 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
>   		cq->cq_head++;
>   		cq->cq_head &= (cq->cqe_cnt - 1);
>   
> -		otx2_rcv_pkt_handler(pfvf, napi, cq, cqe);
> +		otx2_rcv_pkt_handler(pfvf, napi, cq, cqe, &need_xdp_flush);
>   
>   		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
>   		cqe->sg.seg_addr = 0x00;
>   		processed_cqe++;
>   		cq->pend_cqe--;
>   	}
> +	if (need_xdp_flush)
> +		xdp_do_flush();
>   
>   	/* Free CQEs to HW */
>   	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
> @@ -1354,7 +1358,8 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
>   static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
>   				     struct bpf_prog *prog,
>   				     struct nix_cqe_rx_s *cqe,
> -				     struct otx2_cq_queue *cq)
> +				     struct otx2_cq_queue *cq,
> +				     bool *need_xdp_flush)
>   {
>   	unsigned char *hard_start, *data;
>   	int qidx = cq->cq_idx;
> @@ -1391,8 +1396,10 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
>   
>   		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
>   				    DMA_FROM_DEVICE);
> -		if (!err)
> +		if (!err) {
> +			*need_xdp_flush = true;

Is it possible to call xdp_do_flush() in the first place (here)?

>   			return true;
> +		}
>   		put_page(page);
>   		break;
>   	default:
Sebastian Andrzej Siewior Sept. 19, 2023, 6:36 a.m. UTC | #2
On 2023-09-18 10:58:39 [-0700], Kui-Feng Lee wrote:
> > diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
> > index e77d438489557..53b2a4ef52985 100644
> > --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
> > +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
> > @@ -1391,8 +1396,10 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
> >   		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
> >   				    DMA_FROM_DEVICE);
> > -		if (!err)
> > +		if (!err) {
> > +			*need_xdp_flush = true;
> 
> Is it possible to call xdp_do_flush() in the first place (here)?

It shouldn't be wrong. All drivers, except for cpsw, invoke
xdp_do_flush() after completing their NAPI loop, thereby flushing all
possible packets at once.

> >   			return true;
> > +		}
> >   		put_page(page);
> >   		break;
> >   	default:

Sebastian
Paolo Abeni Sept. 21, 2023, 7:01 a.m. UTC | #3
On Mon, 2023-09-18 at 10:58 -0700, Kui-Feng Lee wrote:
> 
> On 9/18/23 08:36, Sebastian Andrzej Siewior wrote:
> > xdp_do_flush() should be invoked before leaving the NAPI poll function
> > if XDP-redirect has been performed.
> > 
> > Invoke xdp_do_flush() before leaving NAPI.
> > 
> > Cc: Geetha sowjanya <gakula@marvell.com>
> > Cc: Subbaraya Sundeep <sbhatta@marvell.com>
> > Cc: Sunil Goutham <sgoutham@marvell.com>
> > Cc: hariprasad <hkelam@marvell.com>
> > Fixes: 06059a1a9a4a5 ("octeontx2-pf: Add XDP support to netdev PF")
> > Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
> > Acked-by: Geethasowjanya Akula <gakula@marvell.com>
> > ---
> >   .../marvell/octeontx2/nic/otx2_txrx.c         | 19 +++++++++++++------
> >   1 file changed, 13 insertions(+), 6 deletions(-)
> > 
> > diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
> > index e77d438489557..53b2a4ef52985 100644
> > --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
> > +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
> > @@ -29,7 +29,8 @@
> >   static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
> >   				     struct bpf_prog *prog,
> >   				     struct nix_cqe_rx_s *cqe,
> > -				     struct otx2_cq_queue *cq);
> > +				     struct otx2_cq_queue *cq,
> > +				     bool *need_xdp_flush);
> >   
> >   static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
> >   				 struct otx2_cq_queue *cq)
> > @@ -337,7 +338,7 @@ static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
> >   static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
> >   				 struct napi_struct *napi,
> >   				 struct otx2_cq_queue *cq,
> > -				 struct nix_cqe_rx_s *cqe)
> > +				 struct nix_cqe_rx_s *cqe, bool *need_xdp_flush)
> >   {
> >   	struct nix_rx_parse_s *parse = &cqe->parse;
> >   	struct nix_rx_sg_s *sg = &cqe->sg;
> > @@ -353,7 +354,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
> >   	}
> >   
> >   	if (pfvf->xdp_prog)
> > -		if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq))
> > +		if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush))
> >   			return;
> >   
> >   	skb = napi_get_frags(napi);
> > @@ -388,6 +389,7 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
> >   				struct napi_struct *napi,
> >   				struct otx2_cq_queue *cq, int budget)
> >   {
> > +	bool need_xdp_flush = false;
> >   	struct nix_cqe_rx_s *cqe;
> >   	int processed_cqe = 0;
> >   
> > @@ -409,13 +411,15 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
> >   		cq->cq_head++;
> >   		cq->cq_head &= (cq->cqe_cnt - 1);
> >   
> > -		otx2_rcv_pkt_handler(pfvf, napi, cq, cqe);
> > +		otx2_rcv_pkt_handler(pfvf, napi, cq, cqe, &need_xdp_flush);
> >   
> >   		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
> >   		cqe->sg.seg_addr = 0x00;
> >   		processed_cqe++;
> >   		cq->pend_cqe--;
> >   	}
> > +	if (need_xdp_flush)
> > +		xdp_do_flush();
> >   
> >   	/* Free CQEs to HW */
> >   	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
> > @@ -1354,7 +1358,8 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
> >   static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
> >   				     struct bpf_prog *prog,
> >   				     struct nix_cqe_rx_s *cqe,
> > -				     struct otx2_cq_queue *cq)
> > +				     struct otx2_cq_queue *cq,
> > +				     bool *need_xdp_flush)
> >   {
> >   	unsigned char *hard_start, *data;
> >   	int qidx = cq->cq_idx;
> > @@ -1391,8 +1396,10 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
> >   
> >   		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
> >   				    DMA_FROM_DEVICE);
> > -		if (!err)
> > +		if (!err) {
> > +			*need_xdp_flush = true;
> 
> > Is it possible to call xdp_do_flush() in the first place (here)?

AFAICT that would kill much/all of the performance benefits of bulk
redirect.

I think the proposed solution is a better one.

Cheers,

Paolo
diff mbox series

Patch

diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index e77d438489557..53b2a4ef52985 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -29,7 +29,8 @@ 
 static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
 				     struct bpf_prog *prog,
 				     struct nix_cqe_rx_s *cqe,
-				     struct otx2_cq_queue *cq);
+				     struct otx2_cq_queue *cq,
+				     bool *need_xdp_flush);
 
 static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
 				 struct otx2_cq_queue *cq)
@@ -337,7 +338,7 @@  static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
 static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
 				 struct napi_struct *napi,
 				 struct otx2_cq_queue *cq,
-				 struct nix_cqe_rx_s *cqe)
+				 struct nix_cqe_rx_s *cqe, bool *need_xdp_flush)
 {
 	struct nix_rx_parse_s *parse = &cqe->parse;
 	struct nix_rx_sg_s *sg = &cqe->sg;
@@ -353,7 +354,7 @@  static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
 	}
 
 	if (pfvf->xdp_prog)
-		if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq))
+		if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush))
 			return;
 
 	skb = napi_get_frags(napi);
@@ -388,6 +389,7 @@  static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
 				struct napi_struct *napi,
 				struct otx2_cq_queue *cq, int budget)
 {
+	bool need_xdp_flush = false;
 	struct nix_cqe_rx_s *cqe;
 	int processed_cqe = 0;
 
@@ -409,13 +411,15 @@  static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
 		cq->cq_head++;
 		cq->cq_head &= (cq->cqe_cnt - 1);
 
-		otx2_rcv_pkt_handler(pfvf, napi, cq, cqe);
+		otx2_rcv_pkt_handler(pfvf, napi, cq, cqe, &need_xdp_flush);
 
 		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
 		cqe->sg.seg_addr = 0x00;
 		processed_cqe++;
 		cq->pend_cqe--;
 	}
+	if (need_xdp_flush)
+		xdp_do_flush();
 
 	/* Free CQEs to HW */
 	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
@@ -1354,7 +1358,8 @@  bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
 static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
 				     struct bpf_prog *prog,
 				     struct nix_cqe_rx_s *cqe,
-				     struct otx2_cq_queue *cq)
+				     struct otx2_cq_queue *cq,
+				     bool *need_xdp_flush)
 {
 	unsigned char *hard_start, *data;
 	int qidx = cq->cq_idx;
@@ -1391,8 +1396,10 @@  static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
 
 		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
 				    DMA_FROM_DEVICE);
-		if (!err)
+		if (!err) {
+			*need_xdp_flush = true;
 			return true;
+		}
 		put_page(page);
 		break;
 	default: