Message ID | 20230216140043.109345-7-tirthendu.sarkar@intel.com (mailing list archive) |
---|---|
State | Superseded |
Delegated to: | Netdev Maintainers |
Series | i40e: support XDP multi-buffer |
On Thu, Feb 16, 2023 at 07:30:41PM +0530, Tirthendu Sarkar wrote:
> Add a new field called next_to_process in the i40e_ring that is
> advanced for every buffer and change the semantics of next_to_clean to
> point to the first buffer of a packet. Driver will use next_to_process
> in the same way next_to_clean was used previously.
>
> For the non multi-buffer case, next_to_process and next_to_clean will
> always be the same since each packet consists of a single buffer.
>
> Signed-off-by: Tirthendu Sarkar <tirthendu.sarkar@intel.com>
> ---
>  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 26 ++++++++++++---------
>  drivers/net/ethernet/intel/i40e/i40e_txrx.h |  4 ++++
>  2 files changed, 19 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
> index 01340f620d96..94c50fa223bd 100644
> --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
> +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
> @@ -1524,6 +1524,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
>
>          rx_ring->next_to_alloc = 0;
>          rx_ring->next_to_clean = 0;
> +        rx_ring->next_to_process = 0;
>          rx_ring->next_to_use = 0;
>  }
>
> @@ -1576,6 +1577,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
>
>          rx_ring->next_to_alloc = 0;
>          rx_ring->next_to_clean = 0;
> +        rx_ring->next_to_process = 0;
>          rx_ring->next_to_use = 0;
>
>          /* XDP RX-queue info only needed for RX rings exposed to XDP */
> @@ -2076,7 +2078,7 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
>  {
>          struct i40e_rx_buffer *rx_buffer;
>
> -        rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
> +        rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_process);
>          rx_buffer->page_count =
>  #if (PAGE_SIZE < 8192)
>                  page_count(rx_buffer->page);
> @@ -2375,16 +2377,16 @@ void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
>  }
>
>  /**
> - * i40e_inc_ntc: Advance the next_to_clean index
> + * i40e_inc_ntp: Advance the next_to_process index
>   * @rx_ring: Rx ring
>   **/
> -static void i40e_inc_ntc(struct i40e_ring *rx_ring)
> +static void i40e_inc_ntp(struct i40e_ring *rx_ring)
>  {
> -        u32 ntc = rx_ring->next_to_clean + 1;
> +        u32 ntp = rx_ring->next_to_process + 1;
>
> -        ntc = (ntc < rx_ring->count) ? ntc : 0;
> -        rx_ring->next_to_clean = ntc;
> -        prefetch(I40E_RX_DESC(rx_ring, ntc));
> +        ntp = (ntp < rx_ring->count) ? ntp : 0;
> +        rx_ring->next_to_process = ntp;
> +        prefetch(I40E_RX_DESC(rx_ring, ntp));
>  }
>
>  /**
> @@ -2421,6 +2423,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
>          xdp_prog = READ_ONCE(rx_ring->xdp_prog);
>
>          while (likely(total_rx_packets < (unsigned int)budget)) {
> +                u16 ntp = rx_ring->next_to_process;

u32

>                  struct i40e_rx_buffer *rx_buffer;
>                  union i40e_rx_desc *rx_desc;
>                  unsigned int size;
> @@ -2433,7 +2436,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
>                          cleaned_count = 0;
>                  }
>
> -                rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
> +                rx_desc = I40E_RX_DESC(rx_ring, ntp);
>
>                  /* status_error_len will always be zero for unused descriptors
>                   * because it's cleared in cleanup, and overlaps with hdr_addr
> @@ -2452,8 +2455,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
>                          i40e_clean_programming_status(rx_ring,
>                                                        rx_desc->raw.qword[0],
>                                                        qword);
> -                        rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
> -                        i40e_inc_ntc(rx_ring);
> +                        rx_buffer = i40e_rx_bi(rx_ring, ntp);
> +                        i40e_inc_ntp(rx_ring);
>                          i40e_reuse_rx_page(rx_ring, rx_buffer);
>                          cleaned_count++;
>                          continue;
> @@ -2509,7 +2512,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
>                  i40e_put_rx_buffer(rx_ring, rx_buffer);
>                  cleaned_count++;
>
> -                i40e_inc_ntc(rx_ring);
> +                i40e_inc_ntp(rx_ring);
> +                rx_ring->next_to_clean = rx_ring->next_to_process;
>                  if (i40e_is_non_eop(rx_ring, rx_desc))
>                          continue;
>
> diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
> index 3e2935365104..6e0fd73367df 100644
> --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
> +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
> @@ -338,6 +338,10 @@ struct i40e_ring {
>          u8 dcb_tc;                      /* Traffic class of ring */
>          u8 __iomem *tail;
>
> +        /* Next descriptor to be processed; next_to_clean is updated only on
> +         * processing EOP descriptor
> +         */
> +        u16 next_to_process;
>          /* high bit set means dynamic, use accessor routines to read/write.
>           * hardware only supports 2us resolution for the ITR registers.
>           * these values always store the USER setting, and must be converted
> --
> 2.34.1
>
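The bare "u32" in the reply above sits directly under the quoted line that declares the new local as `u16 ntp`; it reads as a suggestion to widen that local to u32, matching the arithmetic already done inside i40e_inc_ntp(). That reading is an inference, not something spelled out in the mail. For readers outside the driver, the stand-alone sketch below shows the wrap-around advance the helper performs; all names (demo_ring, demo_inc_ntp) are invented for the example and are not i40e code.

```c
/*
 * Minimal sketch of the wrap-around index advance performed by
 * i40e_inc_ntp() in the patch above. demo_ring and demo_inc_ntp are
 * hypothetical stand-ins, not i40e definitions.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_ring {
	uint16_t count;            /* descriptors in the ring */
	uint16_t next_to_process;  /* advanced for every buffer */
	uint16_t next_to_clean;    /* first buffer of the current packet */
};

/* Advance next_to_process by one, wrapping back to 0 at ring->count. */
static void demo_inc_ntp(struct demo_ring *ring)
{
	uint32_t ntp = ring->next_to_process + 1;  /* 32-bit, as in i40e_inc_ntp() */

	ring->next_to_process = (ntp < ring->count) ? ntp : 0;
}

int main(void)
{
	struct demo_ring ring = { .count = 4 };

	for (int i = 0; i < 6; i++)
		demo_inc_ntp(&ring);

	/* Six advances on a 4-entry ring wrap around to index 2. */
	printf("next_to_process=%u\n", (unsigned)ring.next_to_process);
	return 0;
}
```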
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 01340f620d96..94c50fa223bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1524,6 +1524,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
 
         rx_ring->next_to_alloc = 0;
         rx_ring->next_to_clean = 0;
+        rx_ring->next_to_process = 0;
         rx_ring->next_to_use = 0;
 }
 
@@ -1576,6 +1577,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
 
         rx_ring->next_to_alloc = 0;
         rx_ring->next_to_clean = 0;
+        rx_ring->next_to_process = 0;
         rx_ring->next_to_use = 0;
 
         /* XDP RX-queue info only needed for RX rings exposed to XDP */
@@ -2076,7 +2078,7 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
 {
         struct i40e_rx_buffer *rx_buffer;
 
-        rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
+        rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_process);
         rx_buffer->page_count =
 #if (PAGE_SIZE < 8192)
                 page_count(rx_buffer->page);
@@ -2375,16 +2377,16 @@ void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
 }
 
 /**
- * i40e_inc_ntc: Advance the next_to_clean index
+ * i40e_inc_ntp: Advance the next_to_process index
  * @rx_ring: Rx ring
  **/
-static void i40e_inc_ntc(struct i40e_ring *rx_ring)
+static void i40e_inc_ntp(struct i40e_ring *rx_ring)
 {
-        u32 ntc = rx_ring->next_to_clean + 1;
+        u32 ntp = rx_ring->next_to_process + 1;
 
-        ntc = (ntc < rx_ring->count) ? ntc : 0;
-        rx_ring->next_to_clean = ntc;
-        prefetch(I40E_RX_DESC(rx_ring, ntc));
+        ntp = (ntp < rx_ring->count) ? ntp : 0;
+        rx_ring->next_to_process = ntp;
+        prefetch(I40E_RX_DESC(rx_ring, ntp));
 }
 
 /**
@@ -2421,6 +2423,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
         xdp_prog = READ_ONCE(rx_ring->xdp_prog);
 
         while (likely(total_rx_packets < (unsigned int)budget)) {
+                u16 ntp = rx_ring->next_to_process;
                 struct i40e_rx_buffer *rx_buffer;
                 union i40e_rx_desc *rx_desc;
                 unsigned int size;
@@ -2433,7 +2436,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
                         cleaned_count = 0;
                 }
 
-                rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+                rx_desc = I40E_RX_DESC(rx_ring, ntp);
 
                 /* status_error_len will always be zero for unused descriptors
                  * because it's cleared in cleanup, and overlaps with hdr_addr
@@ -2452,8 +2455,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
                         i40e_clean_programming_status(rx_ring,
                                                       rx_desc->raw.qword[0],
                                                       qword);
-                        rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
-                        i40e_inc_ntc(rx_ring);
+                        rx_buffer = i40e_rx_bi(rx_ring, ntp);
+                        i40e_inc_ntp(rx_ring);
                         i40e_reuse_rx_page(rx_ring, rx_buffer);
                         cleaned_count++;
                         continue;
@@ -2509,7 +2512,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
                 i40e_put_rx_buffer(rx_ring, rx_buffer);
                 cleaned_count++;
 
-                i40e_inc_ntc(rx_ring);
+                i40e_inc_ntp(rx_ring);
+                rx_ring->next_to_clean = rx_ring->next_to_process;
                 if (i40e_is_non_eop(rx_ring, rx_desc))
                         continue;
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 3e2935365104..6e0fd73367df 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -338,6 +338,10 @@ struct i40e_ring {
         u8 dcb_tc;                      /* Traffic class of ring */
         u8 __iomem *tail;
 
+        /* Next descriptor to be processed; next_to_clean is updated only on
+         * processing EOP descriptor
+         */
+        u16 next_to_process;
         /* high bit set means dynamic, use accessor routines to read/write.
          * hardware only supports 2us resolution for the ITR registers.
          * these values always store the USER setting, and must be converted
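The new struct comment above is the heart of the change: next_to_clean stays parked at the first buffer of a packet while next_to_process walks forward one buffer at a time. One consequence, not computed anywhere in this patch but useful for picturing the two cursors, is that the number of buffers gathered so far for the in-progress packet is the gap between them modulo the ring size. The sketch below only illustrates that relationship; demo_ring and demo_in_flight are made-up names, and it assumes a packet never spans the full ring.

```c
/*
 * Illustration only: with next_to_clean frozen at the first buffer of a
 * packet and next_to_process advanced per buffer, the distance between
 * them (modulo ring size) is the number of buffers gathered so far.
 * Hypothetical names, not i40e code.
 */
#include <assert.h>
#include <stdint.h>

struct demo_ring {
	uint16_t count;            /* descriptors in the ring */
	uint16_t next_to_process;  /* per-buffer cursor */
	uint16_t next_to_clean;    /* start of the packet being assembled */
};

/* Buffers consumed by the in-progress packet (assumes < ring->count). */
static uint16_t demo_in_flight(const struct demo_ring *ring)
{
	return (uint16_t)(ring->next_to_process + ring->count -
			  ring->next_to_clean) % ring->count;
}

int main(void)
{
	struct demo_ring ring = {
		.count = 8, .next_to_clean = 6, .next_to_process = 1,
	};

	/* The packet started at slot 6 and has consumed slots 6, 7 and 0. */
	assert(demo_in_flight(&ring) == 3);
	return 0;
}
```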
Add a new field called next_to_process in the i40e_ring that is
advanced for every buffer and change the semantics of next_to_clean to
point to the first buffer of a packet. Driver will use next_to_process
in the same way next_to_clean was used previously.

For the non multi-buffer case, next_to_process and next_to_clean will
always be the same since each packet consists of a single buffer.

Signed-off-by: Tirthendu Sarkar <tirthendu.sarkar@intel.com>
---
 drivers/net/ethernet/intel/i40e/i40e_txrx.c | 26 ++++++++++++---------
 drivers/net/ethernet/intel/i40e/i40e_txrx.h |  4 ++++
 2 files changed, 19 insertions(+), 11 deletions(-)
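As a rough picture of the loop shape this patch steers i40e_clean_rx_irq() toward, the hedged sketch below advances a per-buffer cursor for every descriptor and folds the clean cursor forward only when a packet's EOP descriptor is reached; for single-buffer packets the two cursors therefore finish every iteration equal, as the commit message notes. The descriptor layout, the eop flag and every name here are invented for the example and deliberately ignore real-driver concerns such as descriptor write-back checks, DMA unmapping and XDP handling.

```c
/*
 * Simplified sketch of the two-cursor receive loop described above.
 * Everything here (demo_desc, demo_ring, demo_clean) is hypothetical;
 * it only mirrors the next_to_process/next_to_clean relationship.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_desc {
	bool eop;  /* last buffer of a packet */
};

struct demo_ring {
	uint16_t count;
	uint16_t next_to_process;
	uint16_t next_to_clean;
	struct demo_desc *desc;
};

static void demo_clean(struct demo_ring *ring, int budget)
{
	while (budget) {
		/* A real driver would also stop when the descriptor has not
		 * been written back yet; skipped here for brevity. */
		struct demo_desc *d = &ring->desc[ring->next_to_process];

		/* ...gather this buffer into the frame being built... */

		ring->next_to_process =
			(uint16_t)((ring->next_to_process + 1) % ring->count);

		if (!d->eop)
			continue;  /* keep next_to_clean at the packet start */

		/* Whole packet seen: release its buffers and move the cursor. */
		ring->next_to_clean = ring->next_to_process;
		budget--;
	}
}

int main(void)
{
	/* One 3-buffer packet followed by a single-buffer packet. */
	struct demo_desc descs[8] = {
		{ .eop = false }, { .eop = false }, { .eop = true },
		{ .eop = true },
	};
	struct demo_ring ring = { .count = 8, .desc = descs };

	demo_clean(&ring, 2);
	printf("ntc=%u ntp=%u\n", (unsigned)ring.next_to_clean,
	       (unsigned)ring.next_to_process);  /* both 4 */
	return 0;
}
```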