--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2426,6 +2426,7 @@ static void i40e_inc_ntc(struct i40e_ring *rx_ring)
* i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
+ * @rx_cleaned: Out parameter set to the number of Rx packets processed
*
* This function provides a "bounce buffer" approach to Rx interrupt
* processing. The advantage to this is that on systems that have
@@ -2434,7 +2435,8 @@ static void i40e_inc_ntc(struct i40e_ring *rx_ring)
*
* Returns amount of work completed
**/
-static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
+ unsigned int *rx_cleaned)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
@@ -2571,6 +2573,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

+ *rx_cleaned = total_rx_packets;
+
/* guarantee a trip back through this routine if there was a failure */
return failure ? budget : (int)total_rx_packets;
}
@@ -2694,11 +2698,13 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
struct i40e_vsi *vsi = q_vector->vsi;
struct i40e_ring *ring;
bool tx_clean_complete = true;
+ bool rx_clean_complete = true;
bool clean_complete = true;
bool arm_wb = false;
int budget_per_ring;
int work_done = 0;
unsigned int tx_cleaned = 0;
+ unsigned int rx_cleaned = 0;

if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
napi_complete(napi);
@@ -2738,13 +2744,13 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
i40e_for_each_ring(ring, q_vector->rx) {
int cleaned = ring->xsk_pool ?
- i40e_clean_rx_irq_zc(ring, budget_per_ring) :
- i40e_clean_rx_irq(ring, budget_per_ring);
+ i40e_clean_rx_irq_zc(ring, budget_per_ring, &rx_cleaned) :
+ i40e_clean_rx_irq(ring, budget_per_ring, &rx_cleaned);

work_done += cleaned;
/* if we clean as many as budgeted, we must not be done */
if (cleaned >= budget_per_ring)
- clean_complete = false;
+ clean_complete = rx_clean_complete = false;
}

/* If work not completed, return budget and polling will return */
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -378,10 +378,12 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
* i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
* @rx_ring: Rx ring
* @budget: NAPI budget
+ * @rx_cleaned: Out parameter set to the number of Rx packets processed
*
* Returns amount of work completed
**/
-int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
+int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget,
+ unsigned int *rx_cleaned)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 next_to_clean = rx_ring->next_to_clean;
@@ -452,6 +454,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

+ *rx_cleaned = total_rx_packets;
+
if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || next_to_clean == rx_ring->next_to_use)
xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -28,7 +28,8 @@ int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair);
int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid);
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 cleaned_count);
-int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
+int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget,
+ unsigned int *rx_cleaned);
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring,
unsigned int *tx_cleaned);
Adjust i40e_clean_rx_irq and i40e_clean_rx_irq_zc to accept an out
parameter which records the number of RX packets cleaned. Care has been
taken to avoid any changes in control flow.

Signed-off-by: Joe Damato <jdamato@fastly.com>
---
 drivers/net/ethernet/intel/i40e/i40e_txrx.c | 14 ++++++++++----
 drivers/net/ethernet/intel/i40e/i40e_xsk.c  |  6 +++++-
 drivers/net/ethernet/intel/i40e/i40e_xsk.h  |  3 ++-
 3 files changed, 17 insertions(+), 6 deletions(-)
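
As an illustrative note on the pattern, the standalone sketch below is a
hypothetical, self-contained rendering of the out-parameter change above.
All names in it (fake_ring, fake_clean_rx) are invented for the example;
only the shape mirrors the patch. The return value, which drives NAPI
accounting and the cleaned >= budget_per_ring test, stays byte-for-byte
identical, while the packet count additionally flows out through a
pointer for whatever consumer a later patch wires up (that consumer is
assumed here and is not shown in this diff).

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for an Rx ring with work pending. */
struct fake_ring {
	int pending;
};

/* Mirrors the shape of i40e_clean_rx_irq() after this patch: the
 * return value (work completed, capped at budget) is unchanged and
 * still drives the caller's control flow; the packet count is merely
 * copied out through *rx_cleaned as a side channel.
 */
static int fake_clean_rx(struct fake_ring *ring, int budget,
			 unsigned int *rx_cleaned)
{
	int packets = ring->pending < budget ? ring->pending : budget;

	ring->pending -= packets;
	*rx_cleaned = packets;	/* new: report the count to the caller */
	return packets;		/* unchanged: NAPI accounting */
}

int main(void)
{
	struct fake_ring ring = { .pending = 300 };
	unsigned int rx_cleaned = 0;
	bool clean_complete = true;
	int budget = 64;
	int cleaned = fake_clean_rx(&ring, budget, &rx_cleaned);

	/* Same test as in i40e_napi_poll: consuming the whole budget
	 * means the ring may still have work, so polling continues.
	 */
	if (cleaned >= budget)
		clean_complete = false;

	printf("cleaned=%d rx_cleaned=%u complete=%d\n",
	       cleaned, rx_cleaned, (int)clean_complete);
	return 0;
}

Threading the count out through a pointer rather than widening or
repurposing the return value keeps every existing budget comparison at
the call sites untouched, which is what the commit message means by
avoiding any changes in control flow.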