@@ -690,6 +690,9 @@ static void efx_get_base_stats(struct net_device *net_dev,
tx->packets += tx_queue->old_complete_packets;
tx->bytes += tx_queue->old_complete_bytes;
}
+ /* Include XDP TX in device-wide stats */
+ tx->packets += tx_queue->complete_xdp_packets;
+ tx->bytes += tx_queue->complete_xdp_bytes;
}
}
}
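For context: efx_get_base_stats() is the driver's .get_base_stats hook in the netdev queue-stats API, which is where traffic on queues that are not exposed as per-queue netdev stats must be reported — the natural home for the driver's dedicated XDP TX queues. A minimal sketch of how such a hook is wired up, assuming the usual registration pattern (the ops-struct name below is illustrative, not from this patch):

#include <net/netdev_queues.h>

/* Illustrative name; the driver's actual ops struct may differ. */
static const struct netdev_stat_ops efx_netdev_stat_ops_sketch = {
	.get_base_stats		= efx_get_base_stats,
	/* .get_queue_stats_rx / .get_queue_stats_tx omitted here */
};

/* At probe time: net_dev->stat_ops = &efx_netdev_stat_ops_sketch; */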
@@ -216,6 +216,10 @@ struct efx_tx_buffer {
* created. For TSO, counts the superframe size, not the sizes of
* generated frames on the wire (i.e. the headers are only counted
* once)
+ * @complete_xdp_packets: Number of XDP TX packets completed since this
+ * struct was created.
+ * @complete_xdp_bytes: Number of XDP TX bytes completed since this
+ * struct was created.
* @completed_timestamp_major: Top part of the most recent tx timestamp.
* @completed_timestamp_minor: Low part of the most recent tx timestamp.
* @insert_count: Current insert pointer
@@ -281,6 +285,8 @@ struct efx_tx_queue {
unsigned int pkts_compl;
unsigned long complete_packets;
unsigned long complete_bytes;
+ unsigned long complete_xdp_packets;
+ unsigned long complete_xdp_bytes;
u32 completed_timestamp_major;
u32 completed_timestamp_minor;
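Like complete_packets and complete_bytes above them, the new counters are written only from the queue's own completion path, so plain unsigned longs suffice with no atomics or locking. A sketch of the update pattern, using a hypothetical helper name:

/* Hypothetical helper (not in the patch): bump the XDP completion
 * counters. Safe without atomics because each TX queue's completion
 * path is the sole writer of these fields.
 */
static inline void
efx_tx_queue_add_xdp_compl(struct efx_tx_queue *tx_queue,
			   unsigned int pkts, unsigned int bytes)
{
	tx_queue->complete_xdp_packets += pkts;
	tx_queue->complete_xdp_bytes += bytes;
}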
@@ -553,6 +553,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
{
+ unsigned int xdp_pkts_compl = 0, xdp_bytes_compl = 0;
unsigned int pkts_compl = 0, bytes_compl = 0;
unsigned int efv_pkts_compl = 0;
unsigned int read_ptr;
@@ -577,7 +578,8 @@ void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
if (buffer->flags & EFX_TX_BUF_SKB)
finished = true;
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
- &efv_pkts_compl);
+ &efv_pkts_compl, &xdp_pkts_compl,
+ &xdp_bytes_compl);
++tx_queue->read_count;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@@ -585,6 +587,8 @@ void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
tx_queue->pkts_compl += pkts_compl;
tx_queue->bytes_compl += bytes_compl;
+ tx_queue->complete_xdp_packets += xdp_pkts_compl;
+ tx_queue->complete_xdp_bytes += xdp_bytes_compl;
EFX_WARN_ON_PARANOID(pkts_compl + efv_pkts_compl != 1);
@@ -112,12 +112,14 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
/* Free any buffers left in the ring */
while (tx_queue->read_count != tx_queue->write_count) {
+ unsigned int xdp_pkts_compl = 0, xdp_bytes_compl = 0;
unsigned int pkts_compl = 0, bytes_compl = 0;
unsigned int efv_pkts_compl = 0;
buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
- &efv_pkts_compl);
+ &efv_pkts_compl, &xdp_pkts_compl,
+ &xdp_bytes_compl);
++tx_queue->read_count;
}
@@ -153,7 +155,9 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer,
unsigned int *pkts_compl,
unsigned int *bytes_compl,
- unsigned int *efv_pkts_compl)
+ unsigned int *efv_pkts_compl,
+ unsigned int *xdp_pkts,
+ unsigned int *xdp_bytes)
{
if (buffer->unmap_len) {
struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
@@ -198,6 +202,10 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
tx_queue->queue, tx_queue->read_count);
} else if (buffer->flags & EFX_TX_BUF_XDP) {
+ if (xdp_pkts)
+ (*xdp_pkts)++;
+ if (xdp_bytes)
+ (*xdp_bytes) += buffer->xdpf->len;
xdp_return_frame_rx_napi(buffer->xdpf);
}
buffer->len = 0;
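Note that the counts are taken before xdp_return_frame_rx_napi() hands the frame's memory back for recycling (reading xdpf->len afterwards would touch freed memory), and that both new out-parameters are NULL-checked before being dereferenced. Callers with no interest in the XDP counts can therefore pass NULL rather than throwaway locals — a hedged call-site example:

/* Hypothetical call site: release one buffer, discarding XDP counts.
 * The NULL checks in efx_dequeue_buffer() make the last two arguments
 * optional.
 */
unsigned int pkts_compl = 0, bytes_compl = 0, efv_pkts_compl = 0;

efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
		   &efv_pkts_compl, NULL, NULL);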
@@ -213,7 +221,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
unsigned int index,
unsigned int *pkts_compl,
unsigned int *bytes_compl,
- unsigned int *efv_pkts_compl)
+ unsigned int *efv_pkts_compl,
+ unsigned int *xdp_pkts,
+ unsigned int *xdp_bytes)
{
struct efx_nic *efx = tx_queue->efx;
unsigned int stop_index, read_ptr;
@@ -233,7 +243,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
}
efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl,
- efv_pkts_compl);
+ efv_pkts_compl, xdp_pkts, xdp_bytes);
++tx_queue->read_count;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@@ -256,15 +266,18 @@ void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
+ unsigned int xdp_pkts_compl = 0, xdp_bytes_compl = 0;
unsigned int efv_pkts_compl = 0;
struct efx_nic *efx = tx_queue->efx;
EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl,
- &efv_pkts_compl);
+ &efv_pkts_compl, &xdp_pkts_compl, &xdp_bytes_compl);
tx_queue->pkts_compl += pkts_compl;
tx_queue->bytes_compl += bytes_compl;
+ tx_queue->complete_xdp_packets += xdp_pkts_compl;
+ tx_queue->complete_xdp_bytes += xdp_bytes_compl;
if (pkts_compl + efv_pkts_compl > 1)
++tx_queue->merge_events;
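The per-queue totals accumulated here are what the base-stats hook in the first hunk folds into the device-wide numbers. A hedged sketch of that aggregation, spelled out with the driver's channel iteration macros (loop body abridged, not verbatim from the driver):

/* Sketch only: sum XDP TX completions across every TX queue, as
 * efx_get_base_stats() does when filling the netdev base stats.
 */
static void efx_sum_xdp_tx(struct efx_nic *efx,
			   struct netdev_queue_stats_tx *tx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			tx->packets += tx_queue->complete_xdp_packets;
			tx->bytes += tx_queue->complete_xdp_bytes;
		}
}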
@@ -293,6 +306,8 @@ int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
unsigned int insert_count)
{
+ unsigned int xdp_bytes_compl = 0;
+ unsigned int xdp_pkts_compl = 0;
unsigned int efv_pkts_compl = 0;
struct efx_tx_buffer *buffer;
unsigned int bytes_compl = 0;
@@ -303,7 +318,8 @@ void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
--tx_queue->insert_count;
buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
- &efv_pkts_compl);
+ &efv_pkts_compl, &xdp_pkts_compl,
+ &xdp_bytes_compl);
}
}
@@ -20,7 +20,9 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer,
unsigned int *pkts_compl,
unsigned int *bytes_compl,
- unsigned int *efv_pkts_compl);
+ unsigned int *efv_pkts_compl,
+ unsigned int *xdp_pkts,
+ unsigned int *xdp_bytes);
static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
{