@@ -638,6 +638,10 @@ static void efx_get_queue_stats_rx(struct net_device *net_dev, int idx,
rx_queue = efx_channel_get_rx_queue(channel);
/* Count only packets since last time datapath was started */
stats->packets = rx_queue->rx_packets - rx_queue->old_rx_packets;
+ stats->hw_drops = efx_get_queue_stat_rx_hw_drops(channel) -
+ channel->old_n_rx_hw_drops;
+ stats->hw_drop_overruns = channel->n_rx_nodesc_trunc -
+ channel->old_n_rx_hw_drop_overruns;
}
static void efx_get_queue_stats_tx(struct net_device *net_dev, int idx,
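For context: these callbacks implement the kernel's per-queue stats interface from <net/netdev_queues.h> (netdev qstats). The registration itself is not part of this hunk; a minimal sketch of how such ops get wired up, assuming a stat-ops table named efx_stat_ops (the name is illustrative):

static const struct netdev_stat_ops efx_stat_ops = {
	.get_queue_stats_rx	= efx_get_queue_stats_rx,
	.get_queue_stats_tx	= efx_get_queue_stats_tx,
	.get_base_stats		= efx_get_base_stats,
};

	/* at netdev setup time, before register_netdev() */
	net_dev->stat_ops = &efx_stat_ops;

The core then reconstructs device totals as base stats plus the per-queue deltas reported by these callbacks.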
@@ -668,6 +672,8 @@ static void efx_get_base_stats(struct net_device *net_dev,
struct efx_channel *channel;
rx->packets = 0;
+ rx->hw_drops = 0;
+ rx->hw_drop_overruns = 0;
tx->packets = 0;
tx->bytes = 0;
@@ -676,10 +682,15 @@ static void efx_get_base_stats(struct net_device *net_dev,
*/
efx_for_each_channel(channel, efx) {
rx_queue = efx_channel_get_rx_queue(channel);
- if (channel->channel >= net_dev->real_num_rx_queues)
+ if (channel->channel >= net_dev->real_num_rx_queues) {
rx->packets += rx_queue->rx_packets;
- else
+ rx->hw_drops += efx_get_queue_stat_rx_hw_drops(channel);
+ rx->hw_drop_overruns += channel->n_rx_nodesc_trunc;
+ } else {
rx->packets += rx_queue->old_rx_packets;
+ rx->hw_drops += channel->old_n_rx_hw_drops;
+ rx->hw_drop_overruns += channel->old_n_rx_hw_drop_overruns;
+ }
efx_for_each_channel_tx_queue(tx_queue, channel) {
if (channel->channel < efx->tx_channel_offset ||
channel->channel >= efx->tx_channel_offset +
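The if/else above keeps the core's bookkeeping consistent: queues visible to userspace (channel < real_num_rx_queues) report their since-start deltas through efx_get_queue_stats_rx(), so the base stats must carry only the pre-start remainder, while queues hidden from userspace are folded into the base in full. A worked example of the resulting invariant, with illustrative numbers:

/* One visible RX queue:
 *   at efx_start_channels():  total hw drops = 7, so old_n_rx_hw_drops = 7
 *   later:                    total hw drops = 12
 *   per-queue callback:       12 - 7 = 5
 *   base stats contribution:  old_n_rx_hw_drops = 7
 *   core's view:              base + per-queue = 7 + 5 = 12 = device total
 */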
@@ -1100,6 +1100,10 @@ void efx_start_channels(struct efx_nic *efx)
atomic_inc(&efx->active_queues);
}
+ /* snapshot totals so per-queue stats count from this datapath (re)start */
+ channel->old_n_rx_hw_drops = efx_get_queue_stat_rx_hw_drops(channel);
+ channel->old_n_rx_hw_drop_overruns = channel->n_rx_nodesc_trunc;
+
efx_for_each_channel_rx_queue(rx_queue, channel) {
efx_init_rx_queue(rx_queue);
atomic_inc(&efx->active_queues);
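A side note on the snapshot arithmetic: for the overruns counter, both operands of the later subtraction (n_rx_nodesc_trunc and old_n_rx_hw_drop_overruns) are the same unsigned width, so the delta stays correct even if the live counter wraps between restarts. A standalone illustration in plain C (not driver code):

#include <limits.h>

/* Modular subtraction of same-width unsigned counters yields the true
 * delta even across wraparound.
 */
static unsigned int delta_since_start(unsigned int now, unsigned int old)
{
	return now - old;
}

/* delta_since_start(5, UINT_MAX - 2) == 8: the counter wrapped past
 * UINT_MAX, yet the since-start count is still right.
 */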
@@ -43,6 +43,13 @@ struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel);
void efx_start_channels(struct efx_nic *efx);
void efx_stop_channels(struct efx_nic *efx);
+static inline u64 efx_get_queue_stat_rx_hw_drops(struct efx_channel *channel)
+{
+ return channel->n_rx_eth_crc_err + channel->n_rx_frm_trunc +
+ channel->n_rx_overlength + channel->n_rx_nodesc_trunc +
+ channel->n_rx_mport_bad;
+}
+
void efx_init_napi_channel(struct efx_channel *channel);
void efx_init_napi(struct efx_nic *efx);
void efx_fini_napi_channel(struct efx_channel *channel);
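Putting the helper together with the old_* snapshot and the delta read, the whole scheme can be modelled as a standalone program. A minimal sketch in plain C, with a simplified channel struct whose field names mirror the patch (values are illustrative):

#include <stdio.h>

struct chan {
	unsigned int n_rx_eth_crc_err, n_rx_frm_trunc, n_rx_overlength,
		     n_rx_nodesc_trunc, n_rx_mport_bad;
	unsigned int old_n_rx_hw_drops;	/* snapshot at datapath start */
};

static unsigned long long hw_drops(const struct chan *c)
{
	return c->n_rx_eth_crc_err + c->n_rx_frm_trunc + c->n_rx_overlength +
	       c->n_rx_nodesc_trunc + c->n_rx_mport_bad;
}

int main(void)
{
	struct chan c = { .n_rx_eth_crc_err = 3, .n_rx_frm_trunc = 4 };

	c.old_n_rx_hw_drops = hw_drops(&c);	/* "start": total is 7 */
	c.n_rx_mport_bad += 5;			/* 5 more drops arrive */

	/* the per-queue stat reports drops since start: prints 5 */
	printf("%llu\n", hw_drops(&c) - c.old_n_rx_hw_drops);
	return 0;
}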
@@ -494,6 +494,10 @@ enum efx_sync_events_state {
* @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP
* @n_rx_mport_bad: Count of RX packets dropped because their ingress mport was
* not recognised
+ * @old_n_rx_hw_drops: Count of all RX packets dropped for any reason as of last
+ * efx_start_channels()
+ * @old_n_rx_hw_drop_overruns: Value of @n_rx_nodesc_trunc as of last
+ * efx_start_channels()
* @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
* __efx_rx_packet(), or zero if there is none
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered
@@ -556,6 +560,9 @@ struct efx_channel {
unsigned int n_rx_xdp_redirect;
unsigned int n_rx_mport_bad;
+ unsigned int old_n_rx_hw_drops;
+ unsigned int old_n_rx_hw_drop_overruns;
+
unsigned int rx_pkt_n_frags;
unsigned int rx_pkt_index;