diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1535,13 +1535,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
struct ibmvnic_tx_buff *tx_buff = NULL;
struct ibmvnic_sub_crq_queue *tx_scrq;
struct ibmvnic_tx_pool *tx_pool;
- unsigned int tx_send_failed = 0;
netdev_tx_t ret = NETDEV_TX_OK;
- unsigned int tx_map_failed = 0;
union sub_crq indir_arr[16];
unsigned int tx_dropped = 0;
- unsigned int tx_packets = 0;
- unsigned int tx_bytes = 0;
dma_addr_t data_dma_addr;
struct netdev_queue *txq;
unsigned long lpar_rc;
@@ -1558,18 +1554,13 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
if (!netif_subqueue_stopped(netdev, skb))
netif_stop_subqueue(netdev, queue_num);
dev_kfree_skb_any(skb);
-
- tx_send_failed++;
tx_dropped++;
- ret = NETDEV_TX_OK;
- goto out;
+ goto err_out;
}
if (ibmvnic_xmit_workarounds(skb, netdev)) {
tx_dropped++;
- tx_send_failed++;
- ret = NETDEV_TX_OK;
- goto out;
+ goto err_out;
}
if (skb_is_gso(skb))
tx_pool = &adapter->tso_pool[queue_num];
@@ -1584,10 +1575,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
if (index == IBMVNIC_INVALID_MAP) {
dev_kfree_skb_any(skb);
- tx_send_failed++;
tx_dropped++;
- ret = NETDEV_TX_OK;
- goto out;
+ goto err_out;
}
tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
@@ -1707,12 +1696,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
netif_stop_subqueue(netdev, queue_num);
}
- tx_packets++;
- tx_bytes += skb->len;
txq->trans_start = jiffies;
- ret = NETDEV_TX_OK;
- goto out;
+ return ret;
tx_flush_err:
dev_kfree_skb_any(skb);
tx_buff->skb = NULL;
@@ -1758,14 +1744,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
}
-out:
+err_out:
netdev->stats.tx_dropped += tx_dropped;
- netdev->stats.tx_bytes += tx_bytes;
- netdev->stats.tx_packets += tx_packets;
- adapter->tx_send_failed += tx_send_failed;
- adapter->tx_map_failed += tx_map_failed;
- adapter->tx_stats_buffers[queue_num].packets += tx_packets;
- adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
return ret;
@@ -3147,6 +3127,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
int num_entries = 0;
int total_bytes = 0;
int num_packets = 0;
+ int tx_dropped = 0;
next = ibmvnic_next_scrq(adapter, scrq);
/* ensure that we are reading the correct queue entry */
@@ -3157,6 +3138,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
if (next->tx_comp.rcs[i]) {
dev_err(dev, "tx error %x\n",
next->tx_comp.rcs[i]);
+ tx_dropped++;
error = true;
}
index = be32_to_cpu(next->tx_comp.correlators[i]);
@@ -3200,6 +3182,12 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
netdev_dbg(adapter->netdev, "Started queue %d\n",
scrq->pool_index);
}
+ adapter->netdev->stats.tx_packets += num_packets;
+ adapter->netdev->stats.tx_bytes += total_bytes;
+ adapter->netdev->stats.tx_dropped += tx_dropped;
+ adapter->tx_stats_buffers[scrq->pool_index].packets += num_packets;
+ adapter->tx_stats_buffers[scrq->pool_index].bytes += total_bytes;
+ adapter->tx_stats_buffers[scrq->pool_index].dropped_packets += tx_dropped;
}
enable_scrq_irq(adapter, scrq);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -992,8 +992,6 @@ struct ibmvnic_adapter {
int replenish_add_buff_success;
int replenish_add_buff_failure;
int replenish_task_cycles;
- int tx_send_failed;
- int tx_map_failed;
struct ibmvnic_tx_queue_stats *tx_stats_buffers;
struct ibmvnic_rx_queue_stats *rx_stats_buffers;
Update error handling code in ibmvnic_xmit to be more readable and remove
unused statistics counters. Also record statistics when TX completions are
received to improve accuracy.

Signed-off-by: Thomas Falcon <tlfalcon@linux.ibm.com>
---
 drivers/net/ethernet/ibm/ibmvnic.c | 38 ++++++++++--------------------
 drivers/net/ethernet/ibm/ibmvnic.h |  2 --
 2 files changed, 13 insertions(+), 27 deletions(-)
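For readers who want the accounting pattern in isolation, here is a minimal,
self-contained sketch. It is not ibmvnic code: every name in it is made up for
illustration, and it simplifies the driver's exact bookkeeping. The idea is
that TX packet, byte and drop counters are folded in when completions come
back from the device, rather than when frames are submitted, so failures
reported at completion time can be reflected in the counts.

#include <stdio.h>

struct tx_queue_stats {
	unsigned long packets;
	unsigned long bytes;
	unsigned long dropped_packets;
};

struct tx_completion {
	unsigned int len;	/* length of the completed frame */
	int rc;			/* non-zero: the transmission failed */
};

/* Fold one batch of completions into per-queue and device-wide counters. */
static void complete_tx(struct tx_queue_stats *queue,
			struct tx_queue_stats *totals,
			const struct tx_completion *comps, int n)
{
	unsigned long packets = 0, bytes = 0, dropped = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (comps[i].rc) {
			dropped++;	/* counted here, where the error is known */
			continue;
		}
		packets++;
		bytes += comps[i].len;
	}

	queue->packets += packets;
	queue->bytes += bytes;
	queue->dropped_packets += dropped;
	totals->packets += packets;
	totals->bytes += bytes;
	totals->dropped_packets += dropped;
}

int main(void)
{
	struct tx_queue_stats queue = { 0 }, totals = { 0 };
	struct tx_completion comps[] = {
		{ .len = 1514, .rc = 0 },
		{ .len = 60,   .rc = 0 },
		{ .len = 1514, .rc = 1 },	/* a failed frame */
	};

	complete_tx(&queue, &totals, comps, 3);
	printf("queue: packets=%lu bytes=%lu dropped=%lu\n",
	       queue.packets, queue.bytes, queue.dropped_packets);
	return 0;
}

In the patch itself this corresponds to updating netdev->stats and
adapter->tx_stats_buffers from ibmvnic_complete_tx, while the err_out label in
ibmvnic_xmit keeps counting only the frames dropped before they were ever
handed to the hardware.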