@@ -366,6 +366,10 @@ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
stats->tls_dump_bytes += wi->num_bytes;
break;
case MLX5E_DUMP_WQE_NVMEOTCP:
+#ifdef CONFIG_MLX5_EN_NVMEOTCP
+ stats->nvmeotcp_dump_packets++;
+ stats->nvmeotcp_dump_bytes += wi->num_bytes;
+#endif
break;
}
}
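
DUMP WQEs replay payload the stack has already sent so the device can rebuild its parsing state after a resync; the new counter pair mirrors the kTLS tls_dump_* pair above it. A minimal sketch of how the pair could be consumed for monitoring (the helper below is hypothetical and not part of the patch; the field names match the mlx5e_sq_stats additions later in this series):

#include <linux/math64.h>

/* Hypothetical debug helper: per-mille of NVMEoTCP TX bytes that were
 * replayed through DUMP WQEs rather than sent in order. A high value
 * points at frequent out-of-order traffic on offloaded queues. */
static u64 nvmeotcp_tx_replay_permille(const struct mlx5e_sq_stats *s)
{
	u64 total = s->nvmeotcp_offload_bytes + s->nvmeotcp_dump_bytes;

	return total ? div64_u64(1000 * s->nvmeotcp_dump_bytes, total) : 0;
}
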
@@ -1388,8 +1388,10 @@ bool mlx5e_nvmeotcp_resync_cap(struct mlx5e_nvmeotcp_queue *queue,
if (unlikely(ret))
goto err_out;
out:
+ sq->stats->nvmeotcp_resync++;
return true;
err_out:
+ sq->stats->nvmeotcp_resync_fail++;
return false;
}
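
The two new increments partition every resync attempt: the out: label counts attempts successfully posted to the HW, err_out: counts the rest. A sketch of the assumed caller contract (the argument list is hypothetical; only MLX5E_NVMEOTCP_RESYNC_SKIP is visible in this series):

/* Assumed caller shape: a failed resync falls back to sending the skb
 * without offload, so nvmeotcp_resync + nvmeotcp_resync_fail covers
 * all attempts and no packet is dropped either way. */
if (!mlx5e_nvmeotcp_resync_cap(queue, sq, skb, seq, pdu_info))
	return MLX5E_NVMEOTCP_RESYNC_SKIP; /* send unoffloaded */
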
@@ -1413,21 +1415,30 @@ mlx5e_nvmeotcp_handle_ooo_skb(struct mlx5e_nvmeotcp_queue *queue,
u32 seq, int datalen)
{
struct ulp_ddp_pdu_info *pdu_info = NULL;
+ struct mlx5e_sq_stats *stats = sq->stats;
+
+ stats->nvmeotcp_ooo++;
- if (mlx5e_nvmeotcp_check_if_need_offload(queue, seq + datalen, seq))
+ if (mlx5e_nvmeotcp_check_if_need_offload(queue, seq + datalen, seq)) {
+ stats->nvmeotcp_no_need_offload++;
return MLX5E_NVMEOTCP_RESYNC_SKIP;
+ }
/* ask for pdu_info that includes the tcp_seq */
pdu_info = ulp_ddp_get_pdu_info(skb->sk, seq);
- if (!pdu_info)
+ if (!pdu_info) {
+ stats->nvmeotcp_no_pdu_info++;
return MLX5E_NVMEOTCP_RESYNC_SKIP;
+ }
queue->end_seq_hint = pdu_info->end_seq - 4;
queue->start_pdu_hint = pdu_info->start_seq;
/* check if this packet contains the crc - if so offload, else don't */
- if (mlx5e_nvmeotcp_check_if_need_offload(queue, seq + datalen, seq))
+ if (mlx5e_nvmeotcp_check_if_need_offload(queue, seq + datalen, seq)) {
+ stats->nvmeotcp_no_need_offload++;
return MLX5E_NVMEOTCP_RESYNC_SKIP;
+ }
/* update NIC about resync - it will rebuild the parse machine
 * and send psv with small fence
@@ -1464,6 +1475,7 @@ bool mlx5e_nvmeotcp_handle_tx_skb(struct net_device *netdev,
struct sk_buff *skb, int *nvmeotcp_tisn)
{
struct mlx5e_nvmeotcp_queue *ctx;
+ struct mlx5e_sq_stats *stats = sq->stats;
int datalen;
u32 seq;
@@ -1484,8 +1496,10 @@ bool mlx5e_nvmeotcp_handle_tx_skb(struct net_device *netdev,
if (WARN_ON_ONCE(ctx->ulp_ddp_ctx.netdev != netdev))
goto err_out;
- if (unlikely(mlx5e_nvmeotcp_test_and_clear_pending(ctx)))
+ if (unlikely(mlx5e_nvmeotcp_test_and_clear_pending(ctx))) {
mlx5e_nvmeotcp_tx_post_param_wqes(sq, skb->sk, ctx);
+ stats->nvmeotcp_ctx++;
+ }
seq = ntohl(tcp_hdr(skb)->seq);
if (unlikely(ctx->ulp_ddp_ctx.expected_seq != seq)) {
@@ -1504,6 +1518,11 @@ bool mlx5e_nvmeotcp_handle_tx_skb(struct net_device *netdev,
*nvmeotcp_tisn = ctx->tisn;
ctx->ulp_ddp_ctx.expected_seq = seq + datalen;
+ stats->nvmeotcp_offload_packets += skb_is_gso(skb) ?
+ skb_shinfo(skb)->gso_segs : 1;
+
+ stats->nvmeotcp_offload_bytes += datalen;
+
goto good_out;
out:
*nvmeotcp_tisn = 0;
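
On the accounting above: offload_packets counts wire segments, so a single GSO skb covering N MSS-sized segments adds N, while offload_bytes adds the TCP payload length once per skb. An equivalent, factored-out form (the helper is hypothetical; skb_is_gso() and gso_segs are the regular kernel APIs):

#include <linux/skbuff.h>

/* Hypothetical helper equivalent to the ternary above: the number of
 * wire segments an skb produces after segmentation (1 if not GSO). */
static inline u32 mlx5e_tx_skb_segs(const struct sk_buff *skb)
{
	return skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}
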
@@ -125,6 +125,18 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
#endif
+#ifdef CONFIG_MLX5_EN_NVMEOTCP
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nvmeotcp_offload_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nvmeotcp_offload_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nvmeotcp_ooo) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nvmeotcp_dump_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nvmeotcp_dump_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nvmeotcp_resync) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nvmeotcp_ctx) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nvmeotcp_resync_fail) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nvmeotcp_no_need_offload) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nvmeotcp_no_pdu_info) },
+#endif
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
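
For reference, the declare macros (as defined in en_stats.h) stringify the field name and record its offset, which is why each new struct field must match its ethtool counter name exactly:

#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
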
@@ -420,6 +432,18 @@ static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
#endif
+#ifdef CONFIG_MLX5_EN_NVMEOTCP
+ s->tx_nvmeotcp_offload_packets += sq_stats->nvmeotcp_offload_packets;
+ s->tx_nvmeotcp_offload_bytes += sq_stats->nvmeotcp_offload_bytes;
+ s->tx_nvmeotcp_ooo += sq_stats->nvmeotcp_ooo;
+ s->tx_nvmeotcp_dump_packets += sq_stats->nvmeotcp_dump_packets;
+ s->tx_nvmeotcp_dump_bytes += sq_stats->nvmeotcp_dump_bytes;
+ s->tx_nvmeotcp_resync += sq_stats->nvmeotcp_resync;
+ s->tx_nvmeotcp_ctx += sq_stats->nvmeotcp_ctx;
+ s->tx_nvmeotcp_resync_fail += sq_stats->nvmeotcp_resync_fail;
+ s->tx_nvmeotcp_no_need_offload += sq_stats->nvmeotcp_no_need_offload;
+ s->tx_nvmeotcp_no_pdu_info += sq_stats->nvmeotcp_no_pdu_info;
+#endif
s->tx_cqes += sq_stats->cqes;
}
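
This per-SQ fold is invoked from the software-stats group update; a sketch of the assumed aggregation path (the wrapper is hypothetical, loop bounds and field names approximate the upstream en_stats.c shape):

/* Hypothetical wrapper: every channel's per-TC SQ counters are summed
 * into a single mlx5e_sw_stats snapshot exposed via ethtool. */
static void mlx5e_sw_stats_fold_sqs(struct mlx5e_sw_stats *s,
				    struct mlx5e_channel_stats *channel_stats,
				    int max_nch, int max_tc)
{
	int i, tc;

	for (i = 0; i < max_nch; i++)
		for (tc = 0; tc < max_tc; tc++)
			mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats[i].sq[tc]);
}
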
@@ -1850,6 +1874,18 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
+#endif
+#ifdef CONFIG_MLX5_EN_NVMEOTCP
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nvmeotcp_offload_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nvmeotcp_offload_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nvmeotcp_ooo) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nvmeotcp_dump_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nvmeotcp_dump_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nvmeotcp_resync) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nvmeotcp_ctx) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nvmeotcp_no_need_offload) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nvmeotcp_no_pdu_info) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nvmeotcp_resync_fail) },
#endif
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
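
Each entry above is emitted once per TX queue when the ethtool strings are filled, so the counters surface as tx0_nvmeotcp_offload_packets, tx1_..., and so on, alongside the tx_nvmeotcp_* totals from sw_stats_desc. A sketch of the assumed fill loop (the wrapper is hypothetical; its shape approximates upstream en_stats.c):

#include <linux/ethtool.h>

/* Hypothetical fill wrapper: the "tx%d_" format recorded by
 * MLX5E_DECLARE_TX_STAT is instantiated once per TX queue index. */
static int fill_sq_strings(u8 *data, int idx, int num_txqs)
{
	int i, j;

	for (i = 0; i < num_txqs; i++)
		for (j = 0; j < ARRAY_SIZE(sq_stats_desc); j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				sq_stats_desc[j].format, i);
	return idx;
}
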
@@ -205,6 +205,17 @@ struct mlx5e_sw_stats {
u64 rx_nvmeotcp_resync;
u64 rx_nvmeotcp_offload_packets;
u64 rx_nvmeotcp_offload_bytes;
+
+ u64 tx_nvmeotcp_offload_packets;
+ u64 tx_nvmeotcp_offload_bytes;
+ u64 tx_nvmeotcp_ooo;
+ u64 tx_nvmeotcp_resync;
+ u64 tx_nvmeotcp_dump_packets;
+ u64 tx_nvmeotcp_dump_bytes;
+ u64 tx_nvmeotcp_ctx;
+ u64 tx_nvmeotcp_no_need_offload;
+ u64 tx_nvmeotcp_no_pdu_info;
+ u64 tx_nvmeotcp_resync_fail;
#endif
u64 ch_events;
u64 ch_poll;
@@ -405,6 +416,18 @@ struct mlx5e_sq_stats {
u64 tls_skip_no_sync_data;
u64 tls_drop_no_sync_data;
u64 tls_drop_bypass_req;
+#endif
+#ifdef CONFIG_MLX5_EN_NVMEOTCP
+ u64 nvmeotcp_offload_packets;
+ u64 nvmeotcp_offload_bytes;
+ u64 nvmeotcp_ooo;
+ u64 nvmeotcp_resync;
+ u64 nvmeotcp_dump_packets;
+ u64 nvmeotcp_dump_bytes;
+ u64 nvmeotcp_ctx;
+ u64 nvmeotcp_resync_fail;
+ u64 nvmeotcp_no_need_offload;
+ u64 nvmeotcp_no_pdu_info;
#endif
/* less likely accessed in data path */
u64 csum_none;