@@ -113,6 +113,9 @@ struct mlx5e_accel_tx_state {
#ifdef CONFIG_MLX5_EN_IPSEC
struct mlx5e_accel_tx_ipsec_state ipsec;
#endif
+#ifdef CONFIG_MLX5_EN_NVMEOTCP
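+ /* NVMEoTCP TIS number for this skb; 0 if the skb is not offloaded. */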
+ int tisn;
+#endif
};
static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
@@ -137,6 +140,12 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
}
#endif
+#ifdef CONFIG_MLX5_EN_NVMEOTCP
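+ /* Resolves state->tisn; on error the skb is freed and the xmit is aborted. */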
+ if (test_bit(MLX5E_SQ_STATE_NVMEOTCP, &sq->state)) {
+ if (unlikely(!mlx5e_nvmeotcp_handle_tx_skb(dev, sq, skb, &state->tisn)))
+ return false;
+ }
+#endif
return true;
}
@@ -187,6 +196,10 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
mlx5e_tls_handle_tx_wqe(&wqe->ctrl, &state->tls);
#endif
+#ifdef CONFIG_MLX5_EN_NVMEOTCP
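+ /* No-op unless mlx5e_accel_tx_begin() resolved a non-zero NVMEoTCP TIS. */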
+ mlx5e_nvmeotcp_handle_tx_wqe(sq, &wqe->ctrl, state->tisn);
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) &&
state->ipsec.xo && state->ipsec.tailen)
@@ -1160,6 +1160,69 @@ void mlx5e_nvmeotcp_tx_post_param_wqes(struct mlx5e_txqsq *sq, struct sock *sk,
mlx5e_nvmeotcp_tx_post_progress_params(ctx, sq, tcp_sk(sk)->copied_seq, false);
}
+static inline bool mlx5e_is_sk_tx_device_offloaded(struct sock *sk)
+{
+ /* Return true only after the smp_store_release() assignment in
+ * mlx5e_nvmeotcp_queue_tx_init(); pairs with the smp_load_acquire() below.
+ */
+ return sk && sk_fullsock(sk) &&
+ (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
+ &ulp_ddp_validate_xmit_skb);
+}
+
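+/* Decide whether this skb is sent via the NVMEoTCP offload: post any pending
+ * offload params WQEs, verify the TCP sequence matches what the offload
+ * context expects and report the queue's TIS number through @nvmeotcp_tisn
+ * (0 when the skb is not offloaded). Frees the skb and returns false on error.
+ */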
+bool mlx5e_nvmeotcp_handle_tx_skb(struct net_device *netdev,
+ struct mlx5e_txqsq *sq,
+ struct sk_buff *skb, int *nvmeotcp_tisn)
+{
+ struct mlx5e_nvmeotcp_queue *ctx;
+ struct ulp_ddp_ctx *ulp_ctx;
+ int datalen;
+ u32 seq;
+
+ if (!mlx5e_is_sk_tx_device_offloaded(skb->sk))
+ goto out;
+
+ datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ if (!datalen)
+ goto out;
+
+ ulp_ctx = ulp_ddp_get_ctx(skb->sk);
+ if (!ulp_ctx)
+ goto out;
+
+ ctx = container_of(ulp_ctx, struct mlx5e_nvmeotcp_queue, ulp_ddp_ctx);
+
+ mlx5e_tx_mpwqe_ensure_complete(sq);
+
+ if (WARN_ON_ONCE(ctx->ulp_ddp_ctx.netdev != netdev))
+ goto err_out;
+
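+ /* Post the queue's offload params WQEs if they are still pending. */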
+ if (unlikely(mlx5e_nvmeotcp_test_and_clear_pending(ctx)))
+ mlx5e_nvmeotcp_tx_post_param_wqes(sq, skb->sk, ctx);
+
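+ /* Drop skbs whose TCP sequence does not match the offload context. */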
+ seq = ntohl(tcp_hdr(skb)->seq);
+ if (unlikely(ctx->ulp_ddp_ctx.expected_seq != seq))
+ goto err_out;
+
+ *nvmeotcp_tisn = ctx->tisn;
+ ctx->ulp_ddp_ctx.expected_seq = seq + datalen;
+ goto good_out;
+out:
+ *nvmeotcp_tisn = 0;
+good_out:
+ return true;
+err_out:
+ dev_kfree_skb(skb);
+ return false;
+}
+
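+/* Stamp the TIS resolved by mlx5e_nvmeotcp_handle_tx_skb() into the send WQE
+ * control segment; a tisn of 0 leaves the WQE untouched.
+ */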
+void mlx5e_nvmeotcp_handle_tx_wqe(struct mlx5e_txqsq *sq,
+ struct mlx5_wqe_ctrl_seg *cseg,
+ int tisn)
+{
+ if (tisn)
+ cseg->tis_tir_num = cpu_to_be32(tisn << 8);
+}
+
void mlx5e_nvmeotcp_cleanup(struct mlx5e_priv *priv)
{
struct mlx5e_nvmeotcp *nvmeotcp = priv->nvmeotcp;
@@ -119,6 +119,12 @@ void mlx5e_nvmeotcp_ddp_inv_done(struct mlx5e_icosq_wqe_info *wi);
void mlx5e_nvmeotcp_ctx_comp(struct mlx5e_icosq_wqe_info *wi);
int mlx5e_nvmeotcp_init_rx(struct mlx5e_priv *priv);
void mlx5e_nvmeotcp_cleanup_rx(struct mlx5e_priv *priv);
+bool mlx5e_nvmeotcp_handle_tx_skb(struct net_device *netdev,
+ struct mlx5e_txqsq *sq,
+ struct sk_buff *skb, int *tisn);
+void mlx5e_nvmeotcp_handle_tx_wqe(struct mlx5e_txqsq *sq,
+ struct mlx5_wqe_ctrl_seg *cseg,
+ int tisn);
#else
static inline void mlx5e_nvmeotcp_build_netdev(struct mlx5e_priv *priv) { }
@@ -249,6 +249,13 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial++;
#endif
+#ifdef CONFIG_MLX5_EN_NVMEOTCP
+ } else if (unlikely(accel && accel->tisn)) {
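+ /* Request L3/L4 checksum offload for NVMEoTCP offloaded skbs. */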
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+ sq->stats->csum_partial++;
+#endif
} else if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
} else
@@ -352,6 +359,10 @@ mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb,
if (accel && accel->tls.tls_tisn)
return MLX5_INLINE_MODE_TCP_UDP;
#endif
+#ifdef CONFIG_MLX5_EN_NVMEOTCP
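+ /* As with TLS offload, use TCP/UDP inline mode for offloaded skbs. */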
+ if (accel && accel->tisn)
+ return MLX5_INLINE_MODE_TCP_UDP;
+#endif
mode = sq->min_inline_mode;