@@ -164,6 +164,17 @@ config MLX5_EN_TLS
help
Build support for TLS cryptography-offload acceleration in the NIC.
+config MLX5_EN_NVMEOTCP
+ bool "NVMEoTCP acceleration"
+ depends on ULP_DDP
+ depends on MLX5_CORE_EN
+ default y
+ help
+ Build support for NVMEoTCP acceleration in the NIC.
+ This includes Direct Data Placement and CRC offload.
+ Note: Support for hardware with this capability needs to be selected
+ for this option to become available.
+
config MLX5_SW_STEERING
bool "Mellanox Technologies software-managed steering"
depends on MLX5_CORE_EN && MLX5_ESWITCH
@@ -109,6 +109,8 @@ mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \
en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
en_accel/ktls_tx.o en_accel/ktls_rx.o
+mlx5_core-$(CONFIG_MLX5_EN_NVMEOTCP) += en_accel/fs_tcp.o en_accel/nvmeotcp.o
+
mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \
steering/dr_matcher.o steering/dr_rule.o \
steering/dr_icm_pool.o steering/dr_buddy.o \
@@ -327,6 +327,7 @@ struct mlx5e_params {
int hard_mtu;
bool ptp_rx;
__be32 terminate_lkey_be;
+ bool nvmeotcp;
};
static inline u8 mlx5e_get_dcb_num_tc(struct mlx5e_params *params)
@@ -934,6 +935,9 @@ struct mlx5e_priv {
#endif
#ifdef CONFIG_MLX5_EN_TLS
struct mlx5e_tls *tls;
+#endif
+#ifdef CONFIG_MLX5_EN_NVMEOTCP
+ struct mlx5e_nvmeotcp *nvmeotcp;
#endif
struct devlink_health_reporter *tx_reporter;
struct devlink_health_reporter *rx_reporter;
@@ -77,7 +77,7 @@ enum {
MLX5E_INNER_TTC_FT_LEVEL,
MLX5E_FS_TT_UDP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
MLX5E_FS_TT_ANY_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
-#ifdef CONFIG_MLX5_EN_TLS
+#if defined(CONFIG_MLX5_EN_TLS) || defined(CONFIG_MLX5_EN_NVMEOTCP)
MLX5E_ACCEL_FS_TCP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
#endif
#ifdef CONFIG_MLX5_EN_ARFS
@@ -169,7 +169,7 @@ struct mlx5e_fs_any *mlx5e_fs_get_any(struct mlx5e_flow_steering *fs);
void mlx5e_fs_set_any(struct mlx5e_flow_steering *fs, struct mlx5e_fs_any *any);
struct mlx5e_fs_udp *mlx5e_fs_get_udp(struct mlx5e_flow_steering *fs);
void mlx5e_fs_set_udp(struct mlx5e_flow_steering *fs, struct mlx5e_fs_udp *udp);
-#ifdef CONFIG_MLX5_EN_TLS
+#if defined(CONFIG_MLX5_EN_TLS) || defined(CONFIG_MLX5_EN_NVMEOTCP)
struct mlx5e_accel_fs_tcp *mlx5e_fs_get_accel_tcp(struct mlx5e_flow_steering *fs);
void mlx5e_fs_set_accel_tcp(struct mlx5e_flow_steering *fs, struct mlx5e_accel_fs_tcp *accel_tcp);
#endif
@@ -873,7 +873,8 @@ static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
void *cqc = param->cqc;
MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
- if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
+ if (MLX5_CAP_GEN(mdev, cqe_128_always) &&
+ (cache_line_size() >= 128 || param->force_cqe128))
MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}
@@ -903,6 +904,9 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
void *cqc = param->cqc;
u8 log_cq_size;
+ /* NVMe-TCP offload mandates 128-byte CQEs */
+ param->force_cqe128 |= IS_ENABLED(CONFIG_MLX5_EN_NVMEOTCP) && params->nvmeotcp;
+
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
@@ -1242,9 +1246,9 @@ static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
}
-static void mlx5e_build_icosq_param(struct mlx5_core_dev *mdev,
- u8 log_wq_size,
- struct mlx5e_sq_param *param)
+void mlx5e_build_icosq_param(struct mlx5_core_dev *mdev,
+ u8 log_wq_size,
+ struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
@@ -17,6 +17,7 @@ struct mlx5e_cq_param {
struct mlx5_wq_param wq;
u16 eq_ix;
u8 cq_period_mode;
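+ /* force 128B CQEs even when cache_line_size() < 128; set for the NVMEoTCP offload */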
+ bool force_cqe128;
};
struct mlx5e_rq_param {
@@ -147,6 +148,8 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5e_sq_param *param);
+void mlx5e_build_icosq_param(struct mlx5_core_dev *mdev,
+ u8 log_wq_size, struct mlx5e_sq_param *param);
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
u16 q_counter,
@@ -40,6 +40,7 @@
#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include <en_accel/macsec.h>
+#include "en_accel/nvmeotcp.h"
#include "en.h"
#include "en/txrx.h"
@@ -202,11 +203,13 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
{
+ mlx5e_nvmeotcp_init_rx(priv);
return mlx5e_ktls_init_rx(priv);
}
static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv)
{
+ mlx5e_nvmeotcp_cleanup_rx(priv);
mlx5e_ktls_cleanup_rx(priv);
}
@@ -6,7 +6,7 @@
#include "en/fs.h"
-#ifdef CONFIG_MLX5_EN_TLS
+#if defined(CONFIG_MLX5_EN_TLS) || defined(CONFIG_MLX5_EN_NVMEOTCP)
int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs);
void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs);
struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
new file mode 100644
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
+
+#include <linux/netdevice.h>
+#include <linux/idr.h>
+#include "en_accel/nvmeotcp.h"
+#include "en_accel/fs_tcp.h"
+#include "en/txrx.h"
+
+#define MAX_NUM_NVMEOTCP_QUEUES (4000)
+#define MIN_NUM_NVMEOTCP_QUEUES (1)
+
+static const struct rhashtable_params rhash_queues = {
+ .key_len = sizeof(int),
+ .key_offset = offsetof(struct mlx5e_nvmeotcp_queue, id),
+ .head_offset = offsetof(struct mlx5e_nvmeotcp_queue, hash),
+ .automatic_shrinking = true,
+ .min_size = MIN_NUM_NVMEOTCP_QUEUES,
+ .max_size = MAX_NUM_NVMEOTCP_QUEUES,
+};
+
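+/* ULP DDP device callbacks, exposed via mlx5e_nvmeotcp_ops below. They are
+ * placeholders at this point; presumably the actual offload logic is wired up
+ * by follow-up changes.
+ */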
+static int
+mlx5e_nvmeotcp_offload_limits(struct net_device *netdev,
+ struct ulp_ddp_limits *limits)
+{
+ return 0;
+}
+
+static int
+mlx5e_nvmeotcp_queue_init(struct net_device *netdev,
+ struct sock *sk,
+ struct ulp_ddp_config *tconfig)
+{
+ return 0;
+}
+
+static void
+mlx5e_nvmeotcp_queue_teardown(struct net_device *netdev,
+ struct sock *sk)
+{
+}
+
+static int
+mlx5e_nvmeotcp_ddp_setup(struct net_device *netdev,
+ struct sock *sk,
+ struct ulp_ddp_io *ddp)
+{
+ return 0;
+}
+
+static void
+mlx5e_nvmeotcp_ddp_teardown(struct net_device *netdev,
+ struct sock *sk,
+ struct ulp_ddp_io *ddp,
+ void *ddp_ctx)
+{
+}
+
+static void
+mlx5e_nvmeotcp_ddp_resync(struct net_device *netdev,
+ struct sock *sk, u32 seq)
+{
+}
+
+int set_ulp_ddp_nvme_tcp(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_params new_params;
+ int err = 0;
+
+ /* Offloaded queues may still exist when the ethtool callback to disable the feature
+ * is invoked. Hence, we can't destroy the TCP flow table, since it may still be
+ * referenced by the offload-related flows, and we keep the 128B CQEs on the channel
+ * RQs. Also, since we don't deref/destroy the FS TCP table when the feature is
+ * disabled, we don't ref it again if the feature is enabled multiple times.
+ */
+ if (!enable || priv->nvmeotcp->enabled)
+ return 0;
+
+ err = mlx5e_accel_fs_tcp_create(priv->fs);
+ if (err)
+ return err;
+
+ new_params = priv->channels.params;
+ new_params.nvmeotcp = enable;
+ err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
+ if (err)
+ goto fs_tcp_destroy;
+
+ priv->nvmeotcp->enabled = true;
+ return 0;
+
+fs_tcp_destroy:
+ mlx5e_accel_fs_tcp_destroy(priv->fs);
+ return err;
+}
+
+static int mlx5e_ulp_ddp_set_caps(struct net_device *netdev, unsigned long *new_caps,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ DECLARE_BITMAP(old_caps, ULP_DDP_C_COUNT);
+ struct mlx5e_params *params;
+ int ret = 0;
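+ /* -1: leave the NVMe-TCP caps unchanged, 0: disable them, 1: enable them */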
+ int nvme = -1;
+
+ mutex_lock(&priv->state_lock);
+ params = &priv->channels.params;
+ bitmap_copy(old_caps, netdev->ulp_ddp_caps.active, ULP_DDP_C_COUNT);
+
+ /* always handle nvme-tcp-ddp and nvme-tcp-ddgst-rx together (all or nothing) */
+
+ if (ulp_ddp_cap_turned_on(old_caps, new_caps, ULP_DDP_C_NVME_TCP_BIT) &&
+ ulp_ddp_cap_turned_on(old_caps, new_caps, ULP_DDP_C_NVME_TCP_DDGST_RX_BIT)) {
+ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "NVMe-TCP offload not supported when CQE compress is active. Disable rx_cqe_compress ethtool private flag first\n");
+ goto out;
+ }
+
+ if (netdev->features & (NETIF_F_LRO | NETIF_F_GRO_HW)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "NVMe-TCP offload not supported when HW_GRO/LRO is active. Disable rx-gro-hw ethtool feature first\n");
+ goto out;
+ }
+ nvme = 1;
+ } else if (ulp_ddp_cap_turned_off(old_caps, new_caps, ULP_DDP_C_NVME_TCP_BIT) &&
+ ulp_ddp_cap_turned_off(old_caps, new_caps, ULP_DDP_C_NVME_TCP_DDGST_RX_BIT)) {
+ nvme = 0;
+ }
+
+ if (nvme >= 0) {
+ ret = set_ulp_ddp_nvme_tcp(netdev, nvme);
+ if (ret)
+ goto out;
+ change_bit(ULP_DDP_C_NVME_TCP_BIT, netdev->ulp_ddp_caps.active);
+ change_bit(ULP_DDP_C_NVME_TCP_DDGST_RX_BIT, netdev->ulp_ddp_caps.active);
+ }
+
+out:
+ mutex_unlock(&priv->state_lock);
+ return ret;
+}
+
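+/* ULP DDP interface of the driver, hooked into net_device_ops.ulp_ddp_ops
+ * when CONFIG_MLX5_EN_NVMEOTCP is set (see mlx5e_netdev_ops).
+ */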
+const struct ulp_ddp_dev_ops mlx5e_nvmeotcp_ops = {
+ .limits = mlx5e_nvmeotcp_offload_limits,
+ .sk_add = mlx5e_nvmeotcp_queue_init,
+ .sk_del = mlx5e_nvmeotcp_queue_teardown,
+ .setup = mlx5e_nvmeotcp_ddp_setup,
+ .teardown = mlx5e_nvmeotcp_ddp_teardown,
+ .resync = mlx5e_nvmeotcp_ddp_resync,
+ .set_caps = mlx5e_ulp_ddp_set_caps,
+};
+
+void mlx5e_nvmeotcp_build_netdev(struct mlx5e_priv *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
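+ /* Advertise the capability only when the device supports NVMEoTCP zero-copy
+ * (DDP), CRC Rx offload and always-128B CQEs.
+ */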
+ if (!(MLX5_CAP_GEN(mdev, nvmeotcp) &&
+ MLX5_CAP_DEV_NVMEOTCP(mdev, zerocopy) &&
+ MLX5_CAP_DEV_NVMEOTCP(mdev, crc_rx) && MLX5_CAP_GEN(mdev, cqe_128_always)))
+ return;
+
+ /* report ULP DDP as supported, but don't enable it by default */
+ set_bit(ULP_DDP_C_NVME_TCP_BIT, netdev->ulp_ddp_caps.hw);
+ set_bit(ULP_DDP_C_NVME_TCP_DDGST_RX_BIT, netdev->ulp_ddp_caps.hw);
+}
+
+void mlx5e_nvmeotcp_cleanup_rx(struct mlx5e_priv *priv)
+{
+ if (priv->nvmeotcp && priv->nvmeotcp->enabled)
+ mlx5e_accel_fs_tcp_destroy(priv->fs);
+}
+
+int mlx5e_nvmeotcp_init(struct mlx5e_priv *priv)
+{
+ struct mlx5e_nvmeotcp *nvmeotcp = NULL;
+ int ret = 0;
+
+ if (!MLX5_CAP_GEN(priv->mdev, nvmeotcp))
+ return 0;
+
+ nvmeotcp = kzalloc(sizeof(*nvmeotcp), GFP_KERNEL);
+ if (!nvmeotcp)
+ return -ENOMEM;
+
+ ida_init(&nvmeotcp->queue_ids);
+ ret = rhashtable_init(&nvmeotcp->queue_hash, &rhash_queues);
+ if (ret)
+ goto err_ida;
+
+ nvmeotcp->enabled = false;
+
+ priv->nvmeotcp = nvmeotcp;
+ return 0;
+
+err_ida:
+ ida_destroy(&nvmeotcp->queue_ids);
+ kfree(nvmeotcp);
+ return ret;
+}
+
+void mlx5e_nvmeotcp_cleanup(struct mlx5e_priv *priv)
+{
+ struct mlx5e_nvmeotcp *nvmeotcp = priv->nvmeotcp;
+
+ if (!nvmeotcp)
+ return;
+
+ rhashtable_destroy(&nvmeotcp->queue_hash);
+ ida_destroy(&nvmeotcp->queue_ids);
+ kfree(nvmeotcp);
+ priv->nvmeotcp = NULL;
+}
new file mode 100644
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. */
+#ifndef __MLX5E_NVMEOTCP_H__
+#define __MLX5E_NVMEOTCP_H__
+
+#ifdef CONFIG_MLX5_EN_NVMEOTCP
+
+#include <net/ulp_ddp.h>
+#include "en.h"
+#include "en/params.h"
+
+struct mlx5e_nvmeotcp_queue_entry {
+ struct mlx5e_nvmeotcp_queue *queue;
+ u32 sgl_length;
+ u32 klm_mkey;
+ struct scatterlist *sgl;
+ u32 ccid_gen;
+ u64 size;
+
+ /* for the ddp invalidate done callback */
+ void *ddp_ctx;
+ struct ulp_ddp_io *ddp;
+};
+
+struct mlx5e_nvmeotcp_queue_handler {
+ struct napi_struct napi;
+ struct mlx5e_cq *cq;
+};
+
+/**
+ * struct mlx5e_nvmeotcp_queue - mlx5 metadata for NVMEoTCP queue
+ * @ulp_ddp_ctx: Generic ulp ddp context
+ * @tir: Destination TIR created for NVMEoTCP offload
+ * @fh: Flow handle representing the 5-tuple steering for this flow
+ * @id: Flow tag ID used to identify this queue
+ * @size: NVMEoTCP queue depth
+ * @ccid_gen: Generation ID for the CCID, used to avoid conflicts in DDP
+ * @max_klms_per_wqe: Number of KLMs per DDP operation
+ * @hash: Hash table of queues mapped by @id
+ * @pda: Padding alignment
+ * @tag_buf_table_id: ID of the tag buffer table for CCIDs
+ * @dgst: Digest supported (header and/or data)
+ * @sq: Send queue used for posting UMRs
+ * @ref_count: Reference count for this structure
+ * @after_resync_cqe: Indicate if resync occurred
+ * @ccid_table: Table holding metadata for each CC (Command Capsule)
+ * @ccid: ID of the current CC
+ * @ccsglidx: Index within the scatter-gather list (SGL) of the current CC
+ * @ccoff: Offset within the current CC
+ * @ccoff_inner: Current offset within the @ccsglidx element
+ * @channel_ix: Channel index of this nvmeotcp_queue
+ * @sk: The socket used by the NVMe-TCP queue
+ * @crc_rx: CRC Rx offload indication for this queue
+ * @priv: mlx5e netdev priv
+ * @static_params_done: Async completion structure for the initial UMR mapping
+ * synchronization
+ * @sq_lock: Spin lock for the icosq
+ * @qh: Completion queue handler for processing UMR completions
+ */
+struct mlx5e_nvmeotcp_queue {
+ struct ulp_ddp_ctx ulp_ddp_ctx;
+ struct mlx5e_tir tir;
+ struct mlx5_flow_handle *fh;
+ int id;
+ u32 size;
+ /* needed when the upper layer immediately reuses a CCID and some packet loss occurs */
+ u32 ccid_gen;
+ u32 max_klms_per_wqe;
+ struct rhash_head hash;
+ int pda;
+ u32 tag_buf_table_id;
+ u8 dgst;
+ struct mlx5e_icosq sq;
+
+ /* data-path section cache aligned */
+ refcount_t ref_count;
+ /* for masking the HW resync CQE */
+ bool after_resync_cqe;
+ struct mlx5e_nvmeotcp_queue_entry *ccid_table;
+ /* current ccid fields */
+ int ccid;
+ int ccsglidx;
+ off_t ccoff;
+ int ccoff_inner;
+
+ u32 channel_ix;
+ struct sock *sk;
+ u8 crc_rx:1;
+ /* for ddp invalidate flow */
+ struct mlx5e_priv *priv;
+ /* end of data-path section */
+
+ struct completion static_params_done;
+ /* spin lock for the icosq; the ULP can issue requests from multiple contexts */
+ spinlock_t sq_lock;
+ struct mlx5e_nvmeotcp_queue_handler qh;
+};
+
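+/**
+ * struct mlx5e_nvmeotcp - NVMEoTCP offload state of a netdev
+ * @queue_ids: IDA for allocating queue IDs
+ * @queue_hash: Hash table of offloaded queues, keyed by the queue ID
+ * @enabled: True once the offload has been enabled on this netdev
+ */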
+struct mlx5e_nvmeotcp {
+ struct ida queue_ids;
+ struct rhashtable queue_hash;
+ bool enabled;
+};
+
+void mlx5e_nvmeotcp_build_netdev(struct mlx5e_priv *priv);
+int mlx5e_nvmeotcp_init(struct mlx5e_priv *priv);
+int set_ulp_ddp_nvme_tcp(struct net_device *netdev, bool enable);
+void mlx5e_nvmeotcp_cleanup(struct mlx5e_priv *priv);
+static inline void mlx5e_nvmeotcp_init_rx(struct mlx5e_priv *priv) {}
+void mlx5e_nvmeotcp_cleanup_rx(struct mlx5e_priv *priv);
+extern const struct ulp_ddp_dev_ops mlx5e_nvmeotcp_ops;
+#else
+
+static inline void mlx5e_nvmeotcp_build_netdev(struct mlx5e_priv *priv) {}
+static inline int mlx5e_nvmeotcp_init(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_nvmeotcp_cleanup(struct mlx5e_priv *priv) {}
+static inline int set_ulp_ddp_nvme_tcp(struct net_device *dev, bool en) { return -EOPNOTSUPP; }
+static inline void mlx5e_nvmeotcp_init_rx(struct mlx5e_priv *priv) {}
+static inline void mlx5e_nvmeotcp_cleanup_rx(struct mlx5e_priv *priv) {}
+#endif
+#endif /* __MLX5E_NVMEOTCP_H__ */
@@ -38,6 +38,7 @@
#include "en/ptp.h"
#include "lib/clock.h"
#include "en/fs_ethtool.h"
+#include "en_accel/nvmeotcp.h"
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
@@ -1929,6 +1930,11 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
return -EINVAL;
}
+ if (priv->channels.params.nvmeotcp) {
+ netdev_warn(priv->netdev, "Can't set CQE compression after ULP DDP NVMe-TCP offload\n");
+ return -EINVAL;
+ }
+
new_params = priv->channels.params;
MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
if (rx_filter)
@@ -62,7 +62,7 @@ struct mlx5e_flow_steering {
#ifdef CONFIG_MLX5_EN_ARFS
struct mlx5e_arfs_tables *arfs;
#endif
-#ifdef CONFIG_MLX5_EN_TLS
+#if defined(CONFIG_MLX5_EN_TLS) || defined(CONFIG_MLX5_EN_NVMEOTCP)
struct mlx5e_accel_fs_tcp *accel_tcp;
#endif
struct mlx5e_fs_udp *udp;
@@ -1557,7 +1557,7 @@ void mlx5e_fs_set_any(struct mlx5e_flow_steering *fs, struct mlx5e_fs_any *any)
fs->any = any;
}
-#ifdef CONFIG_MLX5_EN_TLS
+#if defined(CONFIG_MLX5_EN_TLS) || defined(CONFIG_MLX5_EN_NVMEOTCP)
struct mlx5e_accel_fs_tcp *mlx5e_fs_get_accel_tcp(struct mlx5e_flow_steering *fs)
{
return fs->accel_tcp;
@@ -50,6 +50,7 @@
#include "en_accel/macsec.h"
#include "en_accel/en_accel.h"
#include "en_accel/ktls.h"
+#include "en_accel/nvmeotcp.h"
#include "lib/vxlan.h"
#include "lib/clock.h"
#include "en/port.h"
@@ -4267,6 +4268,13 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
features &= ~NETIF_F_NETNS_LOCAL;
}
+ if (features & (NETIF_F_LRO | NETIF_F_GRO_HW)) {
+ if (params->nvmeotcp) {
+ netdev_warn(netdev, "Disabling HW-GRO/LRO, not supported after ULP DDP NVMe-TCP offload\n");
+ features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+ }
+ }
+
mutex_unlock(&priv->state_lock);
return features;
@@ -5020,6 +5028,9 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_has_offload_stats = mlx5e_has_offload_stats,
.ndo_get_offload_stats = mlx5e_get_offload_stats,
#endif
+#ifdef CONFIG_MLX5_EN_NVMEOTCP
+ .ulp_ddp_ops = &mlx5e_nvmeotcp_ops,
+#endif
};
static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
@@ -5289,6 +5300,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
mlx5e_macsec_build_netdev(priv);
mlx5e_ipsec_build_netdev(priv);
mlx5e_ktls_build_netdev(priv);
+ mlx5e_nvmeotcp_build_netdev(priv);
}
void mlx5e_create_q_counters(struct mlx5e_priv *priv)
@@ -5360,6 +5372,10 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
if (err)
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
+ err = mlx5e_nvmeotcp_init(priv);
+ if (err)
+ mlx5_core_err(mdev, "NVMEoTCP initialization failed, %d\n", err);
+
mlx5e_health_create_reporters(priv);
/* If netdev is already registered (e.g. move from uplink to nic profile),
@@ -5380,6 +5396,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_health_destroy_reporters(priv);
+ mlx5e_nvmeotcp_cleanup(priv);
mlx5e_ktls_cleanup(priv);
mlx5e_fs_cleanup(priv->fs);
debugfs_remove_recursive(priv->dfs_root);
@@ -1758,6 +1758,7 @@ static const int types[] = {
MLX5_CAP_MACSEC,
MLX5_CAP_ADV_VIRTUALIZATION,
MLX5_CAP_CRYPTO,
+ MLX5_CAP_DEV_NVMEOTCP,
};
static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)