@@ -50,6 +50,9 @@ enum mlx5e_icosq_wqe_type {
MLX5E_ICOSQ_WQE_SET_PSV_TLS,
MLX5E_ICOSQ_WQE_GET_PSV_TLS,
#endif
+#ifdef CONFIG_MLX5_EN_NVMEOTCP
+ MLX5E_ICOSQ_WQE_UMR_NVMEOTCP,
+#endif
};

/* General */
@@ -4,6 +4,7 @@
#include <linux/netdevice.h>
#include <linux/idr.h>
#include "en_accel/nvmeotcp.h"
+#include "en_accel/nvmeotcp_utils.h"
#include "en_accel/fs_tcp.h"
#include "en/txrx.h"
@@ -19,9 +20,123 @@ static const struct rhashtable_params rhash_queues = {
.max_size = MAX_NUM_NVMEOTCP_QUEUES,
};
+static void
+fill_nvmeotcp_klm_wqe(struct mlx5e_nvmeotcp_queue *queue, struct mlx5e_umr_wqe *wqe, u16 ccid,
+ u32 klm_entries, u16 klm_offset)
+{
+ struct scatterlist *sgl_mkey;
+ u32 lkey, i;
+
+ lkey = queue->priv->mdev->mlx5e_res.hw_objs.mkey;
+ for (i = 0; i < klm_entries; i++) {
+ sgl_mkey = &queue->ccid_table[ccid].sgl[i + klm_offset];
+ wqe->inline_klms[i].bcount = cpu_to_be32(sg_dma_len(sgl_mkey));
+ wqe->inline_klms[i].key = cpu_to_be32(lkey);
+ wqe->inline_klms[i].va = cpu_to_be64(sgl_mkey->dma_address);
+ }
+
+ for (; i < ALIGN(klm_entries, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT); i++) {
+ wqe->inline_klms[i].bcount = 0;
+ wqe->inline_klms[i].key = 0;
+ wqe->inline_klms[i].va = 0;
+ }
+}
+
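+/* Build a KLM UMR WQE. The control segment targets either the ccid's
+ * klm_mkey (KLM_UMR) or the queue's TIR; the first WQE of a registration
+ * (!klm_offset) also re-programs the mkey length and translation size.
+ */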
+static void
+build_nvmeotcp_klm_umr(struct mlx5e_nvmeotcp_queue *queue, struct mlx5e_umr_wqe *wqe,
+ u16 ccid, int klm_entries, u32 klm_offset, u32 len,
+ enum wqe_type klm_type)
+{
+ u32 id = (klm_type == KLM_UMR) ? queue->ccid_table[ccid].klm_mkey :
+ (mlx5e_tir_get_tirn(&queue->tir) << MLX5_WQE_CTRL_TIR_TIS_INDEX_SHIFT);
+ u8 opc_mod = (klm_type == KLM_UMR) ? MLX5_CTRL_SEGMENT_OPC_MOD_UMR_UMR :
+ MLX5_OPC_MOD_TRANSPORT_TIR_STATIC_PARAMS;
+ u32 ds_cnt = MLX5E_KLM_UMR_DS_CNT(ALIGN(klm_entries, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT));
+ struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_mkey_seg *mkc = &wqe->mkc;
+ u32 sqn = queue->sq.sqn;
+ u16 pc = queue->sq.pc;
+
+ cseg->opmod_idx_opcode = cpu_to_be32((pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+ MLX5_OPCODE_UMR | (opc_mod) << 24);
+ cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt);
+ cseg->general_id = cpu_to_be32(id);
+
+ if (klm_type == KLM_UMR && !klm_offset) {
+ ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_XLT_OCT_SIZE |
+ MLX5_MKEY_MASK_LEN | MLX5_MKEY_MASK_FREE);
+ mkc->xlt_oct_size = cpu_to_be32(ALIGN(len, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT));
+ mkc->len = cpu_to_be64(queue->ccid_table[ccid].size);
+ }
+
+ ucseg->flags = MLX5_UMR_INLINE | MLX5_UMR_TRANSLATION_OFFSET_EN;
+ ucseg->xlt_octowords = cpu_to_be16(ALIGN(klm_entries, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT));
+ ucseg->xlt_offset = cpu_to_be16(klm_offset);
+ fill_nvmeotcp_klm_wqe(queue, wqe, ccid, klm_entries, klm_offset);
+}
+
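+/* Record the WQE type and size in the ICOSQ wqe_info array so that
+ * mlx5e_poll_ico_cq() can account for the consumed WQEBBs.
+ */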
+static void
+mlx5e_nvmeotcp_fill_wi(struct mlx5e_icosq *sq, u32 wqebbs, u16 pi)
+{
+ struct mlx5e_icosq_wqe_info *wi = &sq->db.wqe_info[pi];
+
+ memset(wi, 0, sizeof(*wi));
+
+ wi->num_wqebbs = wqebbs;
+ wi->wqe_type = MLX5E_ICOSQ_WQE_UMR_NVMEOTCP;
+}
+
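+/* Post a single KLM UMR WQE covering at most max_klms_per_wqe entries
+ * starting at @klm_offset; returns the number of KLM entries consumed.
+ */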
+static u32
+post_klm_wqe(struct mlx5e_nvmeotcp_queue *queue,
+ enum wqe_type wqe_type,
+ u16 ccid,
+ u32 klm_length,
+ u32 klm_offset)
+{
+ struct mlx5e_icosq *sq = &queue->sq;
+ u32 wqebbs, cur_klm_entries;
+ struct mlx5e_umr_wqe *wqe;
+ u16 pi, wqe_sz;
+
+ cur_klm_entries = min_t(int, queue->max_klms_per_wqe, klm_length - klm_offset);
+ wqe_sz = MLX5E_KLM_UMR_WQE_SZ(ALIGN(cur_klm_entries, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT));
+ wqebbs = DIV_ROUND_UP(wqe_sz, MLX5_SEND_WQE_BB);
+ pi = mlx5e_icosq_get_next_pi(sq, wqebbs);
+ wqe = MLX5E_NVMEOTCP_FETCH_KLM_WQE(sq, pi);
+ mlx5e_nvmeotcp_fill_wi(sq, wqebbs, pi);
+ build_nvmeotcp_klm_umr(queue, wqe, ccid, cur_klm_entries, klm_offset,
+ klm_length, wqe_type);
+ sq->pc += wqebbs;
+ sq->doorbell_cseg = &wqe->ctrl;
+ return cur_klm_entries;
+}
+
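+/* Register a ccid mapping of @klm_length entries, splitting it across as
+ * many UMR WQEs as needed and ringing the doorbell once. ddp_setup
+ * (KLM_UMR) WQEs are posted without requesting a completion.
+ */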
+static void
+mlx5e_nvmeotcp_post_klm_wqe(struct mlx5e_nvmeotcp_queue *queue, enum wqe_type wqe_type,
+ u16 ccid, u32 klm_length)
+{
+ struct mlx5e_icosq *sq = &queue->sq;
+ u32 klm_offset = 0, wqes, i;
+
+ wqes = DIV_ROUND_UP(klm_length, queue->max_klms_per_wqe);
+
+ spin_lock_bh(&queue->sq_lock);
+
+ for (i = 0; i < wqes; i++)
+ klm_offset += post_klm_wqe(queue, wqe_type, ccid, klm_length, klm_offset);
+
+ if (wqe_type == KLM_UMR) /* not asking for completion on ddp_setup UMRs */
+ __mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg, 0);
+ else
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
+
+ spin_unlock_bh(&queue->sq_lock);
+}
+
static int
mlx5e_nvmeotcp_offload_limits(struct net_device *netdev,
- struct ulp_ddp_limits *ulp_limits)
+ struct ulp_ddp_limits *limits)
{
return 0;
}
@@ -45,6 +160,14 @@ mlx5e_nvmeotcp_ddp_setup(struct net_device *netdev,
struct sock *sk,
struct ulp_ddp_io *ddp)
{
+ struct mlx5e_nvmeotcp_queue *queue;
+
+ queue = container_of(ulp_ddp_get_ctx(sk),
+ struct mlx5e_nvmeotcp_queue, ulp_ddp_ctx);
+
+ /* Placeholder - map the SGL and initialize the KLM count */
+
+ mlx5e_nvmeotcp_post_klm_wqe(queue, KLM_UMR, ddp->command_id, 0);
return 0;
}
new file mode 100644
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. */
+#ifndef __MLX5E_NVMEOTCP_UTILS_H__
+#define __MLX5E_NVMEOTCP_UTILS_H__
+
+#include "en.h"
+
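+/* Fetch (and zero) the UMR WQE at producer index @pi of the ICOSQ. */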
+#define MLX5E_NVMEOTCP_FETCH_KLM_WQE(sq, pi) \
+ ((struct mlx5e_umr_wqe *)\
+ mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_umr_wqe)))
+
+#define MLX5_CTRL_SEGMENT_OPC_MOD_UMR_NVMEOTCP_TIR_PROGRESS_PARAMS 0x4
+
+#define MLX5_CTRL_SEGMENT_OPC_MOD_UMR_TIR_PARAMS 0x2
+#define MLX5_CTRL_SEGMENT_OPC_MOD_UMR_UMR 0x0
+
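+/* WQE flavours used by the NVMEoTCP offload. Only KLM_UMR (buffer
+ * registration on ddp_setup) is posted at this point; the remaining types
+ * are reserved for the rest of the offload flow.
+ */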
+enum wqe_type {
+ KLM_UMR,
+ BSF_KLM_UMR,
+ SET_PSV_UMR,
+ BSF_UMR,
+ KLM_INV_UMR,
+};
+
+#endif /* __MLX5E_NVMEOTCP_UTILS_H__ */
@@ -984,6 +984,10 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq, int budget)
case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
mlx5e_ktls_handle_get_psv_completion(wi, sq);
break;
+#endif
+#ifdef CONFIG_MLX5_EN_NVMEOTCP
+ case MLX5E_ICOSQ_WQE_UMR_NVMEOTCP:
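+ /* Nothing to do on completion of an NVMEoTCP UMR WQE. */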
+ break;
#endif
default:
netdev_WARN_ONCE(cq->netdev,