[rdma-core,2/2] mlx5: Add IBV_UVERBS_CQ_FLAGS_IGNORE_OVERRUN support

Message ID: 1534259360-29819-3-git-send-email-yishaih@mellanox.com
State: Not Applicable
Series: CQ ignore overrun

Commit Message

Yishai Hadas Aug. 14, 2018, 3:09 p.m. UTC
From: Artemy Kovalyov <artemyko@mellanox.com>

Add IBV_UVERBS_CQ_FLAGS_IGNORE_OVERRUN support by using the extended
create CQ command (i.e. ibv_cmd_create_cq_ex).

When the hardware attempts to deliver a CQE to a regular CQ that is
already full, an overflow occurs and an asynchronous error event is
generated. On a CQ created with this flag the overflow check is
disabled: no error is ever generated, and the CQE is simply written to
the next entry, wrapping around and overwriting completions that have
not yet been polled. The legacy create CQ command has no field to
carry creation flags, hence the extended command is required.
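
For illustration, an application would request this behaviour through
the extended CQ creation API roughly as follows (a sketch only: error
handling is omitted and "ctx" stands for an already opened device
context):

	struct ibv_cq_init_attr_ex cq_attr = {
		.cqe = 256,	/* requested CQ depth */
		.comp_mask = IBV_CQ_INIT_ATTR_MASK_FLAGS,
		.flags = IBV_CREATE_CQ_ATTR_IGNORE_OVERRUN,
	};
	struct ibv_cq_ex *cq = ibv_create_cq_ex(ctx, &cq_attr);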

Signed-off-by: Artemy Kovalyov <artemyko@mellanox.com>
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
---
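For readers skimming the diff: the heart of the verbs.c change is the
switch between the legacy and the extended create-CQ commands.
Condensed from the hunks below, the new flow is roughly:

	if (use_ex) {	/* IBV_CREATE_CQ_ATTR_IGNORE_OVERRUN requested */
		struct ibv_cq_init_attr_ex cq_attr_ex = *cq_attr;

		cq_attr_ex.cqe = ncqe - 1;
		ret = ibv_cmd_create_cq_ex(context, &cq_attr_ex, &cq->ibv_cq,
					   &cmd_ex.ibv_cmd, sizeof(cmd_ex),
					   &resp_ex.ibv_resp, sizeof(resp_ex));
	} else {
		ret = ibv_cmd_create_cq(context, ncqe - 1, cq_attr->channel,
					cq_attr->comp_vector,
					ibv_cq_ex_to_cq(&cq->ibv_cq),
					&cmd.ibv_cmd, sizeof(cmd),
					&resp.ibv_resp, sizeof(resp));
	}

The driver-private fields (buf_addr, db_addr, cqe_size, ...) are set
through the cmd_drv/resp_drv pointers so that a single code path
serves both command formats.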
 providers/mlx5/mlx5-abi.h |  2 ++
 providers/mlx5/verbs.c    | 59 +++++++++++++++++++++++++++++++++++++++++------------------
 2 files changed, 43 insertions(+), 18 deletions(-)
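
As context for the first hunk: DECLARE_DRV_CMD pairs the generic
uverbs command/response layout with the mlx5 driver-private payload.
What the new mlx5_create_cq_ex declaration provides is roughly this
(an illustrative sketch, not the macro's literal expansion):

	struct mlx5_create_cq_ex {
		struct ib_uverbs_ex_create_cq	ibv_cmd;	/* generic ex header */
		struct mlx5_ib_create_cq	drv_payload;	/* buf_addr, db_addr, cqe_size, ... */
	};

	struct mlx5_create_cq_ex_resp {
		struct ib_uverbs_ex_create_cq_resp	ibv_resp;
		struct mlx5_ib_create_cq_resp		drv_payload;	/* cqn, ... */
	};

This is why create_cq() can address the driver part of either command
format through a single struct mlx5_ib_create_cq pointer.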

Patch

diff --git a/providers/mlx5/mlx5-abi.h b/providers/mlx5/mlx5-abi.h
index 785aa0d..2b66e82 100644
--- a/providers/mlx5/mlx5-abi.h
+++ b/providers/mlx5/mlx5-abi.h
@@ -59,6 +59,8 @@  DECLARE_DRV_CMD(mlx5_alloc_pd, IB_USER_VERBS_CMD_ALLOC_PD,
 		empty, mlx5_ib_alloc_pd_resp);
 DECLARE_DRV_CMD(mlx5_create_cq, IB_USER_VERBS_CMD_CREATE_CQ,
 		mlx5_ib_create_cq, mlx5_ib_create_cq_resp);
+DECLARE_DRV_CMD(mlx5_create_cq_ex, IB_USER_VERBS_EX_CMD_CREATE_CQ,
+		mlx5_ib_create_cq, mlx5_ib_create_cq_resp);
 DECLARE_DRV_CMD(mlx5_create_srq, IB_USER_VERBS_CMD_CREATE_SRQ,
 		mlx5_ib_create_srq, mlx5_ib_create_srq_resp);
 DECLARE_DRV_CMD(mlx5_create_srq_ex, IB_USER_VERBS_CMD_CREATE_XSRQ,
diff --git a/providers/mlx5/verbs.c b/providers/mlx5/verbs.c
index 82efc4d..ff40029 100644
--- a/providers/mlx5/verbs.c
+++ b/providers/mlx5/verbs.c
@@ -614,7 +614,9 @@  enum {
 };
 
 enum {
-	CREATE_CQ_SUPPORTED_FLAGS = IBV_CREATE_CQ_ATTR_SINGLE_THREADED
+	CREATE_CQ_SUPPORTED_FLAGS =
+		IBV_CREATE_CQ_ATTR_SINGLE_THREADED |
+		IBV_CREATE_CQ_ATTR_IGNORE_OVERRUN
 };
 
 static struct ibv_cq_ex *create_cq(struct ibv_context *context,
@@ -622,8 +624,12 @@  static struct ibv_cq_ex *create_cq(struct ibv_context *context,
 				   int cq_alloc_flags,
 				   struct mlx5dv_cq_init_attr *mlx5cq_attr)
 {
-	struct mlx5_create_cq		cmd;
-	struct mlx5_create_cq_resp	resp;
+	struct mlx5_create_cq		cmd = {};
+	struct mlx5_create_cq_resp	resp = {};
+	struct mlx5_create_cq_ex	cmd_ex = {};
+	struct mlx5_create_cq_ex_resp	resp_ex = {};
+	struct mlx5_ib_create_cq       *cmd_drv;
+	struct mlx5_ib_create_cq_resp  *resp_drv;
 	struct mlx5_cq		       *cq;
 	int				cqe_sz;
 	int				ret;
@@ -631,6 +637,7 @@  static struct ibv_cq_ex *create_cq(struct ibv_context *context,
 	int				rc;
 	struct mlx5_context *mctx = to_mctx(context);
 	FILE *fp = to_mctx(context)->dbg_fp;
+	bool				use_ex = false;
 
 	if (!cq_attr->cqe) {
 		mlx5_dbg(fp, MLX5_DBG_CQ, "CQE invalid\n");
@@ -665,9 +672,15 @@  static struct ibv_cq_ex *create_cq(struct ibv_context *context,
 		return NULL;
 	}
 
-	if (cq_attr->comp_mask & IBV_CQ_INIT_ATTR_MASK_FLAGS &&
-	    cq_attr->flags & IBV_CREATE_CQ_ATTR_SINGLE_THREADED)
-		cq->flags |= MLX5_CQ_FLAGS_SINGLE_THREADED;
+	if (cq_attr->comp_mask & IBV_CQ_INIT_ATTR_MASK_FLAGS) {
+		if (cq_attr->flags & IBV_CREATE_CQ_ATTR_SINGLE_THREADED)
+			cq->flags |= MLX5_CQ_FLAGS_SINGLE_THREADED;
+		if (cq_attr->flags & IBV_CREATE_CQ_ATTR_IGNORE_OVERRUN)
+			use_ex = true;
+	}
+
+	cmd_drv = use_ex ? &cmd_ex.drv_payload : &cmd.drv_payload;
+	resp_drv = use_ex ? &resp_ex.drv_payload : &resp.drv_payload;
 
 	if (cq_alloc_flags & MLX5_CQ_FLAGS_EXTENDED) {
 		rc = mlx5_cq_fill_pfns(cq, cq_attr, mctx);
@@ -677,7 +690,6 @@  static struct ibv_cq_ex *create_cq(struct ibv_context *context,
 		}
 	}
 
-	memset(&cmd, 0, sizeof cmd);
 	cq->cons_index = 0;
 
 	if (mlx5_spinlock_init(&cq->lock, !mlx5_single_threaded))
@@ -714,9 +726,9 @@  static struct ibv_cq_ex *create_cq(struct ibv_context *context,
 	cq->cqe_sz			= cqe_sz;
 	cq->flags			= cq_alloc_flags;
 
-	cmd.buf_addr = (uintptr_t) cq->buf_a.buf;
-	cmd.db_addr  = (uintptr_t) cq->dbrec;
-	cmd.cqe_size = cqe_sz;
+	cmd_drv->buf_addr = (uintptr_t) cq->buf_a.buf;
+	cmd_drv->db_addr  = (uintptr_t) cq->dbrec;
+	cmd_drv->cqe_size = cqe_sz;
 
 	if (mlx5cq_attr) {
 		if (!check_comp_mask(mlx5cq_attr->comp_mask,
@@ -731,8 +743,8 @@  static struct ibv_cq_ex *create_cq(struct ibv_context *context,
 			if (mctx->cqe_comp_caps.max_num &&
 			    (mlx5cq_attr->cqe_comp_res_format &
 			     mctx->cqe_comp_caps.supported_format)) {
-				cmd.cqe_comp_en = 1;
-				cmd.cqe_comp_res_format = mlx5cq_attr->cqe_comp_res_format;
+				cmd_drv->cqe_comp_en = 1;
+				cmd_drv->cqe_comp_res_format = mlx5cq_attr->cqe_comp_res_format;
 			} else {
 				mlx5_dbg(fp, MLX5_DBG_CQ, "CQE Compression is not supported\n");
 				errno = EINVAL;
@@ -759,15 +771,26 @@  static struct ibv_cq_ex *create_cq(struct ibv_context *context,
 					goto err_db;
 				}
 
-				cmd.flags |= MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD;
+				cmd_drv->flags |= MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD;
 			}
 		}
 	}
 
-	ret = ibv_cmd_create_cq(context, ncqe - 1, cq_attr->channel,
-				cq_attr->comp_vector,
-				ibv_cq_ex_to_cq(&cq->ibv_cq), &cmd.ibv_cmd,
-				sizeof(cmd), &resp.ibv_resp, sizeof(resp));
+	if (use_ex) {
+		struct ibv_cq_init_attr_ex cq_attr_ex = *cq_attr;
+
+		cq_attr_ex.cqe = ncqe - 1;
+		ret = ibv_cmd_create_cq_ex(context, &cq_attr_ex, &cq->ibv_cq,
+					   &cmd_ex.ibv_cmd, sizeof(cmd_ex),
+					   &resp_ex.ibv_resp, sizeof(resp_ex));
+	} else {
+		ret = ibv_cmd_create_cq(context, ncqe - 1, cq_attr->channel,
+					cq_attr->comp_vector,
+					ibv_cq_ex_to_cq(&cq->ibv_cq),
+					&cmd.ibv_cmd, sizeof(cmd),
+					&resp.ibv_resp, sizeof(resp));
+	}
+
 	if (ret) {
 		mlx5_dbg(fp, MLX5_DBG_CQ, "ret %d\n", ret);
 		goto err_db;
@@ -775,7 +798,7 @@  static struct ibv_cq_ex *create_cq(struct ibv_context *context,
 
 	cq->active_buf = &cq->buf_a;
 	cq->resize_buf = NULL;
-	cq->cqn = resp.cqn;
+	cq->cqn = resp_drv->cqn;
 	cq->stall_enable = to_mctx(context)->stall_enable;
 	cq->stall_adaptive_enable = to_mctx(context)->stall_adaptive_enable;
 	cq->stall_cycles = to_mctx(context)->stall_cycles;