From patchwork Thu Mar 23 13:55:28 2017
X-Patchwork-Submitter: Yishai Hadas
X-Patchwork-Id: 9641127
From: Yishai Hadas <yishaih@mellanox.com>
To: dledford@redhat.com
Cc: linux-rdma@vger.kernel.org, yishaih@mellanox.com, bodong@mellanox.com,
	majd@mellanox.com, jgunthorpe@obsidianresearch.com
Subject: [PATCH V2 rdma-core 5/6] mlx5: Expose direct create_cq with private data
Date: Thu, 23 Mar 2017 15:55:28 +0200
Message-Id: <1490277329-8738-6-git-send-email-yishaih@mellanox.com>
In-Reply-To: <1490277329-8738-1-git-send-email-yishaih@mellanox.com>
References: <1490277329-8738-1-git-send-email-yishaih@mellanox.com>

This patch exposes an API named mlx5dv_create_cq that enables creating a
CQ with mlx5-private attributes. Specifically, it allows creating a CQ in
a mode where several CQEs may be compressed into a single CQE.

To reuse the shared libibverbs code when a CQ is created, the new API uses
the verbs_init_cq() helper that was introduced in the previous patch.
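A minimal usage sketch (illustrative only, not part of this patch): the
helper name and CQE count are arbitrary, and MLX5DV_CQE_RES_FORMAT_HASH is
assumed to be one of the result formats reported via the CQE compression
caps exposed earlier in this series.

#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>

static struct ibv_cq_ex *create_compressed_cq(struct ibv_context *ctx)
{
	/* Standard extended-CQ attributes; 1024 CQEs is an arbitrary
	 * example value.
	 */
	struct ibv_cq_init_attr_ex cq_attr = {
		.cqe = 1024,
		.comp_vector = 0,
		.wc_flags = IBV_WC_STANDARD_FLAGS,
	};
	/* mlx5 private attributes: request CQE compression with the
	 * hash result format.
	 */
	struct mlx5dv_cq_init_attr mlx5_cq_attr = {
		.comp_mask = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE,
		.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH,
	};

	/* Returns NULL and sets errno (EINVAL) when the device does not
	 * support the requested compression format.
	 */
	return mlx5dv_create_cq(ctx, &cq_attr, &mlx5_cq_attr);
}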
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
---
 providers/mlx5/libmlx5.map |  5 +++++
 providers/mlx5/mlx5-abi.h  |  3 +++
 providers/mlx5/mlx5dv.h    | 13 +++++++++++++
 providers/mlx5/verbs.c     | 45 ++++++++++++++++++++++++++++++++++++++++++---
 4 files changed, 63 insertions(+), 3 deletions(-)

diff --git a/providers/mlx5/libmlx5.map b/providers/mlx5/libmlx5.map
index 7ce3eb1..ffd7b5c 100644
--- a/providers/mlx5/libmlx5.map
+++ b/providers/mlx5/libmlx5.map
@@ -6,3 +6,8 @@ MLX5_1.0 {
 		mlx5dv_init_obj;
 	local: *;
 };
+
+MLX5_1.14 {
+	global:
+		mlx5dv_create_cq;
+} MLX5_1.0;
diff --git a/providers/mlx5/mlx5-abi.h b/providers/mlx5/mlx5-abi.h
index 487de3a..d05cb40 100644
--- a/providers/mlx5/mlx5-abi.h
+++ b/providers/mlx5/mlx5-abi.h
@@ -119,6 +119,9 @@ struct mlx5_create_cq {
 	__u64				buf_addr;
 	__u64				db_addr;
 	__u32				cqe_size;
+	__u8				cqe_comp_en;
+	__u8				cqe_comp_res_format;
+	__u16				reserved;
 };
 
 struct mlx5_create_cq_resp {
diff --git a/providers/mlx5/mlx5dv.h b/providers/mlx5/mlx5dv.h
index a2f7acf..a03b1d7 100644
--- a/providers/mlx5/mlx5dv.h
+++ b/providers/mlx5/mlx5dv.h
@@ -82,6 +82,19 @@ enum mlx5dv_context_flags {
 	MLX5DV_CONTEXT_FLAGS_MPW	= (1 << 1),
 };
 
+enum mlx5dv_cq_init_attr_mask {
+	MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE	= 1 << 0,
+	MLX5DV_CQ_INIT_ATTR_MASK_RESERVED	= 1 << 1,
+};
+
+struct mlx5dv_cq_init_attr {
+	uint64_t comp_mask; /* Use enum mlx5dv_cq_init_attr_mask */
+	uint8_t cqe_comp_res_format; /* Use enum mlx5dv_cqe_comp_res_format */
+};
+
+struct ibv_cq_ex *mlx5dv_create_cq(struct ibv_context *context,
+				   struct ibv_cq_init_attr_ex *cq_attr,
+				   struct mlx5dv_cq_init_attr *mlx5_cq_attr);
 /*
  * Most device capabilities are exported by ibv_query_device(...),
  * but there is HW device-specific information which is important
diff --git a/providers/mlx5/verbs.c b/providers/mlx5/verbs.c
index a223291..b7a8502 100644
--- a/providers/mlx5/verbs.c
+++ b/providers/mlx5/verbs.c
@@ -341,7 +341,8 @@ enum {
 
 static struct ibv_cq_ex *create_cq(struct ibv_context *context,
 				   const struct ibv_cq_init_attr_ex *cq_attr,
-				   int cq_alloc_flags)
+				   int cq_alloc_flags,
+				   struct mlx5dv_cq_init_attr *mlx5cq_attr)
 {
 	struct mlx5_create_cq		cmd;
 	struct mlx5_create_cq_resp	resp;
@@ -349,6 +350,7 @@ static struct ibv_cq_ex *create_cq(struct ibv_context *context,
 	int				cqe_sz;
 	int				ret;
 	int				ncqe;
+	struct mlx5_context		*mctx = to_mctx(context);
 	FILE *fp = to_mctx(context)->dbg_fp;
 
 	if (!cq_attr->cqe) {
@@ -428,6 +430,28 @@ static struct ibv_cq_ex *create_cq(struct ibv_context *context,
 	cmd.db_addr  = (uintptr_t) cq->dbrec;
 	cmd.cqe_size = cqe_sz;
 
+	if (mlx5cq_attr) {
+		if (mlx5cq_attr->comp_mask & ~(MLX5DV_CQ_INIT_ATTR_MASK_RESERVED - 1)) {
+			mlx5_dbg(fp, MLX5_DBG_CQ,
+				 "Unsupported vendor comp_mask for create_cq\n");
+			errno = EINVAL;
+			goto err_db;
+		}
+
+		if (mlx5cq_attr->comp_mask & MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE) {
+			if (mctx->cqe_comp_caps.max_num &&
+			    (mlx5cq_attr->cqe_comp_res_format &
+			     mctx->cqe_comp_caps.supported_format)) {
+				cmd.cqe_comp_en = 1;
+				cmd.cqe_comp_res_format = mlx5cq_attr->cqe_comp_res_format;
+			} else {
+				mlx5_dbg(fp, MLX5_DBG_CQ, "CQE Compression is not supported\n");
+				errno = EINVAL;
+				goto err_db;
+			}
+		}
+	}
+
 	ret = ibv_cmd_create_cq(context, ncqe - 1, cq_attr->channel,
 				cq_attr->comp_vector,
 				ibv_cq_ex_to_cq(&cq->ibv_cq), &cmd.ibv_cmd,
@@ -478,14 +502,29 @@ struct ibv_cq *mlx5_create_cq(struct ibv_context *context, int cqe,
 		return NULL;
 	}
 
-	cq = create_cq(context, &cq_attr, 0);
+	cq = create_cq(context, &cq_attr, 0, NULL);
 	return cq ? ibv_cq_ex_to_cq(cq) : NULL;
 }
 
 struct ibv_cq_ex *mlx5_create_cq_ex(struct ibv_context *context,
 				    struct ibv_cq_init_attr_ex *cq_attr)
 {
-	return create_cq(context, cq_attr, MLX5_CQ_FLAGS_EXTENDED);
+	return create_cq(context, cq_attr, MLX5_CQ_FLAGS_EXTENDED, NULL);
+}
+
+struct ibv_cq_ex *mlx5dv_create_cq(struct ibv_context *context,
+				   struct ibv_cq_init_attr_ex *cq_attr,
+				   struct mlx5dv_cq_init_attr *mlx5_cq_attr)
+{
+	struct ibv_cq_ex *cq;
+
+	cq = create_cq(context, cq_attr, MLX5_CQ_FLAGS_EXTENDED, mlx5_cq_attr);
+	if (!cq)
+		return NULL;
+
+	verbs_init_cq(ibv_cq_ex_to_cq(cq), context,
+		      cq_attr->channel, cq_attr->cq_context);
+	return cq;
 }
 
 int mlx5_resize_cq(struct ibv_cq *ibcq, int cqe)