From patchwork Tue Jul 27 07:28:12 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Wenpeng Liang X-Patchwork-Id: 12402039 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-16.7 required=3.0 tests=BAYES_00, HEADER_FROM_DIFFERENT_DOMAINS,INCLUDES_CR_TRAILER,INCLUDES_PATCH, MAILING_LIST_MULTI,SPF_HELO_NONE,SPF_PASS,URIBL_BLOCKED,USER_AGENT_GIT autolearn=ham autolearn_force=no version=3.4.0 Received: from mail.kernel.org (mail.kernel.org [198.145.29.99]) by smtp.lore.kernel.org (Postfix) with ESMTP id B1674C4338F for ; Tue, 27 Jul 2021 07:32:01 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id 95C10611C5 for ; Tue, 27 Jul 2021 07:32:01 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235612AbhG0Hb6 (ORCPT ); Tue, 27 Jul 2021 03:31:58 -0400 Received: from szxga01-in.huawei.com ([45.249.212.187]:16006 "EHLO szxga01-in.huawei.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S235629AbhG0Hb5 (ORCPT ); Tue, 27 Jul 2021 03:31:57 -0400 Received: from dggemv711-chm.china.huawei.com (unknown [172.30.72.57]) by szxga01-in.huawei.com (SkyGuard) with ESMTP id 4GYpLN5CV9zZtDp; Tue, 27 Jul 2021 15:28:28 +0800 (CST) Received: from dggpeml500017.china.huawei.com (7.185.36.243) by dggemv711-chm.china.huawei.com (10.1.198.66) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2176.2; Tue, 27 Jul 2021 15:31:56 +0800 Received: from localhost.localdomain (10.67.165.24) by dggpeml500017.china.huawei.com (7.185.36.243) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2176.2; Tue, 27 Jul 2021 15:31:55 +0800 From: Wenpeng Liang To: , CC: , Subject: [PATCH v2 rdma-core 01/10] Update kernel headers Date: Tue, 27 Jul 2021 15:28:12 +0800 Message-ID: <1627370901-10054-2-git-send-email-liangwenpeng@huawei.com> X-Mailer: git-send-email 2.8.1 In-Reply-To: <1627370901-10054-1-git-send-email-liangwenpeng@huawei.com> References: <1627370901-10054-1-git-send-email-liangwenpeng@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.67.165.24] X-ClientProxiedBy: dggems701-chm.china.huawei.com (10.3.19.178) To dggpeml500017.china.huawei.com (7.185.36.243) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: linux-rdma@vger.kernel.org To commit ?? ("RDMA/hns: Dump detailed driver-specific UCTX"). 
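For illustration only (not part of this patch), a minimal userspace-side sketch of how the new ucontext fields imported below are intended to be exchanged. It assumes the structures and flags from the hns-abi.h hunk in this patch; the request/response is carried by ibv_cmd_get_context() as done in the libhns provider later in this series, and the value 128 is an arbitrary placeholder:

	/* Sketch: negotiate DCA support at context allocation time. */
	struct hns_roce_ib_alloc_ucontext req = {
		/* tell the kernel that dca_max_qps below is valid */
		.comp = HNS_ROCE_ALLOC_UCTX_COMP_DCA_MAX_QPS,
		.dca_max_qps = 128,	/* arbitrary example value */
	};
	struct hns_roce_ib_alloc_ucontext_resp resp = {};

	/* req/resp travel in the driver-specific part of the GET_CONTEXT
	 * command (ibv_cmd_get_context() in the provider).
	 */

	if (resp.cap_flags & HNS_ROCE_CAP_FLAG_DCA_MODE) {
		/* The kernel granted DCA mode: resp.dca_qps QPs may use it,
		 * and a shared status area of resp.dca_mmap_size bytes can
		 * be mapped via the HNS_ROCE_MMAP_DCA_PAGE mmap command.
		 */
	}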
Signed-off-by: Wenpeng Liang --- kernel-headers/rdma/hns-abi.h | 86 ++++++++++++++++++++++++++++++++++++++++++ kernel-headers/rdma/mlx5-abi.h | 17 ++++++++- 2 files changed, 101 insertions(+), 2 deletions(-) diff --git a/kernel-headers/rdma/hns-abi.h b/kernel-headers/rdma/hns-abi.h index 42b1776..40ac2c3 100644 --- a/kernel-headers/rdma/hns-abi.h +++ b/kernel-headers/rdma/hns-abi.h @@ -77,21 +77,107 @@ enum hns_roce_qp_cap_flags { HNS_ROCE_QP_CAP_RQ_RECORD_DB = 1 << 0, HNS_ROCE_QP_CAP_SQ_RECORD_DB = 1 << 1, HNS_ROCE_QP_CAP_OWNER_DB = 1 << 2, + HNS_ROCE_QP_CAP_DYNAMIC_CTX_ATTACH = 1 << 4, + HNS_ROCE_QP_CAP_DYNAMIC_CTX_DETACH = 1 << 6, }; struct hns_roce_ib_create_qp_resp { __aligned_u64 cap_flags; }; +enum { + HNS_ROCE_ALLOC_UCTX_COMP_DCA_MAX_QPS = 1 << 0, +}; + +struct hns_roce_ib_alloc_ucontext { + __u32 comp; + __u32 dca_max_qps; +}; + +enum { + HNS_ROCE_CAP_FLAG_DCA_MODE = 1 << 15, +}; + struct hns_roce_ib_alloc_ucontext_resp { __u32 qp_tab_size; __u32 cqe_size; __u32 srq_tab_size; __u32 reserved; + __aligned_u64 cap_flags; + __u32 dca_qps; + __u32 dca_mmap_size; }; struct hns_roce_ib_alloc_pd_resp { __u32 pdn; }; +enum { + HNS_ROCE_MMAP_REGULAR_PAGE, + HNS_ROCE_MMAP_DCA_PAGE, +}; + +struct hns_roce_ib_modify_qp_resp { + __u32 dcan; + __u32 reserved; +}; + +#define UVERBS_ID_NS_MASK 0xF000 +#define UVERBS_ID_NS_SHIFT 12 + +enum hns_ib_objects { + HNS_IB_OBJECT_DCA_MEM = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum hns_ib_dca_mem_methods { + HNS_IB_METHOD_DCA_MEM_REG = (1U << UVERBS_ID_NS_SHIFT), + HNS_IB_METHOD_DCA_MEM_DEREG, + HNS_IB_METHOD_DCA_MEM_SHRINK, + HNS_IB_METHOD_DCA_MEM_ATTACH, + HNS_IB_METHOD_DCA_MEM_DETACH, + HNS_IB_METHOD_DCA_MEM_QUERY, +}; + +enum hns_ib_dca_mem_reg_attrs { + HNS_IB_ATTR_DCA_MEM_REG_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + HNS_IB_ATTR_DCA_MEM_REG_LEN, + HNS_IB_ATTR_DCA_MEM_REG_ADDR, + HNS_IB_ATTR_DCA_MEM_REG_KEY, +}; + +enum hns_ib_dca_mem_dereg_attrs { + HNS_IB_ATTR_DCA_MEM_DEREG_HANDLE = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum hns_ib_dca_mem_shrink_attrs { + HNS_IB_ATTR_DCA_MEM_SHRINK_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + HNS_IB_ATTR_DCA_MEM_SHRINK_RESERVED_SIZE, + HNS_IB_ATTR_DCA_MEM_SHRINK_OUT_FREE_KEY, + HNS_IB_ATTR_DCA_MEM_SHRINK_OUT_FREE_MEMS, +}; + +#define HNS_IB_ATTACH_FLAGS_NEW_BUFFER 1U + +enum hns_ib_dca_mem_attach_attrs { + HNS_IB_ATTR_DCA_MEM_ATTACH_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + HNS_IB_ATTR_DCA_MEM_ATTACH_SQ_OFFSET, + HNS_IB_ATTR_DCA_MEM_ATTACH_SGE_OFFSET, + HNS_IB_ATTR_DCA_MEM_ATTACH_RQ_OFFSET, + HNS_IB_ATTR_DCA_MEM_ATTACH_OUT_ALLOC_FLAGS, + HNS_IB_ATTR_DCA_MEM_ATTACH_OUT_ALLOC_PAGES, +}; + +enum hns_ib_dca_mem_detach_attrs { + HNS_IB_ATTR_DCA_MEM_DETACH_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + HNS_IB_ATTR_DCA_MEM_DETACH_SQ_INDEX, +}; + +enum hns_ib_dca_mem_query_attrs { + HNS_IB_ATTR_DCA_MEM_QUERY_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + HNS_IB_ATTR_DCA_MEM_QUERY_PAGE_INDEX, + HNS_IB_ATTR_DCA_MEM_QUERY_OUT_KEY, + HNS_IB_ATTR_DCA_MEM_QUERY_OUT_OFFSET, + HNS_IB_ATTR_DCA_MEM_QUERY_OUT_PAGE_COUNT, +}; + #endif /* HNS_ABI_USER_H */ diff --git a/kernel-headers/rdma/mlx5-abi.h b/kernel-headers/rdma/mlx5-abi.h index 8597e6f..86be4a9 100644 --- a/kernel-headers/rdma/mlx5-abi.h +++ b/kernel-headers/rdma/mlx5-abi.h @@ -50,6 +50,7 @@ enum { MLX5_QP_FLAG_ALLOW_SCATTER_CQE = 1 << 8, MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE = 1 << 9, MLX5_QP_FLAG_UAR_PAGE_INDEX = 1 << 10, + MLX5_QP_FLAG_DCI_STREAM = 1 << 11, }; enum { @@ -238,6 +239,11 @@ struct mlx5_ib_striding_rq_caps { __u32 reserved; }; +struct mlx5_ib_dci_streams_caps { + __u8 max_log_num_concurent; + 
__u8 max_log_num_errored; +}; + enum mlx5_ib_query_dev_resp_flags { /* Support 128B CQE compression */ MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0, @@ -266,7 +272,8 @@ struct mlx5_ib_query_device_resp { struct mlx5_ib_sw_parsing_caps sw_parsing_caps; struct mlx5_ib_striding_rq_caps striding_rq_caps; __u32 tunnel_offloads_caps; /* enum mlx5_ib_tunnel_offloads */ - __u32 reserved; + struct mlx5_ib_dci_streams_caps dci_streams_caps; + __u16 reserved; }; enum mlx5_ib_create_cq_flags { @@ -313,6 +320,11 @@ struct mlx5_ib_create_srq_resp { __u32 reserved; }; +struct mlx5_ib_create_qp_dci_streams { + __u8 log_num_concurent; + __u8 log_num_errored; +}; + struct mlx5_ib_create_qp { __aligned_u64 buf_addr; __aligned_u64 db_addr; @@ -327,7 +339,8 @@ struct mlx5_ib_create_qp { __aligned_u64 access_key; }; __u32 ece_options; - __u32 reserved; + struct mlx5_ib_create_qp_dci_streams dci_streams; + __u16 reserved; }; /* RX Hash function flags */ From patchwork Tue Jul 27 07:28:13 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Wenpeng Liang X-Patchwork-Id: 12402037 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-16.7 required=3.0 tests=BAYES_00, HEADER_FROM_DIFFERENT_DOMAINS,INCLUDES_CR_TRAILER,INCLUDES_PATCH, MAILING_LIST_MULTI,SPF_HELO_NONE,SPF_PASS,URIBL_BLOCKED,USER_AGENT_GIT autolearn=ham autolearn_force=no version=3.4.0 Received: from mail.kernel.org (mail.kernel.org [198.145.29.99]) by smtp.lore.kernel.org (Postfix) with ESMTP id 920BDC432BE for ; Tue, 27 Jul 2021 07:32:00 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id 6F4D5611C5 for ; Tue, 27 Jul 2021 07:32:00 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235695AbhG0Hb6 (ORCPT ); Tue, 27 Jul 2021 03:31:58 -0400 Received: from szxga02-in.huawei.com ([45.249.212.188]:12412 "EHLO szxga02-in.huawei.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S235612AbhG0Hb5 (ORCPT ); Tue, 27 Jul 2021 03:31:57 -0400 Received: from dggemv704-chm.china.huawei.com (unknown [172.30.72.54]) by szxga02-in.huawei.com (SkyGuard) with ESMTP id 4GYpLN5BYmzcgxT; Tue, 27 Jul 2021 15:28:28 +0800 (CST) Received: from dggpeml500017.china.huawei.com (7.185.36.243) by dggemv704-chm.china.huawei.com (10.3.19.47) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2176.2; Tue, 27 Jul 2021 15:31:56 +0800 Received: from localhost.localdomain (10.67.165.24) by dggpeml500017.china.huawei.com (7.185.36.243) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2176.2; Tue, 27 Jul 2021 15:31:56 +0800 From: Wenpeng Liang To: , CC: , Subject: [PATCH v2 rdma-core 02/10] libhns: Introduce DCA for RC QP Date: Tue, 27 Jul 2021 15:28:13 +0800 Message-ID: <1627370901-10054-3-git-send-email-liangwenpeng@huawei.com> X-Mailer: git-send-email 2.8.1 In-Reply-To: <1627370901-10054-1-git-send-email-liangwenpeng@huawei.com> References: <1627370901-10054-1-git-send-email-liangwenpeng@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.67.165.24] X-ClientProxiedBy: dggems701-chm.china.huawei.com (10.3.19.178) To dggpeml500017.china.huawei.com (7.185.36.243) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: linux-rdma@vger.kernel.org From: Xi Wang The HIP09 introduces the DCA(Dynamic 
context attachment) feature which supports many RC QPs to share the WQE buffer in a memory pool, this will reduce the memory consumption when there are too many QPs inactive. Two functions are defined for adding buffers to memory pool and removing buffers from memory pool by calling ib cmd implemented in hns kernelspace driver. If a QP enables DCA feature, the WQE's buffer will be attached to the memory pool when the users start to post WRs and be detached when all CQEs has been polled. Signed-off-by: Xi Wang Signed-off-by: Wenpeng Liang --- providers/hns/hns_roce_u.c | 45 ++++++++++++++ providers/hns/hns_roce_u.h | 18 ++++++ providers/hns/hns_roce_u_buf.c | 138 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 201 insertions(+) diff --git a/providers/hns/hns_roce_u.c b/providers/hns/hns_roce_u.c index 3b31ad3..a4e0997 100644 --- a/providers/hns/hns_roce_u.c +++ b/providers/hns/hns_roce_u.c @@ -95,6 +95,40 @@ static const struct verbs_context_ops hns_common_ops = { .get_srq_num = hns_roce_u_get_srq_num, }; +static int init_dca_context(struct hns_roce_context *ctx, int page_size) +{ + struct hns_roce_dca_ctx *dca_ctx = &ctx->dca_ctx; + int ret; + + if (!(ctx->cap_flags & HNS_ROCE_CAP_FLAG_DCA_MODE)) + return 0; + + list_head_init(&dca_ctx->mem_list); + ret = pthread_spin_init(&dca_ctx->lock, PTHREAD_PROCESS_PRIVATE); + if (ret) + return ret; + + dca_ctx->unit_size = page_size * HNS_DCA_DEFAULT_UNIT_PAGES; + dca_ctx->max_size = HNS_DCA_MAX_MEM_SIZE; + dca_ctx->mem_cnt = 0; + + return 0; +} + +static void uninit_dca_context(struct hns_roce_context *ctx) +{ + struct hns_roce_dca_ctx *dca_ctx = &ctx->dca_ctx; + + if (!(ctx->cap_flags & HNS_ROCE_CAP_FLAG_DCA_MODE)) + return; + + pthread_spin_lock(&dca_ctx->lock); + hns_roce_cleanup_dca_mem(ctx); + pthread_spin_unlock(&dca_ctx->lock); + + pthread_spin_destroy(&dca_ctx->lock); +} + static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev, int cmd_fd, void *private_data) @@ -123,6 +157,8 @@ static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev, else context->cqe_size = HNS_ROCE_V3_CQE_SIZE; + context->cap_flags = resp.cap_flags; + context->num_qps = resp.qp_tab_size; context->num_srqs = resp.srq_tab_size; @@ -178,8 +214,15 @@ static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev, verbs_set_ops(&context->ibv_ctx, &hns_common_ops); verbs_set_ops(&context->ibv_ctx, &hr_dev->u_hw->hw_ops); + if (init_dca_context(context, hr_dev->page_size)) + goto tptr_free; + return &context->ibv_ctx; +tptr_free: + if (hr_dev->hw_version == HNS_ROCE_HW_VER1) + munmap(context->cq_tptr_base, HNS_ROCE_CQ_DB_BUF_SIZE); + db_free: munmap(context->uar, hr_dev->page_size); context->uar = NULL; @@ -199,6 +242,8 @@ static void hns_roce_free_context(struct ibv_context *ibctx) if (hr_dev->hw_version == HNS_ROCE_HW_VER1) munmap(context->cq_tptr_base, HNS_ROCE_CQ_DB_BUF_SIZE); + uninit_dca_context(context); + verbs_uninit_context(&context->ibv_ctx); free(context); } diff --git a/providers/hns/hns_roce_u.h b/providers/hns/hns_roce_u.h index 0d7abd8..dc56b16 100644 --- a/providers/hns/hns_roce_u.h +++ b/providers/hns/hns_roce_u.h @@ -146,8 +146,21 @@ struct hns_roce_db_page { bitmap *bitmap; }; +#define HNS_DCA_MAX_MEM_SIZE ~0UL +#define HNS_DCA_DEFAULT_UNIT_PAGES 16 + +struct hns_roce_dca_ctx { + struct list_head mem_list; + pthread_spinlock_t lock; + int mem_cnt; + unsigned int unit_size; + uint64_t max_size; + uint64_t curr_size; +}; + struct hns_roce_context { struct verbs_context ibv_ctx; + uint32_t 
cap_flags; void *uar; pthread_spinlock_t uar_lock; @@ -180,6 +193,8 @@ struct hns_roce_context { unsigned int max_srq_sge; int max_cqe; unsigned int cqe_size; + + struct hns_roce_dca_ctx dca_ctx; }; struct hns_roce_pd { @@ -424,6 +439,9 @@ void hns_roce_free_buf(struct hns_roce_buf *buf); void hns_roce_free_qp_buf(struct hns_roce_qp *qp, struct hns_roce_context *ctx); +void hns_roce_cleanup_dca_mem(struct hns_roce_context *ctx); +int hns_roce_add_dca_mem(struct hns_roce_context *ctx, uint32_t size); + void hns_roce_init_qp_indices(struct hns_roce_qp *qp); extern const struct hns_roce_u_hw hns_roce_u_hw_v1; diff --git a/providers/hns/hns_roce_u_buf.c b/providers/hns/hns_roce_u_buf.c index 471dd9c..2de0e86 100644 --- a/providers/hns/hns_roce_u_buf.c +++ b/providers/hns/hns_roce_u_buf.c @@ -60,3 +60,141 @@ void hns_roce_free_buf(struct hns_roce_buf *buf) munmap(buf->buf, buf->length); } + +struct hns_roce_dca_mem { + uint32_t handle; + struct list_node entry; + struct hns_roce_buf buf; + struct hns_roce_context *ctx; +}; + +static void free_dca_mem(struct hns_roce_context *ctx, + struct hns_roce_dca_mem *mem) +{ + hns_roce_free_buf(&mem->buf); + free(mem); +} + +static struct hns_roce_dca_mem *alloc_dca_mem(uint32_t size) +{ + struct hns_roce_dca_mem *mem = NULL; + int ret; + + mem = malloc(sizeof(struct hns_roce_dca_mem)); + if (!mem) { + errno = ENOMEM; + return NULL; + } + + ret = hns_roce_alloc_buf(&mem->buf, size, HNS_HW_PAGE_SIZE); + if (ret) { + errno = ENOMEM; + free(mem); + return NULL; + } + + return mem; +} + +static inline uint64_t dca_mem_to_key(struct hns_roce_dca_mem *dca_mem) +{ + return (uintptr_t)dca_mem; +} + +static inline void *dca_mem_addr(struct hns_roce_dca_mem *dca_mem, int offset) +{ + return dca_mem->buf.buf + offset; +} + +static int register_dca_mem(struct hns_roce_context *ctx, uint64_t key, + void *addr, uint32_t size, uint32_t *handle) +{ + struct ib_uverbs_attr *attr; + int ret; + + DECLARE_COMMAND_BUFFER(cmd, HNS_IB_OBJECT_DCA_MEM, + HNS_IB_METHOD_DCA_MEM_REG, 4); + fill_attr_in_uint32(cmd, HNS_IB_ATTR_DCA_MEM_REG_LEN, size); + fill_attr_in_uint64(cmd, HNS_IB_ATTR_DCA_MEM_REG_ADDR, + ioctl_ptr_to_u64(addr)); + fill_attr_in_uint64(cmd, HNS_IB_ATTR_DCA_MEM_REG_KEY, key); + attr = fill_attr_out_obj(cmd, HNS_IB_ATTR_DCA_MEM_REG_HANDLE); + + ret = execute_ioctl(&ctx->ibv_ctx.context, cmd); + if (ret) + return ret; + + *handle = read_attr_obj(HNS_IB_ATTR_DCA_MEM_REG_HANDLE, attr); + + return 0; +} + +static void deregister_dca_mem(struct hns_roce_context *ctx, uint32_t handle) +{ + DECLARE_COMMAND_BUFFER(cmd, HNS_IB_OBJECT_DCA_MEM, + HNS_IB_METHOD_DCA_MEM_DEREG, 1); + fill_attr_in_obj(cmd, HNS_IB_ATTR_DCA_MEM_DEREG_HANDLE, handle); + execute_ioctl(&ctx->ibv_ctx.context, cmd); +} + +void hns_roce_cleanup_dca_mem(struct hns_roce_context *ctx) +{ + struct hns_roce_dca_ctx *dca_ctx = &ctx->dca_ctx; + struct hns_roce_dca_mem *mem; + struct hns_roce_dca_mem *tmp; + + list_for_each_safe(&dca_ctx->mem_list, mem, tmp, entry) + deregister_dca_mem(ctx, mem->handle); +} + +static bool add_dca_mem_enabled(struct hns_roce_dca_ctx *ctx, + uint32_t alloc_size) +{ + bool enable; + + pthread_spin_lock(&ctx->lock); + + if (ctx->unit_size == 0) /* Pool size can't be increased */ + enable = false; + else if (ctx->max_size == HNS_DCA_MAX_MEM_SIZE) /* Pool size no limit */ + enable = true; + else /* Pool size doesn't exceed max size */ + enable = (ctx->curr_size + alloc_size) < ctx->max_size; + + pthread_spin_unlock(&ctx->lock); + + return enable; +} + +int hns_roce_add_dca_mem(struct 
hns_roce_context *ctx, uint32_t size) +{ + struct hns_roce_dca_ctx *dca_ctx = &ctx->dca_ctx; + struct hns_roce_dca_mem *mem; + int ret; + + if (!add_dca_mem_enabled(&ctx->dca_ctx, size)) + return -ENOMEM; + + /* Step 1: Alloc DCA mem address */ + mem = alloc_dca_mem( + DIV_ROUND_UP(size, dca_ctx->unit_size) * dca_ctx->unit_size); + if (!mem) + return -ENOMEM; + + /* Step 2: Register DCA mem uobject to pin user address */ + ret = register_dca_mem(ctx, dca_mem_to_key(mem), dca_mem_addr(mem, 0), + mem->buf.length, &mem->handle); + if (ret) { + free_dca_mem(ctx, mem); + return ret; + } + + /* Step 3: Add DCA mem node to pool */ + pthread_spin_lock(&dca_ctx->lock); + list_add_tail(&dca_ctx->mem_list, &mem->entry); + dca_ctx->mem_cnt++; + dca_ctx->curr_size += mem->buf.length; + pthread_spin_unlock(&dca_ctx->lock); + + return 0; +} From patchwork Tue Jul 27 07:28:14 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Wenpeng Liang X-Patchwork-Id: 12402053 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-16.7 required=3.0 tests=BAYES_00, HEADER_FROM_DIFFERENT_DOMAINS,INCLUDES_CR_TRAILER,INCLUDES_PATCH, MAILING_LIST_MULTI,SPF_HELO_NONE,SPF_PASS,URIBL_BLOCKED,USER_AGENT_GIT autolearn=ham autolearn_force=no version=3.4.0 Received: from mail.kernel.org (mail.kernel.org [198.145.29.99]) by smtp.lore.kernel.org (Postfix) with ESMTP id C1993C19F31 for ; Tue, 27 Jul 2021 07:32:04 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id A77B6611CE for ; Tue, 27 Jul 2021 07:32:04 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235786AbhG0HcC (ORCPT ); Tue, 27 Jul 2021 03:32:02 -0400 Received: from szxga03-in.huawei.com ([45.249.212.189]:12317 "EHLO szxga03-in.huawei.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S235740AbhG0HcB (ORCPT ); Tue, 27 Jul 2021 03:32:01 -0400 Received: from dggemv703-chm.china.huawei.com (unknown [172.30.72.57]) by szxga03-in.huawei.com (SkyGuard) with ESMTP id 4GYpK15Zj0z7yf6; Tue, 27 Jul 2021 15:27:17 +0800 (CST) Received: from dggpeml500017.china.huawei.com (7.185.36.243) by dggemv703-chm.china.huawei.com (10.3.19.46) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2176.2; Tue, 27 Jul 2021 15:31:56 +0800 Received: from localhost.localdomain (10.67.165.24) by dggpeml500017.china.huawei.com (7.185.36.243) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2176.2; Tue, 27 Jul 2021 15:31:56 +0800 From: Wenpeng Liang To: , CC: , Subject: [PATCH v2 rdma-core 03/10] libhns: Add support for shrinking DCA memory pool Date: Tue, 27 Jul 2021 15:28:14 +0800 Message-ID: <1627370901-10054-4-git-send-email-liangwenpeng@huawei.com> X-Mailer: git-send-email 2.8.1 In-Reply-To: <1627370901-10054-1-git-send-email-liangwenpeng@huawei.com> References: <1627370901-10054-1-git-send-email-liangwenpeng@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.67.165.24] X-ClientProxiedBy: dggems701-chm.china.huawei.com (10.3.19.178) To dggpeml500017.china.huawei.com (7.185.36.243) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: linux-rdma@vger.kernel.org From: Xi Wang The QP's WQE buffer may be detached after QP is modified or CQE is polled, and the state of DCA mem object may be changed as clean for no 
QP is using it. So shrink the clean DCA mem from the memory pool and destroy the DCA mem's buffer to reduce the memory consumption. Signed-off-by: Xi Wang Signed-off-by: Wenpeng Liang --- providers/hns/hns_roce_u.h | 2 + providers/hns/hns_roce_u_buf.c | 96 ++++++++++++++++++++++++++++++++++++++++ providers/hns/hns_roce_u_hw_v2.c | 7 +++ 3 files changed, 105 insertions(+) diff --git a/providers/hns/hns_roce_u.h b/providers/hns/hns_roce_u.h index dc56b16..bddd3dd 100644 --- a/providers/hns/hns_roce_u.h +++ b/providers/hns/hns_roce_u.h @@ -155,6 +155,7 @@ struct hns_roce_dca_ctx { int mem_cnt; unsigned int unit_size; uint64_t max_size; + uint64_t min_size; uint64_t curr_size; }; @@ -439,6 +440,7 @@ void hns_roce_free_buf(struct hns_roce_buf *buf); void hns_roce_free_qp_buf(struct hns_roce_qp *qp, struct hns_roce_context *ctx); +void hns_roce_shrink_dca_mem(struct hns_roce_context *ctx); void hns_roce_cleanup_dca_mem(struct hns_roce_context *ctx); int hns_roce_add_dca_mem(struct hns_roce_context *ctx, uint32_t size); diff --git a/providers/hns/hns_roce_u_buf.c b/providers/hns/hns_roce_u_buf.c index 2de0e86..ff9e9a7 100644 --- a/providers/hns/hns_roce_u_buf.c +++ b/providers/hns/hns_roce_u_buf.c @@ -101,6 +101,20 @@ static inline uint64_t dca_mem_to_key(struct hns_roce_dca_mem *dca_mem) return (uintptr_t)dca_mem; } +static struct hns_roce_dca_mem *key_to_dca_mem(struct hns_roce_dca_ctx *ctx, + uint64_t key) +{ + struct hns_roce_dca_mem *mem; + struct hns_roce_dca_mem *tmp; + + list_for_each_safe(&ctx->mem_list, mem, tmp, entry) { + if (dca_mem_to_key(mem) == key) + return mem; + } + + return NULL; +} + static inline void *dca_mem_addr(struct hns_roce_dca_mem *dca_mem, int offset) { return dca_mem->buf.buf + offset; @@ -147,6 +161,25 @@ void hns_roce_cleanup_dca_mem(struct hns_roce_context *ctx) deregister_dca_mem(ctx, mem->handle); } +struct hns_dca_mem_shrink_resp { + uint32_t free_mems; + uint64_t free_key; +}; + +static int shrink_dca_mem(struct hns_roce_context *ctx, uint32_t handle, + uint64_t size, struct hns_dca_mem_shrink_resp *resp) +{ + DECLARE_COMMAND_BUFFER(cmd, HNS_IB_OBJECT_DCA_MEM, + HNS_IB_METHOD_DCA_MEM_SHRINK, 4); + fill_attr_in_obj(cmd, HNS_IB_ATTR_DCA_MEM_SHRINK_HANDLE, handle); + fill_attr_in_uint64(cmd, HNS_IB_ATTR_DCA_MEM_SHRINK_RESERVED_SIZE, size); + fill_attr_out(cmd, HNS_IB_ATTR_DCA_MEM_SHRINK_OUT_FREE_KEY, + &resp->free_key, sizeof(resp->free_key)); + fill_attr_out(cmd, HNS_IB_ATTR_DCA_MEM_SHRINK_OUT_FREE_MEMS, + &resp->free_mems, sizeof(resp->free_mems)); + + return execute_ioctl(&ctx->ibv_ctx.context, cmd); +} static bool add_dca_mem_enabled(struct hns_roce_dca_ctx *ctx, uint32_t alloc_size) { @@ -166,6 +199,17 @@ static bool add_dca_mem_enabled(struct hns_roce_dca_ctx *ctx, return enable; } +static bool shrink_dca_mem_enabled(struct hns_roce_dca_ctx *ctx) +{ + bool enable; + + pthread_spin_lock(&ctx->lock); + enable = ctx->mem_cnt > 0 && ctx->min_size < ctx->max_size; + pthread_spin_unlock(&ctx->lock); + + return enable; +} + int hns_roce_add_dca_mem(struct hns_roce_context *ctx, uint32_t size) { struct hns_roce_dca_ctx *dca_ctx = &ctx->dca_ctx; @@ -198,3 +242,55 @@ int hns_roce_add_dca_mem(struct hns_roce_context *ctx, uint32_t size) return 0; } + +void hns_roce_shrink_dca_mem(struct hns_roce_context *ctx) +{ + struct hns_roce_dca_ctx *dca_ctx = &ctx->dca_ctx; + struct hns_dca_mem_shrink_resp resp = {}; + struct hns_roce_dca_mem *mem; + int dca_mem_cnt; + uint32_t handle; + int ret; + + pthread_spin_lock(&dca_ctx->lock); + dca_mem_cnt = ctx->dca_ctx.mem_cnt; + 
pthread_spin_unlock(&dca_ctx->lock); + while (dca_mem_cnt > 0 && shrink_dca_mem_enabled(dca_ctx)) { + resp.free_mems = 0; + /* Step 1: Use any DCA mem uobject to shrink pool */ + pthread_spin_lock(&dca_ctx->lock); + mem = list_tail(&dca_ctx->mem_list, + struct hns_roce_dca_mem, entry); + handle = mem ? mem->handle : 0; + pthread_spin_unlock(&dca_ctx->lock); + if (!mem) + break; + + ret = shrink_dca_mem(ctx, handle, dca_ctx->min_size, &resp); + if (ret || likely(resp.free_mems < 1)) + break; + + /* Step 2: Remove shrunk DCA mem node from pool */ + pthread_spin_lock(&dca_ctx->lock); + mem = key_to_dca_mem(dca_ctx, resp.free_key); + if (mem) { + list_del(&mem->entry); + dca_ctx->mem_cnt--; + dca_ctx->curr_size -= mem->buf.length; + } + + handle = mem ? mem->handle : 0; + pthread_spin_unlock(&dca_ctx->lock); + if (!mem) + break; + + /* Step 3: Destroy DCA mem uobject */ + deregister_dca_mem(ctx, handle); + free_dca_mem(ctx, mem); + /* No any free memory after deregister 1 DCA mem */ + if (resp.free_mems <= 1) + break; + + dca_mem_cnt--; + } +} diff --git a/providers/hns/hns_roce_u_hw_v2.c b/providers/hns/hns_roce_u_hw_v2.c index 2308f78..bec2a45 100644 --- a/providers/hns/hns_roce_u_hw_v2.c +++ b/providers/hns/hns_roce_u_hw_v2.c @@ -654,6 +654,10 @@ static int hns_roce_u_v2_poll_cq(struct ibv_cq *ibvcq, int ne, pthread_spin_unlock(&cq->lock); + /* Try to shrink the DCA mem */ + if (ctx->dca_ctx.mem_cnt > 0) + hns_roce_shrink_dca_mem(ctx); + return err == V2_CQ_POLL_ERR ? err : npolled; } @@ -1563,6 +1567,9 @@ static int hns_roce_u_v2_destroy_qp(struct ibv_qp *ibqp) free(qp); + if (ctx->dca_ctx.mem_cnt > 0) + hns_roce_shrink_dca_mem(ctx); + return ret; } From patchwork Tue Jul 27 07:28:15 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Wenpeng Liang X-Patchwork-Id: 12402049 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-16.8 required=3.0 tests=BAYES_00, HEADER_FROM_DIFFERENT_DOMAINS,INCLUDES_CR_TRAILER,INCLUDES_PATCH, MAILING_LIST_MULTI,SPF_HELO_NONE,SPF_PASS,USER_AGENT_GIT autolearn=ham autolearn_force=no version=3.4.0 Received: from mail.kernel.org (mail.kernel.org [198.145.29.99]) by smtp.lore.kernel.org (Postfix) with ESMTP id 561ABC4320A for ; Tue, 27 Jul 2021 07:32:02 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id 3D8AE611C4 for ; Tue, 27 Jul 2021 07:32:02 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235656AbhG0HcA (ORCPT ); Tue, 27 Jul 2021 03:32:00 -0400 Received: from szxga02-in.huawei.com ([45.249.212.188]:7879 "EHLO szxga02-in.huawei.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S235504AbhG0Hb6 (ORCPT ); Tue, 27 Jul 2021 03:31:58 -0400 Received: from dggemv711-chm.china.huawei.com (unknown [172.30.72.54]) by szxga02-in.huawei.com (SkyGuard) with ESMTP id 4GYpL41zMdz805q; Tue, 27 Jul 2021 15:28:12 +0800 (CST) Received: from dggpeml500017.china.huawei.com (7.185.36.243) by dggemv711-chm.china.huawei.com (10.1.198.66) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2176.2; Tue, 27 Jul 2021 15:31:56 +0800 Received: from localhost.localdomain (10.67.165.24) by dggpeml500017.china.huawei.com (7.185.36.243) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2176.2; Tue, 27 Jul 2021 
15:31:56 +0800 From: Wenpeng Liang To: , CC: , Subject: [PATCH v2 rdma-core 04/10] libhns: Add support for attaching QP's WQE buffer Date: Tue, 27 Jul 2021 15:28:15 +0800 Message-ID: <1627370901-10054-5-git-send-email-liangwenpeng@huawei.com> X-Mailer: git-send-email 2.8.1 In-Reply-To: <1627370901-10054-1-git-send-email-liangwenpeng@huawei.com> References: <1627370901-10054-1-git-send-email-liangwenpeng@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.67.165.24] X-ClientProxiedBy: dggems701-chm.china.huawei.com (10.3.19.178) To dggpeml500017.china.huawei.com (7.185.36.243) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: linux-rdma@vger.kernel.org From: Xi Wang If a uQP works in DCA mode, the WQE's buffer will be split as many blocks and be stored into a list. The blocks are allocated from the DCA's memory pool before posting WRs and are dropped when the QP's CI is equal to PI after polling CQ. Signed-off-by: Xi Wang Signed-off-by: Wenpeng Liang --- providers/hns/hns_roce_u.h | 26 ++++++- providers/hns/hns_roce_u_buf.c | 148 ++++++++++++++++++++++++++++++++++++++- providers/hns/hns_roce_u_hw_v2.c | 145 +++++++++++++++++++++++++++++++++----- providers/hns/hns_roce_u_hw_v2.h | 7 ++ providers/hns/hns_roce_u_verbs.c | 32 +++++++-- 5 files changed, 333 insertions(+), 25 deletions(-) diff --git a/providers/hns/hns_roce_u.h b/providers/hns/hns_roce_u.h index bddd3dd..08e60b7 100644 --- a/providers/hns/hns_roce_u.h +++ b/providers/hns/hns_roce_u.h @@ -283,11 +283,18 @@ struct hns_roce_rinl_buf { unsigned int wqe_cnt; }; +struct hns_roce_dca_buf { + void **bufs; + unsigned int max_cnt; + unsigned int shift; +}; + struct hns_roce_qp { struct verbs_qp verbs_qp; struct hns_roce_buf buf; + struct hns_roce_dca_buf dca_wqe; int max_inline_data; - int buf_size; + unsigned int buf_size; unsigned int sq_signal_bits; struct hns_roce_wq sq; struct hns_roce_wq rq; @@ -327,11 +334,22 @@ struct hns_roce_u_hw { struct verbs_context_ops hw_ops; }; +struct hns_roce_dca_attach_attr { + uint32_t sq_offset; + uint32_t sge_offset; + uint32_t rq_offset; +}; + +struct hns_roce_dca_detach_attr { + uint32_t sq_index; +}; + /* * The entries's buffer should be aligned to a multiple of the hardware's * minimum page size. 
*/ #define hr_hw_page_align(x) align(x, HNS_HW_PAGE_SIZE) +#define hr_hw_page_count(x) (hr_hw_page_align(x) / HNS_HW_PAGE_SIZE) static inline unsigned int to_hr_hem_entries_size(int count, int buf_shift) { @@ -440,9 +458,13 @@ void hns_roce_free_buf(struct hns_roce_buf *buf); void hns_roce_free_qp_buf(struct hns_roce_qp *qp, struct hns_roce_context *ctx); +int hns_roce_attach_dca_mem(struct hns_roce_context *ctx, uint32_t handle, + struct hns_roce_dca_attach_attr *attr, + uint32_t size, struct hns_roce_dca_buf *buf); +void hns_roce_detach_dca_mem(struct hns_roce_context *ctx, uint32_t handle, + struct hns_roce_dca_detach_attr *attr); void hns_roce_shrink_dca_mem(struct hns_roce_context *ctx); void hns_roce_cleanup_dca_mem(struct hns_roce_context *ctx); -int hns_roce_add_dca_mem(struct hns_roce_context *ctx, uint32_t size); void hns_roce_init_qp_indices(struct hns_roce_qp *qp); diff --git a/providers/hns/hns_roce_u_buf.c b/providers/hns/hns_roce_u_buf.c index ff9e9a7..8142fcd 100644 --- a/providers/hns/hns_roce_u_buf.c +++ b/providers/hns/hns_roce_u_buf.c @@ -180,6 +180,66 @@ static int shrink_dca_mem(struct hns_roce_context *ctx, uint32_t handle, return execute_ioctl(&ctx->ibv_ctx.context, cmd); } + +struct hns_dca_mem_query_resp { + uint64_t key; + uint32_t offset; + uint32_t page_count; +}; + +static int query_dca_mem(struct hns_roce_context *ctx, uint32_t handle, + uint32_t index, struct hns_dca_mem_query_resp *resp) +{ + DECLARE_COMMAND_BUFFER(cmd, HNS_IB_OBJECT_DCA_MEM, + HNS_IB_METHOD_DCA_MEM_QUERY, 5); + fill_attr_in_obj(cmd, HNS_IB_ATTR_DCA_MEM_QUERY_HANDLE, handle); + fill_attr_in_uint32(cmd, HNS_IB_ATTR_DCA_MEM_QUERY_PAGE_INDEX, index); + fill_attr_out(cmd, HNS_IB_ATTR_DCA_MEM_QUERY_OUT_KEY, + &resp->key, sizeof(resp->key)); + fill_attr_out(cmd, HNS_IB_ATTR_DCA_MEM_QUERY_OUT_OFFSET, + &resp->offset, sizeof(resp->offset)); + fill_attr_out(cmd, HNS_IB_ATTR_DCA_MEM_QUERY_OUT_PAGE_COUNT, + &resp->page_count, sizeof(resp->page_count)); + return execute_ioctl(&ctx->ibv_ctx.context, cmd); +} + +void hns_roce_detach_dca_mem(struct hns_roce_context *ctx, uint32_t handle, + struct hns_roce_dca_detach_attr *attr) +{ + DECLARE_COMMAND_BUFFER(cmd, HNS_IB_OBJECT_DCA_MEM, + HNS_IB_METHOD_DCA_MEM_DETACH, 4); + fill_attr_in_obj(cmd, HNS_IB_ATTR_DCA_MEM_DETACH_HANDLE, handle); + fill_attr_in_uint32(cmd, HNS_IB_ATTR_DCA_MEM_DETACH_SQ_INDEX, + attr->sq_index); + execute_ioctl(&ctx->ibv_ctx.context, cmd); +} + +struct hns_dca_mem_attach_resp { +#define HNS_DCA_ATTACH_OUT_FLAGS_NEW_BUFFER BIT(0) + uint32_t alloc_flags; + uint32_t alloc_pages; +}; + +static int attach_dca_mem(struct hns_roce_context *ctx, uint32_t handle, + struct hns_roce_dca_attach_attr *attr, + struct hns_dca_mem_attach_resp *resp) +{ + DECLARE_COMMAND_BUFFER(cmd, HNS_IB_OBJECT_DCA_MEM, + HNS_IB_METHOD_DCA_MEM_ATTACH, 6); + fill_attr_in_obj(cmd, HNS_IB_ATTR_DCA_MEM_ATTACH_HANDLE, handle); + fill_attr_in_uint32(cmd, HNS_IB_ATTR_DCA_MEM_ATTACH_SQ_OFFSET, + attr->sq_offset); + fill_attr_in_uint32(cmd, HNS_IB_ATTR_DCA_MEM_ATTACH_SGE_OFFSET, + attr->sge_offset); + fill_attr_in_uint32(cmd, HNS_IB_ATTR_DCA_MEM_ATTACH_RQ_OFFSET, + attr->rq_offset); + fill_attr_out(cmd, HNS_IB_ATTR_DCA_MEM_ATTACH_OUT_ALLOC_FLAGS, + &resp->alloc_flags, sizeof(resp->alloc_flags)); + fill_attr_out(cmd, HNS_IB_ATTR_DCA_MEM_ATTACH_OUT_ALLOC_PAGES, + &resp->alloc_pages, sizeof(resp->alloc_pages)); + return execute_ioctl(&ctx->ibv_ctx.context, cmd); +} + static bool add_dca_mem_enabled(struct hns_roce_dca_ctx *ctx, uint32_t alloc_size) { @@ -210,7 +270,7 @@ static 
bool shrink_dca_mem_enabled(struct hns_roce_dca_ctx *ctx) return enable; } -int hns_roce_add_dca_mem(struct hns_roce_context *ctx, uint32_t size) +static int add_dca_mem(struct hns_roce_context *ctx, uint32_t size) { struct hns_roce_dca_ctx *dca_ctx = &ctx->dca_ctx; struct hns_roce_dca_mem *mem; @@ -294,3 +354,89 @@ void hns_roce_shrink_dca_mem(struct hns_roce_context *ctx) dca_mem_cnt--; } } + +static void config_dca_pages(void *addr, struct hns_roce_dca_buf *buf, + uint32_t page_index, int page_count) +{ + void **pages = &buf->bufs[page_index]; + int page_size = 1 << buf->shift; + int i; + + for (i = 0; i < page_count; i++) { + pages[i] = addr; + addr += page_size; + } +} + +static int setup_dca_buf(struct hns_roce_context *ctx, uint32_t handle, + struct hns_roce_dca_buf *buf, uint32_t page_count) +{ + struct hns_roce_dca_ctx *dca_ctx = &ctx->dca_ctx; + struct hns_dca_mem_query_resp resp = {}; + struct hns_roce_dca_mem *mem; + uint32_t idx = 0; + int ret; + + while (idx < page_count && idx < buf->max_cnt) { + resp.page_count = 0; + ret = query_dca_mem(ctx, handle, idx, &resp); + if (ret) + return -ENOMEM; + if (resp.page_count < 1) + break; + + pthread_spin_lock(&dca_ctx->lock); + mem = key_to_dca_mem(dca_ctx, resp.key); + if (mem && resp.offset < mem->buf.length) { + config_dca_pages(dca_mem_addr(mem, resp.offset), + buf, idx, resp.page_count); + } else { + pthread_spin_unlock(&dca_ctx->lock); + break; + } + pthread_spin_unlock(&dca_ctx->lock); + + idx += resp.page_count; + } + + return (idx >= page_count) ? 0 : -ENOMEM; +} + +#define DCA_EXPAND_MEM_TRY_TIMES 3 +int hns_roce_attach_dca_mem(struct hns_roce_context *ctx, uint32_t handle, + struct hns_roce_dca_attach_attr *attr, + uint32_t size, struct hns_roce_dca_buf *buf) +{ + uint32_t buf_pages = size >> buf->shift; + struct hns_dca_mem_attach_resp resp = {}; + bool is_new_buf = true; + int try_times = 0; + int ret = 0; + + do { + resp.alloc_pages = 0; + ret = attach_dca_mem(ctx, handle, attr, &resp); + if (ret) + break; + + if (resp.alloc_pages >= buf_pages) { + is_new_buf = !!(resp.alloc_flags & + HNS_DCA_ATTACH_OUT_FLAGS_NEW_BUFFER); + break; + } + + ret = add_dca_mem(ctx, size); + if (ret) + break; + } while (try_times++ < DCA_EXPAND_MEM_TRY_TIMES); + + if (ret || resp.alloc_pages < buf_pages) + return -ENOMEM; + + + /* No need config user address if DCA config not changed */ + if (!is_new_buf && buf->bufs[0]) + return 0; + + return setup_dca_buf(ctx, handle, buf, buf_pages); +} diff --git a/providers/hns/hns_roce_u_hw_v2.c b/providers/hns/hns_roce_u_hw_v2.c index bec2a45..dff0e42 100644 --- a/providers/hns/hns_roce_u_hw_v2.c +++ b/providers/hns/hns_roce_u_hw_v2.c @@ -227,19 +227,35 @@ static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *cq) return get_sw_cqe_v2(cq, cq->cons_index); } +static inline bool check_qp_dca_enable(struct hns_roce_qp *qp) +{ + return !!qp->dca_wqe.bufs; +} + +static inline void *get_wqe(struct hns_roce_qp *qp, unsigned int offset) +{ + if (likely(qp->buf.buf)) + return qp->buf.buf + offset; + else if (unlikely(check_qp_dca_enable(qp))) + return qp->dca_wqe.bufs[offset >> qp->dca_wqe.shift] + + (offset & ((1 << qp->dca_wqe.shift) - 1)); + else + return NULL; +} + static void *get_recv_wqe_v2(struct hns_roce_qp *qp, unsigned int n) { - return qp->buf.buf + qp->rq.offset + (n << qp->rq.wqe_shift); + return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); } static void *get_send_wqe(struct hns_roce_qp *qp, unsigned int n) { - return qp->buf.buf + qp->sq.offset + (n << qp->sq.wqe_shift); + 
return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift)); } static void *get_send_sge_ex(struct hns_roce_qp *qp, unsigned int n) { - return qp->buf.buf + qp->ex_sge.offset + (n << qp->ex_sge.sge_shift); + return get_wqe(qp, qp->ex_sge.offset + (n << qp->ex_sge.sge_shift)); } static void *get_srq_wqe(struct hns_roce_srq *srq, unsigned int n) @@ -502,6 +518,72 @@ static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe, return V2_CQ_OK; } +static bool check_dca_attach_enable(struct hns_roce_qp *qp) +{ + return check_qp_dca_enable(qp) && + (qp->flags & HNS_ROCE_QP_CAP_DYNAMIC_CTX_ATTACH); +} +static bool check_dca_detach_enable(struct hns_roce_qp *qp) +{ + return check_qp_dca_enable(qp) && + (qp->flags & HNS_ROCE_QP_CAP_DYNAMIC_CTX_DETACH); +} + +static int dca_attach_qp_buf(struct hns_roce_context *ctx, + struct hns_roce_qp *qp) +{ + struct hns_roce_dca_attach_attr attr = {}; + uint32_t idx; + int ret; + + pthread_spin_lock(&qp->sq.lock); + pthread_spin_lock(&qp->rq.lock); + + if (qp->sq.wqe_cnt > 0) { + idx = qp->sq.head & (qp->sq.wqe_cnt - 1); + attr.sq_offset = idx << qp->sq.wqe_shift; + } + + if (qp->ex_sge.sge_cnt > 0) { + idx = qp->next_sge & (qp->ex_sge.sge_cnt - 1); + attr.sge_offset = idx << qp->ex_sge.sge_shift; + } + + if (qp->rq.wqe_cnt > 0) { + idx = qp->rq.head & (qp->rq.wqe_cnt - 1); + attr.rq_offset = idx << qp->rq.wqe_shift; + } + + + ret = hns_roce_attach_dca_mem(ctx, qp->verbs_qp.qp.handle, &attr, + qp->buf_size, &qp->dca_wqe); + + pthread_spin_unlock(&qp->rq.lock); + pthread_spin_unlock(&qp->sq.lock); + + return ret; +} + +static void dca_detach_qp_buf(struct hns_roce_context *ctx, + struct hns_roce_qp *qp) +{ + struct hns_roce_dca_detach_attr attr; + bool is_empty; + + pthread_spin_lock(&qp->sq.lock); + pthread_spin_lock(&qp->rq.lock); + + is_empty = qp->sq.head == qp->sq.tail && qp->rq.head == qp->rq.tail; + if (is_empty && qp->sq.wqe_cnt > 0) + attr.sq_index = qp->sq.head & (qp->sq.wqe_cnt - 1); + + pthread_spin_unlock(&qp->rq.lock); + pthread_spin_unlock(&qp->sq.lock); + + if (is_empty) + hns_roce_detach_dca_mem(ctx, qp->verbs_qp.qp.handle, &attr); +} + static int hns_roce_v2_poll_one(struct hns_roce_cq *cq, struct hns_roce_qp **cur_qp, struct ibv_wc *wc) { @@ -641,6 +723,9 @@ static int hns_roce_u_v2_poll_cq(struct ibv_cq *ibvcq, int ne, for (npolled = 0; npolled < ne; ++npolled) { err = hns_roce_v2_poll_one(cq, &qp, wc + npolled); + if (qp && check_dca_detach_enable(qp)) + dca_detach_qp_buf(ctx, qp); + if (err != V2_CQ_OK) break; } @@ -690,18 +775,23 @@ static int hns_roce_u_v2_arm_cq(struct ibv_cq *ibvcq, int solicited) return 0; } -static int check_qp_send(struct ibv_qp *qp, struct hns_roce_context *ctx) +static int check_qp_send(struct hns_roce_qp *qp, struct hns_roce_context *ctx) { - if (unlikely(qp->qp_type != IBV_QPT_RC && - qp->qp_type != IBV_QPT_UD) && - qp->qp_type != IBV_QPT_XRC_SEND) + struct ibv_qp *ibvqp = &qp->verbs_qp.qp; + + if (unlikely(ibvqp->qp_type != IBV_QPT_RC && + ibvqp->qp_type != IBV_QPT_UD) && + ibvqp->qp_type != IBV_QPT_XRC_SEND) return -EINVAL; - if (unlikely(qp->state == IBV_QPS_RESET || - qp->state == IBV_QPS_INIT || - qp->state == IBV_QPS_RTR)) + if (unlikely(ibvqp->state == IBV_QPS_RESET || + ibvqp->state == IBV_QPS_INIT || + ibvqp->state == IBV_QPS_RTR)) return -EINVAL; + if (check_dca_attach_enable(qp)) + return dca_attach_qp_buf(ctx, qp); + return 0; } @@ -1058,6 +1148,16 @@ static int set_rc_inl(struct hns_roce_qp *qp, const struct ibv_send_wr *wr, return 0; } +static inline void fill_rc_dca_fields(uint32_t qp_num, 
+ struct hns_roce_rc_sq_wqe *wqe) +{ + roce_set_field(wqe->byte_4, RC_SQ_WQE_BYTE_4_SQPN_L_M, + RC_SQ_WQE_BYTE_4_SQPN_L_S, qp_num); + roce_set_field(wqe->byte_4, RC_SQ_WQE_BYTE_4_SQPN_H_M, + RC_SQ_WQE_BYTE_4_SQPN_H_S, + qp_num >> RC_SQ_WQE_BYTE_4_SQPN_L_W); +} + static void set_bind_mw_seg(struct hns_roce_rc_sq_wqe *wqe, const struct ibv_send_wr *wr) { @@ -1173,6 +1273,9 @@ static int set_rc_wqe(void *wqe, struct hns_roce_qp *qp, struct ibv_send_wr *wr, return ret; wqe_valid: + if (check_qp_dca_enable(qp)) + fill_rc_dca_fields(qp->verbs_qp.qp.qp_num, rc_sq_wqe); + /* * The pipeline can sequentially post all valid WQEs into WQ buffer, * including new WQEs waiting for the doorbell to update the PI again. @@ -1199,7 +1302,7 @@ int hns_roce_u_v2_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr, struct ibv_qp_attr attr; int ret; - ret = check_qp_send(ibvqp, ctx); + ret = check_qp_send(qp, ctx); if (unlikely(ret)) { *bad_wr = wr; return ret; @@ -1274,15 +1377,20 @@ out: return ret; } -static int check_qp_recv(struct ibv_qp *qp, struct hns_roce_context *ctx) +static int check_qp_recv(struct hns_roce_qp *qp, struct hns_roce_context *ctx) { - if (unlikely(qp->qp_type != IBV_QPT_RC && - qp->qp_type != IBV_QPT_UD)) + struct ibv_qp *ibvqp = &qp->verbs_qp.qp; + + if (unlikely(ibvqp->qp_type != IBV_QPT_RC && + ibvqp->qp_type != IBV_QPT_UD)) return -EINVAL; - if (qp->state == IBV_QPS_RESET || qp->srq) + if (ibvqp->state == IBV_QPS_RESET || ibvqp->srq) return -EINVAL; + if (check_dca_attach_enable(qp)) + return dca_attach_qp_buf(ctx, qp); + return 0; } @@ -1325,7 +1433,7 @@ static int hns_roce_u_v2_post_recv(struct ibv_qp *ibvqp, struct ibv_recv_wr *wr, struct ibv_qp_attr attr; int ret; - ret = check_qp_recv(ibvqp, ctx); + ret = check_qp_recv(qp, ctx); if (unlikely(ret)) { *bad_wr = wr; return ret; @@ -1458,6 +1566,7 @@ static int hns_roce_u_v2_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, struct ibv_modify_qp cmd; struct hns_roce_qp *hr_qp = to_hr_qp(qp); bool flag = false; /* modify qp to error */ + struct hns_roce_context *ctx = to_hr_ctx(qp->context); if ((attr_mask & IBV_QP_STATE) && (attr->qp_state == IBV_QPS_ERR)) { pthread_spin_lock(&hr_qp->sq.lock); @@ -1490,6 +1599,10 @@ static int hns_roce_u_v2_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, hns_roce_init_qp_indices(to_hr_qp(qp)); } + /* Try to shrink the DCA mem */ + if (ctx->dca_ctx.mem_cnt > 0) + hns_roce_shrink_dca_mem(ctx); + record_qp_attr(qp, attr, attr_mask); return ret; diff --git a/providers/hns/hns_roce_u_hw_v2.h b/providers/hns/hns_roce_u_hw_v2.h index c13d82e..be6be73 100644 --- a/providers/hns/hns_roce_u_hw_v2.h +++ b/providers/hns/hns_roce_u_hw_v2.h @@ -239,6 +239,13 @@ struct hns_roce_rc_sq_wqe { #define RC_SQ_WQE_BYTE_4_RDMA_WRITE_S 22 +#define RC_SQ_WQE_BYTE_4_SQPN_L_W 2 +#define RC_SQ_WQE_BYTE_4_SQPN_L_S 5 +#define RC_SQ_WQE_BYTE_4_SQPN_L_M GENMASK(6, 5) + +#define RC_SQ_WQE_BYTE_4_SQPN_H_S 13 +#define RC_SQ_WQE_BYTE_4_SQPN_H_M GENMASK(30, 13) + #define RC_SQ_WQE_BYTE_16_XRC_SRQN_S 0 #define RC_SQ_WQE_BYTE_16_XRC_SRQN_M \ (((1UL << 24) - 1) << RC_SQ_WQE_BYTE_16_XRC_SRQN_S) diff --git a/providers/hns/hns_roce_u_verbs.c b/providers/hns/hns_roce_u_verbs.c index 7b44829..015f417 100644 --- a/providers/hns/hns_roce_u_verbs.c +++ b/providers/hns/hns_roce_u_verbs.c @@ -903,6 +903,14 @@ static int calc_qp_buff_size(struct hns_roce_device *hr_dev, return 0; } +static inline bool check_qp_support_dca(bool pool_en, enum ibv_qp_type qp_type) +{ + if (pool_en && (qp_type == IBV_QPT_RC || qp_type == IBV_QPT_XRC_SEND)) 
+ return true; + + return false; +} + static void qp_free_wqe(struct hns_roce_qp *qp) { qp_free_recv_inl_buf(qp); @@ -914,8 +922,8 @@ static void qp_free_wqe(struct hns_roce_qp *qp) hns_roce_free_buf(&qp->buf); } -static int qp_alloc_wqe(struct ibv_qp_cap *cap, struct hns_roce_qp *qp, - struct hns_roce_context *ctx) +static int qp_alloc_wqe(struct ibv_qp_init_attr_ex *attr, + struct hns_roce_qp *qp, struct hns_roce_context *ctx) { struct hns_roce_device *hr_dev = to_hr_dev(ctx->ibv_ctx.context.device); @@ -933,12 +941,24 @@ static int qp_alloc_wqe(struct ibv_qp_cap *cap, struct hns_roce_qp *qp, } if (qp->rq_rinl_buf.wqe_cnt) { - if (qp_alloc_recv_inl_buf(cap, qp)) + if (qp_alloc_recv_inl_buf(&attr->cap, qp)) goto err_alloc; } - if (hns_roce_alloc_buf(&qp->buf, qp->buf_size, HNS_HW_PAGE_SIZE)) - goto err_alloc; + if (check_qp_support_dca(ctx->dca_ctx.max_size != 0, attr->qp_type)) { + /* when DCA is enabled, use a buffer list to store page addr */ + qp->buf.buf = NULL; + qp->dca_wqe.max_cnt = hr_hw_page_count(qp->buf_size); + qp->dca_wqe.shift = HNS_HW_PAGE_SHIFT; + qp->dca_wqe.bufs = calloc(qp->dca_wqe.max_cnt, + sizeof(void *)); + if (!qp->dca_wqe.bufs) + goto err_alloc; + } else { + if (hns_roce_alloc_buf(&qp->buf, qp->buf_size, + HNS_HW_PAGE_SIZE)) + goto err_alloc; + } return 0; @@ -1174,7 +1194,7 @@ static int hns_roce_alloc_qp_buf(struct ibv_qp_init_attr_ex *attr, pthread_spin_init(&qp->rq.lock, PTHREAD_PROCESS_PRIVATE)) return -ENOMEM; - ret = qp_alloc_wqe(&attr->cap, qp, ctx); + ret = qp_alloc_wqe(attr, qp, ctx); if (ret) return ret; From patchwork Tue Jul 27 07:28:16 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Wenpeng Liang X-Patchwork-Id: 12402041 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-16.8 required=3.0 tests=BAYES_00, HEADER_FROM_DIFFERENT_DOMAINS,INCLUDES_CR_TRAILER,INCLUDES_PATCH, MAILING_LIST_MULTI,SPF_HELO_NONE,SPF_PASS,USER_AGENT_GIT autolearn=ham autolearn_force=no version=3.4.0 Received: from mail.kernel.org (mail.kernel.org [198.145.29.99]) by smtp.lore.kernel.org (Postfix) with ESMTP id C1040C4320E for ; Tue, 27 Jul 2021 07:32:01 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id AA301611ED for ; Tue, 27 Jul 2021 07:32:01 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235629AbhG0Hb7 (ORCPT ); Tue, 27 Jul 2021 03:31:59 -0400 Received: from szxga08-in.huawei.com ([45.249.212.255]:12268 "EHLO szxga08-in.huawei.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S235656AbhG0Hb5 (ORCPT ); Tue, 27 Jul 2021 03:31:57 -0400 Received: from dggemv711-chm.china.huawei.com (unknown [172.30.72.55]) by szxga08-in.huawei.com (SkyGuard) with ESMTP id 4GYpHZ2XY7z1CNtG; Tue, 27 Jul 2021 15:26:02 +0800 (CST) Received: from dggpeml500017.china.huawei.com (7.185.36.243) by dggemv711-chm.china.huawei.com (10.1.198.66) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2176.2; Tue, 27 Jul 2021 15:31:56 +0800 Received: from localhost.localdomain (10.67.165.24) by dggpeml500017.china.huawei.com (7.185.36.243) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2176.2; Tue, 27 Jul 2021 15:31:56 +0800 From: Wenpeng Liang To: , CC: , Subject: [PATCH v2 rdma-core 05/10] libhns: Use shared memory to 
sync DCA status Date: Tue, 27 Jul 2021 15:28:16 +0800 Message-ID: <1627370901-10054-6-git-send-email-liangwenpeng@huawei.com> X-Mailer: git-send-email 2.8.1 In-Reply-To: <1627370901-10054-1-git-send-email-liangwenpeng@huawei.com> References: <1627370901-10054-1-git-send-email-liangwenpeng@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.67.165.24] X-ClientProxiedBy: dggems701-chm.china.huawei.com (10.3.19.178) To dggpeml500017.china.huawei.com (7.185.36.243) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: linux-rdma@vger.kernel.org From: Xi Wang The user DCA needs to check the QP attaching state before filling wqe buffer by the response from uverbs 'HNS_IB_METHOD_DCA_MEM_ATTACH', but this will result in too much time being wasted on system calls, so use a shared table between user driver and kernel driver to sync DCA status. Signed-off-by: Xi Wang Signed-off-by: Wenpeng Liang --- providers/hns/hns_roce_u.c | 135 +++++++++++++++++++++++++++++++---------- providers/hns/hns_roce_u.h | 11 ++++ providers/hns/hns_roce_u_abi.h | 3 +- 3 files changed, 116 insertions(+), 33 deletions(-) diff --git a/providers/hns/hns_roce_u.c b/providers/hns/hns_roce_u.c index a4e0997..3b13d0f 100644 --- a/providers/hns/hns_roce_u.c +++ b/providers/hns/hns_roce_u.c @@ -95,7 +95,46 @@ static const struct verbs_context_ops hns_common_ops = { .get_srq_num = hns_roce_u_get_srq_num, }; -static int init_dca_context(struct hns_roce_context *ctx, int page_size) +/* command value is offset[15:8] */ +static void hns_roce_mmap_set_command(int command, off_t *offset) +{ + *offset |= (command & 0xff) << 8; +} + +/* index value is offset[63:16] | offset[7:0] */ +static void hns_roce_mmap_set_index(unsigned long index, off_t *offset) +{ + *offset |= (index & 0xff) | ((index >> 8) << 16); +} + +static off_t get_uar_mmap_offset(unsigned long idx, int page_size, int cmd) +{ + off_t offset = 0; + + hns_roce_mmap_set_command(cmd, &offset); + hns_roce_mmap_set_index(idx, &offset); + + return offset * page_size; +} + +static int mmap_dca(struct hns_roce_dca_ctx *dca_ctx, int cmd_fd, int page_size, + size_t size) +{ + void *addr; + + addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, cmd_fd, + get_uar_mmap_offset(0, page_size, HNS_ROCE_MMAP_DCA_PAGE)); + if (addr == MAP_FAILED) + return -EINVAL; + + dca_ctx->buf_status = addr; + dca_ctx->sync_status = addr + size / 2; + + return 0; +} + +static int init_dca_context(struct hns_roce_context *ctx, int cmd_fd, + int page_size, int max_qps, int mmap_size) { struct hns_roce_dca_ctx *dca_ctx = &ctx->dca_ctx; int ret; @@ -112,6 +151,16 @@ static int init_dca_context(struct hns_roce_context *ctx, int page_size) dca_ctx->max_size = HNS_DCA_MAX_MEM_SIZE; dca_ctx->mem_cnt = 0; + if (mmap_size > 0) { + const unsigned int bits_per_qp = 2 * HNS_DCA_BITS_PER_STATUS; + + if (!mmap_dca(dca_ctx, cmd_fd, page_size, mmap_size)) { + dca_ctx->status_size = mmap_size; + dca_ctx->max_qps = min_t(int, max_qps, + mmap_size * 8 / bits_per_qp); + } + } + return 0; } @@ -126,19 +175,60 @@ static void uninit_dca_context(struct hns_roce_context *ctx) hns_roce_cleanup_dca_mem(ctx); pthread_spin_unlock(&dca_ctx->lock); + if (dca_ctx->buf_status) + munmap(dca_ctx->buf_status, dca_ctx->status_size); + pthread_spin_destroy(&dca_ctx->lock); } +static int hns_roce_mmap(struct hns_roce_device *hr_dev, + struct hns_roce_context *context, int cmd_fd) +{ + int page_size = hr_dev->page_size; + off_t offset; + + offset = get_uar_mmap_offset(0, page_size, HNS_ROCE_MMAP_REGULAR_PAGE); + context->uar = 
mmap(NULL, page_size, PROT_READ | PROT_WRITE, + MAP_SHARED, cmd_fd, offset); + if (context->uar == MAP_FAILED) + return -EINVAL; + + offset = get_uar_mmap_offset(1, page_size, HNS_ROCE_MMAP_REGULAR_PAGE); + if (hr_dev->hw_version == HNS_ROCE_HW_VER1) { + /* + * when vma->vm_pgoff is 1, the cq_tptr_base includes 64K CQ, + * a pointer of CQ need 2B size + */ + context->cq_tptr_base = mmap(NULL, HNS_ROCE_CQ_DB_BUF_SIZE, + PROT_READ | PROT_WRITE, MAP_SHARED, + cmd_fd, offset); + if (context->cq_tptr_base == MAP_FAILED) + goto db_free; + } + + return 0; + +db_free: + munmap(context->uar, hr_dev->page_size); + + return -EINVAL; +} + +static void ucontext_set_cmd(struct hns_roce_alloc_ucontext *cmd, int page_size) +{ + cmd->comp = HNS_ROCE_ALLOC_UCTX_COMP_DCA_MAX_QPS; + cmd->dca_max_qps = page_size * 8 / 2 * HNS_DCA_BITS_PER_STATUS; +} + static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev, int cmd_fd, void *private_data) { struct hns_roce_device *hr_dev = to_hr_dev(ibdev); struct hns_roce_alloc_ucontext_resp resp = {}; + struct hns_roce_alloc_ucontext cmd = {}; struct ibv_device_attr dev_attrs; struct hns_roce_context *context; - struct ibv_get_context cmd; - int offset = 0; int i; context = verbs_init_and_alloc_context(ibdev, cmd_fd, context, ibv_ctx, @@ -146,7 +236,8 @@ static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev, if (!context) return NULL; - if (ibv_cmd_get_context(&context->ibv_ctx, &cmd, sizeof(cmd), + ucontext_set_cmd(&cmd, hr_dev->page_size); + if (ibv_cmd_get_context(&context->ibv_ctx, &cmd.ibv_cmd, sizeof(cmd), &resp.ibv_resp, sizeof(resp))) goto err_free; @@ -190,42 +281,22 @@ static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev, context->max_srq_wr = dev_attrs.max_srq_wr; context->max_srq_sge = dev_attrs.max_srq_sge; - context->uar = mmap(NULL, hr_dev->page_size, PROT_READ | PROT_WRITE, - MAP_SHARED, cmd_fd, offset); - if (context->uar == MAP_FAILED) - goto err_free; - - offset += hr_dev->page_size; - - if (hr_dev->hw_version == HNS_ROCE_HW_VER1) { - /* - * when vma->vm_pgoff is 1, the cq_tptr_base includes 64K CQ, - * a pointer of CQ need 2B size - */ - context->cq_tptr_base = mmap(NULL, HNS_ROCE_CQ_DB_BUF_SIZE, - PROT_READ | PROT_WRITE, MAP_SHARED, - cmd_fd, offset); - if (context->cq_tptr_base == MAP_FAILED) - goto db_free; - } - pthread_spin_init(&context->uar_lock, PTHREAD_PROCESS_PRIVATE); verbs_set_ops(&context->ibv_ctx, &hns_common_ops); verbs_set_ops(&context->ibv_ctx, &hr_dev->u_hw->hw_ops); - if (init_dca_context(context, hr_dev->page_size)) - goto tptr_free; + if (init_dca_context(context, cmd_fd, hr_dev->page_size, resp.dca_qps, + resp.dca_mmap_size)) + goto err_free; - return &context->ibv_ctx; + if (hns_roce_mmap(hr_dev, context, cmd_fd)) + goto dca_free; -tptr_free: - if (hr_dev->hw_version == HNS_ROCE_HW_VER1) - munmap(context->cq_tptr_base, HNS_ROCE_CQ_DB_BUF_SIZE); + return &context->ibv_ctx; -db_free: - munmap(context->uar, hr_dev->page_size); - context->uar = NULL; +dca_free: + uninit_dca_context(context); err_free: verbs_uninit_context(&context->ibv_ctx); diff --git a/providers/hns/hns_roce_u.h b/providers/hns/hns_roce_u.h index 08e60b7..95e8046 100644 --- a/providers/hns/hns_roce_u.h +++ b/providers/hns/hns_roce_u.h @@ -35,6 +35,7 @@ #include #include +#include #include #include @@ -44,6 +45,8 @@ #include #include #include +#include + #include #include "hns_roce_u_abi.h" @@ -54,6 +57,8 @@ #define PFX "hns: " +typedef _Atomic(uint64_t) atomic_bitmap_t; + /* The minimum page size is 
4K for hardware */ #define HNS_HW_PAGE_SHIFT 12 #define HNS_HW_PAGE_SIZE (1 << HNS_HW_PAGE_SHIFT) @@ -157,6 +162,12 @@ struct hns_roce_dca_ctx { uint64_t max_size; uint64_t min_size; uint64_t curr_size; + +#define HNS_DCA_BITS_PER_STATUS 1 + unsigned int max_qps; + unsigned int status_size; + atomic_bitmap_t *buf_status; + atomic_bitmap_t *sync_status; }; struct hns_roce_context { diff --git a/providers/hns/hns_roce_u_abi.h b/providers/hns/hns_roce_u_abi.h index e56f9d3..23509c1 100644 --- a/providers/hns/hns_roce_u_abi.h +++ b/providers/hns/hns_roce_u_abi.h @@ -39,10 +39,11 @@ DECLARE_DRV_CMD(hns_roce_alloc_pd, IB_USER_VERBS_CMD_ALLOC_PD, empty, hns_roce_ib_alloc_pd_resp); + DECLARE_DRV_CMD(hns_roce_create_cq, IB_USER_VERBS_CMD_CREATE_CQ, hns_roce_ib_create_cq, hns_roce_ib_create_cq_resp); DECLARE_DRV_CMD(hns_roce_alloc_ucontext, IB_USER_VERBS_CMD_GET_CONTEXT, - empty, hns_roce_ib_alloc_ucontext_resp); + hns_roce_ib_alloc_ucontext, hns_roce_ib_alloc_ucontext_resp); DECLARE_DRV_CMD(hns_roce_create_qp, IB_USER_VERBS_CMD_CREATE_QP, hns_roce_ib_create_qp, hns_roce_ib_create_qp_resp); From patchwork Tue Jul 27 07:28:17 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Wenpeng Liang X-Patchwork-Id: 12402043 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-16.7 required=3.0 tests=BAYES_00, HEADER_FROM_DIFFERENT_DOMAINS,INCLUDES_CR_TRAILER,INCLUDES_PATCH, MAILING_LIST_MULTI,SPF_HELO_NONE,SPF_PASS,URIBL_BLOCKED,USER_AGENT_GIT autolearn=ham autolearn_force=no version=3.4.0 Received: from mail.kernel.org (mail.kernel.org [198.145.29.99]) by smtp.lore.kernel.org (Postfix) with ESMTP id 03124C43214 for ; Tue, 27 Jul 2021 07:32:03 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id E0BA5611C4 for ; Tue, 27 Jul 2021 07:32:02 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235659AbhG0HcB (ORCPT ); Tue, 27 Jul 2021 03:32:01 -0400 Received: from szxga03-in.huawei.com ([45.249.212.189]:12314 "EHLO szxga03-in.huawei.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S235675AbhG0Hb6 (ORCPT ); Tue, 27 Jul 2021 03:31:58 -0400 Received: from dggemv704-chm.china.huawei.com (unknown [172.30.72.54]) by szxga03-in.huawei.com (SkyGuard) with ESMTP id 4GYpJy4tgbz7ydv; Tue, 27 Jul 2021 15:27:14 +0800 (CST) Received: from dggpeml500017.china.huawei.com (7.185.36.243) by dggemv704-chm.china.huawei.com (10.3.19.47) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2176.2; Tue, 27 Jul 2021 15:31:56 +0800 Received: from localhost.localdomain (10.67.165.24) by dggpeml500017.china.huawei.com (7.185.36.243) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2176.2; Tue, 27 Jul 2021 15:31:56 +0800 From: Wenpeng Liang To: , CC: , Subject: [PATCH v2 rdma-core 06/10] libhns: Sync DCA status by shared memory Date: Tue, 27 Jul 2021 15:28:17 +0800 Message-ID: <1627370901-10054-7-git-send-email-liangwenpeng@huawei.com> X-Mailer: git-send-email 2.8.1 In-Reply-To: <1627370901-10054-1-git-send-email-liangwenpeng@huawei.com> References: <1627370901-10054-1-git-send-email-liangwenpeng@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.67.165.24] X-ClientProxiedBy: dggems701-chm.china.huawei.com (10.3.19.178) To dggpeml500017.china.huawei.com 
(7.185.36.243) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: linux-rdma@vger.kernel.org From: Xi Wang Use DCA num from the resp of modify_qp() and indicate the DCA status bit in the shared memory, if the num is valid, the user DCA can get the DCA status by testing the bit in the shared memory for each QP, othewise invoke the verbs 'HNS_IB_METHOD_DCA_MEM_ATTACH' to check the DCA status. Each QP has 2 bits in shared memory, 1 bit is used to lock the DCA status changing by kernel driver or user driver, another bit is used to indicate the DCA attaching status. Signed-off-by: Xi Wang Signed-off-by: Wenpeng Liang --- providers/hns/hns_roce_u.h | 31 +++++++++++++++++++++++++++++ providers/hns/hns_roce_u_abi.h | 3 +++ providers/hns/hns_roce_u_buf.c | 42 ++++++++++++++++++++++++++++++++++++++++ providers/hns/hns_roce_u_hw_v2.c | 28 +++++++++++++++++++++++---- 4 files changed, 100 insertions(+), 4 deletions(-) diff --git a/providers/hns/hns_roce_u.h b/providers/hns/hns_roce_u.h index 95e8046..fb7b864 100644 --- a/providers/hns/hns_roce_u.h +++ b/providers/hns/hns_roce_u.h @@ -298,6 +298,7 @@ struct hns_roce_dca_buf { void **bufs; unsigned int max_cnt; unsigned int shift; + unsigned int dcan; }; struct hns_roce_qp { @@ -349,6 +350,7 @@ struct hns_roce_dca_attach_attr { uint32_t sq_offset; uint32_t sge_offset; uint32_t rq_offset; + bool force; }; struct hns_roce_dca_detach_attr { @@ -402,6 +404,32 @@ static inline struct hns_roce_ah *to_hr_ah(struct ibv_ah *ibv_ah) return container_of(ibv_ah, struct hns_roce_ah, ibv_ah); } +#define HNS_ROCE_BIT_MASK(nr) (1UL << ((nr) % 64)) +#define HNS_ROCE_BIT_WORD(nr) ((nr) / 64) + +static inline bool atomic_test_bit(atomic_bitmap_t *p, uint32_t nr) +{ + p += HNS_ROCE_BIT_WORD(nr); + return !!(atomic_load(p) & HNS_ROCE_BIT_MASK(nr)); +} + +static inline bool test_and_set_bit_lock(atomic_bitmap_t *p, uint32_t nr) +{ + uint64_t mask = HNS_ROCE_BIT_MASK(nr); + + p += HNS_ROCE_BIT_WORD(nr); + if (atomic_load(p) & mask) + return true; + + return (atomic_fetch_or(p, mask) & mask) != 0; +} + +static inline void clear_bit_unlock(atomic_bitmap_t *p, uint32_t nr) +{ + p += HNS_ROCE_BIT_WORD(nr); + atomic_fetch_and(p, ~HNS_ROCE_BIT_MASK(nr)); +} + int hns_roce_u_query_device(struct ibv_context *context, const struct ibv_query_device_ex_input *input, struct ibv_device_attr_ex *attr, size_t attr_size); @@ -474,6 +502,9 @@ int hns_roce_attach_dca_mem(struct hns_roce_context *ctx, uint32_t handle, uint32_t size, struct hns_roce_dca_buf *buf); void hns_roce_detach_dca_mem(struct hns_roce_context *ctx, uint32_t handle, struct hns_roce_dca_detach_attr *attr); +bool hns_roce_dca_start_post(struct hns_roce_dca_ctx *ctx, uint32_t dcan); +void hns_roce_dca_stop_post(struct hns_roce_dca_ctx *ctx, uint32_t dcan); + void hns_roce_shrink_dca_mem(struct hns_roce_context *ctx); void hns_roce_cleanup_dca_mem(struct hns_roce_context *ctx); diff --git a/providers/hns/hns_roce_u_abi.h b/providers/hns/hns_roce_u_abi.h index 23509c1..3a9aacf 100644 --- a/providers/hns/hns_roce_u_abi.h +++ b/providers/hns/hns_roce_u_abi.h @@ -57,4 +57,7 @@ DECLARE_DRV_CMD(hns_roce_create_srq, IB_USER_VERBS_CMD_CREATE_SRQ, DECLARE_DRV_CMD(hns_roce_create_srq_ex, IB_USER_VERBS_CMD_CREATE_XSRQ, hns_roce_ib_create_srq, hns_roce_ib_create_srq_resp); +DECLARE_DRV_CMD(hns_roce_modify_qp_ex, IB_USER_VERBS_EX_CMD_MODIFY_QP, + empty, hns_roce_ib_modify_qp_resp); + #endif /* _HNS_ROCE_U_ABI_H */ diff --git a/providers/hns/hns_roce_u_buf.c b/providers/hns/hns_roce_u_buf.c index 8142fcd..9a26aff 100644 --- 
a/providers/hns/hns_roce_u_buf.c +++ b/providers/hns/hns_roce_u_buf.c @@ -402,6 +402,45 @@ static int setup_dca_buf(struct hns_roce_context *ctx, uint32_t handle, return (idx >= page_count) ? 0 : -ENOMEM; } +#define DCAN_TO_SYNC_BIT(n) ((n) * HNS_DCA_BITS_PER_STATUS) +#define DCAN_TO_STAT_BIT(n) DCAN_TO_SYNC_BIT(n) + +#define MAX_DCA_TRY_LOCK_TIMES 10 +bool hns_roce_dca_start_post(struct hns_roce_dca_ctx *ctx, uint32_t dcan) +{ + atomic_bitmap_t *st = ctx->sync_status; + int try_times = 0; + + if (!st || dcan >= ctx->max_qps) + return true; + + while (test_and_set_bit_lock(st, DCAN_TO_SYNC_BIT(dcan))) + if (try_times++ > MAX_DCA_TRY_LOCK_TIMES) + return false; + + return true; +} + +void hns_roce_dca_stop_post(struct hns_roce_dca_ctx *ctx, uint32_t dcan) +{ + atomic_bitmap_t *st = ctx->sync_status; + + if (!st || dcan >= ctx->max_qps) + return; + + clear_bit_unlock(st, DCAN_TO_SYNC_BIT(dcan)); +} + +static bool check_dca_is_attached(struct hns_roce_dca_ctx *ctx, uint32_t dcan) +{ + atomic_bitmap_t *st = ctx->buf_status; + + if (!st || dcan >= ctx->max_qps) + return false; + + return atomic_test_bit(st, DCAN_TO_STAT_BIT(dcan)); +} + #define DCA_EXPAND_MEM_TRY_TIMES 3 int hns_roce_attach_dca_mem(struct hns_roce_context *ctx, uint32_t handle, struct hns_roce_dca_attach_attr *attr, @@ -413,6 +452,9 @@ int hns_roce_attach_dca_mem(struct hns_roce_context *ctx, uint32_t handle, int try_times = 0; int ret = 0; + if (!attr->force && check_dca_is_attached(&ctx->dca_ctx, buf->dcan)) + return 0; + do { resp.alloc_pages = 0; ret = attach_dca_mem(ctx, handle, attr, &resp); diff --git a/providers/hns/hns_roce_u_hw_v2.c b/providers/hns/hns_roce_u_hw_v2.c index dff0e42..8989a08 100644 --- a/providers/hns/hns_roce_u_hw_v2.c +++ b/providers/hns/hns_roce_u_hw_v2.c @@ -533,6 +533,7 @@ static int dca_attach_qp_buf(struct hns_roce_context *ctx, struct hns_roce_qp *qp) { struct hns_roce_dca_attach_attr attr = {}; + bool enable_detach; uint32_t idx; int ret; @@ -554,9 +555,16 @@ static int dca_attach_qp_buf(struct hns_roce_context *ctx, attr.rq_offset = idx << qp->rq.wqe_shift; } + enable_detach = check_dca_detach_enable(qp); + if (enable_detach && + !hns_roce_dca_start_post(&ctx->dca_ctx, qp->dca_wqe.dcan)) + /* Force attach if failed to sync dca status */ + attr.force = true; ret = hns_roce_attach_dca_mem(ctx, qp->verbs_qp.qp.handle, &attr, - qp->buf_size, &qp->dca_wqe); + qp->buf_size, &qp->dca_wqe); + if (ret && enable_detach) + hns_roce_dca_stop_post(&ctx->dca_ctx, qp->dca_wqe.dcan); pthread_spin_unlock(&qp->rq.lock); pthread_spin_unlock(&qp->sq.lock); @@ -1368,6 +1376,9 @@ out: pthread_spin_unlock(&qp->sq.lock); + if (check_dca_detach_enable(qp)) + hns_roce_dca_stop_post(&ctx->dca_ctx, qp->dca_wqe.dcan); + if (ibvqp->state == IBV_QPS_ERR) { attr.qp_state = IBV_QPS_ERR; @@ -1475,6 +1486,9 @@ out: pthread_spin_unlock(&qp->rq.lock); + if (check_dca_detach_enable(qp)) + hns_roce_dca_stop_post(&ctx->dca_ctx, qp->dca_wqe.dcan); + if (ibvqp->state == IBV_QPS_ERR) { attr.qp_state = IBV_QPS_ERR; hns_roce_u_v2_modify_qp(ibvqp, &attr, IBV_QP_STATE); @@ -1563,8 +1577,9 @@ static int hns_roce_u_v2_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask) { int ret; - struct ibv_modify_qp cmd; + struct hns_roce_modify_qp_ex cmd_ex = {}; struct hns_roce_qp *hr_qp = to_hr_qp(qp); + struct hns_roce_modify_qp_ex_resp resp_ex = {}; bool flag = false; /* modify qp to error */ struct hns_roce_context *ctx = to_hr_ctx(qp->context); @@ -1574,7 +1589,9 @@ static int hns_roce_u_v2_modify_qp(struct ibv_qp *qp, struct 
ibv_qp_attr *attr, flag = true; } - ret = ibv_cmd_modify_qp(qp, attr, attr_mask, &cmd, sizeof(cmd)); + ret = ibv_cmd_modify_qp_ex(qp, attr, attr_mask, &cmd_ex.ibv_cmd, + sizeof(cmd_ex), &resp_ex.ibv_resp, + sizeof(resp_ex)); if (flag) { pthread_spin_unlock(&hr_qp->rq.lock); @@ -1584,8 +1601,11 @@ static int hns_roce_u_v2_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, if (ret) return ret; - if (attr_mask & IBV_QP_STATE) + if (attr_mask & IBV_QP_STATE) { qp->state = attr->qp_state; + if (attr->qp_state == IBV_QPS_RTR) + hr_qp->dca_wqe.dcan = resp_ex.drv_payload.dcan; + } if ((attr_mask & IBV_QP_STATE) && attr->qp_state == IBV_QPS_RESET) { if (qp->recv_cq) From patchwork Tue Jul 27 07:28:18 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Wenpeng Liang X-Patchwork-Id: 12402057 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-16.7 required=3.0 tests=BAYES_00, HEADER_FROM_DIFFERENT_DOMAINS,INCLUDES_CR_TRAILER,INCLUDES_PATCH, MAILING_LIST_MULTI,SPF_HELO_NONE,SPF_PASS,URIBL_BLOCKED,USER_AGENT_GIT autolearn=ham autolearn_force=no version=3.4.0 Received: from mail.kernel.org (mail.kernel.org [198.145.29.99]) by smtp.lore.kernel.org (Postfix) with ESMTP id E0046C19F35 for ; Tue, 27 Jul 2021 07:32:05 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id C8D8F61009 for ; Tue, 27 Jul 2021 07:32:05 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235731AbhG0HcE (ORCPT ); Tue, 27 Jul 2021 03:32:04 -0400 Received: from szxga03-in.huawei.com ([45.249.212.189]:12318 "EHLO szxga03-in.huawei.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S235741AbhG0HcC (ORCPT ); Tue, 27 Jul 2021 03:32:02 -0400 Received: from dggemv703-chm.china.huawei.com (unknown [172.30.72.57]) by szxga03-in.huawei.com (SkyGuard) with ESMTP id 4GYpK203r0z7yfR; Tue, 27 Jul 2021 15:27:18 +0800 (CST) Received: from dggpeml500017.china.huawei.com (7.185.36.243) by dggemv703-chm.china.huawei.com (10.3.19.46) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2176.2; Tue, 27 Jul 2021 15:31:56 +0800 Received: from localhost.localdomain (10.67.165.24) by dggpeml500017.china.huawei.com (7.185.36.243) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2176.2; Tue, 27 Jul 2021 15:31:56 +0800 From: Wenpeng Liang To: , CC: , Subject: [PATCH v2 rdma-core 07/10] libhns: Add direct verbs support to config DCA Date: Tue, 27 Jul 2021 15:28:18 +0800 Message-ID: <1627370901-10054-8-git-send-email-liangwenpeng@huawei.com> X-Mailer: git-send-email 2.8.1 In-Reply-To: <1627370901-10054-1-git-send-email-liangwenpeng@huawei.com> References: <1627370901-10054-1-git-send-email-liangwenpeng@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.67.165.24] X-ClientProxiedBy: dggems701-chm.china.huawei.com (10.3.19.178) To dggpeml500017.china.huawei.com (7.185.36.243) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: linux-rdma@vger.kernel.org From: Xi Wang Add two direct verbs to config DCA: 1. hnsdv_open_device() is used to config DCA memory pool. 2. hnsdv_create_qp() is used to create a DCA QP. 
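
A minimal usage sketch of these two direct verbs, based only on the hnsdv.h definitions added in this series; the prime-QP count below is an arbitrary illustrative value and error unwinding is omitted:

```c
/*
 * Minimal sketch: open an hns device with a DCA memory pool.  The value of
 * dca_prime_qps is illustrative only.
 */
#include <infiniband/verbs.h>
#include <infiniband/hnsdv.h>

static struct ibv_context *open_dca_context(struct ibv_device *dev)
{
	struct hnsdv_context_attr attr = {
		.flags = HNSDV_CONTEXT_FLAGS_DCA,	/* create the shared WQE memory pool */
		.comp_mask = HNSDV_CONTEXT_MASK_DCA_PRIME_QPS,
		.dca_prime_qps = 64,	/* QPs below this count sync DCA status via shared memory */
	};

	if (!hnsdv_is_supported(dev))
		return NULL;	/* not an hns device; fall back to ibv_open_device() */

	return hnsdv_open_device(dev, &attr);
}

/*
 * QPs created on this context then opt in to DCA individually through
 * hnsdv_create_qp() with HNSDV_QP_CREATE_ENABLE_DCA_MODE; see the
 * hnsdv_create_qp(3) page added later in this series.
 */
```
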
Signed-off-by: Xi Wang Signed-off-by: Wenpeng Liang --- debian/control | 2 +- debian/ibverbs-providers.install | 1 + debian/ibverbs-providers.lintian-overrides | 4 +- debian/ibverbs-providers.symbols | 6 ++ debian/libibverbs-dev.install | 4 ++ providers/hns/CMakeLists.txt | 9 ++- providers/hns/hns_roce_u.c | 89 ++++++++++++++++++++++++------ providers/hns/hns_roce_u.h | 2 + providers/hns/hns_roce_u_abi.h | 1 + providers/hns/hns_roce_u_verbs.c | 43 ++++++++++++--- providers/hns/hnsdv.h | 65 ++++++++++++++++++++++ providers/hns/libhns.map | 9 +++ redhat/rdma-core.spec | 5 +- suse/rdma-core.spec | 21 ++++++- 14 files changed, 231 insertions(+), 30 deletions(-) create mode 100644 providers/hns/hnsdv.h create mode 100644 providers/hns/libhns.map diff --git a/debian/control b/debian/control index a400707..4d40de8 100644 --- a/debian/control +++ b/debian/control @@ -94,7 +94,7 @@ Description: User space provider drivers for libibverbs - cxgb4: Chelsio T4 iWARP HCAs - efa: Amazon Elastic Fabric Adapter - hfi1verbs: Intel Omni-Path HFI - - hns: HiSilicon Hip06 SoC + - hns: HiSilicon+ Hip06 SoC - ipathverbs: QLogic InfiniPath HCAs - irdma: Intel Ethernet Connection RDMA - mlx4: Mellanox ConnectX-3 InfiniBand HCAs diff --git a/debian/ibverbs-providers.install b/debian/ibverbs-providers.install index 4f971fb..c6ecbbc 100644 --- a/debian/ibverbs-providers.install +++ b/debian/ibverbs-providers.install @@ -1,5 +1,6 @@ etc/libibverbs.d/ usr/lib/*/libefa.so.* usr/lib/*/libibverbs/lib*-rdmav*.so +usr/lib/*/libhns.so.* usr/lib/*/libmlx4.so.* usr/lib/*/libmlx5.so.* diff --git a/debian/ibverbs-providers.lintian-overrides b/debian/ibverbs-providers.lintian-overrides index 8a44d54..f6afb70 100644 --- a/debian/ibverbs-providers.lintian-overrides +++ b/debian/ibverbs-providers.lintian-overrides @@ -1,2 +1,2 @@ -# libefa, libmlx4 and libmlx5 are ibverbs provider that provides more functions. -ibverbs-providers: package-name-doesnt-match-sonames libefa1 libmlx4-1 libmlx5-1 +# libefa, libhns, libmlx4 and libmlx5 are ibverbs provider that provides more functions. 
+ibverbs-providers: package-name-doesnt-match-sonames libefa1 libhns-1 libmlx4-1 libmlx5-1 diff --git a/debian/ibverbs-providers.symbols b/debian/ibverbs-providers.symbols index 294832b..c048e82 100644 --- a/debian/ibverbs-providers.symbols +++ b/debian/ibverbs-providers.symbols @@ -141,3 +141,9 @@ libefa.so.1 ibverbs-providers #MINVER# efadv_create_qp_ex@EFA_1.1 26 efadv_query_device@EFA_1.1 26 efadv_query_ah@EFA_1.1 26 +libhns.so.1 ibverbs-providers #MINVER# +* Build-Depends-Package: libibverbs-dev + HNS_1.0@HNS_1.0 36 + hnsdv_is_supported@HNS_1.0 36 + hnsdv_open_device@HNS_1.0 36 + hnsdv_create_qp@HNS_1.0 36 diff --git a/debian/libibverbs-dev.install b/debian/libibverbs-dev.install index bc8caa5..7d6e6a2 100644 --- a/debian/libibverbs-dev.install +++ b/debian/libibverbs-dev.install @@ -1,5 +1,6 @@ usr/include/infiniband/arch.h usr/include/infiniband/efadv.h +usr/include/infiniband/hnsdv.h usr/include/infiniband/ib_user_ioctl_verbs.h usr/include/infiniband/mlx4dv.h usr/include/infiniband/mlx5_api.h @@ -14,6 +15,8 @@ usr/include/infiniband/verbs_api.h usr/lib/*/lib*-rdmav*.a usr/lib/*/libefa.a usr/lib/*/libefa.so +usr/lib/*/libhns.a +usr/lib/*/libhns.so usr/lib/*/libibverbs*.so usr/lib/*/libibverbs.a usr/lib/*/libmlx4.a @@ -21,6 +24,7 @@ usr/lib/*/libmlx4.so usr/lib/*/libmlx5.a usr/lib/*/libmlx5.so usr/lib/*/pkgconfig/libefa.pc +usr/lib/*/pkgconfig/libhns.pc usr/lib/*/pkgconfig/libibverbs.pc usr/lib/*/pkgconfig/libmlx4.pc usr/lib/*/pkgconfig/libmlx5.pc diff --git a/providers/hns/CMakeLists.txt b/providers/hns/CMakeLists.txt index 697dbd7..6e602f6 100644 --- a/providers/hns/CMakeLists.txt +++ b/providers/hns/CMakeLists.txt @@ -1,4 +1,5 @@ -rdma_provider(hns +rdma_shared_provider(hns libhns.map + 1 1.0.${PACKAGE_VERSION} hns_roce_u.c hns_roce_u_buf.c hns_roce_u_db.c @@ -6,3 +7,9 @@ rdma_provider(hns hns_roce_u_hw_v2.c hns_roce_u_verbs.c ) + +publish_headers(infiniband + hnsdv.h +) + +rdma_pkg_config("hns" "libibverbs" "${CMAKE_THREAD_LIBS_INIT}") diff --git a/providers/hns/hns_roce_u.c b/providers/hns/hns_roce_u.c index 3b13d0f..cbfdb74 100644 --- a/providers/hns/hns_roce_u.c +++ b/providers/hns/hns_roce_u.c @@ -133,23 +133,67 @@ static int mmap_dca(struct hns_roce_dca_ctx *dca_ctx, int cmd_fd, int page_size, return 0; } +bool hnsdv_is_supported(struct ibv_device *device) +{ + return is_hns_dev(device); +} + +struct ibv_context *hnsdv_open_device(struct ibv_device *device, + struct hnsdv_context_attr *attr) +{ + if (!is_hns_dev(device)) { + errno = EOPNOTSUPP; + return NULL; + } + + return verbs_open_device(device, attr); +} + +static void set_dca_pool_param(struct hnsdv_context_attr *attr, int page_size, + struct hns_roce_dca_ctx *ctx) +{ + if (attr->comp_mask & HNSDV_CONTEXT_MASK_DCA_UNIT_SIZE) + ctx->unit_size = align(attr->dca_unit_size, page_size); + else + ctx->unit_size = page_size * HNS_DCA_DEFAULT_UNIT_PAGES; + + /* The memory pool cannot be expanded, only init the DCA context. */ + if (ctx->unit_size == 0) + return; + + /* If not set, the memory pool can be expanded unlimitedly. */ + if (attr->comp_mask & HNSDV_CONTEXT_MASK_DCA_MAX_SIZE) + ctx->max_size = DIV_ROUND_UP(attr->dca_max_size, + ctx->unit_size) * ctx->unit_size; + else + ctx->max_size = HNS_DCA_MAX_MEM_SIZE; + + /* If not set, the memory pool cannot be shrunk. 
*/ + if (attr->comp_mask & HNSDV_CONTEXT_MASK_DCA_MIN_SIZE) + ctx->min_size = DIV_ROUND_UP(attr->dca_min_size, + ctx->unit_size) * ctx->unit_size; + else + ctx->min_size = HNS_DCA_MAX_MEM_SIZE; +} + static int init_dca_context(struct hns_roce_context *ctx, int cmd_fd, - int page_size, int max_qps, int mmap_size) + int page_size, struct hnsdv_context_attr *attr, + int max_qps, int mmap_size) { struct hns_roce_dca_ctx *dca_ctx = &ctx->dca_ctx; int ret; - if (!(ctx->cap_flags & HNS_ROCE_CAP_FLAG_DCA_MODE)) - return 0; - + dca_ctx->unit_size = 0; + dca_ctx->mem_cnt = 0; list_head_init(&dca_ctx->mem_list); ret = pthread_spin_init(&dca_ctx->lock, PTHREAD_PROCESS_PRIVATE); if (ret) return ret; - dca_ctx->unit_size = page_size * HNS_DCA_DEFAULT_UNIT_PAGES; - dca_ctx->max_size = HNS_DCA_MAX_MEM_SIZE; - dca_ctx->mem_cnt = 0; + if (!attr || !(attr->flags & HNSDV_CONTEXT_FLAGS_DCA)) + return 0; + + set_dca_pool_param(attr, page_size, dca_ctx); if (mmap_size > 0) { const unsigned int bits_per_qp = 2 * HNS_DCA_BITS_PER_STATUS; @@ -214,16 +258,22 @@ db_free: return -EINVAL; } -static void ucontext_set_cmd(struct hns_roce_alloc_ucontext *cmd, int page_size) +static void ucontext_set_cmd(struct hns_roce_alloc_ucontext *cmd, + struct hnsdv_context_attr *attr) { - cmd->comp = HNS_ROCE_ALLOC_UCTX_COMP_DCA_MAX_QPS; - cmd->dca_max_qps = page_size * 8 / 2 * HNS_DCA_BITS_PER_STATUS; + if (!attr || !(attr->flags & HNSDV_CONTEXT_FLAGS_DCA)) + return; + + if (attr->comp_mask & HNSDV_CONTEXT_MASK_DCA_PRIME_QPS) { + cmd->comp = HNS_ROCE_ALLOC_UCTX_COMP_DCA_MAX_QPS; + cmd->dca_max_qps = attr->dca_prime_qps; + } } -static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev, - int cmd_fd, - void *private_data) +static struct verbs_context * +hns_roce_alloc_context(struct ibv_device *ibdev, int cmd_fd, void *private_data) { + struct hnsdv_context_attr *ctx_attr = private_data; struct hns_roce_device *hr_dev = to_hr_dev(ibdev); struct hns_roce_alloc_ucontext_resp resp = {}; struct hns_roce_alloc_ucontext cmd = {}; @@ -236,7 +286,7 @@ static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev, if (!context) return NULL; - ucontext_set_cmd(&cmd, hr_dev->page_size); + ucontext_set_cmd(&cmd, ctx_attr); if (ibv_cmd_get_context(&context->ibv_ctx, &cmd.ibv_cmd, sizeof(cmd), &resp.ibv_resp, sizeof(resp))) goto err_free; @@ -286,8 +336,8 @@ static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev, verbs_set_ops(&context->ibv_ctx, &hns_common_ops); verbs_set_ops(&context->ibv_ctx, &hr_dev->u_hw->hw_ops); - if (init_dca_context(context, cmd_fd, hr_dev->page_size, resp.dca_qps, - resp.dca_mmap_size)) + if (init_dca_context(context, cmd_fd, hr_dev->page_size, ctx_attr, + resp.dca_qps, resp.dca_mmap_size)) goto err_free; if (hns_roce_mmap(hr_dev, context, cmd_fd)) @@ -349,4 +399,11 @@ static const struct verbs_device_ops hns_roce_dev_ops = { .uninit_device = hns_uninit_device, .alloc_context = hns_roce_alloc_context, }; + +bool is_hns_dev(struct ibv_device *device) +{ + struct verbs_device *verbs_device = verbs_get_device(device); + + return verbs_device->ops == &hns_roce_dev_ops; +} PROVIDER_DRIVER(hns, hns_roce_dev_ops); diff --git a/providers/hns/hns_roce_u.h b/providers/hns/hns_roce_u.h index fb7b864..086e285 100644 --- a/providers/hns/hns_roce_u.h +++ b/providers/hns/hns_roce_u.h @@ -430,6 +430,8 @@ static inline void clear_bit_unlock(atomic_bitmap_t *p, uint32_t nr) atomic_fetch_and(p, ~HNS_ROCE_BIT_MASK(nr)); } +bool is_hns_dev(struct ibv_device *device); + int 
hns_roce_u_query_device(struct ibv_context *context, const struct ibv_query_device_ex_input *input, struct ibv_device_attr_ex *attr, size_t attr_size); diff --git a/providers/hns/hns_roce_u_abi.h b/providers/hns/hns_roce_u_abi.h index 3a9aacf..5d41ea9 100644 --- a/providers/hns/hns_roce_u_abi.h +++ b/providers/hns/hns_roce_u_abi.h @@ -36,6 +36,7 @@ #include #include #include +#include "hnsdv.h" DECLARE_DRV_CMD(hns_roce_alloc_pd, IB_USER_VERBS_CMD_ALLOC_PD, empty, hns_roce_ib_alloc_pd_resp); diff --git a/providers/hns/hns_roce_u_verbs.c b/providers/hns/hns_roce_u_verbs.c index 015f417..59257e8 100644 --- a/providers/hns/hns_roce_u_verbs.c +++ b/providers/hns/hns_roce_u_verbs.c @@ -903,9 +903,21 @@ static int calc_qp_buff_size(struct hns_roce_device *hr_dev, return 0; } -static inline bool check_qp_support_dca(bool pool_en, enum ibv_qp_type qp_type) +static inline bool check_qp_support_dca(struct hns_roce_dca_ctx *dca_ctx, + struct ibv_qp_init_attr_ex *attr, + struct hnsdv_qp_init_attr *hns_attr) { - if (pool_en && (qp_type == IBV_QPT_RC || qp_type == IBV_QPT_XRC_SEND)) + /* DCA pool disable */ + if (!dca_ctx->unit_size) + return false; + + /* Unsupport type */ + if (attr->qp_type != IBV_QPT_RC && attr->qp_type != IBV_QPT_XRC_SEND) + return false; + + if (hns_attr && + (hns_attr->comp_mask & HNSDV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS) && + (hns_attr->create_flags & HNSDV_QP_CREATE_ENABLE_DCA_MODE)) return true; return false; @@ -923,6 +935,7 @@ static void qp_free_wqe(struct hns_roce_qp *qp) } static int qp_alloc_wqe(struct ibv_qp_init_attr_ex *attr, + struct hnsdv_qp_init_attr *hns_attr, struct hns_roce_qp *qp, struct hns_roce_context *ctx) { struct hns_roce_device *hr_dev = to_hr_dev(ctx->ibv_ctx.context.device); @@ -945,7 +958,7 @@ static int qp_alloc_wqe(struct ibv_qp_init_attr_ex *attr, goto err_alloc; } - if (check_qp_support_dca(ctx->dca_ctx.max_size != 0, attr->qp_type)) { + if (check_qp_support_dca(&ctx->dca_ctx, attr, hns_attr)) { /* when DCA is enabled, use a buffer list to store page addr */ qp->buf.buf = NULL; qp->dca_wqe.max_cnt = hr_hw_page_count(qp->buf_size); @@ -1185,6 +1198,7 @@ void hns_roce_free_qp_buf(struct hns_roce_qp *qp, struct hns_roce_context *ctx) } static int hns_roce_alloc_qp_buf(struct ibv_qp_init_attr_ex *attr, + struct hnsdv_qp_init_attr *hns_attr, struct hns_roce_qp *qp, struct hns_roce_context *ctx) { @@ -1194,7 +1208,7 @@ static int hns_roce_alloc_qp_buf(struct ibv_qp_init_attr_ex *attr, pthread_spin_init(&qp->rq.lock, PTHREAD_PROCESS_PRIVATE)) return -ENOMEM; - ret = qp_alloc_wqe(attr, qp, ctx); + ret = qp_alloc_wqe(attr, hns_attr, qp, ctx); if (ret) return ret; @@ -1206,7 +1220,8 @@ static int hns_roce_alloc_qp_buf(struct ibv_qp_init_attr_ex *attr, } static struct ibv_qp *create_qp(struct ibv_context *ibv_ctx, - struct ibv_qp_init_attr_ex *attr) + struct ibv_qp_init_attr_ex *attr, + struct hnsdv_qp_init_attr *hns_attr) { struct hns_roce_context *context = to_hr_ctx(ibv_ctx); struct hns_roce_qp *qp; @@ -1224,7 +1239,7 @@ static struct ibv_qp *create_qp(struct ibv_context *ibv_ctx, hns_roce_set_qp_params(attr, qp, context); - ret = hns_roce_alloc_qp_buf(attr, qp, context); + ret = hns_roce_alloc_qp_buf(attr, hns_attr, qp, context); if (ret) goto err_buf; @@ -1264,7 +1279,7 @@ struct ibv_qp *hns_roce_u_create_qp(struct ibv_pd *pd, attrx.comp_mask = IBV_QP_INIT_ATTR_PD; attrx.pd = pd; - qp = create_qp(pd->context, &attrx); + qp = create_qp(pd->context, &attrx, NULL); if (qp) memcpy(attr, &attrx, sizeof(*attr)); @@ -1274,7 +1289,19 @@ struct ibv_qp 
*hns_roce_u_create_qp(struct ibv_pd *pd, struct ibv_qp *hns_roce_u_create_qp_ex(struct ibv_context *context, struct ibv_qp_init_attr_ex *attr) { - return create_qp(context, attr); + return create_qp(context, attr, NULL); +} + +struct ibv_qp *hnsdv_create_qp(struct ibv_context *context, + struct ibv_qp_init_attr_ex *qp_attr, + struct hnsdv_qp_init_attr *hns_attr) +{ + if (!is_hns_dev(context->device)) { + errno = EOPNOTSUPP; + return NULL; + } + + return create_qp(context, qp_attr, hns_attr); } struct ibv_qp *hns_roce_u_open_qp(struct ibv_context *context, diff --git a/providers/hns/hnsdv.h b/providers/hns/hnsdv.h new file mode 100644 index 0000000..cfe1611 --- /dev/null +++ b/providers/hns/hnsdv.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ +/* + * Copyright (c) 2021 HiSilicon Limited. + */ + +#ifndef __HNSDV_H__ +#define __HNSDV_H__ + +#include +#include + +#include + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +enum hnsdv_context_attr_flags { + HNSDV_CONTEXT_FLAGS_DCA = 1 << 0, +}; + +enum hnsdv_context_comp_mask { + HNSDV_CONTEXT_MASK_DCA_PRIME_QPS = 1 << 0, + HNSDV_CONTEXT_MASK_DCA_UNIT_SIZE = 1 << 1, + HNSDV_CONTEXT_MASK_DCA_MAX_SIZE = 1 << 2, + HNSDV_CONTEXT_MASK_DCA_MIN_SIZE = 1 << 3, +}; + +struct hnsdv_context_attr { + uint64_t flags; /* Use enum hnsdv_context_attr_flags */ + uint64_t comp_mask; /* Use enum hnsdv_context_comp_mask */ + uint32_t dca_prime_qps; + uint32_t dca_unit_size; + uint64_t dca_max_size; + uint64_t dca_min_size; +}; + +bool hnsdv_is_supported(struct ibv_device *device); +struct ibv_context *hnsdv_open_device(struct ibv_device *device, + struct hnsdv_context_attr *attr); + +enum hnsdv_qp_create_flags { + HNSDV_QP_CREATE_ENABLE_DCA_MODE = 1 << 0, +}; + +enum hnsdv_qp_init_attr_mask { + HNSDV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS = 1 << 0, +}; + +struct hnsdv_qp_init_attr { + uint64_t comp_mask; /* Use enum hnsdv_qp_init_attr_mask */ + uint32_t create_flags; /* Use enum hnsdv_qp_create_flags */ +}; + +struct ibv_qp *hnsdv_create_qp(struct ibv_context *context, + struct ibv_qp_init_attr_ex *qp_attr, + struct hnsdv_qp_init_attr *hns_qp_attr); + +#ifdef __cplusplus +} +#endif + +#endif /* __HNSDV_H__ */ diff --git a/providers/hns/libhns.map b/providers/hns/libhns.map new file mode 100644 index 0000000..aed491c --- /dev/null +++ b/providers/hns/libhns.map @@ -0,0 +1,9 @@ +/* Export symbols should be added below according to + Documentation/versioning.md document. 
*/ +HNS_1.0 { + global: + hnsdv_is_supported; + hnsdv_open_device; + hnsdv_create_qp; + local: *; +}; diff --git a/redhat/rdma-core.spec b/redhat/rdma-core.spec index 1aecbaa..0d99e1c 100644 --- a/redhat/rdma-core.spec +++ b/redhat/rdma-core.spec @@ -151,6 +151,8 @@ Provides: libefa = %{version}-%{release} Obsoletes: libefa < %{version}-%{release} Provides: libhfi1 = %{version}-%{release} Obsoletes: libhfi1 < %{version}-%{release} +Provides: libhns = %{version}-%{release} +Obsoletes: libhns < %{version}-%{release} Provides: libipathverbs = %{version}-%{release} Obsoletes: libipathverbs < %{version}-%{release} Provides: libirdma = %{version}-%{release} @@ -178,7 +180,7 @@ Device-specific plug-in ibverbs userspace drivers are included: - libcxgb4: Chelsio T4 iWARP HCA - libefa: Amazon Elastic Fabric Adapter - libhfi1: Intel Omni-Path HFI -- libhns: HiSilicon Hip06 SoC +- libhns: HiSilicon Hip06+ SoC - libipathverbs: QLogic InfiniPath HCA - libirdma: Intel Ethernet Connection RDMA - libmlx4: Mellanox ConnectX-3 InfiniBand HCA @@ -563,6 +565,7 @@ fi %dir %{_sysconfdir}/libibverbs.d %dir %{_libdir}/libibverbs %{_libdir}/libefa.so.* +%{_libdir}/libhns.so.* %{_libdir}/libibverbs*.so.* %{_libdir}/libibverbs/*.so %{_libdir}/libmlx5.so.* diff --git a/suse/rdma-core.spec b/suse/rdma-core.spec index b0967f3..6ee1767 100644 --- a/suse/rdma-core.spec +++ b/suse/rdma-core.spec @@ -30,6 +30,7 @@ License: GPL-2.0-only OR BSD-2-Clause Group: Productivity/Networking/Other %define efa_so_major 1 +%define hns_so_major 1 %define verbs_so_major 1 %define rdmacm_so_major 1 %define umad_so_major 3 @@ -39,6 +40,7 @@ Group: Productivity/Networking/Other %define mad_major 5 %define efa_lname libefa%{efa_so_major} +%define hns_lname libhns%{hns_so_major} %define verbs_lname libibverbs%{verbs_so_major} %define rdmacm_lname librdmacm%{rdmacm_so_major} %define umad_lname libibumad%{umad_so_major} @@ -145,6 +147,7 @@ Requires: %{umad_lname} = %{version}-%{release} Requires: %{verbs_lname} = %{version}-%{release} %if 0%{?dma_coherent} Requires: %{efa_lname} = %{version}-%{release} +Requires: %{hns_lname} = %{version}-%{release} Requires: %{mlx4_lname} = %{version}-%{release} Requires: %{mlx5_lname} = %{version}-%{release} %endif @@ -185,6 +188,7 @@ Requires: %{name}%{?_isa} = %{version}-%{release} Obsoletes: libcxgb4-rdmav2 < %{version}-%{release} Obsoletes: libefa-rdmav2 < %{version}-%{release} Obsoletes: libhfi1verbs-rdmav2 < %{version}-%{release} +Obsoletes: libhns-rdmav2 < %{version}-%{release} Obsoletes: libipathverbs-rdmav2 < %{version}-%{release} Obsoletes: libirdma-rdmav2 < %{version}-%{release} Obsoletes: libmlx4-rdmav2 < %{version}-%{release} @@ -194,6 +198,7 @@ Obsoletes: libocrdma-rdmav2 < %{version}-%{release} Obsoletes: librxe-rdmav2 < %{version}-%{release} %if 0%{?dma_coherent} Requires: %{efa_lname} = %{version}-%{release} +Requires: %{hns_lname} = %{version}-%{release} Requires: %{mlx4_lname} = %{version}-%{release} Requires: %{mlx5_lname} = %{version}-%{release} %endif @@ -212,7 +217,7 @@ Device-specific plug-in ibverbs userspace drivers are included: - libcxgb4: Chelsio T4 iWARP HCA - libefa: Amazon Elastic Fabric Adapter - libhfi1: Intel Omni-Path HFI -- libhns: HiSilicon Hip06 SoC +- libhns: HiSilicon Hip06+ SoC - libipathverbs: QLogic InfiniPath HCA - libirdma: Intel Ethernet Connection RDMA - libmlx4: Mellanox ConnectX-3 InfiniBand HCA @@ -239,6 +244,13 @@ Group: System/Libraries %description -n %efa_lname This package contains the efa runtime library. 
+%package -n %hns_lname +Summary: HNS runtime library +Group: System/Libraries + +%description -n %hns_lname +This package contains the hns runtime library. + %package -n %mlx4_lname Summary: MLX4 runtime library Group: System/Libraries @@ -482,6 +494,9 @@ rm -rf %{buildroot}/%{_sbindir}/srp_daemon.sh %post -n %efa_lname -p /sbin/ldconfig %postun -n %efa_lname -p /sbin/ldconfig +%post -n %hns_lname -p /sbin/ldconfig +%postun -n %hns_lname -p /sbin/ldconfig + %post -n %mlx4_lname -p /sbin/ldconfig %postun -n %mlx4_lname -p /sbin/ldconfig @@ -664,6 +679,10 @@ rm -rf %{buildroot}/%{_sbindir}/srp_daemon.sh %defattr(-,root,root) %{_libdir}/libefa*.so.* +%files -n %hns_lname +%defattr(-,root,root) +%{_libdir}/libhns*.so.* + %files -n %mlx4_lname %defattr(-,root,root) %{_libdir}/libmlx4*.so.* From patchwork Tue Jul 27 07:28:19 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Wenpeng Liang X-Patchwork-Id: 12402055 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-16.7 required=3.0 tests=BAYES_00, HEADER_FROM_DIFFERENT_DOMAINS,INCLUDES_CR_TRAILER,INCLUDES_PATCH, MAILING_LIST_MULTI,SPF_HELO_NONE,SPF_PASS,URIBL_BLOCKED,USER_AGENT_GIT autolearn=ham autolearn_force=no version=3.4.0 Received: from mail.kernel.org (mail.kernel.org [198.145.29.99]) by smtp.lore.kernel.org (Postfix) with ESMTP id 7D35FC19F33 for ; Tue, 27 Jul 2021 07:32:05 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id 63D0561009 for ; Tue, 27 Jul 2021 07:32:05 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235810AbhG0HcD (ORCPT ); Tue, 27 Jul 2021 03:32:03 -0400 Received: from szxga03-in.huawei.com ([45.249.212.189]:12319 "EHLO szxga03-in.huawei.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S235746AbhG0HcC (ORCPT ); Tue, 27 Jul 2021 03:32:02 -0400 Received: from dggemv703-chm.china.huawei.com (unknown [172.30.72.57]) by szxga03-in.huawei.com (SkyGuard) with ESMTP id 4GYpK20LdHz7yfL; Tue, 27 Jul 2021 15:27:18 +0800 (CST) Received: from dggpeml500017.china.huawei.com (7.185.36.243) by dggemv703-chm.china.huawei.com (10.3.19.46) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2176.2; Tue, 27 Jul 2021 15:31:57 +0800 Received: from localhost.localdomain (10.67.165.24) by dggpeml500017.china.huawei.com (7.185.36.243) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2176.2; Tue, 27 Jul 2021 15:31:56 +0800 From: Wenpeng Liang To: , CC: , Subject: [PATCH v2 rdma-core 08/10] libhns: Add man pages to introduce DCA feature Date: Tue, 27 Jul 2021 15:28:19 +0800 Message-ID: <1627370901-10054-9-git-send-email-liangwenpeng@huawei.com> X-Mailer: git-send-email 2.8.1 In-Reply-To: <1627370901-10054-1-git-send-email-liangwenpeng@huawei.com> References: <1627370901-10054-1-git-send-email-liangwenpeng@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.67.165.24] X-ClientProxiedBy: dggems701-chm.china.huawei.com (10.3.19.178) To dggpeml500017.china.huawei.com (7.185.36.243) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: linux-rdma@vger.kernel.org From: Xi Wang Document hns DCA feature and related direct verbs. 
Signed-off-by: Xi Wang Signed-off-by: Wenpeng Liang --- CMakeLists.txt | 1 + debian/ibverbs-providers.install | 2 +- debian/libibverbs-dev.install | 2 + providers/hns/man/CMakeLists.txt | 7 +++ providers/hns/man/hns_dca.7.md | 35 +++++++++++++++ providers/hns/man/hnsdv.7.md | 34 ++++++++++++++ providers/hns/man/hnsdv_create_qp.3.md | 69 ++++++++++++++++++++++++++++ providers/hns/man/hnsdv_is_supported.3.md | 39 ++++++++++++++++ providers/hns/man/hnsdv_open_device.3.md | 74 +++++++++++++++++++++++++++++++ redhat/rdma-core.spec | 2 + 10 files changed, 264 insertions(+), 1 deletion(-) create mode 100644 providers/hns/man/CMakeLists.txt create mode 100644 providers/hns/man/hns_dca.7.md create mode 100644 providers/hns/man/hnsdv.7.md create mode 100644 providers/hns/man/hnsdv_create_qp.3.md create mode 100644 providers/hns/man/hnsdv_is_supported.3.md create mode 100644 providers/hns/man/hnsdv_open_device.3.md diff --git a/CMakeLists.txt b/CMakeLists.txt index 743a109..da4b435 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -669,6 +669,7 @@ add_subdirectory(providers/cxgb4) # NO SPARSE add_subdirectory(providers/efa) add_subdirectory(providers/efa/man) add_subdirectory(providers/hns) +add_subdirectory(providers/hns/man) add_subdirectory(providers/irdma) add_subdirectory(providers/mlx4) add_subdirectory(providers/mlx4/man) diff --git a/debian/ibverbs-providers.install b/debian/ibverbs-providers.install index c6ecbbc..c4c4c11 100644 --- a/debian/ibverbs-providers.install +++ b/debian/ibverbs-providers.install @@ -1,6 +1,6 @@ etc/libibverbs.d/ usr/lib/*/libefa.so.* -usr/lib/*/libibverbs/lib*-rdmav*.so usr/lib/*/libhns.so.* +usr/lib/*/libibverbs/lib*-rdmav*.so usr/lib/*/libmlx4.so.* usr/lib/*/libmlx5.so.* diff --git a/debian/libibverbs-dev.install b/debian/libibverbs-dev.install index 7d6e6a2..2c2feaf 100644 --- a/debian/libibverbs-dev.install +++ b/debian/libibverbs-dev.install @@ -29,11 +29,13 @@ usr/lib/*/pkgconfig/libibverbs.pc usr/lib/*/pkgconfig/libmlx4.pc usr/lib/*/pkgconfig/libmlx5.pc usr/share/man/man3/efadv_*.3 +usr/share/man/man3/hns*.3 usr/share/man/man3/ibv_* usr/share/man/man3/mbps_to_ibv_rate.3 usr/share/man/man3/mlx4dv_*.3 usr/share/man/man3/mlx5dv_*.3 usr/share/man/man3/mult_to_ibv_rate.3 usr/share/man/man7/efadv.7 +usr/share/man/man7/hns*.7 usr/share/man/man7/mlx4dv.7 usr/share/man/man7/mlx5dv.7 diff --git a/providers/hns/man/CMakeLists.txt b/providers/hns/man/CMakeLists.txt new file mode 100644 index 0000000..b375a65 --- /dev/null +++ b/providers/hns/man/CMakeLists.txt @@ -0,0 +1,7 @@ +rdma_man_pages( + hnsdv_is_supported.3.md + hnsdv_open_device.3.md + hnsdv_create_qp.3.md + hnsdv.7 + hns_dca.7 +) diff --git a/providers/hns/man/hns_dca.7.md b/providers/hns/man/hns_dca.7.md new file mode 100644 index 0000000..2cdd90a --- /dev/null +++ b/providers/hns/man/hns_dca.7.md @@ -0,0 +1,35 @@ +--- +layout: page +title: DCA +section: 7 +tagline: DCA +date: 2021-07-13 +header: "HNS DCA Manual" +footer: hns +--- + +# NAME + +DCA - Dynamic Context Attachment + +This allows all WQEs to share a memory pool that belongs to the user context. + +# DESCRIPTION + +The DCA feature aims to reduce memory consumption by sharing WQE memory for QPs working in sparse traffic scenarios. + +The DCA memory pool consists of multiple umem objects. Each umem object is a buffer allocated in user driver and register into kernel driver. The ULP need to setup the memory pool's parameter by calling hnsdv_open_device() and the driver will expand or shrink the memory pool based on this parameter. 
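
As an illustration of the pool parameters mentioned above, the attributes passed to hnsdv_open_device() might be filled as in the sketch below; the sizes are arbitrary examples, and the threshold behaviour follows the field descriptions in hnsdv_open_device(3):

```c
/* Illustrative pool geometry.  All three sizes are optional; the driver
 * aligns the unit to the system page size and rounds the bounds up to
 * multiples of the unit size. */
struct hnsdv_context_attr attr = {
	.flags = HNSDV_CONTEXT_FLAGS_DCA,
	.comp_mask = HNSDV_CONTEXT_MASK_DCA_UNIT_SIZE |
		     HNSDV_CONTEXT_MASK_DCA_MAX_SIZE |
		     HNSDV_CONTEXT_MASK_DCA_MIN_SIZE,
	.dca_unit_size = 64 * 1024,	  /* each umem added to the pool is 64 KB */
	.dca_max_size  = 4 * 1024 * 1024, /* stop expanding once the pool reaches 4 MB */
	.dca_min_size  = 256 * 1024,	  /* shrink only while more than 256 KB is free */
};
/* attr is then passed to hnsdv_open_device(); see hnsdv_open_device(3). */
```
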
+ +When a QP's DCA was enabled by setting create flags through ibv_create_qp_ex(), the WQE buffer will not be allocated directly until the ULP invokes the ibv_post_xxx(). If the memory in the pool is insufficient and the capacity expansion conditions are met, the driver will add new umem objects to the pool. + +When all WQEs of a QP are not used by the ROCEE after ibv_poll_cq() or ibv_modify_qp() are invoked, the WQE buffer will be reclaimed to the DCA memory pool. If the free memory in the pool meets the shrink conditions, the driver will delete the unused umem object. + +# SEE ALSO + +*hnsdv_open_device(3)*, *hnsdv_create_qp(3)* + +# AUTHORS + +Xi Wang + +Weihang Li diff --git a/providers/hns/man/hnsdv.7.md b/providers/hns/man/hnsdv.7.md new file mode 100644 index 0000000..dd47379 --- /dev/null +++ b/providers/hns/man/hnsdv.7.md @@ -0,0 +1,34 @@ +--- +layout: page +title: HNSDV +section: 7 +tagline: Verbs +date: 2021-07-13 +header: "HNS Direct Verbs Manual" +footer: hns +--- + +# NAME + +hnsdv - Direct verbs for hns devices + +This provides low level access to hns devices to perform direct operations, +without general branching performed by libibverbs. + +# DESCRIPTION + +The libibverbs API is an abstract one. It is agnostic to any underlying provider specific implementation. While this abstraction has the advantage of user applications portability, it has a performance penalty. For some applications optimizing performance is more important than portability. + +The hns direct verbs API is intended for such applications. It exposes hns specific low level operations, allowing the application to bypass the libibverbs API. + +The direct include of hnsdv.h together with linkage to hns library will allow usage of this new interface. + +# SEE ALSO + +**verbs**(7) + +# AUTHORS + +Xi Wang + +Weihang Li diff --git a/providers/hns/man/hnsdv_create_qp.3.md b/providers/hns/man/hnsdv_create_qp.3.md new file mode 100644 index 0000000..27d6db1 --- /dev/null +++ b/providers/hns/man/hnsdv_create_qp.3.md @@ -0,0 +1,69 @@ +--- +layout: page +title: hnsdv_create_qp +section: 3 +tagline: Verbs +date: 2021-07-13 +header: "hns Programmer's Manual" +footer: hns +--- + +# NAME + +hnsdv_create_qp - creates a queue pair (QP) + +# SYNOPSIS + +```c +#include + +struct ibv_qp *hnsdv_create_qp(struct ibv_context *context, + struct ibv_qp_init_attr_ex *attr, + struct hnsdv_qp_init_attr *hns_attr) +``` + + +# DESCRIPTION + +**hnsdv_create_qp()** creates a queue pair (QP) with specific driver properties. + +# ARGUMENTS + +Please see *ibv_create_qp_ex(3)* man page for *context* and *attr*. + +## hns_attr + +```c +struct hnsdv_qp_init_attr { + uint64_t comp_mask; + uint32_t create_flags; +}; +``` + +*comp_mask* +: Bitmask specifying what fields in the structure are valid: + HNSDV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS: + valid values in *create_flags* + +*create_flags* +: A bitwise OR of the various values described below. + + HNSDV_QP_CREATE_DYNAMIC_CONTEXT_ATTACH : + Enable DCA feature for QP, the WQE buffer will allocate + from DCA memory pool when calling ibv_post_send() or + ibv_post_recv(). + +# RETURN VALUE + +**hnsdv_create_qp()** +returns a pointer to the created QP, on error NULL will be returned and errno will be set. 
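
A minimal call might look like the sketch below, assuming a DCA-enabled context *ctx* plus *pd* and *cq* prepared as for ibv_create_qp_ex(3), and using the create flag name as defined in hnsdv.h:

```c
struct hnsdv_qp_init_attr hns_attr = {
	.comp_mask    = HNSDV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS,
	.create_flags = HNSDV_QP_CREATE_ENABLE_DCA_MODE, /* WQE buffer comes from the DCA pool */
};
struct ibv_qp_init_attr_ex attr_ex = {
	.qp_type   = IBV_QPT_RC,		/* DCA covers RC and XRC_SEND QPs */
	.comp_mask = IBV_QP_INIT_ATTR_PD,
	.pd        = pd,
	.send_cq   = cq,
	.recv_cq   = cq,
	.cap       = { .max_send_wr = 16, .max_recv_wr = 16,
		       .max_send_sge = 1, .max_recv_sge = 1 },
};
struct ibv_qp *qp = hnsdv_create_qp(ctx, &attr_ex, &hns_attr);
/* On failure qp is NULL and errno is set. */
```
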
+ +# SEE ALSO + +**ibv_create_qp_ex**(3), + +# AUTHOR + +Xi Wang + +Weihang Li diff --git a/providers/hns/man/hnsdv_is_supported.3.md b/providers/hns/man/hnsdv_is_supported.3.md new file mode 100644 index 0000000..b5f00bd --- /dev/null +++ b/providers/hns/man/hnsdv_is_supported.3.md @@ -0,0 +1,39 @@ +--- +layout: page +title: hnsdv_is_supported +section: 3 +tagline: Verbs +--- + +# NAME + +hnsdv_is_supported - Check whether an RDMA device implemented by the hns provider + +# SYNOPSIS + +```c +#include + +bool hnsdv_is_supported(struct ibv_device *device); +``` + +# DESCRIPTION + +hnsdv functions may be called only if this function returns true for the RDMA device. + +# ARGUMENTS + +*device* +: RDMA device to check. + +# RETURN VALUE + +Returns true if device is implemented by hns provider. + +# SEE ALSO + +*hnsdv(7)* + +# AUTHOR + +Xi Wang diff --git a/providers/hns/man/hnsdv_open_device.3.md b/providers/hns/man/hnsdv_open_device.3.md new file mode 100644 index 0000000..c2d8262 --- /dev/null +++ b/providers/hns/man/hnsdv_open_device.3.md @@ -0,0 +1,74 @@ +--- +layout: page +title: hnsdv_open_device +section: 3 +tagline: Verbs +--- + +# NAME + +hnsdv_open_device - Open an RDMA device context for the hns provider + +# SYNOPSIS + +```c +#include + +struct ibv_context * +hnsdv_open_device(struct ibv_device *device, struct hnsdv_context_attr *attr); +``` + +# DESCRIPTION + +Open an RDMA device context with specific hns provider attributes. + +# ARGUMENTS + +*device* +: RDMA device to open. + +## *attr* argument + +```c +struct hnsdv_context_attr { + uint64_t flags; + uint64_t comp_mask; + uint32_t dca_prime_qps; + uint32_t dca_unit_size; + uint64_t dca_max_size; + uint64_t dca_min_size; +}; +``` + +*flags* +: A bitwise OR of the various values described below. + + *HNSDV_CONTEXT_FLAGS_DCA*: + Create a DCA memory pool to support all QPs share it. + +*comp_mask* +: Bitmask specifying what fields in the structure are valid + +*dca_prime_qps* +: The DCA status will sync by shared memory when DCA num is small than prime qps . + +*dca_unit_size* +: The unit size when adding a new buffer to DCA memory pool. + +*dca_max_size* +: The DCA pool will be expanded when the total size is smaller than maximal size. + +*dca_min_size* +: The DCA pool will be shrunk when the free size is bigger than minimal size. + +# RETURN VALUE + +Returns a pointer to the allocated device context, or NULL if the request fails. 
+ +# SEE ALSO + +*hnsdv_create_qp(3)*, *hns_dca(7)* + +# AUTHOR + +Xi Wang diff --git a/redhat/rdma-core.spec b/redhat/rdma-core.spec index 0d99e1c..9a24552 100644 --- a/redhat/rdma-core.spec +++ b/redhat/rdma-core.spec @@ -440,6 +440,7 @@ fi %{_libdir}/lib*.so %{_libdir}/pkgconfig/*.pc %{_mandir}/man3/efadv* +%{_mandir}/man3/hns* %{_mandir}/man3/ibv_* %{_mandir}/man3/rdma* %{_mandir}/man3/umad* @@ -448,6 +449,7 @@ fi %{_mandir}/man3/mlx5dv* %{_mandir}/man3/mlx4dv* %{_mandir}/man7/efadv* +%{_mandir}/man7/hns* %{_mandir}/man7/mlx5dv* %{_mandir}/man7/mlx4dv* %{_mandir}/man3/ibnd_* From patchwork Tue Jul 27 07:28:20 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Wenpeng Liang X-Patchwork-Id: 12402045 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-16.7 required=3.0 tests=BAYES_00, HEADER_FROM_DIFFERENT_DOMAINS,INCLUDES_CR_TRAILER,INCLUDES_PATCH, MAILING_LIST_MULTI,SPF_HELO_NONE,SPF_PASS,URIBL_BLOCKED,USER_AGENT_GIT autolearn=ham autolearn_force=no version=3.4.0 Received: from mail.kernel.org (mail.kernel.org [198.145.29.99]) by smtp.lore.kernel.org (Postfix) with ESMTP id B3277C43216 for ; Tue, 27 Jul 2021 07:32:02 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id 9F2F961009 for ; Tue, 27 Jul 2021 07:32:02 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235504AbhG0HcB (ORCPT ); Tue, 27 Jul 2021 03:32:01 -0400 Received: from szxga03-in.huawei.com ([45.249.212.189]:12315 "EHLO szxga03-in.huawei.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S235659AbhG0Hb6 (ORCPT ); Tue, 27 Jul 2021 03:31:58 -0400 Received: from dggemv711-chm.china.huawei.com (unknown [172.30.72.56]) by szxga03-in.huawei.com (SkyGuard) with ESMTP id 4GYpJy61vBz7ydR; Tue, 27 Jul 2021 15:27:14 +0800 (CST) Received: from dggpeml500017.china.huawei.com (7.185.36.243) by dggemv711-chm.china.huawei.com (10.1.198.66) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2176.2; Tue, 27 Jul 2021 15:31:57 +0800 Received: from localhost.localdomain (10.67.165.24) by dggpeml500017.china.huawei.com (7.185.36.243) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2176.2; Tue, 27 Jul 2021 15:31:57 +0800 From: Wenpeng Liang To: , CC: , Subject: [PATCH v2 rdma-core 09/10] pyverbs/hns: Initial support for HNS direct verbs Date: Tue, 27 Jul 2021 15:28:20 +0800 Message-ID: <1627370901-10054-10-git-send-email-liangwenpeng@huawei.com> X-Mailer: git-send-email 2.8.1 In-Reply-To: <1627370901-10054-1-git-send-email-liangwenpeng@huawei.com> References: <1627370901-10054-1-git-send-email-liangwenpeng@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.67.165.24] X-ClientProxiedBy: dggems701-chm.china.huawei.com (10.3.19.178) To dggpeml500017.china.huawei.com (7.185.36.243) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: linux-rdma@vger.kernel.org From: Xi Wang Add initial support for HNS direct verbs. For now, DCA direct verbs are supported. 
Signed-off-by: Xi Wang Signed-off-by: Wenpeng Liang --- pyverbs/CMakeLists.txt | 1 + pyverbs/providers/hns/CMakeLists.txt | 7 ++ pyverbs/providers/hns/__init__.pxd | 0 pyverbs/providers/hns/__init__.py | 0 pyverbs/providers/hns/hns_enums.pyx | 1 + pyverbs/providers/hns/hnsdv.pxd | 25 ++++++ pyverbs/providers/hns/hnsdv.pyx | 158 ++++++++++++++++++++++++++++++++++ pyverbs/providers/hns/hnsdv_enums.pxd | 21 +++++ pyverbs/providers/hns/libhns.pxd | 28 ++++++ 9 files changed, 241 insertions(+) create mode 100644 pyverbs/providers/hns/CMakeLists.txt create mode 100644 pyverbs/providers/hns/__init__.pxd create mode 100644 pyverbs/providers/hns/__init__.py create mode 120000 pyverbs/providers/hns/hns_enums.pyx create mode 100644 pyverbs/providers/hns/hnsdv.pxd create mode 100644 pyverbs/providers/hns/hnsdv.pyx create mode 100644 pyverbs/providers/hns/hnsdv_enums.pxd create mode 100644 pyverbs/providers/hns/libhns.pxd diff --git a/pyverbs/CMakeLists.txt b/pyverbs/CMakeLists.txt index c532b4c..80f6e2b 100644 --- a/pyverbs/CMakeLists.txt +++ b/pyverbs/CMakeLists.txt @@ -44,4 +44,5 @@ rdma_python_module(pyverbs if (HAVE_COHERENT_DMA) add_subdirectory(providers/mlx5) add_subdirectory(providers/efa) +add_subdirectory(providers/hns) endif() diff --git a/pyverbs/providers/hns/CMakeLists.txt b/pyverbs/providers/hns/CMakeLists.txt new file mode 100644 index 0000000..bb60f16 --- /dev/null +++ b/pyverbs/providers/hns/CMakeLists.txt @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) +# Copyright (c) 2021 HiSilicon Limited. All rights reserved. + +rdma_cython_module(pyverbs/providers/hns hns + hns_enums.pyx + hnsdv.pyx +) diff --git a/pyverbs/providers/hns/__init__.pxd b/pyverbs/providers/hns/__init__.pxd new file mode 100644 index 0000000..e69de29 diff --git a/pyverbs/providers/hns/__init__.py b/pyverbs/providers/hns/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/pyverbs/providers/hns/hns_enums.pyx b/pyverbs/providers/hns/hns_enums.pyx new file mode 120000 index 0000000..33b3389 --- /dev/null +++ b/pyverbs/providers/hns/hns_enums.pyx @@ -0,0 +1 @@ +hnsdv_enums.pxd \ No newline at end of file diff --git a/pyverbs/providers/hns/hnsdv.pxd b/pyverbs/providers/hns/hnsdv.pxd new file mode 100644 index 0000000..b23fab8 --- /dev/null +++ b/pyverbs/providers/hns/hnsdv.pxd @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) +# Copyright (c) 2021 HiSilicon Limited. All rights reserved. + +#cython: language_level=3 + +from pyverbs.base cimport PyverbsObject +cimport pyverbs.providers.hns.libhns as dv +from pyverbs.device cimport Context +from pyverbs.qp cimport QP, QPEx + + +cdef class HnsContext(Context): + cpdef close(self) + +cdef class HnsDVContextAttr(PyverbsObject): + cdef dv.hnsdv_context_attr attr + +cdef class HnsDVContext(PyverbsObject): + pass + +cdef class HnsDVQPInitAttr(PyverbsObject): + cdef dv.hnsdv_qp_init_attr attr + +cdef class HnsQP(QPEx): + pass diff --git a/pyverbs/providers/hns/hnsdv.pyx b/pyverbs/providers/hns/hnsdv.pyx new file mode 100644 index 0000000..4642255 --- /dev/null +++ b/pyverbs/providers/hns/hnsdv.pyx @@ -0,0 +1,158 @@ +# SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) +# Copyright (c) 2021 HiSilicon Limited. All rights reserved. 
+ +from libc.stdint cimport uintptr_t, uint8_t, uint16_t, uint32_t +import logging + +from pyverbs.pyverbs_error import PyverbsUserError + +cimport pyverbs.providers.hns.hnsdv_enums as dve +cimport pyverbs.providers.hns.libhns as dv + +from pyverbs.qp cimport QPInitAttrEx, QPEx +from pyverbs.base import PyverbsRDMAErrno +from pyverbs.base cimport close_weakrefs +from pyverbs.pd cimport PD + +cdef class HnsDVContextAttr(PyverbsObject): + """ + Represent hnsdv_context_attr struct. This class is used to open an hns + device. + """ + def __init__(self, flags=0, comp_mask=0, dca_qps=1): + super().__init__() + self.attr.flags = flags + self.attr.comp_mask = comp_mask + if dca_qps > 0: + self.attr.comp_mask |= dve.HNSDV_CONTEXT_MASK_DCA_PRIME_QPS + self.attr.dca_prime_qps = dca_qps + + def __str__(self): + print_format = '{:20}: {:<20}\n' + return print_format.format('flags', self.attr.flags) +\ + print_format.format('comp_mask', self.attr.comp_mask) + + @property + def flags(self): + return self.attr.flags + @flags.setter + def flags(self, val): + self.attr.flags = val + + @property + def comp_mask(self): + return self.attr.comp_mask + @comp_mask.setter + def comp_mask(self, val): + self.attr.comp_mask = val + +cdef class HnsContext(Context): + """ + Represent hns context, which extends Context. + """ + def __init__(self, HnsDVContextAttr attr not None, name=''): + """ + Open an hns device using the given attributes + :param name: The RDMA device's name (used by parent class) + :param attr: hns-specific device attributes + :return: None + """ + super().__init__(name=name, attr=attr) + if not dv.hnsdv_is_supported(self.device): + raise PyverbsUserError('This is not an HNS device') + self.context = dv.hnsdv_open_device(self.device, &attr.attr) + if self.context == NULL: + raise PyverbsRDMAErrno('Failed to open hns context on {dev}' + .format(dev=self.name)) + + def __dealloc__(self): + self.close() + + cpdef close(self): + if self.context != NULL: + super(HnsContext, self).close() + +cdef class HnsDVQPInitAttr(PyverbsObject): + """ + Represents hnsdv_qp_init_attr struct, initial attributes used for hns QP + creation. + """ + def __init__(self, comp_mask=0, create_flags=0): + """ + Initializes an HnsDVQPInitAttr object with the given user data. + :param comp_mask: A bitmask specifying which fields are valid + :param create_flags: A bitwise OR of hnsdv_qp_create_flags + :return: An initialized HnsDVQPInitAttr object + """ + super().__init__() + self.attr.comp_mask = comp_mask + self.attr.create_flags = create_flags + + def __str__(self): + print_format = '{:20}: {:<20}\n' + return print_format.format('Comp mask', + qp_comp_mask_to_str(self.attr.comp_mask)) +\ + print_format.format('Create flags', + qp_create_flags_to_str(self.attr.create_flags)) + + @property + def comp_mask(self): + return self.attr.comp_mask + @comp_mask.setter + def comp_mask(self, val): + self.attr.comp_mask = val + + @property + def create_flags(self): + return self.attr.create_flags + @create_flags.setter + def create_flags(self, val): + self.attr.create_flags = val + +cdef class HnsQP(QPEx): + def __init__(self, Context context, QPInitAttrEx init_attr, + HnsDVQPInitAttr dv_init_attr): + """ + Initializes an hns QP according to the user-provided data. + :param context: Context object + :param init_attr: QPInitAttrEx object + :return: An initialized HnsQP + """ + cdef PD pd + + # Initialize the logger here as the parent's __init__ is called after + # the QP is allocated. 
Allocation can fail, which will lead to exceptions + # thrown during object's teardown. + self.logger = logging.getLogger(self.__class__.__name__) + if init_attr.pd is not None: + pd = init_attr.pd + pd.add_ref(self) + self.qp = \ + dv.hnsdv_create_qp(context.context, + &init_attr.attr, + &dv_init_attr.attr if dv_init_attr is not None + else NULL) + if self.qp == NULL: + raise PyverbsRDMAErrno('Failed to create HNS QP.\nQPInitAttrEx ' + 'attributes:\n{}\nHNSDVQPInitAttr:\n{}'. + format(init_attr, dv_init_attr)) + super().__init__(context, init_attr) + +def bitmask_to_str(bits, values): + numeric_bits = bits + res = '' + for t in values.keys(): + if t & bits: + res += values[t] + ', ' + bits -= t + if bits == 0: + break + return res[:-2] + ' ({})'.format(numeric_bits) # Remove last comma and space + +def qp_comp_mask_to_str(flags): + l = {dve.HNSDV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS: 'Create flags'} + return bitmask_to_str(flags, l) + +def qp_create_flags_to_str(flags): + l = {dve.HNSDV_QP_CREATE_ENABLE_DCA_MODE: 'Enable DCA'} + return bitmask_to_str(flags, l) diff --git a/pyverbs/providers/hns/hnsdv_enums.pxd b/pyverbs/providers/hns/hnsdv_enums.pxd new file mode 100644 index 0000000..9fa43af --- /dev/null +++ b/pyverbs/providers/hns/hnsdv_enums.pxd @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) +# Copyright (c) 2021 HiSilicon Limited. All rights reserved. + +#cython: language_level=3 + +cdef extern from 'infiniband/hnsdv.h': + + cpdef enum hnsdv_context_attr_flags: + HNSDV_CONTEXT_FLAGS_DCA = 1 << 0 + + cpdef enum hnsdv_context_comp_mask: + HNSDV_CONTEXT_MASK_DCA_PRIME_QPS = 1 << 0 + HNSDV_CONTEXT_MASK_DCA_UNIT_SIZE = 1 << 1 + HNSDV_CONTEXT_MASK_DCA_MAX_SIZE = 1 << 2 + HNSDV_CONTEXT_MASK_DCA_MIN_SIZE = 1 << 3 + + cpdef enum hnsdv_qp_init_attr_mask: + HNSDV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS = 1 << 0 + + cpdef enum hnsdv_qp_create_flags: + HNSDV_QP_CREATE_ENABLE_DCA_MODE = 1 << 0 diff --git a/pyverbs/providers/hns/libhns.pxd b/pyverbs/providers/hns/libhns.pxd new file mode 100644 index 0000000..c1e4ec3 --- /dev/null +++ b/pyverbs/providers/hns/libhns.pxd @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) +# Copyright (c) 2021 HiSilicon Limited. All rights reserved. 
+ +from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t +from libcpp cimport bool + +cimport pyverbs.libibverbs as v + +cdef extern from 'infiniband/hnsdv.h': + + cdef struct hnsdv_context_attr: + uint64_t flags + uint64_t comp_mask + uint32_t dca_prime_qps + uint32_t dca_unit_size + uint64_t dca_max_size + uint64_t dca_min_size + + cdef struct hnsdv_qp_init_attr: + uint64_t comp_mask + uint32_t create_flags + + bool hnsdv_is_supported(v.ibv_device *device) + v.ibv_context* hnsdv_open_device(v.ibv_device *device, + hnsdv_context_attr *attr) + v.ibv_qp *hnsdv_create_qp(v.ibv_context *context, + v.ibv_qp_init_attr_ex *qp_attr, + hnsdv_qp_init_attr *hns_qp_attr) From patchwork Tue Jul 27 07:28:21 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Wenpeng Liang X-Patchwork-Id: 12402051 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-16.7 required=3.0 tests=BAYES_00, HEADER_FROM_DIFFERENT_DOMAINS,INCLUDES_CR_TRAILER,INCLUDES_PATCH, MAILING_LIST_MULTI,SPF_HELO_NONE,SPF_PASS,URIBL_BLOCKED,USER_AGENT_GIT autolearn=ham autolearn_force=no version=3.4.0 Received: from mail.kernel.org (mail.kernel.org [198.145.29.99]) by smtp.lore.kernel.org (Postfix) with ESMTP id 5069FC4338F for ; Tue, 27 Jul 2021 07:32:04 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id 38A49611CE for ; Tue, 27 Jul 2021 07:32:04 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235679AbhG0HcC (ORCPT ); Tue, 27 Jul 2021 03:32:02 -0400 Received: from szxga03-in.huawei.com ([45.249.212.189]:12316 "EHLO szxga03-in.huawei.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S235731AbhG0HcA (ORCPT ); Tue, 27 Jul 2021 03:32:00 -0400 Received: from dggemv704-chm.china.huawei.com (unknown [172.30.72.54]) by szxga03-in.huawei.com (SkyGuard) with ESMTP id 4GYpJz665bz7x6T; Tue, 27 Jul 2021 15:27:15 +0800 (CST) Received: from dggpeml500017.china.huawei.com (7.185.36.243) by dggemv704-chm.china.huawei.com (10.3.19.47) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2176.2; Tue, 27 Jul 2021 15:31:57 +0800 Received: from localhost.localdomain (10.67.165.24) by dggpeml500017.china.huawei.com (7.185.36.243) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2176.2; Tue, 27 Jul 2021 15:31:57 +0800 From: Wenpeng Liang To: , CC: , Subject: [PATCH v2 rdma-core 10/10] tests: Add traffic test of send on HNS DCA QPEx Date: Tue, 27 Jul 2021 15:28:21 +0800 Message-ID: <1627370901-10054-11-git-send-email-liangwenpeng@huawei.com> X-Mailer: git-send-email 2.8.1 In-Reply-To: <1627370901-10054-1-git-send-email-liangwenpeng@huawei.com> References: <1627370901-10054-1-git-send-email-liangwenpeng@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.67.165.24] X-ClientProxiedBy: dggems701-chm.china.huawei.com (10.3.19.178) To dggpeml500017.china.huawei.com (7.185.36.243) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: linux-rdma@vger.kernel.org From: Xi Wang Add traffic test of old send on QPEx class. 
Signed-off-by: Xi Wang
Signed-off-by: Wenpeng Liang
---
 tests/hns_base.py     | 80 ++++++++++++++++++++++++++++++++++++++++++++++++++
 tests/test_hns_dca.py | 37 ++++++++++++++++++++++
 2 files changed, 117 insertions(+)
 create mode 100644 tests/hns_base.py
 create mode 100644 tests/test_hns_dca.py

diff --git a/tests/hns_base.py b/tests/hns_base.py
new file mode 100644
index 0000000..46d6a28
--- /dev/null
+++ b/tests/hns_base.py
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB)
+# Copyright (c) 2021 HiSilicon Limited. All rights reserved.
+
+import unittest
+import random
+import errno
+
+from pyverbs.providers.hns.hnsdv import HnsContext, HnsDVContextAttr, \
+    HnsDVQPInitAttr, HnsQP
+from tests.base import RCResources, RDMATestCase, PyverbsAPITestCase
+from pyverbs.pyverbs_error import PyverbsRDMAError, PyverbsUserError
+from pyverbs.qp import QPCap, QPInitAttrEx
+import pyverbs.providers.hns.hns_enums as dve
+import pyverbs.device as d
+import pyverbs.enums as e
+from pyverbs.mr import MR
+
+
+HUAWEI_VENDOR_ID = 0x19e5
+
+def is_hns_dev(ctx):
+    dev_attrs = ctx.query_device()
+    return dev_attrs.vendor_id == HUAWEI_VENDOR_ID
+
+
+def skip_if_not_hns_dev(ctx):
+    if not is_hns_dev(ctx):
+        raise unittest.SkipTest('Can not run the test over non HNS device')
+
+
+class HnsPyverbsAPITestCase(PyverbsAPITestCase):
+    def setUp(self):
+        super().setUp()
+        skip_if_not_hns_dev(self.ctx)
+
+
+class HnsRDMATestCase(RDMATestCase):
+    def setUp(self):
+        super().setUp()
+        skip_if_not_hns_dev(d.Context(name=self.dev_name))
+
+
+class HnsDcaResources(RCResources):
+    def create_context(self):
+        hnsdv_attr = HnsDVContextAttr(flags=dve.HNSDV_CONTEXT_FLAGS_DCA)
+        try:
+            self.ctx = HnsContext(hnsdv_attr, name=self.dev_name)
+        except PyverbsUserError as ex:
+            raise unittest.SkipTest(f'Could not open hns context ({ex})')
+        except PyverbsRDMAError:
+            raise unittest.SkipTest('Opening hns context is not supported')
+
+    def create_qp_cap(self):
+        return QPCap(100, 0, 10, 0)
+
+    def create_qp_init_attr(self):
+        return QPInitAttrEx(cap=self.create_qp_cap(), pd=self.pd, scq=self.cq,
+                            rcq=self.cq, srq=self.srq, qp_type=e.IBV_QPT_RC,
+                            comp_mask=e.IBV_QP_INIT_ATTR_PD,
+                            sq_sig_all=1)
+
+    def create_qps(self):
+        # Create the DCA QPs.
+        qp_init_attr = self.create_qp_init_attr()
+        try:
+            for _ in range(self.qp_count):
+                attr = HnsDVQPInitAttr(comp_mask=dve.HNSDV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS,
+                                       create_flags=dve.HNSDV_QP_CREATE_ENABLE_DCA_MODE)
+                qp = HnsQP(self.ctx, qp_init_attr, attr)
+                self.qps.append(qp)
+                self.qps_num.append(qp.qp_num)
+                self.psns.append(random.getrandbits(24))
+        except PyverbsRDMAError as ex:
+            if ex.error_code == errno.EOPNOTSUPP:
+                raise unittest.SkipTest(f'Create DCA QP is not supported')
+            raise ex
+
+    def create_mr(self):
+        access = e.IBV_ACCESS_REMOTE_WRITE | e.IBV_ACCESS_LOCAL_WRITE
+        self.mr = MR(self.pd, self.msg_size, access)
diff --git a/tests/test_hns_dca.py b/tests/test_hns_dca.py
new file mode 100644
index 0000000..8f47fb1
--- /dev/null
+++ b/tests/test_hns_dca.py
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB)
+# Copyright (c) 2021 HiSilicon Limited. All rights reserved.
+
+import unittest
+import errno
+
+from pyverbs.pyverbs_error import PyverbsRDMAError
+import pyverbs.enums as e
+
+from tests.hns_base import HnsRDMATestCase
+from tests.hns_base import HnsDcaResources
+import tests.utils as u
+
+
+
+class QPDCATestCase(HnsRDMATestCase):
+    def setUp(self):
+        super().setUp()
+        self.iters = 100
+        self.server = None
+        self.client = None
+
+    def create_players(self, qp_count=8):
+        try:
+            self.client = HnsDcaResources(self.dev_name, self.ib_port, self.gid_index, qp_count)
+            self.server = HnsDcaResources(self.dev_name, self.ib_port, self.gid_index, qp_count)
+        except PyverbsRDMAError as ex:
+            if ex.error_code == errno.EOPNOTSUPP:
+                raise unittest.SkipTest('Create DCA Resources is not supported')
+            raise ex
+        self.client.pre_run(self.server.psns, self.server.qps_num)
+        self.server.pre_run(self.client.psns, self.client.qps_num)
+
+    def test_qp_ex_dca_send(self):
+        self.create_players()
+        u.traffic(self.client, self.server, self.iters, self.gid_index, self.ib_port,
+                  new_send=False)
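For reviewers who want to try the new bindings outside the test suite, below is a minimal usage sketch, not part of the series: it mirrors what HnsDcaResources above does, opening the device with DCA enabled and then requesting a DCA QP. The device name 'hns_0' and the queue sizes are placeholders, the enum module path is taken from tests/hns_base.py, and a DCA-capable hns device plus the earlier patches in this series are assumed.

# Usage sketch only: open an HNS device with DCA enabled and create one
# DCA QP through the pyverbs wrappers added by this series.
# 'hns_0' and the queue sizes below are placeholder values.
from pyverbs.providers.hns.hnsdv import HnsContext, HnsDVContextAttr, \
    HnsDVQPInitAttr, HnsQP
from pyverbs.qp import QPCap, QPInitAttrEx
from pyverbs.cq import CQ
from pyverbs.pd import PD
import pyverbs.providers.hns.hns_enums as dve  # module path as used in tests/hns_base.py
import pyverbs.enums as e

# Request DCA at context creation time, as HnsDcaResources.create_context() does.
ctx = HnsContext(HnsDVContextAttr(flags=dve.HNSDV_CONTEXT_FLAGS_DCA),
                 name='hns_0')
pd = PD(ctx)
cq = CQ(ctx, 100)
init_attr = QPInitAttrEx(cap=QPCap(100, 0, 10, 0), pd=pd, scq=cq, rcq=cq,
                         qp_type=e.IBV_QPT_RC,
                         comp_mask=e.IBV_QP_INIT_ATTR_PD, sq_sig_all=1)
dca_attr = HnsDVQPInitAttr(comp_mask=dve.HNSDV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS,
                           create_flags=dve.HNSDV_QP_CREATE_ENABLE_DCA_MODE)
# The WQE buffer of this QP is attached and detached on demand by the DCA mechanism.
qp = HnsQP(ctx, init_attr, dca_attr)

The PD, CQ and QPInitAttrEx setup is ordinary pyverbs; only the two HnsDV* attribute objects differ from creating a regular RC QP.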