From patchwork Sat Oct 27 10:56:32 2018
X-Patchwork-Submitter: Lijun Ou
X-Patchwork-Id: 10658337
From: Lijun Ou
To: ,
CC: ,
Subject: [PATCH rdma-core 1/6] libhns: Add verb of creating srq for hip08 user mode
Date: Sat, 27 Oct 2018 18:56:32 +0800
Message-ID: <1540637797-97065-2-git-send-email-oulijun@huawei.com>
In-Reply-To: <1540637797-97065-1-git-send-email-oulijun@huawei.com>
References: <1540637797-97065-1-git-send-email-oulijun@huawei.com>
X-Mailing-List: linux-rdma@vger.kernel.org

This patch adds the SRQ creation verb for hip08 in user mode. It defines
the create_srq command and response structures shared with the kernel,
introduces the SRQ index queue and its allocation bitmap, and implements
hns_roce_u_create_srq() to allocate the WQE buffer, index queue and
software doorbell before issuing the create command to the kernel.

Signed-off-by: Lijun Ou 
---
 kernel-headers/rdma/hns-abi.h    |  11 +++
 providers/hns/hns_roce_u.c       |   1 +
 providers/hns/hns_roce_u.h       |  19 ++++-
 providers/hns/hns_roce_u_abi.h   |   3 +
 providers/hns/hns_roce_u_hw_v2.h |   2 +
 providers/hns/hns_roce_u_verbs.c | 146 +++++++++++++++++++++++++++++++++++++++
 6 files changed, 180 insertions(+), 2 deletions(-)

diff --git a/kernel-headers/rdma/hns-abi.h b/kernel-headers/rdma/hns-abi.h
index c1f8773..eb76b38 100644
--- a/kernel-headers/rdma/hns-abi.h
+++ b/kernel-headers/rdma/hns-abi.h
@@ -46,6 +46,17 @@ struct hns_roce_ib_create_cq_resp {
         __aligned_u64 cap_flags;
 };
 
+struct hns_roce_ib_create_srq {
+        __aligned_u64 buf_addr;
+        __aligned_u64 db_addr;
+        __aligned_u64 que_addr;
+};
+
+struct hns_roce_ib_create_srq_resp {
+        __u32 srqn;
+        __u32 reserved;
+};
+
 struct hns_roce_ib_create_qp {
         __aligned_u64 buf_addr;
         __aligned_u64 db_addr;
diff --git a/providers/hns/hns_roce_u.c b/providers/hns/hns_roce_u.c
index 3597e9a..fe12b11 100644
--- a/providers/hns/hns_roce_u.c
+++ b/providers/hns/hns_roce_u.c
@@ -78,6 +78,7 @@ static const struct verbs_context_ops hns_common_ops = {
         .query_qp = hns_roce_u_query_qp,
         .reg_mr = hns_roce_u_reg_mr,
         .rereg_mr = hns_roce_u_rereg_mr,
+        .create_srq = hns_roce_u_create_srq,
 };
 
 static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev,
diff --git a/providers/hns/hns_roce_u.h b/providers/hns/hns_roce_u.h
index 93c917d..3ad1c14 100644
--- a/providers/hns/hns_roce_u.h
+++ b/providers/hns/hns_roce_u.h
@@ -95,6 +95,9 @@ struct hns_roce_buf {
         unsigned int length;
 };
 
+#define BIT_CNT_PER_BYTE 8
+
+
 /* the sw doorbell type; */
 enum hns_roce_db_type {
         HNS_ROCE_QP_TYPE_DB,
@@ -154,8 +157,16 @@ struct hns_roce_cq {
         unsigned long flags;
 };
 
+struct hns_roce_idx_que {
+        struct hns_roce_buf buf;
+        int buf_size;
+        int entry_sz;
+        unsigned long *bitmap;
+        unsigned long use_cnt;
+};
+
 struct hns_roce_srq {
-        struct ibv_srq ibv_srq;
+        struct verbs_srq verbs_srq;
         struct hns_roce_buf buf;
         pthread_spinlock_t lock;
         unsigned long *wrid;
@@ -167,6 +178,7 @@ struct hns_roce_srq {
         int tail;
         unsigned int *db;
         unsigned short counter;
+        struct hns_roce_idx_que idx_que;
 };
 
 struct hns_roce_wq {
@@ -253,7 +265,8 @@ static inline struct hns_roce_cq *to_hr_cq(struct ibv_cq *ibv_cq)
 
 static inline struct hns_roce_srq *to_hr_srq(struct ibv_srq *ibv_srq)
 {
-        return container_of(ibv_srq, struct hns_roce_srq, ibv_srq);
+        return container_of(container_of(ibv_srq, struct verbs_srq, srq),
+                            struct hns_roce_srq, verbs_srq);
 }
 
 static inline struct hns_roce_qp *to_hr_qp(struct ibv_qp *ibv_qp)
@@ -288,6 +301,8 @@ int hns_roce_u_modify_cq(struct ibv_cq *cq, struct ibv_modify_cq_attr *attr);
 int hns_roce_u_destroy_cq(struct ibv_cq *cq);
 void hns_roce_u_cq_event(struct ibv_cq *cq);
 
+struct ibv_srq *hns_roce_u_create_srq(struct ibv_pd *pd,
+                                      struct ibv_srq_init_attr *srq_init_attr);
 struct ibv_qp *hns_roce_u_create_qp(struct ibv_pd *pd,
                                     struct ibv_qp_init_attr *attr);
 
diff --git a/providers/hns/hns_roce_u_abi.h b/providers/hns/hns_roce_u_abi.h
index 3b646b6..79fd7dd 100644
--- a/providers/hns/hns_roce_u_abi.h
+++ b/providers/hns/hns_roce_u_abi.h
@@ -46,4 +46,7 @@ DECLARE_DRV_CMD(hns_roce_create_qp, IB_USER_VERBS_CMD_CREATE_QP,
 DECLARE_DRV_CMD(hns_roce_alloc_ucontext, IB_USER_VERBS_CMD_GET_CONTEXT,
                 empty, hns_roce_ib_alloc_ucontext_resp);
 
+DECLARE_DRV_CMD(hns_roce_create_srq, IB_USER_VERBS_CMD_CREATE_SRQ,
+                hns_roce_ib_create_srq, hns_roce_ib_create_srq_resp);
+
 #endif /* _HNS_ROCE_U_ABI_H */
diff --git a/providers/hns/hns_roce_u_hw_v2.h b/providers/hns/hns_roce_u_hw_v2.h
index ff63bb2..3a019ef 100644
--- a/providers/hns/hns_roce_u_hw_v2.h
+++ b/providers/hns/hns_roce_u_hw_v2.h
@@ -52,6 +52,8 @@ enum {
 /* V2 REG DEFINITION */
 #define ROCEE_VF_DB_CFG0_OFFSET         0x0230
 
+#define HNS_ROCE_IDX_QUE_ENTRY_SZ       4
+
 enum {
         HNS_ROCE_WQE_OP_SEND = 0x0,
         HNS_ROCE_WQE_OP_SEND_WITH_INV = 0x1,
diff --git a/providers/hns/hns_roce_u_verbs.c b/providers/hns/hns_roce_u_verbs.c
index b0f928e..2767667 100644
--- a/providers/hns/hns_roce_u_verbs.c
+++ b/providers/hns/hns_roce_u_verbs.c
@@ -277,6 +277,16 @@ static int align_queue_size(int req)
         return nent;
 }
 
+static int align_srq_size(int req)
+{
+        int nent;
+
+        for (nent = 1; nent < req; nent <<= 1)
+                ;
+
+        return nent;
+}
+
 static void hns_roce_set_sq_sizes(struct hns_roce_qp *qp,
                                   struct ibv_qp_cap *cap, enum ibv_qp_type type)
 {
@@ -434,6 +444,142 @@ int hns_roce_u_destroy_cq(struct ibv_cq *cq)
         return ret;
 }
 
+static int hns_roce_create_idx_que(struct ibv_pd *pd, struct hns_roce_srq *srq)
+{
+        struct hns_roce_idx_que *idx_que = &srq->idx_que;
+        uint32_t bitmap_num;
+        int i;
+
+        idx_que->entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
+
+        /* bits needed in bitmap */
+        bitmap_num = align(srq->max, BIT_CNT_PER_BYTE * sizeof(uint64_t));
+
+        idx_que->bitmap = calloc(1, bitmap_num / BIT_CNT_PER_BYTE);
+        if (!idx_que->bitmap)
+                return -1;
+
+        /* bitmap_num indicates amount of u64 */
+        bitmap_num = bitmap_num / (BIT_CNT_PER_BYTE * sizeof(uint64_t));
+
+        idx_que->buf_size = srq->max * idx_que->entry_sz;
+        if (hns_roce_alloc_buf(&idx_que->buf, align(idx_que->buf_size, 0x1000),
+                               to_hr_dev(pd->context->device)->page_size)) {
+                free(idx_que->bitmap);
+                idx_que->bitmap = NULL;
+                return -1;
+        }
+
+        memset(idx_que->buf.buf, 0, idx_que->buf_size);
+
+        /* init the idx_que bitmap */
+        for (i = 0; i < bitmap_num; ++i)
+                idx_que->bitmap[i] = ~(0UL);
+
+        return 0;
+}
+
+static int hns_roce_alloc_srq_buf(struct ibv_pd *pd, struct ibv_srq_attr *attr,
+                                  struct hns_roce_srq *srq)
+{
+        int srq_buf_size;
+        int srq_size;
+
+        srq->wrid = calloc(1, srq->max * sizeof(unsigned long));
+        if (!srq->wrid)
+                return -1;
+
+        /* srq size */
+        srq_size = srq->max_gs * sizeof(struct hns_roce_v2_wqe_data_seg);
+
+        for (srq->wqe_shift = 4; 1 << srq->wqe_shift < srq_size;
+             ++srq->wqe_shift)
+                ; /* nothing */
+
+        srq_buf_size = srq->max << srq->wqe_shift;
+
+        /* allocate srq wqe buf */
+        if (hns_roce_alloc_buf(&srq->buf, srq_buf_size,
+                               to_hr_dev(pd->context->device)->page_size)) {
+                free(srq->wrid);
+                return -1;
+        }
+
+        memset(srq->buf.buf, 0, srq_buf_size);
+
+        srq->head = 0;
+        srq->tail = srq->max - 1;
+
+        return 0;
+}
+
+struct ibv_srq *hns_roce_u_create_srq(struct ibv_pd *pd,
+                                      struct ibv_srq_init_attr *srq_init_attr)
+{
+        struct hns_roce_create_srq cmd;
+        struct hns_roce_create_srq_resp resp;
+        struct hns_roce_srq *srq;
+        int ret;
+
+        if (srq_init_attr->attr.max_wr > (1 << 15) ||
+            srq_init_attr->attr.max_sge > (1 << 8))
+                return NULL;
+
+        srq = calloc(1, sizeof(*srq));
+        if (!srq)
+                return NULL;
+
+        if (pthread_spin_init(&srq->lock, PTHREAD_PROCESS_PRIVATE))
+                goto out;
+
+        srq->max = align_srq_size(srq_init_attr->attr.max_wr + 1);
+        srq->max_gs = srq_init_attr->attr.max_sge;
+
+        ret = hns_roce_create_idx_que(pd, srq);
+        if (ret) {
+                fprintf(stderr, "hns_roce_create_idx_que failed!\n");
+                goto out;
+        }
+
+        if (hns_roce_alloc_srq_buf(pd, &srq_init_attr->attr, srq)) {
+                fprintf(stderr, "hns_roce_alloc_srq_buf failed!\n");
+                goto err_idx_que;
+        }
+
+        srq->db = hns_roce_alloc_db(to_hr_ctx(pd->context),
+                                    HNS_ROCE_QP_TYPE_DB);
+        if (!srq->db)
+                goto err_srq_buf;
+
+        *(srq->db) = 0;
+
+        cmd.buf_addr = (uintptr_t)srq->buf.buf;
+        cmd.que_addr = (uintptr_t)srq->idx_que.buf.buf;
+        cmd.db_addr = (uintptr_t)srq->db;
+
+        ret = ibv_cmd_create_srq(pd, &srq->verbs_srq.srq, srq_init_attr,
+                                 &cmd.ibv_cmd, sizeof(cmd), &resp.ibv_resp,
+                                 sizeof(resp));
+        if (ret)
+                goto err_srq_db;
+
+        srq->srqn = resp.srqn;
+
+        return &srq->verbs_srq.srq;
+
+err_srq_db:
+        hns_roce_free_db(to_hr_ctx(pd->context), srq->db, HNS_ROCE_QP_TYPE_DB);
+
+err_srq_buf:
+        free(srq->wrid);
+        hns_roce_free_buf(&srq->buf);
+
+err_idx_que:
+        free(srq->idx_que.bitmap);
+        hns_roce_free_buf(&srq->idx_que.buf);
+out:
+        free(srq);
+        return NULL;
+}
+
 static int hns_roce_verify_qp(struct ibv_qp_init_attr *attr,
                               struct hns_roce_context *context)
 {
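
The index queue bitmap is initialized to all ones above, so every SRQ WQE
index starts out free. For review context only, here is a hedged sketch of
how such a bitmap could be consumed when a receive is posted; the real
post_srq_recv helper is not part of this patch, and the function name and
the nwords parameter below are illustrative, not from this series.

/*
 * Illustrative helper, not part of this patch: claim one free SRQ WQE
 * index from the bitmap created by hns_roce_create_idx_que().  A set bit
 * means the index is free; claiming an index clears its bit.  'nwords'
 * is the number of unsigned-long words in the bitmap (bitmap_num above).
 */
static int idx_que_get_entry(struct hns_roce_idx_que *idx_que, int nwords)
{
        const int bits_per_word = BIT_CNT_PER_BYTE * sizeof(unsigned long);
        int word, bit;

        for (word = 0; word < nwords; ++word) {
                if (!idx_que->bitmap[word])
                        continue;       /* every index in this word is in use */

                for (bit = 0; bit < bits_per_word; ++bit) {
                        if (idx_que->bitmap[word] & (1UL << bit)) {
                                idx_que->bitmap[word] &= ~(1UL << bit);
                                idx_que->use_cnt++;
                                return word * bits_per_word + bit;
                        }
                }
        }

        return -1;      /* index queue exhausted */
}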
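
For completeness, a minimal sketch (not part of this patch) of how an
application would reach the new hns_roce_u_create_srq() path through the
standard libibverbs API, assuming later patches in this series wire up the
remaining SRQ verbs such as destroy. The device index and the max_wr/max_sge
values are arbitrary; they only have to stay below the caps checked at the
top of hns_roce_u_create_srq().

#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
        struct ibv_device **dev_list = ibv_get_device_list(NULL);
        struct ibv_context *ctx;
        struct ibv_pd *pd;
        struct ibv_srq *srq;
        struct ibv_srq_init_attr attr = {
                .attr = {
                        .max_wr  = 64,  /* within the 1 << 15 cap above */
                        .max_sge = 1,   /* within the 1 << 8 cap above */
                },
        };

        if (!dev_list || !dev_list[0])
                return 1;

        ctx = ibv_open_device(dev_list[0]);     /* first device, for illustration */
        if (!ctx)
                return 1;

        pd = ibv_alloc_pd(ctx);
        if (!pd)
                return 1;

        /* dispatches to the provider's .create_srq, i.e. hns_roce_u_create_srq() */
        srq = ibv_create_srq(pd, &attr);
        if (!srq) {
                fprintf(stderr, "ibv_create_srq failed\n");
                return 1;
        }

        ibv_destroy_srq(srq);
        ibv_dealloc_pd(pd);
        ibv_close_device(ctx);
        ibv_free_device_list(dev_list);
        return 0;
}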