From patchwork Wed May 25 15:05:18 2016
X-Patchwork-Submitter: Lijun Ou
X-Patchwork-Id: 9135603
From: Lijun Ou <oulijun@huawei.com>
Subject: [PATCH v8 15/22] IB/hns: Add PD operations support
Date: Wed, 25 May 2016 23:05:18 +0800
Message-ID: <1464188725-42805-16-git-send-email-oulijun@huawei.com>
In-Reply-To: <1464188725-42805-1-git-send-email-oulijun@huawei.com>
References: <1464188725-42805-1-git-send-email-oulijun@huawei.com>
X-Mailer: git-send-email 1.9.1
X-Mailing-List: linux-rdma@vger.kernel.org

This patch adds the verbs to operate on a protection domain (PD). It
mainly includes the functions for allocating a PD and deallocating a PD.
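
For context (not part of the patch): these two verbs back the standard
libibverbs PD calls. A minimal user-space exercise of the new paths could
look like the sketch below, assuming an hns device with a matching
provider library is present; error handling is abbreviated.

#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
	int num;
	struct ibv_device **list = ibv_get_device_list(&num);
	struct ibv_context *ctx;
	struct ibv_pd *pd;

	if (!list || num == 0)
		return 1;

	ctx = ibv_open_device(list[0]);
	if (!ctx)
		return 1;

	/* Issues IB_USER_VERBS_CMD_ALLOC_PD, reaching hns_roce_alloc_pd() */
	pd = ibv_alloc_pd(ctx);
	if (!pd)
		fprintf(stderr, "ibv_alloc_pd failed\n");
	else
		ibv_dealloc_pd(pd);	/* IB_USER_VERBS_CMD_DEALLOC_PD */

	ibv_close_device(ctx);
	ibv_free_device_list(list);
	return 0;
}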
Signed-off-by: Wei Hu
Signed-off-by: Nenglong Zhao
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
 drivers/infiniband/hw/hns/hns_roce_device.h | 17 ++++++++
 drivers/infiniband/hw/hns/hns_roce_main.c   |  8 +++-
 drivers/infiniband/hw/hns/hns_roce_pd.c     | 62 +++++++++++++++++++++++++++++
 3 files changed, 86 insertions(+), 1 deletion(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index c404e55..97f117e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -143,6 +143,11 @@ struct hns_roce_ucontext {
 	struct hns_roce_uar uar;
 };
 
+struct hns_roce_pd {
+	struct ib_pd ibpd;
+	unsigned long pdn;
+};
+
 struct hns_roce_bitmap {
 	/* Bitmap Traversal last a bit which is 1 */
 	unsigned long last;
@@ -411,6 +416,11 @@ static inline struct hns_roce_ucontext
 	return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
 }
 
+static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
+{
+	return container_of(ibpd, struct hns_roce_pd, ibpd);
+}
+
 static inline void hns_roce_write64_k(__be32 val[2], void __iomem *dest)
 {
 	__raw_writeq(*(u64 *) val, dest);
@@ -458,6 +468,13 @@ int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
 void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
 				unsigned long obj, int cnt);
 
+struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
+				struct ib_ucontext *context,
+				struct ib_udata *udata);
+int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn);
+void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn);
+int hns_roce_dealloc_pd(struct ib_pd *pd);
+
 void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index b93800f..0732d0d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -604,7 +604,9 @@ int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 	ib_dev->uverbs_cmd_mask =
 		(1ULL << IB_USER_VERBS_CMD_GET_CONTEXT) |
 		(1ULL << IB_USER_VERBS_CMD_QUERY_DEVICE) |
-		(1ULL << IB_USER_VERBS_CMD_QUERY_PORT);
+		(1ULL << IB_USER_VERBS_CMD_QUERY_PORT) |
+		(1ULL << IB_USER_VERBS_CMD_ALLOC_PD) |
+		(1ULL << IB_USER_VERBS_CMD_DEALLOC_PD);
 
 	/* HCA||device||port */
 	ib_dev->modify_device = hns_roce_modify_device;
@@ -618,6 +620,10 @@ int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 	ib_dev->dealloc_ucontext = hns_roce_dealloc_ucontext;
 	ib_dev->mmap = hns_roce_mmap;
 
+	/* PD */
+	ib_dev->alloc_pd = hns_roce_alloc_pd;
+	ib_dev->dealloc_pd = hns_roce_dealloc_pd;
+
 	ret = ib_register_device(ib_dev, NULL);
 	if (ret) {
 		dev_err(dev, "ib_register_device failed!\n");
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 6ad38f2..f7f8fc0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -40,6 +40,28 @@
 #include "hns_roce_common.h"
 #include "hns_roce_device.h"
 
+int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	unsigned long pd_number;
+	int ret = 0;
+
+	ret = hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, &pd_number);
+	if (ret == -1) {
+		dev_err(dev, "alloc pdn from pdbitmap failed\n");
+		return -ENOMEM;
+	}
+
+	*pdn = pd_number;
+
+	return 0;
+}
+
+void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
+{
+	hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn);
+}
+
 int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
 {
 	return hns_roce_bitmap_init(&hr_dev->pd_bitmap, hr_dev->caps.num_pds,
@@ -52,6 +74,46 @@ void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev)
 {
 	hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap);
 }
 
+struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
+				struct ib_ucontext *context,
+				struct ib_udata *udata)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+	struct device *dev = &hr_dev->pdev->dev;
+	struct hns_roce_pd *pd;
+	int ret;
+
+	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+	if (!pd)
+		return ERR_PTR(-ENOMEM);
+
+	ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn);
+	if (ret) {
+		kfree(pd);
+		dev_err(dev, "[alloc_pd]hns_roce_pd_alloc failed!\n");
+		return ERR_PTR(ret);
+	}
+
+	if (context) {
+		if (ib_copy_to_udata(udata, &pd->pdn, sizeof(u64))) {
+			hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn);
+			dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!\n");
+			kfree(pd);
+			return ERR_PTR(-EFAULT);
+		}
+	}
+
+	return &pd->ibpd;
+}
+
+int hns_roce_dealloc_pd(struct ib_pd *pd)
+{
+	hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn);
+	kfree(to_hr_pd(pd));
+
+	return 0;
+}
+
 int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
 {
 	struct resource *res;
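
For context (not part of the patch): when hns_roce_alloc_pd() runs on
behalf of a user-space context, the ib_copy_to_udata() call above hands
the 64-bit PD number back to the provider library. The response layout
this implies is sketched below; the struct name is hypothetical, not
taken from this series.

/* Illustrative only: layout a provider library would read back from the
 * alloc_pd udata; the name hns_roce_alloc_pd_resp is hypothetical. */
#include <linux/types.h>

struct hns_roce_alloc_pd_resp {
	__u64 pdn;	/* must match the sizeof(u64) copied by the kernel */
};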