From patchwork Thu May 30 15:47:54 2019
X-Patchwork-Submitter: Lijun Ou
X-Patchwork-Id: 10969027
X-Patchwork-Delegate: leon@leon.nu
From: Lijun Ou
Subject: [PATCH V3 for-next 1/3] RDMA/hns: Add mtr support for mixed multihop addressing
Date: Thu, 30 May 2019 23:47:54 +0800
Message-ID: <1559231276-67517-2-git-send-email-oulijun@huawei.com>
In-Reply-To: <1559231276-67517-1-git-send-email-oulijun@huawei.com>
References: <1559231276-67517-1-git-send-email-oulijun@huawei.com>
X-Mailing-List: linux-rdma@vger.kernel.org

Currently, the MTT (memory translate table) design requires that a whole buffer space use the same hopnum, but the hip08 hardware can support mixed hopnum configurations within one buffer space. This patch adds the MTR (memory translate region) design to support mixed multihop addressing.
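As a usage illustration (not part of the patch itself), the sketch below shows how a hypothetical caller could describe one buffer as two regions with different hopnums and map it through the new MTR interface. The helper name example_attach_mixed_hop, the region sizes, the hopnum choices and the page-shift values are made-up example values; only hns_roce_mtr_init(), hns_roce_mtr_attach(), hns_roce_mtr_find(), hns_roce_mtr_cleanup(), MTT_MIN_COUNT and struct hns_roce_buf_region are defined by this patch.

	/*
	 * Illustrative only: map one buffer whose first region uses 2-hop
	 * addressing and whose second region uses 1-hop addressing, sharing
	 * a single root base-address table. All sizes are example values.
	 */
	static int example_attach_mixed_hop(struct hns_roce_dev *hr_dev,
					    struct hns_roce_mtr *mtr,
					    dma_addr_t *sq_pages, int sq_cnt,
					    dma_addr_t *rq_pages, int rq_cnt)
	{
		struct hns_roce_buf_region regions[2];
		dma_addr_t *bufs[2] = { sq_pages, rq_pages };
		u64 mtts[MTT_MIN_COUNT];
		u64 root_ba;
		int ret;

		/* region 0: SQ pages, addressed through a 2-hop table */
		regions[0].offset = 0;
		regions[0].count = sq_cnt;
		regions[0].hopnum = 2;

		/* region 1: RQ pages directly after the SQ, 1-hop table */
		regions[1].offset = sq_cnt;
		regions[1].count = rq_cnt;
		regions[1].hopnum = 1;

		/* BA table pages and buffer pages both use PAGE_SHIFT here */
		hns_roce_mtr_init(mtr, PAGE_SHIFT, PAGE_SHIFT);

		ret = hns_roce_mtr_attach(hr_dev, mtr, bufs, regions,
					  ARRAY_SIZE(regions));
		if (ret)
			return ret;

		/* look up the current RQ block address and the root BA */
		if (hns_roce_mtr_find(hr_dev, mtr, regions[1].offset, mtts,
				      MTT_MIN_COUNT, &root_ba) < 1) {
			hns_roce_mtr_cleanup(hr_dev, mtr);
			return -ENOBUFS;
		}

		/* mtts[0] now holds the DMA address of the first RQ block */
		return 0;
	}

The point of the per-region split is that each region carries its own hopnum, so the HEM list can build a multihop table per region while all regions share one root BA table, which is what the mixed-hopnum hardware configuration expects.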
Signed-off-by: Xi Wang Signed-off-by: Lijun Ou --- drivers/infiniband/hw/hns/hns_roce_device.h | 36 +++ drivers/infiniband/hw/hns/hns_roce_hem.c | 467 ++++++++++++++++++++++++++++ drivers/infiniband/hw/hns/hns_roce_hem.h | 14 + drivers/infiniband/hw/hns/hns_roce_mr.c | 121 +++++++ 4 files changed, 638 insertions(+) diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index d6e8b44..720be44 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -341,6 +341,29 @@ struct hns_roce_mtt { enum hns_roce_mtt_type mtt_type; }; +struct hns_roce_buf_region { + int offset; /* page offset */ + u32 count; /* page count*/ + int hopnum; /* addressing hop num */ +}; + +#define HNS_ROCE_MAX_BT_REGION 3 +#define HNS_ROCE_MAX_BT_LEVEL 3 +struct hns_roce_hem_list { + struct list_head root_bt; + /* link all bt dma mem by hop config */ + struct list_head mid_bt[HNS_ROCE_MAX_BT_REGION][HNS_ROCE_MAX_BT_LEVEL]; + struct list_head btm_bt; /* link all bottom bt in @mid_bt */ + dma_addr_t root_ba; /* pointer to the root ba table */ + int bt_pg_shift; +}; + +/* memory translate region */ +struct hns_roce_mtr { + struct hns_roce_hem_list hem_list; + int buf_pg_shift; +}; + struct hns_roce_mw { struct ib_mw ibmw; u32 pdn; @@ -1111,6 +1134,19 @@ void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt, struct hns_roce_buf *buf); +void hns_roce_mtr_init(struct hns_roce_mtr *mtr, int bt_pg_shift, + int buf_pg_shift); +int hns_roce_mtr_attach(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, + dma_addr_t **bufs, struct hns_roce_buf_region *regions, + int region_cnt); +void hns_roce_mtr_cleanup(struct hns_roce_dev *hr_dev, + struct hns_roce_mtr *mtr); + +/* hns roce hw need current block and next block addr from mtt */ +#define MTT_MIN_COUNT 2 +int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, + int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr); + int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev); int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev); int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev); diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index 157c84a..d758e95 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -1157,3 +1157,470 @@ void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev) &hr_dev->mr_table.mtt_cqe_table); hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table); } + +struct roce_hem_item { + struct list_head list; /* link all hems in the same bt level */ + struct list_head sibling; /* link all hems in last hop for mtt */ + void *addr; + dma_addr_t dma_addr; + size_t count; /* max ba numbers */ + int start; /* start buf offset in this hem */ + int end; /* end buf offset in this hem */ +}; + +#define hem_list_for_each(pos, n, head) \ + list_for_each_entry_safe(pos, n, head, list) + +#define hem_list_del_item(hem) list_del(&hem->list) +#define hem_list_add_item(hem, head) list_add(&hem->list, head) +#define hem_list_link_item(hem, head) list_add(&hem->sibling, head) + +static struct roce_hem_item *hem_list_alloc_item(struct hns_roce_dev *hr_dev, + int start, int end, + int count, bool exist_bt, + int bt_level) +{ + struct roce_hem_item *hem; + + hem = kzalloc(sizeof(*hem), GFP_KERNEL); + if (!hem) + return NULL; + + if (exist_bt) { + hem->addr = 
dma_alloc_coherent(hr_dev->dev, + count * BA_BYTE_LEN, + &hem->dma_addr, GFP_KERNEL); + if (!hem->addr) { + kfree(hem); + return NULL; + } + } + + hem->count = count; + hem->start = start; + hem->end = end; + INIT_LIST_HEAD(&hem->list); + INIT_LIST_HEAD(&hem->sibling); + + return hem; +} + +static void hem_list_free_item(struct hns_roce_dev *hr_dev, + struct roce_hem_item *hem, bool exist_bt) +{ + if (exist_bt) + dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN, + hem->addr, hem->dma_addr); + kfree(hem); +} + +static void hem_list_free_all(struct hns_roce_dev *hr_dev, + struct list_head *head, bool exist_bt) +{ + struct roce_hem_item *hem, *temp_hem; + + hem_list_for_each(hem, temp_hem, head) { + hem_list_del_item(hem); + hem_list_free_item(hr_dev, hem, exist_bt); + } +} + +static void hem_list_link_bt(struct hns_roce_dev *hr_dev, void *base_addr, + u64 table_addr) +{ + *(u64 *)(base_addr) = table_addr; +} + +/* assign L0 table address to hem from root bt */ +static void hem_list_assign_bt(struct hns_roce_dev *hr_dev, + struct roce_hem_item *hem, void *cpu_addr, + u64 phy_addr) +{ + hem->addr = cpu_addr; + hem->dma_addr = (dma_addr_t)phy_addr; +} + +static inline bool hem_list_page_is_in_range(struct roce_hem_item *hem, + int offset) +{ + return (hem->start <= offset && offset <= hem->end); +} + +static struct roce_hem_item *hem_list_search_item(struct list_head *ba_list, + int page_offset) +{ + struct roce_hem_item *hem, *temp_hem; + struct roce_hem_item *found = NULL; + + hem_list_for_each(hem, temp_hem, ba_list) { + if (hem_list_page_is_in_range(hem, page_offset)) { + found = hem; + break; + } + } + + return found; +} + +static bool hem_list_is_bottom_bt(int hopnum, int bt_level) +{ + /* + * hopnum base address table levels + * 0 L0(buf) + * 1 L0 -> buf + * 2 L0 -> L1 -> buf + * 3 L0 -> L1 -> L2 -> buf + */ + return bt_level >= (hopnum ? 
hopnum - 1 : hopnum); +} + +/** + * calc base address entries num + * @hopnum: num of mutihop addressing + * @bt_level: base address table level + * @unit: ba entries per bt page + */ +static u32 hem_list_calc_ba_range(int hopnum, int bt_level, int unit) +{ + u32 step; + int max; + int i; + + if (hopnum <= bt_level) + return 0; + /* + * hopnum bt_level range + * 1 0 unit + * ------------ + * 2 0 unit * unit + * 2 1 unit + * ------------ + * 3 0 unit * unit * unit + * 3 1 unit * unit + * 3 2 unit + */ + step = 1; + max = hopnum - bt_level; + for (i = 0; i < max; i++) + step = step * unit; + + return step; +} + +/** + * calc the root ba entries which could cover all regions + * @regions: buf region array + * @region_cnt: array size of @regions + * @unit: ba entries per bt page + */ +int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions, + int region_cnt, int unit) +{ + struct hns_roce_buf_region *r; + int total = 0; + int step; + int i; + + for (i = 0; i < region_cnt; i++) { + r = (struct hns_roce_buf_region *)®ions[i]; + if (r->hopnum > 1) { + step = hem_list_calc_ba_range(r->hopnum, 1, unit); + if (step > 0) + total += (r->count + step - 1) / step; + } else { + total += r->count; + } + } + + return total; +} + +static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev, + const struct hns_roce_buf_region *r, int unit, + int offset, struct list_head *mid_bt, + struct list_head *btm_bt) +{ + struct roce_hem_item *hem_ptrs[HNS_ROCE_MAX_BT_LEVEL] = { NULL }; + struct list_head temp_list[HNS_ROCE_MAX_BT_LEVEL]; + struct roce_hem_item *cur, *pre; + const int hopnum = r->hopnum; + int start_aligned; + int distance; + int ret = 0; + int max_ofs; + int level; + u32 step; + int end; + + if (hopnum <= 1) + return 0; + + if (hopnum > HNS_ROCE_MAX_BT_LEVEL) { + dev_err(hr_dev->dev, "invalid hopnum %d!\n", hopnum); + return -EINVAL; + } + + if (offset < r->offset) { + dev_err(hr_dev->dev, "invalid offset %d,min %d!\n", + offset, r->offset); + return -EINVAL; + } + + distance = offset - r->offset; + max_ofs = r->offset + r->count - 1; + for (level = 0; level < hopnum; level++) + INIT_LIST_HEAD(&temp_list[level]); + + /* config L1 bt to last bt and link them to corresponding parent */ + for (level = 1; level < hopnum; level++) { + cur = hem_list_search_item(&mid_bt[level], offset); + if (cur) { + hem_ptrs[level] = cur; + continue; + } + + step = hem_list_calc_ba_range(hopnum, level, unit); + if (step < 1) { + ret = -EINVAL; + goto err_exit; + } + + start_aligned = (distance / step) * step + r->offset; + end = min_t(int, start_aligned + step - 1, max_ofs); + cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit, + true, level); + if (!cur) { + ret = -ENOMEM; + goto err_exit; + } + hem_ptrs[level] = cur; + hem_list_add_item(cur, &temp_list[level]); + if (hem_list_is_bottom_bt(hopnum, level)) + hem_list_link_item(cur, &temp_list[0]); + + /* link bt to parent bt */ + if (level > 1) { + pre = hem_ptrs[level - 1]; + step = (cur->start - pre->start) / step * BA_BYTE_LEN; + hem_list_link_bt(hr_dev, pre->addr + step, + cur->dma_addr); + } + } + + list_splice(&temp_list[0], btm_bt); + for (level = 1; level < hopnum; level++) + list_splice(&temp_list[level], &mid_bt[level]); + + return 0; + +err_exit: + for (level = 1; level < hopnum; level++) + hem_list_free_all(hr_dev, &temp_list[level], true); + + return ret; +} + +static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_list *hem_list, int unit, + const struct hns_roce_buf_region *regions, + int 
region_cnt) +{ + struct roce_hem_item *hem, *temp_hem, *root_hem; + struct list_head temp_list[HNS_ROCE_MAX_BT_REGION]; + const struct hns_roce_buf_region *r; + struct list_head temp_root; + struct list_head temp_btm; + void *cpu_base; + u64 phy_base; + int ret = 0; + int offset; + int total; + int step; + int i; + + r = ®ions[0]; + root_hem = hem_list_search_item(&hem_list->root_bt, r->offset); + if (root_hem) + return 0; + + INIT_LIST_HEAD(&temp_root); + total = r->offset; + /* indicate to last region */ + r = ®ions[region_cnt - 1]; + root_hem = hem_list_alloc_item(hr_dev, total, r->offset + r->count - 1, + unit, true, 0); + if (!root_hem) + return -ENOMEM; + hem_list_add_item(root_hem, &temp_root); + + hem_list->root_ba = root_hem->dma_addr; + + INIT_LIST_HEAD(&temp_btm); + for (i = 0; i < region_cnt; i++) + INIT_LIST_HEAD(&temp_list[i]); + + total = 0; + for (i = 0; i < region_cnt && total < unit; i++) { + r = ®ions[i]; + if (!r->count) + continue; + + /* all regions's mid[x][0] shared the root_bt's trunk */ + cpu_base = root_hem->addr + total * BA_BYTE_LEN; + phy_base = root_hem->dma_addr + total * BA_BYTE_LEN; + + /* if hopnum is 0 or 1, cut a new fake hem from the root bt + * which's address share to all regions. + */ + if (hem_list_is_bottom_bt(r->hopnum, 0)) { + hem = hem_list_alloc_item(hr_dev, r->offset, + r->offset + r->count - 1, + r->count, false, 0); + if (!hem) { + ret = -ENOMEM; + goto err_exit; + } + hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base); + hem_list_add_item(hem, &temp_list[i]); + hem_list_link_item(hem, &temp_btm); + total += r->count; + } else { + step = hem_list_calc_ba_range(r->hopnum, 1, unit); + if (step < 1) { + ret = -EINVAL; + goto err_exit; + } + /* if exist mid bt, link L1 to L0 */ + hem_list_for_each(hem, temp_hem, + &hem_list->mid_bt[i][1]) { + offset = hem->start / step * BA_BYTE_LEN; + hem_list_link_bt(hr_dev, cpu_base + offset, + hem->dma_addr); + total++; + } + } + } + + list_splice(&temp_btm, &hem_list->btm_bt); + list_splice(&temp_root, &hem_list->root_bt); + for (i = 0; i < region_cnt; i++) + list_splice(&temp_list[i], &hem_list->mid_bt[i][0]); + + return 0; + +err_exit: + for (i = 0; i < region_cnt; i++) + hem_list_free_all(hr_dev, &temp_list[i], false); + + hem_list_free_all(hr_dev, &temp_root, true); + + return ret; +} + +/* construct the base address table and link them by address hop config */ +int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_list *hem_list, + const struct hns_roce_buf_region *regions, + int region_cnt) +{ + const struct hns_roce_buf_region *r; + int ofs, end; + int ret = 0; + int unit; + int i; + + if (region_cnt > HNS_ROCE_MAX_BT_REGION) { + dev_err(hr_dev->dev, "invalid region region_cnt %d!\n", + region_cnt); + return -EINVAL; + } + + unit = (1 << hem_list->bt_pg_shift) / BA_BYTE_LEN; + for (i = 0; i < region_cnt; i++) { + r = ®ions[i]; + if (!r->count) + continue; + + end = r->offset + r->count; + for (ofs = r->offset; ofs < end; ofs += unit) { + ret = hem_list_alloc_mid_bt(hr_dev, r, unit, ofs, + hem_list->mid_bt[i], + &hem_list->btm_bt); + if (ret) { + dev_err(hr_dev->dev, + "alloc hem trunk fail ret=%d!\n", ret); + goto err_alloc; + } + } + } + + ret = hem_list_alloc_root_bt(hr_dev, hem_list, unit, regions, + region_cnt); + if (ret) + dev_err(hr_dev->dev, "alloc hem root fail ret=%d!\n", ret); + else + return 0; + +err_alloc: + hns_roce_hem_list_release(hr_dev, hem_list); + + return ret; +} + +void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev, + struct 
hns_roce_hem_list *hem_list) +{ + int i, j; + + for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++) + for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++) + hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j], + j != 0); + + hem_list_free_all(hr_dev, &hem_list->root_bt, true); + INIT_LIST_HEAD(&hem_list->btm_bt); + hem_list->root_ba = 0; +} + +void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list, + int bt_page_order) +{ + int i, j; + + INIT_LIST_HEAD(&hem_list->root_bt); + INIT_LIST_HEAD(&hem_list->btm_bt); + for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++) + for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++) + INIT_LIST_HEAD(&hem_list->mid_bt[i][j]); + + hem_list->bt_pg_shift = bt_page_order; +} + +void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_list *hem_list, + int offset, int *mtt_cnt, u64 *phy_addr) +{ + struct list_head *head = &hem_list->btm_bt; + struct roce_hem_item *hem, *temp_hem; + void *cpu_base = NULL; + u64 phy_base = 0; + int nr = 0; + + list_for_each_entry_safe(hem, temp_hem, head, sibling) { + if (hem_list_page_is_in_range(hem, offset)) { + nr = offset - hem->start; + cpu_base = hem->addr + nr * BA_BYTE_LEN; + phy_base = hem->dma_addr + nr * BA_BYTE_LEN; + nr = hem->end + 1 - offset; + break; + } + } + + if (mtt_cnt) + *mtt_cnt = nr; + + if (phy_addr) + *phy_addr = phy_base; + + return cpu_base; +} diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h index d9d6689..e865fc8 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.h +++ b/drivers/infiniband/hw/hns/hns_roce_hem.h @@ -133,6 +133,20 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev, struct hns_roce_hem_mhop *mhop); bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type); +void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list, + int bt_page_order); +int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions, + int region_cnt, int unit); +int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_list *hem_list, + const struct hns_roce_buf_region *regions, + int region_cnt); +void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_list *hem_list); +void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_list *hem_list, + int offset, int *mtt_cnt, u64 *phy_addr); + static inline void hns_roce_hem_first(struct hns_roce_hem *hem, struct hns_roce_hem_iter *iter) { diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 38ed4ac..7bdc34f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -1496,3 +1496,124 @@ int hns_roce_dealloc_mw(struct ib_mw *ibmw) return 0; } + +void hns_roce_mtr_init(struct hns_roce_mtr *mtr, int bt_pg_shift, + int buf_pg_shift) +{ + hns_roce_hem_list_init(&mtr->hem_list, bt_pg_shift); + mtr->buf_pg_shift = buf_pg_shift; +} + +void hns_roce_mtr_cleanup(struct hns_roce_dev *hr_dev, + struct hns_roce_mtr *mtr) +{ + hns_roce_hem_list_release(hr_dev, &mtr->hem_list); +} +EXPORT_SYMBOL_GPL(hns_roce_mtr_cleanup); + +static int hns_roce_write_mtr(struct hns_roce_dev *hr_dev, + struct hns_roce_mtr *mtr, dma_addr_t *bufs, + struct hns_roce_buf_region *r) +{ + int offset; + int count; + int npage; + u64 *mtts; + int end; + int i; + + offset = r->offset; + end = offset + r->count; + npage = 0; + while (offset < end) { + mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list, + offset, &count, NULL); + if (!mtts) + return -ENOBUFS; 
+ + /* Save page addr, low 12 bits : 0 */ + for (i = 0; i < count; i++) { + if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) + mtts[i] = cpu_to_le64(bufs[npage] >> + PAGE_ADDR_SHIFT); + else + mtts[i] = cpu_to_le64(bufs[npage]); + + npage++; + } + offset += count; + } + + /* Memory barrier */ + mb(); + + return 0; +} + +int hns_roce_mtr_attach(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, + dma_addr_t **bufs, struct hns_roce_buf_region *regions, + int region_cnt) +{ + struct hns_roce_buf_region *r; + int ret; + int i; + + ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list, regions, + region_cnt); + if (ret) + return ret; + + for (i = 0; i < region_cnt; i++) { + r = ®ions[i]; + ret = hns_roce_write_mtr(hr_dev, mtr, bufs[i], r); + if (ret) { + dev_err(hr_dev->dev, + "write mtr[%d/%d] err %d,offset=%d.\n", + i, region_cnt, ret, r->offset); + goto err_write; + } + } + + return 0; + +err_write: + hns_roce_hem_list_release(hr_dev, &mtr->hem_list); + + return ret; +} + +int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, + int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr) +{ + u64 *mtts = mtt_buf; + int mtt_count; + int total = 0; + u64 *addr; + int npage; + int left; + + if (mtts == NULL || mtt_max < 1) + goto done; + + left = mtt_max; + while (left > 0) { + mtt_count = 0; + addr = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list, + offset + total, + &mtt_count, NULL); + if (!addr || !mtt_count) + goto done; + + npage = min(mtt_count, left); + memcpy(&mtts[total], addr, BA_BYTE_LEN * npage); + left -= npage; + total += npage; + } + +done: + if (base_addr) + *base_addr = mtr->hem_list.root_ba; + + return total; +} +EXPORT_SYMBOL_GPL(hns_roce_mtr_find); From patchwork Thu May 30 15:47:55 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 10969021 X-Patchwork-Delegate: leon@leon.nu Return-Path: Received: from mail.wl.linuxfoundation.org (pdx-wl-mail.web.codeaurora.org [172.30.200.125]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 189DA92A for ; Thu, 30 May 2019 15:49:31 +0000 (UTC) Received: from mail.wl.linuxfoundation.org (localhost [127.0.0.1]) by mail.wl.linuxfoundation.org (Postfix) with ESMTP id 08FF728387 for ; Thu, 30 May 2019 15:49:31 +0000 (UTC) Received: by mail.wl.linuxfoundation.org (Postfix, from userid 486) id F106728879; Thu, 30 May 2019 15:49:30 +0000 (UTC) X-Spam-Checker-Version: SpamAssassin 3.3.1 (2010-03-16) on pdx-wl-mail.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-7.9 required=2.0 tests=BAYES_00,MAILING_LIST_MULTI, RCVD_IN_DNSWL_HI autolearn=ham version=3.3.1 Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.wl.linuxfoundation.org (Postfix) with ESMTP id 702A528387 for ; Thu, 30 May 2019 15:49:30 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1727141AbfE3Pt3 (ORCPT ); Thu, 30 May 2019 11:49:29 -0400 Received: from szxga07-in.huawei.com ([45.249.212.35]:46810 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1727105AbfE3Pt3 (ORCPT ); Thu, 30 May 2019 11:49:29 -0400 Received: from DGGEMS411-HUB.china.huawei.com (unknown [172.30.72.58]) by Forcepoint Email with ESMTP id 332C3AB39479B85B962E; Thu, 30 May 2019 23:49:27 +0800 (CST) Received: from linux-ioko.site (10.71.200.31) by DGGEMS411-HUB.china.huawei.com (10.3.19.211) with Microsoft SMTP Server id 14.3.439.0; Thu, 30 May 2019 23:49:17 +0800 From: Lijun Ou To: , CC: , , 
Subject: [PATCH V3 for-next 2/3] RDMA/hns: Add a group interfaces for optimizing buffers getting flow Date: Thu, 30 May 2019 23:47:55 +0800 Message-ID: <1559231276-67517-3-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 1.9.1 In-Reply-To: <1559231276-67517-1-git-send-email-oulijun@huawei.com> References: <1559231276-67517-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.71.200.31] X-CFilter-Loop: Reflected Sender: linux-rdma-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-rdma@vger.kernel.org X-Virus-Scanned: ClamAV using ClamSMTP Currently, the code for getting umem and kmem buffers exist many files, this patch adds a group interfaces to simplify the buffers getting flow. Signed-off-by: Xi Wang Signed-off-by: Lijun Ou --- V1->V2: 1. Use new APIs instead of for_each_sg. --- drivers/infiniband/hw/hns/hns_roce_alloc.c | 99 +++++++++++++++++++++++++++++ drivers/infiniband/hw/hns/hns_roce_device.h | 12 ++++ 2 files changed, 111 insertions(+) diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c index dac058d..14fcc35 100644 --- a/drivers/infiniband/hw/hns/hns_roce_alloc.c +++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c @@ -34,6 +34,7 @@ #include #include #include "hns_roce_device.h" +#include int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj) { @@ -238,6 +239,104 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct, return -ENOMEM; } +int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs, + int buf_cnt, int start, struct hns_roce_buf *buf) +{ + int i, end; + int total; + + end = start + buf_cnt; + if (end > buf->npages) { + dev_err(hr_dev->dev, + "invalid kmem region,offset %d,buf_cnt %d,total %d!\n", + start, buf_cnt, buf->npages); + return -EINVAL; + } + + total = 0; + for (i = start; i < end; i++) + if (buf->nbufs == 1) + bufs[total++] = buf->direct.map + + (i << buf->page_shift); + else + bufs[total++] = buf->page_list[i].map; + + return total; +} + +int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs, + int buf_cnt, int start, struct ib_umem *umem, + int page_shift) +{ + struct ib_block_iter biter; + int total = 0; + int idx = 0; + u64 addr; + + if (page_shift < PAGE_SHIFT) { + dev_err(hr_dev->dev, "invalid page shift %d!\n", page_shift); + return -EINVAL; + } + + /* convert system page cnt to hw page cnt */ + rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, + 1 << page_shift) { + addr = rdma_block_iter_dma_address(&biter); + if (idx >= start) { + bufs[total++] = addr; + if (total >= buf_cnt) + goto done; + } + idx++; + } + +done: + return total; +} + +void hns_roce_init_buf_region(struct hns_roce_buf_region *region, int hopnum, + int offset, int buf_cnt) +{ + if (hopnum == HNS_ROCE_HOP_NUM_0) + region->hopnum = 0; + else + region->hopnum = hopnum; + + region->offset = offset; + region->count = buf_cnt; +} + +void hns_roce_free_buf_list(dma_addr_t **bufs, int region_cnt) +{ + int i; + + for (i = 0; i < region_cnt; i++) { + kfree(bufs[i]); + bufs[i] = NULL; + } +} + +int hns_roce_alloc_buf_list(struct hns_roce_buf_region *regions, + dma_addr_t **bufs, int region_cnt) +{ + struct hns_roce_buf_region *r; + int i; + + for (i = 0; i < region_cnt; i++) { + r = ®ions[i]; + bufs[i] = kcalloc(r->count, sizeof(dma_addr_t), GFP_KERNEL); + if (!bufs[i]) + goto err_alloc; + } + + return 0; + +err_alloc: + hns_roce_free_buf_list(bufs, i); + + return -ENOMEM; +} + void 
hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev) { if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 720be44..f946ece 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -1211,6 +1211,18 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct, int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt, struct ib_umem *umem); +void hns_roce_init_buf_region(struct hns_roce_buf_region *region, int hopnum, + int offset, int buf_cnt); +int hns_roce_alloc_buf_list(struct hns_roce_buf_region *regions, + dma_addr_t **bufs, int count); +void hns_roce_free_buf_list(dma_addr_t **bufs, int count); + +int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs, + int buf_cnt, int start, struct hns_roce_buf *buf); +int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs, + int buf_cnt, int start, struct ib_umem *umem, + int page_shift); + int hns_roce_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *srq_init_attr, struct ib_udata *udata); From patchwork Thu May 30 15:47:56 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 10969025 X-Patchwork-Delegate: leon@leon.nu Return-Path: Received: from mail.wl.linuxfoundation.org (pdx-wl-mail.web.codeaurora.org [172.30.200.125]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id A14BD1575 for ; Thu, 30 May 2019 15:49:40 +0000 (UTC) Received: from mail.wl.linuxfoundation.org (localhost [127.0.0.1]) by mail.wl.linuxfoundation.org (Postfix) with ESMTP id 9134028879 for ; Thu, 30 May 2019 15:49:40 +0000 (UTC) Received: by mail.wl.linuxfoundation.org (Postfix, from userid 486) id 8559B28A12; Thu, 30 May 2019 15:49:40 +0000 (UTC) X-Spam-Checker-Version: SpamAssassin 3.3.1 (2010-03-16) on pdx-wl-mail.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-7.9 required=2.0 tests=BAYES_00,MAILING_LIST_MULTI, RCVD_IN_DNSWL_HI autolearn=ham version=3.3.1 Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.wl.linuxfoundation.org (Postfix) with ESMTP id 30CCD28879 for ; Thu, 30 May 2019 15:49:39 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1727387AbfE3Ptf (ORCPT ); Thu, 30 May 2019 11:49:35 -0400 Received: from szxga07-in.huawei.com ([45.249.212.35]:46822 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1725961AbfE3Ptf (ORCPT ); Thu, 30 May 2019 11:49:35 -0400 Received: from DGGEMS411-HUB.china.huawei.com (unknown [172.30.72.58]) by Forcepoint Email with ESMTP id 43EDD8A58AC436BC78A0; Thu, 30 May 2019 23:49:27 +0800 (CST) Received: from linux-ioko.site (10.71.200.31) by DGGEMS411-HUB.china.huawei.com (10.3.19.211) with Microsoft SMTP Server id 14.3.439.0; Thu, 30 May 2019 23:49:18 +0800 From: Lijun Ou To: , CC: , , Subject: [PATCH V3 for-next 3/3] RDMA/hns: Fix bug when wqe num is larger than 16K Date: Thu, 30 May 2019 23:47:56 +0800 Message-ID: <1559231276-67517-4-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 1.9.1 In-Reply-To: <1559231276-67517-1-git-send-email-oulijun@huawei.com> References: <1559231276-67517-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.71.200.31] X-CFilter-Loop: Reflected Sender: linux-rdma-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: 
linux-rdma@vger.kernel.org X-Virus-Scanned: ClamAV using ClamSMTP hip08 can support up to 32768 wqes in one qp. currently if the wqe num is larger than 16384, the driver will lead a calltrace as follows. [21361.393725] Call trace: [21361.398605] hns_roce_v2_modify_qp+0xbcc/0x1360 [hns_roce_hw_v2] [21361.410627] hns_roce_modify_qp+0x1d8/0x2f8 [hns_roce] [21361.420906] _ib_modify_qp+0x70/0x118 [21361.428222] ib_modify_qp+0x14/0x1c [21361.435193] rt_ktest_modify_qp+0xb8/0x650 [rdma_test] [21361.445472] exec_modify_qp_cmd+0x110/0x4d8 [rdma_test] [21361.455924] rt_ktest_dispatch_cmd_3+0xa94/0x2edc [rdma_test] [21361.467422] rt_ktest_dispatch_cmd_2+0x9c/0x108 [rdma_test] [21361.478570] rt_ktest_dispatch_cmd+0x138/0x904 [rdma_test] [21361.489545] rt_ktest_dev_write+0x328/0x4b0 [rdma_test] [21361.499998] __vfs_write+0x38/0x15c [21361.506966] vfs_write+0xa8/0x1a0 [21361.513586] ksys_write+0x50/0xb0 [21361.520206] sys_write+0xc/0x14 [21361.526479] el0_svc_naked+0x30/0x34 [21361.533622] Code: 1ac10841 d37d7c22 0b000021 d37df021 (f86268c0) [21361.545815] ---[ end trace e2a1feb2c3d7f13c ]--- When the wqe num is larger than 16384, hns_roce_table_find will return an invalid mtt, this will lead an kernel paging requet error if the driver try to access it. It's the mtt design defect which can't support up to the max wqe num of hip08. This patch fixs it by replacing mtt with mtr for wqe. Fixes: 926a01dc000d ("RDMA/hns: Add QP operations support for hip08 SoC") Signed-off-by: Xi Wang Signed-off-by: Lijun Ou --- V2-V3: 1. Remove building error --- drivers/infiniband/hw/hns/hns_roce_device.h | 11 ++ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 118 +++++++++++------ drivers/infiniband/hw/hns/hns_roce_qp.c | 189 +++++++++++++++++++++------- 3 files changed, 235 insertions(+), 83 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index f946ece..1b5acc0 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -660,6 +660,14 @@ struct hns_roce_qp { struct ib_umem *umem; struct hns_roce_mtt mtt; + struct hns_roce_mtr mtr; + + /* this define must less than HNS_ROCE_MAX_BT_REGION */ +#define HNS_ROCE_WQE_REGION_MAX 3 + struct hns_roce_buf_region regions[HNS_ROCE_WQE_REGION_MAX]; + int region_cnt; + int wqe_bt_pg_shift; + u32 buff_size; struct mutex mutex; u8 port; @@ -870,6 +878,9 @@ struct hns_roce_caps { u32 mtt_ba_pg_sz; u32 mtt_buf_pg_sz; u32 mtt_hop_num; + u32 wqe_sq_hop_num; + u32 wqe_sge_hop_num; + u32 wqe_rq_hop_num; u32 sccc_ba_pg_sz; u32 sccc_buf_pg_sz; u32 sccc_hop_num; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 7fcec99..5de5ae5 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -1574,6 +1574,9 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) caps->mtt_ba_pg_sz = 0; caps->mtt_buf_pg_sz = 0; caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM; + caps->wqe_sq_hop_num = 2; + caps->wqe_sge_hop_num = 1; + caps->wqe_rq_hop_num = 2; caps->cqe_ba_pg_sz = 0; caps->cqe_buf_pg_sz = 0; caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM; @@ -3026,7 +3029,6 @@ static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev, } static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev, - struct hns_roce_mtt *mtt, enum ib_qp_state cur_state, enum ib_qp_state new_state, struct hns_roce_v2_qp_context *context, @@ -3522,6 +3524,31 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp, } } +static 
bool check_wqe_rq_mtt_count(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp, int mtt_cnt, + u32 page_size) +{ + struct device *dev = hr_dev->dev; + + if (hr_qp->rq.wqe_cnt < 1) + return true; + + if (mtt_cnt < 1) { + dev_err(dev, "qp(0x%lx) rqwqe buf ba find failed\n", + hr_qp->qpn); + return false; + } + + if (mtt_cnt < MTT_MIN_COUNT && + (hr_qp->rq.offset + page_size) < hr_qp->buff_size) { + dev_err(dev, "qp(0x%lx) next rqwqe buf ba find failed\n", + hr_qp->qpn); + return false; + } + + return true; +} + static int modify_qp_init_to_rtr(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, struct hns_roce_v2_qp_context *context, @@ -3531,25 +3558,27 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct device *dev = hr_dev->dev; + u64 mtts[MTT_MIN_COUNT] = { 0 }; dma_addr_t dma_handle_3; dma_addr_t dma_handle_2; - dma_addr_t dma_handle; + u64 wqe_sge_ba; u32 page_size; u8 port_num; u64 *mtts_3; u64 *mtts_2; - u64 *mtts; + int count; u8 *dmac; u8 *smac; int port; /* Search qp buf's mtts */ - mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table, - hr_qp->mtt.first_seg, &dma_handle); - if (!mtts) { - dev_err(dev, "qp buf pa find failed\n"); - return -EINVAL; - } + page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT); + count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, + hr_qp->rq.offset / page_size, mtts, + MTT_MIN_COUNT, &wqe_sge_ba); + if (!ibqp->srq) + if (!check_wqe_rq_mtt_count(hr_dev, hr_qp, count, page_size)) + return -EINVAL; /* Search IRRL's mtts */ mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table, @@ -3573,7 +3602,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, } dmac = (u8 *)attr->ah_attr.roce.dmac; - context->wqe_sge_ba = (u32)(dma_handle >> 3); + context->wqe_sge_ba = (u32)(wqe_sge_ba >> 3); qpc_mask->wqe_sge_ba = 0; /* @@ -3583,22 +3612,23 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, * 0 at the same time, else set them to 0x1. */ roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M, - V2_QPC_BYTE_12_WQE_SGE_BA_S, dma_handle >> (32 + 3)); + V2_QPC_BYTE_12_WQE_SGE_BA_S, wqe_sge_ba >> (32 + 3)); roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M, V2_QPC_BYTE_12_WQE_SGE_BA_S, 0); roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M, V2_QPC_BYTE_12_SQ_HOP_NUM_S, - hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ? - 0 : hr_dev->caps.mtt_hop_num); + hr_dev->caps.wqe_sq_hop_num == HNS_ROCE_HOP_NUM_0 ? + 0 : hr_dev->caps.wqe_sq_hop_num); roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M, V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0); roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_SGE_HOP_NUM_M, V2_QPC_BYTE_20_SGE_HOP_NUM_S, - ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ? - hr_dev->caps.mtt_hop_num : 0); + ((ibqp->qp_type == IB_QPT_GSI) || + hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ? + hr_dev->caps.wqe_sge_hop_num : 0); roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_SGE_HOP_NUM_M, V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0); @@ -3606,8 +3636,8 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQ_HOP_NUM_M, V2_QPC_BYTE_20_RQ_HOP_NUM_S, - hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ? - 0 : hr_dev->caps.mtt_hop_num); + hr_dev->caps.wqe_rq_hop_num == HNS_ROCE_HOP_NUM_0 ? 
+ 0 : hr_dev->caps.wqe_rq_hop_num); roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQ_HOP_NUM_M, V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0); @@ -3615,7 +3645,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M, V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, - hr_dev->caps.mtt_ba_pg_sz + PG_SHIFT_OFFSET); + hr_qp->wqe_bt_pg_shift + PG_SHIFT_OFFSET); roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M, V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0); @@ -3628,29 +3658,24 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M, V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0); - page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT); - context->rq_cur_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size] - >> PAGE_ADDR_SHIFT); + context->rq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT); qpc_mask->rq_cur_blk_addr = 0; roce_set_field(context->byte_92_srq_info, V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M, V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, - mtts[hr_qp->rq.offset / page_size] - >> (32 + PAGE_ADDR_SHIFT)); + mtts[0] >> (32 + PAGE_ADDR_SHIFT)); roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M, V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0); - context->rq_nxt_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size + 1] - >> PAGE_ADDR_SHIFT); + context->rq_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT); qpc_mask->rq_nxt_blk_addr = 0; roce_set_field(context->byte_104_rq_sge, V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M, V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, - mtts[hr_qp->rq.offset / page_size + 1] - >> (32 + PAGE_ADDR_SHIFT)); + mtts[1] >> (32 + PAGE_ADDR_SHIFT)); roce_set_field(qpc_mask->byte_104_rq_sge, V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M, V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0); @@ -3778,18 +3803,30 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct device *dev = hr_dev->dev; - dma_addr_t dma_handle; + u64 sge_cur_blk = 0; + u64 sq_cur_blk = 0; u32 page_size; - u64 *mtts; + int count; /* Search qp buf's mtts */ - mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table, - hr_qp->mtt.first_seg, &dma_handle); - if (!mtts) { - dev_err(dev, "qp buf pa find failed\n"); + count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL); + if (count < 1) { + dev_err(dev, "qp(0x%lx) buf pa find failed\n", hr_qp->qpn); return -EINVAL; } + if (hr_qp->sge.offset) { + page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT); + count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, + hr_qp->sge.offset / page_size, + &sge_cur_blk, 1, NULL); + if (count < 1) { + dev_err(dev, "qp(0x%lx) sge pa find failed\n", + hr_qp->qpn); + return -EINVAL; + } + } + /* Not support alternate path and path migration */ if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_PATH_MIG_STATE)) { @@ -3803,38 +3840,37 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, * we should set all bits of the relevant fields in context mask to * 0 at the same time, else set them to 0x1. 
*/ - context->sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT); + context->sq_cur_blk_addr = (u32)(sq_cur_blk >> PAGE_ADDR_SHIFT); roce_set_field(context->byte_168_irrl_idx, V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M, V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, - mtts[0] >> (32 + PAGE_ADDR_SHIFT)); + sq_cur_blk >> (32 + PAGE_ADDR_SHIFT)); qpc_mask->sq_cur_blk_addr = 0; roce_set_field(qpc_mask->byte_168_irrl_idx, V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M, V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0); - page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT); context->sq_cur_sge_blk_addr = ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ? - ((u32)(mtts[hr_qp->sge.offset / page_size] >> + ((u32)(sge_cur_blk >> PAGE_ADDR_SHIFT)) : 0; roce_set_field(context->byte_184_irrl_idx, V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M, V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ? - (mtts[hr_qp->sge.offset / page_size] >> + (sge_cur_blk >> (32 + PAGE_ADDR_SHIFT)) : 0); qpc_mask->sq_cur_sge_blk_addr = 0; roce_set_field(qpc_mask->byte_184_irrl_idx, V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M, V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0); - context->rx_sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT); + context->rx_sq_cur_blk_addr = (u32)(sq_cur_blk >> PAGE_ADDR_SHIFT); roce_set_field(context->byte_232_irrl_sge, V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M, V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, - mtts[0] >> (32 + PAGE_ADDR_SHIFT)); + sq_cur_blk >> (32 + PAGE_ADDR_SHIFT)); qpc_mask->rx_sq_cur_blk_addr = 0; roce_set_field(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M, @@ -4235,7 +4271,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, V2_QPC_BYTE_60_QP_ST_S, 0); /* SW pass context to HW */ - ret = hns_roce_v2_qp_modify(hr_dev, &hr_qp->mtt, cur_state, new_state, + ret = hns_roce_v2_qp_modify(hr_dev, cur_state, new_state, context, hr_qp); if (ret) { dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret); diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 8db2817..99ec5d4 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -422,6 +422,91 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, return 0; } +static int split_wqe_buf_region(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp, + struct hns_roce_buf_region *regions, + int region_max, int page_shift) +{ + int page_size = 1 << page_shift; + bool is_extend_sge; + int region_cnt = 0; + int buf_size; + int buf_cnt; + + if (hr_qp->buff_size < 1 || region_max < 1) + return region_cnt; + + if (hr_qp->sge.sge_cnt > 0) + is_extend_sge = true; + else + is_extend_sge = false; + + /* sq region */ + if (is_extend_sge) + buf_size = hr_qp->sge.offset - hr_qp->sq.offset; + else + buf_size = hr_qp->rq.offset - hr_qp->sq.offset; + + if (buf_size > 0 && region_cnt < region_max) { + buf_cnt = DIV_ROUND_UP(buf_size, page_size); + hns_roce_init_buf_region(®ions[region_cnt], + hr_dev->caps.wqe_sq_hop_num, + hr_qp->sq.offset / page_size, + buf_cnt); + region_cnt++; + } + + /* sge region */ + if (is_extend_sge) { + buf_size = hr_qp->rq.offset - hr_qp->sge.offset; + if (buf_size > 0 && region_cnt < region_max) { + buf_cnt = DIV_ROUND_UP(buf_size, page_size); + hns_roce_init_buf_region(®ions[region_cnt], + hr_dev->caps.wqe_sge_hop_num, + hr_qp->sge.offset / page_size, + buf_cnt); + region_cnt++; + } + } + + /* rq region */ + buf_size = hr_qp->buff_size - 
hr_qp->rq.offset; + if (buf_size > 0) { + buf_cnt = DIV_ROUND_UP(buf_size, page_size); + hns_roce_init_buf_region(®ions[region_cnt], + hr_dev->caps.wqe_rq_hop_num, + hr_qp->rq.offset / page_size, + buf_cnt); + region_cnt++; + } + + return region_cnt; +} + +static int calc_wqe_bt_page_shift(struct hns_roce_dev *hr_dev, + struct hns_roce_buf_region *regions, + int region_cnt) +{ + int bt_pg_shift; + int ba_num; + int ret; + + bt_pg_shift = PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz; + + /* all root ba entries must in one bt page */ + do { + ba_num = (1 << bt_pg_shift) / BA_BYTE_LEN; + ret = hns_roce_hem_list_calc_root_ba(regions, region_cnt, + ba_num); + if (ret <= ba_num) + break; + + bt_pg_shift++; + } while (ret > ba_num); + + return bt_pg_shift - PAGE_SHIFT; +} + static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp) @@ -534,15 +619,17 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, struct ib_udata *udata, unsigned long sqpn, struct hns_roce_qp *hr_qp) { + dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { 0 }; struct device *dev = hr_dev->dev; struct hns_roce_ib_create_qp ucmd; struct hns_roce_ib_create_qp_resp resp = {}; struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context( udata, struct hns_roce_ucontext, ibucontext); + struct hns_roce_buf_region *r; unsigned long qpn = 0; - int ret = 0; u32 page_shift; - u32 npages; + int buf_count; + int ret; int i; mutex_init(&hr_qp->mutex); @@ -596,6 +683,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, init_attr->cap.max_recv_sge]; } + page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; if (udata) { if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { dev_err(dev, "ib_copy_from_udata error for create qp\n"); @@ -617,32 +705,28 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, ret = PTR_ERR(hr_qp->umem); goto err_rq_sge_list; } - - hr_qp->mtt.mtt_type = MTT_TYPE_WQE; - page_shift = PAGE_SHIFT; - if (hr_dev->caps.mtt_buf_pg_sz) { - npages = (ib_umem_page_count(hr_qp->umem) + - (1 << hr_dev->caps.mtt_buf_pg_sz) - 1) / - (1 << hr_dev->caps.mtt_buf_pg_sz); - page_shift += hr_dev->caps.mtt_buf_pg_sz; - ret = hns_roce_mtt_init(hr_dev, npages, - page_shift, - &hr_qp->mtt); - } else { - ret = hns_roce_mtt_init(hr_dev, - ib_umem_page_count(hr_qp->umem), - page_shift, &hr_qp->mtt); - } + hr_qp->region_cnt = split_wqe_buf_region(hr_dev, hr_qp, + hr_qp->regions, ARRAY_SIZE(hr_qp->regions), + page_shift); + ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list, + hr_qp->region_cnt); if (ret) { - dev_err(dev, "hns_roce_mtt_init error for create qp\n"); - goto err_buf; + dev_err(dev, "alloc buf_list error for create qp\n"); + goto err_alloc_list; } - ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt, - hr_qp->umem); - if (ret) { - dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n"); - goto err_mtt; + for (i = 0; i < hr_qp->region_cnt; i++) { + r = &hr_qp->regions[i]; + buf_count = hns_roce_get_umem_bufs(hr_dev, + buf_list[i], r->count, r->offset, + hr_qp->umem, page_shift); + if (buf_count != r->count) { + dev_err(dev, + "get umem buf err, expect %d,ret %d.\n", + r->count, buf_count); + ret = -ENOBUFS; + goto err_get_bufs; + } } if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) && @@ -653,7 +737,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, &hr_qp->sdb); if (ret) { dev_err(dev, "sq record doorbell map failed!\n"); - goto err_mtt; + goto err_get_bufs; } /* indicate kernel supports sq 
record db */ @@ -715,7 +799,6 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, } /* Allocate QP buf */ - page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size, (1 << page_shift) * 2, &hr_qp->hr_buf, page_shift)) { @@ -723,21 +806,28 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, ret = -ENOMEM; goto err_db; } - - hr_qp->mtt.mtt_type = MTT_TYPE_WQE; - /* Write MTT */ - ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages, - hr_qp->hr_buf.page_shift, &hr_qp->mtt); + hr_qp->region_cnt = split_wqe_buf_region(hr_dev, hr_qp, + hr_qp->regions, ARRAY_SIZE(hr_qp->regions), + page_shift); + ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list, + hr_qp->region_cnt); if (ret) { - dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n"); - goto err_buf; + dev_err(dev, "alloc buf_list error for create qp!\n"); + goto err_alloc_list; } - ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt, - &hr_qp->hr_buf); - if (ret) { - dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n"); - goto err_mtt; + for (i = 0; i < hr_qp->region_cnt; i++) { + r = &hr_qp->regions[i]; + buf_count = hns_roce_get_kmem_bufs(hr_dev, + buf_list[i], r->count, r->offset, + &hr_qp->hr_buf); + if (buf_count != r->count) { + dev_err(dev, + "get kmem buf err, expect %d,ret %d.\n", + r->count, buf_count); + ret = -ENOBUFS; + goto err_get_bufs; + } } hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), @@ -761,6 +851,17 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, } } + hr_qp->wqe_bt_pg_shift = calc_wqe_bt_page_shift(hr_dev, hr_qp->regions, + hr_qp->region_cnt); + hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + hr_qp->wqe_bt_pg_shift, + page_shift); + ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list, + hr_qp->regions, hr_qp->region_cnt); + if (ret) { + dev_err(dev, "mtr attatch error for create qp\n"); + goto err_mtr; + } + if (init_attr->qp_type == IB_QPT_GSI && hr_dev->hw_rev == HNS_ROCE_HW_VER1) { /* In v1 engine, GSI QP context in RoCE engine's register */ @@ -796,6 +897,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, } hr_qp->event = hns_roce_ib_qp_event; + hns_roce_free_buf_list(buf_list, hr_qp->region_cnt); return 0; @@ -810,6 +912,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, if (!sqpn) hns_roce_release_range_qp(hr_dev, qpn, 1); +err_mtr: + hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr); + err_wrid: if (udata) { if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && @@ -829,10 +934,10 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, hns_roce_qp_has_sq(init_attr)) hns_roce_db_unmap_user(uctx, &hr_qp->sdb); -err_mtt: - hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt); +err_get_bufs: + hns_roce_free_buf_list(buf_list, hr_qp->region_cnt); -err_buf: +err_alloc_list: if (hr_qp->umem) ib_umem_release(hr_qp->umem); else