From patchwork Thu Jun 9 19:35:16 2016
X-Patchwork-Submitter: Hariprasad S
X-Patchwork-Id: 9167819
From: Hariprasad Shenai
To: dledford@redhat.com
Cc: linux-rdma@vger.kernel.org, nirranjan@chelsio.com,
    Hariprasad Shenai, Steve Wise
Subject: [PATCH net-next 6/7] RDMA/iw_cxgb4: Low resource fixes for Memory registration
Date: Fri, 10 Jun 2016 01:05:16 +0530
Message-Id: <1465500917-18491-7-git-send-email-hariprasad@chelsio.com>
In-Reply-To: <1465500917-18491-1-git-send-email-hariprasad@chelsio.com>
References: <1465500917-18491-1-git-send-email-hariprasad@chelsio.com>

Pre-allocate the buffers needed to deregister memory regions and memory
windows, so that deregistration during RDMA connection close can still
make forward progress when the system is running out of memory.
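The general pattern: an object whose teardown must post a work request to
the hardware reserves the skb for that request at creation time, while
GFP_KERNEL allocations can still succeed, and the destroy path consumes
the reservation instead of allocating under memory pressure. A minimal
sketch of the idea follows (illustrative only, not part of the patch;
my_obj, my_obj_create(), my_obj_destroy() and issue_dereg_wr() are
hypothetical names, and the buffer is sized with the driver's
SGE_MAX_WR_LEN constant as the patch itself does):

  #include <linux/skbuff.h>

  struct my_obj {
  	struct sk_buff *dereg_skb;	/* reserved at creation time */
  };

  static int my_obj_create(struct my_obj *obj)
  {
  	/* Reserve the teardown work-request buffer up front, while
  	 * memory is still plentiful, so destroying the object later
  	 * cannot fail with -ENOMEM. */
  	obj->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
  	if (!obj->dereg_skb)
  		return -ENOMEM;
  	/* ... register the object with the hardware ... */
  	return 0;
  }

  static void my_obj_destroy(struct my_obj *obj)
  {
  	/* Consume the reserved skb rather than allocating one now;
  	 * ownership passes to the send path, so drop our pointer. */
  	issue_dereg_wr(obj->dereg_skb);	/* hypothetical helper */
  	obj->dereg_skb = NULL;
  }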
Signed-off-by: Steve Wise
Signed-off-by: Hariprasad Shenai
---
 drivers/infiniband/hw/cxgb4/iw_cxgb4.h |   2 +
 drivers/infiniband/hw/cxgb4/mem.c      | 111 ++++++++++++++++++++++-----------
 2 files changed, 76 insertions(+), 37 deletions(-)

diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 5b6972757a4c..8a01b1cb7e42 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -384,6 +384,7 @@ struct c4iw_mr {
 	struct ib_mr ibmr;
 	struct ib_umem *umem;
 	struct c4iw_dev *rhp;
+	struct sk_buff *dereg_skb;
 	u64 kva;
 	struct tpt_attributes attr;
 	u64 *mpl;
@@ -400,6 +401,7 @@ static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
 struct c4iw_mw {
 	struct ib_mw ibmw;
 	struct c4iw_dev *rhp;
+	struct sk_buff *dereg_skb;
 	u64 kva;
 	struct tpt_attributes attr;
 };
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index d495675ea68d..8a8a0ed65fe9 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -59,9 +59,9 @@ static int mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
 }
 
 static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
-				       u32 len, dma_addr_t data, int wait)
+				       u32 len, dma_addr_t data,
+				       int wait, struct sk_buff *skb)
 {
-	struct sk_buff *skb;
 	struct ulp_mem_io *req;
 	struct ulptx_sgl *sgl;
 	u8 wr_len;
@@ -74,9 +74,11 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
 	c4iw_init_wr_wait(&wr_wait);
 	wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);
 
-	skb = alloc_skb(wr_len, GFP_KERNEL);
-	if (!skb)
-		return -ENOMEM;
+	if (!skb) {
+		skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+		if (!skb)
+			return -ENOMEM;
+	}
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
 
 	req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
@@ -108,9 +110,8 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
 }
 
 static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
-				  void *data)
+				  void *data, struct sk_buff *skb)
 {
-	struct sk_buff *skb;
 	struct ulp_mem_io *req;
 	struct ulptx_idata *sc;
 	u8 wr_len, *to_dp, *from_dp;
@@ -134,9 +135,11 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
 		wr_len = roundup(sizeof *req + sizeof *sc +
 				 roundup(copy_len, T4_ULPTX_MIN_IO), 16);
 
-		skb = alloc_skb(wr_len, GFP_KERNEL);
-		if (!skb)
-			return -ENOMEM;
+		if (!skb) {
+			skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+			if (!skb)
+				return -ENOMEM;
+		}
 		set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
 
 		req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
@@ -173,6 +176,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
 			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
 			       (copy_len % T4_ULPTX_MIN_IO));
 		ret = c4iw_ofld_send(rdev, skb);
+		skb = NULL;
 		if (ret)
 			return ret;
 		len -= C4IW_MAX_INLINE_SIZE;
@@ -182,7 +186,8 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
 	return ret;
 }
 
-static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
+static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
+			       void *data, struct sk_buff *skb)
 {
 	u32 remain = len;
 	u32 dmalen;
@@ -205,7 +210,7 @@ static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *
 			dmalen = T4_ULPTX_MAX_DMA;
 		remain -= dmalen;
 		ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
-						  !remain);
+						  !remain, skb);
 		if (ret)
 			goto out;
 		addr += dmalen >> 5;
@@ -213,7 +218,7 @@ static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *
 		daddr += dmalen;
 	}
 	if (remain)
-		ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
+		ret = _c4iw_write_mem_inline(rdev, addr, remain, data, skb);
 out:
 	dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
 	return ret;
@@ -224,23 +229,25 @@ out:
  * If data is NULL, clear len byte of memory to zero.
  */
 static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
-			     void *data)
+			     void *data, struct sk_buff *skb)
 {
 	if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
 		if (len > inline_threshold) {
-			if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
+			if (_c4iw_write_mem_dma(rdev, addr, len, data, skb)) {
 				printk_ratelimited(KERN_WARNING
 						   "%s: dma map"
 						   " failure (non fatal)\n",
 						   pci_name(rdev->lldi.pdev));
 				return _c4iw_write_mem_inline(rdev, addr, len,
-							      data);
-			} else
+							      data, skb);
+			} else {
 				return 0;
+			}
 		} else
-			return _c4iw_write_mem_inline(rdev, addr, len, data);
+			return _c4iw_write_mem_inline(rdev, addr,
+						      len, data, skb);
 	} else
-		return _c4iw_write_mem_inline(rdev, addr, len, data);
+		return _c4iw_write_mem_inline(rdev, addr, len, data, skb);
 }
 
 /*
@@ -253,7 +260,8 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 			   u32 *stag, u8 stag_state, u32 pdid,
 			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
 			   int bind_enabled, u32 zbva, u64 to,
-			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
+			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr,
+			   struct sk_buff *skb)
 {
 	int err;
 	struct fw_ri_tpte tpt;
@@ -307,7 +315,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 	}
 	err = write_adapter_mem(rdev, stag_idx +
 				(rdev->lldi.vr->stag.start >> 5),
-				sizeof(tpt), &tpt);
+				sizeof(tpt), &tpt, skb);
 
 	if (reset_tpt_entry) {
 		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
@@ -327,28 +335,29 @@ static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
 	     __func__, pbl_addr, rdev->lldi.vr->pbl.start,
 	     pbl_size);
 
-	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
+	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL);
 	return err;
 }
 
 static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
-		     u32 pbl_addr)
+		     u32 pbl_addr, struct sk_buff *skb)
 {
 	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
-			       pbl_size, pbl_addr);
+			       pbl_size, pbl_addr, skb);
 }
 
 static int allocate_window(struct c4iw_rdev *rdev, u32 * stag, u32 pdid)
 {
 	*stag = T4_STAG_UNSET;
 	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
-			       0UL, 0, 0, 0, 0);
+			       0UL, 0, 0, 0, 0, NULL);
 }
 
-static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
+static int deallocate_window(struct c4iw_rdev *rdev, u32 stag,
+			     struct sk_buff *skb)
 {
 	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
-			       0);
+			       0, skb);
 }
 
 static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
@@ -356,7 +365,7 @@ static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
 {
 	*stag = T4_STAG_UNSET;
 	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
-			       0UL, 0, 0, pbl_size, pbl_addr);
+			       0UL, 0, 0, pbl_size, pbl_addr, NULL);
}
 
 static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
@@ -383,14 +392,16 @@ static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
 			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
 			      mhp->attr.va_fbo, mhp->attr.len ?
 			      mhp->attr.len : -1, shift - 12,
-			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
+			      mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL);
 	if (ret)
 		return ret;
 
 	ret = finish_mem_reg(mhp, stag);
-	if (ret)
+	if (ret) {
 		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
-			  mhp->attr.pbl_addr);
+			  mhp->attr.pbl_addr, mhp->dereg_skb);
+		mhp->dereg_skb = NULL;
+	}
 	return ret;
 }
 
@@ -423,6 +434,12 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
 	if (!mhp)
 		return ERR_PTR(-ENOMEM);
 
+	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
+	if (!mhp->dereg_skb) {
+		ret = -ENOMEM;
+		goto err0;
+	}
+
 	mhp->rhp = rhp;
 	mhp->attr.pdid = php->pdid;
 	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
@@ -435,7 +452,8 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
 
 	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
 			      FW_RI_STAG_NSMR, mhp->attr.perms,
-			      mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0);
+			      mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0,
+			      NULL);
 	if (ret)
 		goto err1;
 
@@ -445,8 +463,10 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
 	return &mhp->ibmr;
 err2:
 	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
-		  mhp->attr.pbl_addr);
+		  mhp->attr.pbl_addr, mhp->dereg_skb);
 err1:
+	kfree_skb(mhp->dereg_skb);
+err0:
 	kfree(mhp);
 	return ERR_PTR(ret);
 }
@@ -481,11 +501,18 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (!mhp)
 		return ERR_PTR(-ENOMEM);
 
+	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
+	if (!mhp->dereg_skb) {
+		kfree(mhp);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	mhp->rhp = rhp;
 
 	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
 	if (IS_ERR(mhp->umem)) {
 		err = PTR_ERR(mhp->umem);
+		kfree_skb(mhp->dereg_skb);
 		kfree(mhp);
 		return ERR_PTR(err);
 	}
@@ -550,6 +577,7 @@ err_pbl:
 
 err:
 	ib_umem_release(mhp->umem);
+	kfree_skb(mhp->dereg_skb);
 	kfree(mhp);
 	return ERR_PTR(err);
 }
@@ -572,8 +600,16 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
 	if (!mhp)
 		return ERR_PTR(-ENOMEM);
+
+	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
+	if (!mhp->dereg_skb) {
+		kfree(mhp);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	ret = allocate_window(&rhp->rdev, &stag, php->pdid);
 	if (ret) {
+		kfree_skb(mhp->dereg_skb);
 		kfree(mhp);
 		return ERR_PTR(ret);
 	}
@@ -584,7 +620,8 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 	mmid = (stag) >> 8;
 	mhp->ibmw.rkey = stag;
 	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
-		deallocate_window(&rhp->rdev, mhp->attr.stag);
+		deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
+		kfree_skb(mhp->dereg_skb);
 		kfree(mhp);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -602,7 +639,7 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
 	rhp = mhp->rhp;
 	mmid = (mw->rkey) >> 8;
 	remove_handle(rhp, &rhp->mmidr, mmid);
-	deallocate_window(&rhp->rdev, mhp->attr.stag);
+	deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
 	kfree(mhp);
 	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
 	return 0;
@@ -666,7 +703,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
 	return &(mhp->ibmr);
 err3:
 	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
-		  mhp->attr.pbl_addr);
+		  mhp->attr.pbl_addr, mhp->dereg_skb);
 err2:
 	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
 			  mhp->attr.pbl_size << 3);
@@ -718,7 +755,7 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
 		dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
 				  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
 	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
-		  mhp->attr.pbl_addr);
+		  mhp->attr.pbl_addr, mhp->dereg_skb);
 	if (mhp->attr.pbl_size)
 		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
 				  mhp->attr.pbl_size << 3);