From patchwork Thu Sep 24 17:34:43 2015
X-Patchwork-Submitter: Sagi Grimberg <sagig@mellanox.com>
X-Patchwork-Id: 7258781
From: Sagi Grimberg <sagig@mellanox.com>
To: linux-rdma@vger.kernel.org
Cc: linux-nfs@vger.kernel.org, "Nicholas A. Bellinger"
Subject: [PATCH v2 15/26] IB/srp: Split srp_map_sg
Date: Thu, 24 Sep 2015 20:34:43 +0300
Message-Id: <1443116094-7969-16-git-send-email-sagig@mellanox.com>
X-Mailer: git-send-email 1.8.4.3
In-Reply-To: <1443116094-7969-1-git-send-email-sagig@mellanox.com>
References: <1443116094-7969-1-git-send-email-sagig@mellanox.com>
X-Mailing-List: linux-rdma@vger.kernel.org

This is a preparation patch for the new registration API conversion.
It splits srp_map_sg per registration strategy (srp_map_sg_[fmr|fr|dma]).
On its own it adds some code duplication, but it makes the API switch
easier to comprehend.
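To illustrate the resulting structure, here is a minimal stand-alone sketch
of the dispatch this split enables (illustrative only, not part of the
patch; struct dev_caps and the map_sg_* stubs below are stand-ins for the
driver's real srp_device capability flags and srp_map_sg_* routines):

#include <stdio.h>

/* Stand-in for the capability flags on the driver's srp_device. */
struct dev_caps {
	int use_fast_reg;	/* fast registration work requests */
	int use_fmr;		/* FMR pools */
};

/* Stubs standing in for srp_map_sg_fr/fmr/dma. */
static int map_sg_fr(void)  { puts("map via fast registration"); return 0; }
static int map_sg_fmr(void) { puts("map via FMR pool"); return 0; }
static int map_sg_dma(void) { puts("map via the global rkey"); return 0; }

/* One mapping routine per registration strategy, selected up front,
 * mirroring the srp_map_data() hunk at the end of this patch. */
static int map_sg(const struct dev_caps *dev)
{
	if (dev->use_fast_reg)
		return map_sg_fr();
	else if (dev->use_fmr)
		return map_sg_fmr();
	return map_sg_dma();
}

int main(void)
{
	struct dev_caps dev = { .use_fast_reg = 1, .use_fmr = 0 };
	return map_sg(&dev);
}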
Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
---
 drivers/infiniband/ulp/srp/ib_srp.c | 156 ++++++++++++++++++++++++------------
 1 file changed, 105 insertions(+), 51 deletions(-)

diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index f8b9c18da03d..74748d1075fc 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1286,6 +1286,17 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
 	if (WARN_ON_ONCE(state->fmr.next >= state->fmr.end))
 		return -ENOMEM;
 
+	WARN_ON_ONCE(!dev->use_fmr);
+
+	if (state->npages == 0)
+		return 0;
+
+	if (state->npages == 1 && target->global_mr) {
+		srp_map_desc(state, state->base_dma_addr, state->dma_len,
+			     target->global_mr->rkey);
+		return 0;
+	}
+
 	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
 				   state->npages, io_addr);
 	if (IS_ERR(fmr))
@@ -1297,6 +1308,9 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
 	srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
 		     state->dma_len, fmr->fmr->rkey);
 
+	state->npages = 0;
+	state->dma_len = 0;
+
 	return 0;
 }
 
@@ -1309,10 +1323,23 @@ static int srp_map_finish_fr(struct srp_map_state *state,
 	struct ib_fast_reg_wr wr;
 	struct srp_fr_desc *desc;
 	u32 rkey;
+	int err;
 
 	if (WARN_ON_ONCE(state->fr.next >= state->fr.end))
 		return -ENOMEM;
 
+	WARN_ON_ONCE(!dev->use_fast_reg);
+
+	if (state->npages == 0)
+		return 0;
+
+	if (state->npages == 1 && target->global_mr) {
+		srp_map_desc(state, state->base_dma_addr, state->dma_len,
+			     target->global_mr->rkey);
+		return 0;
+	}
+
 	desc = srp_fr_pool_get(ch->fr_pool);
 	if (!desc)
 		return -ENOMEM;
@@ -1342,7 +1369,14 @@ static int srp_map_finish_fr(struct srp_map_state *state,
 	srp_map_desc(state, state->base_dma_addr, state->dma_len,
 		     desc->mr->rkey);
 
-	return ib_post_send(ch->qp, &wr.wr, &bad_wr);
+	err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
+	if (err)
+		return err;
+
+	state->npages = 0;
+	state->dma_len = 0;
+
+	return 0;
 }
 
 static int srp_finish_mapping(struct srp_map_state *state,
@@ -1350,26 +1384,9 @@ static int srp_finish_mapping(struct srp_map_state *state,
 {
 	struct srp_target_port *target = ch->target;
 	struct srp_device *dev = target->srp_host->srp_dev;
-	int ret = 0;
-
-	WARN_ON_ONCE(!dev->use_fast_reg && !dev->use_fmr);
-
-	if (state->npages == 0)
-		return 0;
 
-	if (state->npages == 1 && target->global_mr)
-		srp_map_desc(state, state->base_dma_addr, state->dma_len,
-			     target->global_mr->rkey);
-	else
-		ret = dev->use_fast_reg ? srp_map_finish_fr(state, ch) :
-			srp_map_finish_fmr(state, ch);
-
-	if (ret == 0) {
-		state->npages = 0;
-		state->dma_len = 0;
-	}
-
-	return ret;
+	return dev->use_fast_reg ? srp_map_finish_fr(state, ch) :
+		srp_map_finish_fmr(state, ch);
 }
 
 static int srp_map_sg_entry(struct srp_map_state *state,
@@ -1415,47 +1432,79 @@ static int srp_map_sg_entry(struct srp_map_state *state,
 	return ret;
 }
 
-static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
-		      struct srp_request *req, struct scatterlist *scat,
-		      int count)
+static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
+			  struct srp_request *req, struct scatterlist *scat,
+			  int count)
 {
-	struct srp_target_port *target = ch->target;
-	struct srp_device *dev = target->srp_host->srp_dev;
 	struct scatterlist *sg;
 	int i, ret;
 
-	state->desc = req->indirect_desc;
-	state->pages = req->map_page;
-	if (dev->use_fast_reg) {
-		state->fr.next = req->fr_list;
-		state->fr.end = req->fr_list + target->cmd_sg_cnt;
-	} else if (dev->use_fmr) {
-		state->fmr.next = req->fmr_list;
-		state->fmr.end = req->fmr_list + target->cmd_sg_cnt;
+	state->desc = req->indirect_desc;
+	state->pages = req->map_page;
+	state->fmr.next = req->fmr_list;
+	state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
+
+	for_each_sg(scat, sg, count, i) {
+		ret = srp_map_sg_entry(state, ch, sg, i);
+		if (ret)
+			return ret;
 	}
 
-	if (dev->use_fast_reg || dev->use_fmr) {
-		for_each_sg(scat, sg, count, i) {
-			ret = srp_map_sg_entry(state, ch, sg, i);
-			if (ret)
-				goto out;
-		}
-		ret = srp_finish_mapping(state, ch);
+	ret = srp_finish_mapping(state, ch);
+	if (ret)
+		return ret;
+
+	req->nmdesc = state->nmdesc;
+
+	return 0;
+}
+
+static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
+			 struct srp_request *req, struct scatterlist *scat,
+			 int count)
+{
+	struct scatterlist *sg;
+	int i, ret;
+
+	state->desc = req->indirect_desc;
+	state->pages = req->map_page;
+	state->fr.next = req->fr_list;
+	state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
+
+	for_each_sg(scat, sg, count, i) {
+		ret = srp_map_sg_entry(state, ch, sg, i);
 		if (ret)
-			goto out;
-	} else {
-		for_each_sg(scat, sg, count, i) {
-			srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
-				     ib_sg_dma_len(dev->dev, sg),
-				     target->global_mr->rkey);
-		}
+			return ret;
+	}
+
+	ret = srp_finish_mapping(state, ch);
+	if (ret)
+		return ret;
+
+	req->nmdesc = state->nmdesc;
+
+	return 0;
+}
+
+static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
+			  struct srp_request *req, struct scatterlist *scat,
+			  int count)
+{
+	struct srp_target_port *target = ch->target;
+	struct srp_device *dev = target->srp_host->srp_dev;
+	struct scatterlist *sg;
+	int i;
+
+	state->desc = req->indirect_desc;
+	for_each_sg(scat, sg, count, i) {
+		srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
+			     ib_sg_dma_len(dev->dev, sg),
+			     target->global_mr->rkey);
 	}
 
 	req->nmdesc = state->nmdesc;
-	ret = 0;
-out:
-	return ret;
+
+	return 0;
 }
 
 /*
@@ -1563,7 +1612,12 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
 				   target->indirect_size, DMA_TO_DEVICE);
 
 	memset(&state, 0, sizeof(state));
-	srp_map_sg(&state, ch, req, scat, count);
+	if (dev->use_fast_reg)
+		srp_map_sg_fr(&state, ch, req, scat, count);
+	else if (dev->use_fmr)
+		srp_map_sg_fmr(&state, ch, req, scat, count);
+	else
+		srp_map_sg_dma(&state, ch, req, scat, count);
 
 	/* We've mapped the request, now pull as much of the indirect
 	 * descriptor table as we can into the command buffer. If this
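
A note on the error-handling shape both finish functions share after this
patch: the accumulated page state is now cleared only once registration has
succeeded, so a failed ib_fmr_pool_map_phys() or ib_post_send() leaves
state->npages/state->dma_len intact for the caller to unwind. A minimal
user-space sketch of that pattern follows (illustrative only; struct
map_state, register_pages() and finish_mapping() are stand-ins for the
driver's srp_map_state, the actual FMR map or fast-registration post, and
srp_map_finish_fmr()/srp_map_finish_fr()):

#include <stdio.h>

/* Stand-in for the page-accumulation fields of srp_map_state. */
struct map_state {
	unsigned int npages;	/* pages gathered for the next region */
	unsigned int dma_len;	/* bytes gathered for the next region */
};

/* Stand-in for ib_fmr_pool_map_phys() / ib_post_send(). */
static int register_pages(struct map_state *state)
{
	return state->npages ? 0 : -1;
}

static int finish_mapping(struct map_state *state)
{
	int err;

	if (state->npages == 0)		/* nothing accumulated: no-op */
		return 0;

	err = register_pages(state);
	if (err)
		return err;		/* keep npages/dma_len for unwind */

	/* Reset the accumulation state only after success. */
	state->npages = 0;
	state->dma_len = 0;
	return 0;
}

int main(void)
{
	struct map_state state = { .npages = 3, .dma_len = 12288 };
	return finish_mapping(&state);
}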