From patchwork Mon Oct 24 08:37:22 2016
From: Adrian Hunter <adrian.hunter@intel.com>
To: Ulf Hansson
Cc: linux-mmc, Alex Lemberg, Mateusz Nowak, Yuliy Izrailov, Jaehoon Chung,
	Dong Aisheng, Das Asutosh, Zhangfei Gao, Dorfman Konstantin,
	David Griego, Sahitya Tummala, Harjani Ritesh, Venu Byravarasu
Subject: [PATCH V5 07/25] mmc: queue: Use queue depth to allocate and free
Date: Mon, 24 Oct 2016 11:37:22 +0300
Message-Id: <1477298260-5064-8-git-send-email-adrian.hunter@intel.com>
In-Reply-To: <1477298260-5064-1-git-send-email-adrian.hunter@intel.com>
References: <1477298260-5064-1-git-send-email-adrian.hunter@intel.com>

Instead of allocating resources for 2 slots in the queue, allow for an
arbitrary number.
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
---
 drivers/mmc/card/queue.c | 103 +++++++++++++++++++++--------------------------
 1 file changed, 46 insertions(+), 57 deletions(-)

diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 544b38f3fd9f..000274523446 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -189,86 +189,75 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
 					unsigned int bouncesz)
 {
-	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
-	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
-
-	mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-	if (!mqrq_cur->bounce_buf) {
-		pr_warn("%s: unable to allocate bounce cur buffer\n",
-			mmc_card_name(mq->card));
-		return false;
-	}
+	int i;
 
-	mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-	if (!mqrq_prev->bounce_buf) {
-		pr_warn("%s: unable to allocate bounce prev buffer\n",
-			mmc_card_name(mq->card));
-		kfree(mqrq_cur->bounce_buf);
-		mqrq_cur->bounce_buf = NULL;
-		return false;
+	for (i = 0; i < mq->qdepth; i++) {
+		mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+		if (!mq->mqrq[i].bounce_buf)
+			goto out_err;
 	}
 
 	return true;
+
+out_err:
+	while (--i >= 0) {
+		kfree(mq->mqrq[i].bounce_buf);
+		mq->mqrq[i].bounce_buf = NULL;
+	}
+	pr_warn("%s: unable to allocate bounce buffers\n",
+		mmc_card_name(mq->card));
+	return false;
 }
 
 static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
 				      unsigned int bouncesz)
 {
-	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
-	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
-	int ret;
-
-	mqrq_cur->sg = mmc_alloc_sg(1, &ret);
-	if (ret)
-		return ret;
+	int i, ret;
 
-	mqrq_cur->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
-	if (ret)
-		return ret;
-
-	mqrq_prev->sg = mmc_alloc_sg(1, &ret);
-	if (ret)
-		return ret;
+	for (i = 0; i < mq->qdepth; i++) {
+		mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
+		if (ret)
+			return ret;
 
-	mqrq_prev->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
+		mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
+		if (ret)
+			return ret;
+	}
 
-	return ret;
+	return 0;
 }
 
 static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
 {
-	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
-	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
-	int ret;
+	int i, ret;
 
-	mqrq_cur->sg = mmc_alloc_sg(max_segs, &ret);
-	if (ret)
-		return ret;
+	for (i = 0; i < mq->qdepth; i++) {
+		mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
+		if (ret)
+			return ret;
+	}
 
-	mqrq_prev->sg = mmc_alloc_sg(max_segs, &ret);
+	return 0;
+}
 
-	return ret;
+static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
+{
+	kfree(mqrq->bounce_sg);
+	mqrq->bounce_sg = NULL;
+
+	kfree(mqrq->sg);
+	mqrq->sg = NULL;
+
+	kfree(mqrq->bounce_buf);
+	mqrq->bounce_buf = NULL;
 }
 
 static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
 {
-	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
-	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
-
-	kfree(mqrq_cur->bounce_sg);
-	mqrq_cur->bounce_sg = NULL;
-	kfree(mqrq_prev->bounce_sg);
-	mqrq_prev->bounce_sg = NULL;
-
-	kfree(mqrq_cur->sg);
-	mqrq_cur->sg = NULL;
-	kfree(mqrq_cur->bounce_buf);
-	mqrq_cur->bounce_buf = NULL;
-
-	kfree(mqrq_prev->sg);
-	mqrq_prev->sg = NULL;
-	kfree(mqrq_prev->bounce_buf);
-	mqrq_prev->bounce_buf = NULL;
+	int i;
+
+	for (i = 0; i < mq->qdepth; i++)
+		mmc_queue_req_free_bufs(&mq->mqrq[i]);
 }
 
 /**
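
A note for readers following along outside the kernel tree: the error path
in mmc_queue_alloc_bounce_bufs() above is the usual "allocate forward,
unwind backward" idiom, and it is what lets the queue depth be arbitrary
rather than hard-coded to 2. Below is a minimal standalone sketch of that
idiom, with malloc()/free() standing in for kmalloc()/kfree() and a
hypothetical struct queue_req in place of struct mmc_queue_req; it is an
illustration of the pattern, not kernel code:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct mmc_queue_req: one bounce buffer
 * per queue slot. */
struct queue_req {
	void *bounce_buf;
};

/*
 * Allocate one bounce buffer per slot, 0..qdepth-1.  On failure at
 * slot i, free slots 0..i-1 in reverse and report failure, so the
 * caller never sees a partially allocated array.
 */
static bool alloc_bounce_bufs(struct queue_req *mqrq, int qdepth,
			      size_t bouncesz)
{
	int i;

	for (i = 0; i < qdepth; i++) {
		mqrq[i].bounce_buf = malloc(bouncesz);
		if (!mqrq[i].bounce_buf)
			goto out_err;
	}
	return true;

out_err:
	/* i is the slot that failed; walk back over 0..i-1 */
	while (--i >= 0) {
		free(mqrq[i].bounce_buf);
		mqrq[i].bounce_buf = NULL;
	}
	fprintf(stderr, "unable to allocate bounce buffers\n");
	return false;
}

int main(void)
{
	struct queue_req mqrq[4] = { { NULL } };	/* qdepth of 4, not 2 */
	int i;

	if (alloc_bounce_bufs(mqrq, 4, 64 * 1024)) {
		for (i = 0; i < 4; i++)
			free(mqrq[i].bounce_buf);
	}
	return 0;
}

The "while (--i >= 0)" walk matters because a failure at slot i leaves
slots 0..i-1 allocated; without the unwind, those buffers would leak
every time a mid-loop allocation failed.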