From patchwork Mon Mar 13 15:48:31 2017
X-Patchwork-Submitter: Christoph Hellwig <hch@lst.de>
X-Patchwork-Id: 9621361
From: Christoph Hellwig <hch@lst.de>
To: axboe@kernel.dk
Cc: linux-block@vger.kernel.org
Subject: [PATCH 2/4] blk-mq: merge mq and sq make_request instances
Date: Mon, 13 Mar 2017 09:48:31 -0600
Message-Id: <20170313154833.14165-3-hch@lst.de>
X-Mailer: git-send-email 2.11.0
In-Reply-To: <20170313154833.14165-1-hch@lst.de>
References: <20170313154833.14165-1-hch@lst.de>

They are mostly the same code anyway - there is just one small
conditional for the plug case that differs between the two variants.
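The one conditional that does differ now keys off q->nr_hw_queues at
runtime instead of selecting between two make_request_fn instances at
queue-init time. As a review aid, the surviving branch selection can be
modeled stand-alone roughly as follows (a userspace toy sketch, not
kernel code; pick_path() and its arguments are invented for
illustration, and only the two conditions mirror the patch):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of the branch selection in the merged blk_mq_make_request(). */
    static const char *pick_path(bool has_plug, int nr_hw_queues,
                                 bool is_sync, bool nomerges)
    {
        if (has_plug && nr_hw_queues == 1)
            return "plug list (old blk_sq_make_request behavior)";
        if ((has_plug && !nomerges) || is_sync)
            return "plug merge/direct issue (old blk_mq_make_request behavior)";
        return "insert into the sw queue and run the hw queue";
    }

    int main(void)
    {
        /* single hw queue with a task plug: defer onto plug->mq_list */
        printf("%s\n", pick_path(true, 1, false, false));
        /* multiple hw queues, plugged, merging allowed */
        printf("%s\n", pick_path(true, 4, false, false));
        /* unplugged async I/O falls through to queue insertion */
        printf("%s\n", pick_path(false, 4, false, false));
        return 0;
    }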
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c | 164 +++++++++++----------------------------------------
 1 file changed, 31 insertions(+), 133 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index acf0ddf4af52..53e49a3f6f0a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1478,11 +1478,6 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
 	blk_mq_sched_insert_request(rq, false, true, true, false);
 }
 
-/*
- * Multiple hardware queue variant. This will not use per-process plugs,
- * but will attempt to bypass the hctx queueing if we can go straight to
- * hardware for SYNC IO.
- */
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = op_is_sync(bio->bi_opf);
@@ -1534,7 +1529,36 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	}
 
 	plug = current->plug;
-	if (((plug && !blk_queue_nomerges(q)) || is_sync)) {
+	if (plug && q->nr_hw_queues == 1) {
+		struct request *last = NULL;
+
+		blk_mq_bio_to_request(rq, bio);
+
+		/*
+		 * @request_count may become stale because of schedule
+		 * out, so check the list again.
+		 */
+		if (list_empty(&plug->mq_list))
+			request_count = 0;
+		else if (blk_queue_nomerges(q))
+			request_count = blk_plug_queued_count(q);
+
+		if (!request_count)
+			trace_block_plug(q);
+		else
+			last = list_entry_rq(plug->mq_list.prev);
+
+		blk_mq_put_ctx(data.ctx);
+
+		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
+		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
+			blk_flush_plug_list(plug, false);
+			trace_block_plug(q);
+		}
+
+		list_add_tail(&rq->queuelist, &plug->mq_list);
+		goto done;
+	} else if (((plug && !blk_queue_nomerges(q)) || is_sync)) {
 		struct request *old_rq = NULL;
 
 		blk_mq_bio_to_request(rq, bio);
@@ -1596,119 +1620,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	return cookie;
 }
 
-/*
- * Single hardware queue variant. This will attempt to use any per-process
- * plug for merging and IO deferral.
- */
-static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
-{
-	const int is_sync = op_is_sync(bio->bi_opf);
-	const int is_flush_fua = op_is_flush(bio->bi_opf);
-	struct blk_plug *plug;
-	unsigned int request_count = 0;
-	struct blk_mq_alloc_data data = { .flags = 0 };
-	struct request *rq;
-	blk_qc_t cookie;
-	unsigned int wb_acct;
-
-	blk_queue_bounce(q, &bio);
-
-	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-		bio_io_error(bio);
-		return BLK_QC_T_NONE;
-	}
-
-	blk_queue_split(q, &bio, q->bio_split);
-
-	if (!is_flush_fua && !blk_queue_nomerges(q)) {
-		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
-			return BLK_QC_T_NONE;
-	} else
-		request_count = blk_plug_queued_count(q);
-
-	if (blk_mq_sched_bio_merge(q, bio))
-		return BLK_QC_T_NONE;
-
-	wb_acct = wbt_wait(q->rq_wb, bio, NULL);
-
-	trace_block_getrq(q, bio, bio->bi_opf);
-
-	rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
-	if (unlikely(!rq)) {
-		__wbt_done(q->rq_wb, wb_acct);
-		return BLK_QC_T_NONE;
-	}
-
-	wbt_track(&rq->issue_stat, wb_acct);
-
-	cookie = request_to_qc_t(data.hctx, rq);
-
-	if (unlikely(is_flush_fua)) {
-		if (q->elevator)
-			goto elv_insert;
-		blk_mq_bio_to_request(rq, bio);
-		blk_insert_flush(rq);
-		goto run_queue;
-	}
-
-	/*
-	 * A task plug currently exists. Since this is completely lockless,
-	 * utilize that to temporarily store requests until the task is
-	 * either done or scheduled away.
-	 */
-	plug = current->plug;
-	if (plug) {
-		struct request *last = NULL;
-
-		blk_mq_bio_to_request(rq, bio);
-
-		/*
-		 * @request_count may become stale because of schedule
-		 * out, so check the list again.
-		 */
-		if (list_empty(&plug->mq_list))
-			request_count = 0;
-		if (!request_count)
-			trace_block_plug(q);
-		else
-			last = list_entry_rq(plug->mq_list.prev);
-
-		blk_mq_put_ctx(data.ctx);
-
-		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
-		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
-			blk_flush_plug_list(plug, false);
-			trace_block_plug(q);
-		}
-
-		list_add_tail(&rq->queuelist, &plug->mq_list);
-		return cookie;
-	}
-
-	if (q->elevator) {
-elv_insert:
-		blk_mq_put_ctx(data.ctx);
-		blk_mq_bio_to_request(rq, bio);
-		blk_mq_sched_insert_request(rq, false, true,
-				!is_sync || is_flush_fua, true);
-		goto done;
-	}
-	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
-		/*
-		 * For a SYNC request, send it to the hardware immediately. For
-		 * an ASYNC request, just ensure that we run it later on. The
-		 * latter allows for merging opportunities and more efficient
-		 * dispatching.
-		 */
-run_queue:
-		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
-	}
-
-	blk_mq_put_ctx(data.ctx);
-done:
-	return cookie;
-}
-
 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 		     unsigned int hctx_idx)
 {
@@ -2366,10 +2277,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	INIT_LIST_HEAD(&q->requeue_list);
 	spin_lock_init(&q->requeue_lock);
 
-	if (q->nr_hw_queues > 1)
-		blk_queue_make_request(q, blk_mq_make_request);
-	else
-		blk_queue_make_request(q, blk_sq_make_request);
+	blk_queue_make_request(q, blk_mq_make_request);
 
 	/*
 	 * Do this after blk_queue_make_request() overrides it...
@@ -2717,16 +2625,6 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 	set->nr_hw_queues = nr_hw_queues;
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_realloc_hw_ctxs(set, q);
-
-		/*
-		 * Manually set the make_request_fn as blk_queue_make_request
-		 * resets a lot of the queue settings.
-		 */
-		if (q->nr_hw_queues > 1)
-			q->make_request_fn = blk_mq_make_request;
-		else
-			q->make_request_fn = blk_sq_make_request;
-
 		blk_mq_queue_reinit(q, cpu_online_mask);
 	}
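A useful side effect of registering a single make_request instance: the
function pointer no longer depends on the hardware queue count, so
blk_mq_update_nr_hw_queues() can stop re-assigning q->make_request_fn by
hand. Per the comment deleted in the last hunk, that manual fixup only
existed because calling blk_queue_make_request() again would reset a lot
of the queue settings.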