From patchwork Sat Apr 16 18:55:27 2016
X-Patchwork-Submitter: Christoph Hellwig
X-Patchwork-Id: 8861791
From: Christoph Hellwig
To: axboe@fb.com
Cc: linux-block@vger.kernel.org, Ming Lin, Sagi Grimberg
Subject: [PATCH 1/2] block: remove struct bio_batch
Date: Sat, 16 Apr 2016 14:55:27 -0400
Message-Id: <1460832928-15514-2-git-send-email-hch@lst.de>
In-Reply-To: <1460832928-15514-1-git-send-email-hch@lst.de>
References: <1460832928-15514-1-git-send-email-hch@lst.de>

It can be replaced with a combination of bio_chain and submit_bio_wait.
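The caller-side shape of the new next_bio() helper, sketched below for
illustration only (this is not part of the diff; the placeholder comments
stand in for the per-call setup each function does):

        struct bio *bio = NULL;
        int ret = 0;

        while (nr_sects) {
                /* Allocate a new bio; chain and submit the previous one. */
                bio = next_bio(bio, type, 1, gfp_mask);
                /* ... set bi_sector, bi_bdev, bi_size for this chunk ... */
        }
        if (bio)
                /*
                 * Wait on the last bio only: bio_chain() makes each
                 * submitted bio's completion (and error) propagate to
                 * its successor.
                 */
                ret = submit_bio_wait(type, bio);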
Signed-off-by: Christoph Hellwig
Signed-off-by: Ming Lin
Signed-off-by: Sagi Grimberg
---
 block/blk-lib.c | 118 +++++++++++++------------------------------------------
 1 file changed, 27 insertions(+), 91 deletions(-)

diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9ebf653..700d248 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -9,21 +9,17 @@
 
 #include "blk.h"
 
-struct bio_batch {
-        atomic_t done;
-        int error;
-        struct completion *wait;
-};
-
-static void bio_batch_end_io(struct bio *bio)
+static struct bio *next_bio(struct bio *bio, int rw, unsigned int nr_pages,
+                gfp_t gfp)
 {
-        struct bio_batch *bb = bio->bi_private;
+        struct bio *new = bio_alloc(gfp, nr_pages);
+
+        if (bio) {
+                bio_chain(bio, new);
+                submit_bio(rw, bio);
+        }
 
-        if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
-                bb->error = bio->bi_error;
-        if (atomic_dec_and_test(&bb->done))
-                complete(bb->wait);
-        bio_put(bio);
+        return new;
 }
 
 /**
@@ -40,13 +36,11 @@ static void bio_batch_end_io(struct bio *bio)
 int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
 {
-        DECLARE_COMPLETION_ONSTACK(wait);
         struct request_queue *q = bdev_get_queue(bdev);
         int type = REQ_WRITE | REQ_DISCARD;
         unsigned int granularity;
         int alignment;
-        struct bio_batch bb;
-        struct bio *bio;
+        struct bio *bio = NULL;
         int ret = 0;
         struct blk_plug plug;
 
@@ -66,25 +60,15 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                 type |= REQ_SECURE;
         }
 
-        atomic_set(&bb.done, 1);
-        bb.error = 0;
-        bb.wait = &wait;
-
         blk_start_plug(&plug);
         while (nr_sects) {
                 unsigned int req_sects;
                 sector_t end_sect, tmp;
 
-                bio = bio_alloc(gfp_mask, 1);
-                if (!bio) {
-                        ret = -ENOMEM;
-                        break;
-                }
-
                 /* Make sure bi_size doesn't overflow */
                 req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
 
-                /*
+                /**
                  * If splitting a request, and the next starting sector would be
                  * misaligned, stop the discard at the previous aligned sector.
                  */
@@ -98,18 +82,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                         req_sects = end_sect - sector;
                 }
 
+                bio = next_bio(bio, type, 1, gfp_mask);
                 bio->bi_iter.bi_sector = sector;
-                bio->bi_end_io = bio_batch_end_io;
                 bio->bi_bdev = bdev;
-                bio->bi_private = &bb;
 
                 bio->bi_iter.bi_size = req_sects << 9;
                 nr_sects -= req_sects;
                 sector = end_sect;
 
-                atomic_inc(&bb.done);
-                submit_bio(type, bio);
-
                 /*
                  * We can loop for a long time in here, if someone does
                  * full device discards (like mkfs). Be nice and allow
@@ -118,15 +98,11 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                  */
                 cond_resched();
         }
+        if (bio)
+                ret = submit_bio_wait(type, bio);
         blk_finish_plug(&plug);
 
-        /* Wait for bios in-flight */
-        if (!atomic_dec_and_test(&bb.done))
-                wait_for_completion_io(&wait);
-
-        if (bb.error)
-                return bb.error;
-        return ret;
+        return ret != -EOPNOTSUPP ? ret : 0;
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
 
@@ -145,11 +121,9 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                             sector_t nr_sects, gfp_t gfp_mask,
                             struct page *page)
 {
-        DECLARE_COMPLETION_ONSTACK(wait);
         struct request_queue *q = bdev_get_queue(bdev);
         unsigned int max_write_same_sectors;
-        struct bio_batch bb;
-        struct bio *bio;
+        struct bio *bio = NULL;
         int ret = 0;
 
         if (!q)
@@ -158,21 +132,10 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
         /* Ensure that max_write_same_sectors doesn't overflow bi_size */
         max_write_same_sectors = UINT_MAX >> 9;
 
-        atomic_set(&bb.done, 1);
-        bb.error = 0;
-        bb.wait = &wait;
-
         while (nr_sects) {
-                bio = bio_alloc(gfp_mask, 1);
-                if (!bio) {
-                        ret = -ENOMEM;
-                        break;
-                }
-
+                bio = next_bio(bio, REQ_WRITE | REQ_WRITE_SAME, 1, gfp_mask);
                 bio->bi_iter.bi_sector = sector;
-                bio->bi_end_io = bio_batch_end_io;
                 bio->bi_bdev = bdev;
-                bio->bi_private = &bb;
                 bio->bi_vcnt = 1;
                 bio->bi_io_vec->bv_page = page;
                 bio->bi_io_vec->bv_offset = 0;
@@ -186,18 +149,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                         bio->bi_iter.bi_size = nr_sects << 9;
                         nr_sects = 0;
                 }
-
-                atomic_inc(&bb.done);
-                submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
         }
 
-        /* Wait for bios in-flight */
-        if (!atomic_dec_and_test(&bb.done))
-                wait_for_completion_io(&wait);
-
-        if (bb.error)
-                return bb.error;
-        return ret;
+        if (bio)
+                ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+        return ret != -EOPNOTSUPP ? ret : 0;
 }
 EXPORT_SYMBOL(blkdev_issue_write_same);
 
@@ -216,28 +172,15 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                         sector_t nr_sects, gfp_t gfp_mask)
 {
         int ret;
-        struct bio *bio;
-        struct bio_batch bb;
+        struct bio *bio = NULL;
         unsigned int sz;
-        DECLARE_COMPLETION_ONSTACK(wait);
 
-        atomic_set(&bb.done, 1);
-        bb.error = 0;
-        bb.wait = &wait;
-
-        ret = 0;
         while (nr_sects != 0) {
-                bio = bio_alloc(gfp_mask,
-                                min(nr_sects, (sector_t)BIO_MAX_PAGES));
-                if (!bio) {
-                        ret = -ENOMEM;
-                        break;
-                }
-
+                bio = next_bio(bio, WRITE,
+                                min(nr_sects, (sector_t)BIO_MAX_PAGES),
+                                gfp_mask);
                 bio->bi_iter.bi_sector = sector;
                 bio->bi_bdev = bdev;
-                bio->bi_end_io = bio_batch_end_io;
-                bio->bi_private = &bb;
 
                 while (nr_sects != 0) {
                         sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
@@ -247,18 +190,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                         if (ret < (sz << 9))
                                 break;
                 }
-                ret = 0;
-                atomic_inc(&bb.done);
-                submit_bio(WRITE, bio);
         }
 
-        /* Wait for bios in-flight */
-        if (!atomic_dec_and_test(&bb.done))
-                wait_for_completion_io(&wait);
-
-        if (bb.error)
-                return bb.error;
-        return ret;
+        if (bio)
+                return submit_bio_wait(WRITE, bio);
+        return 0;
 }
 
 /**
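For context, a minimal caller-side sketch of the discard path touched above
(hypothetical values; 'bdev' is assumed to be an already-opened struct
block_device):

        /* Discard the first 1 MiB of the device (2048 512-byte sectors). */
        int err = blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL, 0);
        if (err)
                pr_err("discard failed: %d\n", err);

Note that the return-value semantics of the old code are preserved: an
-EOPNOTSUPP completion from a chained bio is still reported as success,
just as bio_batch_end_io ignored -EOPNOTSUPP when recording bb.error.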