From patchwork Fri Mar 27 06:09:33 2009
X-Patchwork-Submitter: Mikulas Patocka
X-Patchwork-Id: 14681
X-Patchwork-Delegate: agk@redhat.com
From: Mikulas Patocka
Date: Fri, 27 Mar 2009 02:09:33 -0400 (EDT)
To: dm-devel@redhat.com
Cc: Alasdair G Kergon
Subject: [dm-devel] [PATCH 13/14] barriers

Rework the helper thread.

IO may be submitted to a worker thread with queue_io(). queue_io() sets
the DMF_QUEUE_IO_FOR_THREAD flag so that all further IO is queued for
the thread. When the thread finishes its work, it clears
DMF_QUEUE_IO_FOR_THREAD and, from that point on, requests are submitted
from dm_request() again.

Add a new flag, DMF_BLOCK_FOR_SUSPEND, that is set when IO needs to be
blocked because of an ongoing suspend (before this patch, DMF_BLOCK_IO
carried this meaning).

Signed-off-by: Mikulas Patocka

---
 drivers/md/dm.c |   92 +++++++++++++++++++++++++-------------------------
 1 file changed, 42 insertions(+), 50 deletions(-)

Index: linux-2.6.29-rc8-devel/drivers/md/dm.c
===================================================================
--- linux-2.6.29-rc8-devel.orig/drivers/md/dm.c	2009-03-27 05:00:56.000000000 +0100
+++ linux-2.6.29-rc8-devel/drivers/md/dm.c	2009-03-27 05:09:58.000000000 +0100
@@ -89,12 +89,13 @@ union map_info *dm_get_mapinfo(struct bi
 /*
  * Bits for the md->flags field.
  */
-#define DMF_BLOCK_IO 0
-#define DMF_SUSPENDED 1
-#define DMF_FROZEN 2
-#define DMF_FREEING 3
-#define DMF_DELETING 4
-#define DMF_NOFLUSH_SUSPENDING 5
+#define DMF_QUEUE_IO_FOR_THREAD 0
+#define DMF_BLOCK_FOR_SUSPEND 1
+#define DMF_SUSPENDED 2
+#define DMF_FROZEN 3
+#define DMF_FREEING 4
+#define DMF_DELETING 5
+#define DMF_NOFLUSH_SUSPENDING 6
 
 /*
  * Work processed by per-device workqueue.
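
The rest of the patch converts each DMF_BLOCK_IO test to whichever of the
two new bits it really meant. As a rough user-space illustration of the
hand-off between the two bits (a sketch only, not part of the patch:
struct md_sim, request(), worker() and the pthread locking below are
invented stand-ins for struct mapped_device, dm_request(), dm_wq_work()
and md->io_lock):

/* Build with: gcc -pthread sketch.c -o sketch */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct md_sim {
	pthread_mutex_t lock;		/* stands in for io_lock/deferred_lock */
	int deferred[16];		/* stands in for the md->deferred list */
	int head, tail;
	bool queue_io_for_thread;	/* DMF_QUEUE_IO_FOR_THREAD */
	bool block_for_suspend;		/* DMF_BLOCK_FOR_SUSPEND */
};

/* dm_request() analogue: submit directly unless the worker owns the queue. */
static void request(struct md_sim *md, int io)
{
	pthread_mutex_lock(&md->lock);
	if (md->queue_io_for_thread) {
		/* queue_io() analogue: defer; the worker will pick it up. */
		md->deferred[md->tail++ % 16] = io;
		pthread_mutex_unlock(&md->lock);
		return;
	}
	pthread_mutex_unlock(&md->lock);
	printf("submitted %d directly\n", io);	/* __split_and_process_bio() */
}

/* dm_wq_work() analogue: drain the queue, then hand submission back. */
static void *worker(void *arg)
{
	struct md_sim *md = arg;

	pthread_mutex_lock(&md->lock);
	while (!md->block_for_suspend) {
		if (md->head == md->tail) {
			/* Queue drained: request() submits directly again. */
			md->queue_io_for_thread = false;
			break;
		}
		int io = md->deferred[md->head++ % 16];

		/* Submit outside the lock, as dm_wq_work() drops io_lock. */
		pthread_mutex_unlock(&md->lock);
		printf("worker submitted %d\n", io);
		pthread_mutex_lock(&md->lock);
	}
	pthread_mutex_unlock(&md->lock);
	return NULL;
}

int main(void)
{
	struct md_sim md = { .lock = PTHREAD_MUTEX_INITIALIZER,
			     .queue_io_for_thread = true };
	pthread_t t;

	request(&md, 1);	/* deferred: the worker flag is set */
	pthread_create(&t, NULL, worker, &md);
	pthread_join(t, NULL);
	request(&md, 2);	/* worker finished: direct submission */
	return 0;
}

The property the sketch demonstrates is the one the patch relies on: only
the worker clears the queue-for-thread bit, and only after observing an
empty deferred list under the lock, so no bio can slip past both paths.
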
@@ -435,21 +436,15 @@ static void end_io_acct(struct dm_io *io
 /*
  * Add the bio to the list of deferred io.
  */
-static int queue_io(struct mapped_device *md, struct bio *bio)
+static void queue_io(struct mapped_device *md, struct bio *bio)
 {
 	down_write(&md->io_lock);
-
-	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
-		up_write(&md->io_lock);
-		return 1;
-	}
-
 	spin_lock_irq(&md->deferred_lock);
 	bio_list_add(&md->deferred, bio);
 	spin_unlock_irq(&md->deferred_lock);
-
+	if (!test_and_set_bit(DMF_QUEUE_IO_FOR_THREAD, &md->flags))
+		queue_work(md->wq, &md->work);
 	up_write(&md->io_lock);
-	return 0;	/* deferred successfully */
 }
 
 /*
@@ -908,7 +903,6 @@ out:
  */
 static int dm_request(struct request_queue *q, struct bio *bio)
 {
-	int r = -EIO;
 	int rw = bio_data_dir(bio);
 	struct mapped_device *md = q->queuedata;
 	int cpu;
@@ -930,34 +924,26 @@ static int dm_request(struct request_que
 	part_stat_unlock();
 
 	/*
-	 * If we're suspended we have to queue
-	 * this io for later.
+	 * If we're suspended or the thread is processing barriers
+	 * we have to queue this io for later.
 	 */
-	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
+	if (unlikely(test_bit(DMF_QUEUE_IO_FOR_THREAD, &md->flags))) {
 		up_read(&md->io_lock);
 
-		if (bio_rw(bio) != READA)
-			r = queue_io(md, bio);
+		if (unlikely(test_bit(DMF_BLOCK_FOR_SUSPEND, &md->flags)) &&
+		    bio_rw(bio) == READA) {
+			bio_io_error(bio);
+			return 0;
+		}
 
-		if (r <= 0)
-			goto out_req;
+		queue_io(md, bio);
 
-		/*
-		 * We're in a while loop, because someone could suspend
-		 * before we get to the following read lock.
-		 */
-		down_read(&md->io_lock);
+		return 0;
 	}
 
 	__split_and_process_bio(md, bio);
 	up_read(&md->io_lock);
 	return 0;
-
-out_req:
-	if (r < 0)
-		bio_io_error(bio);
-
-	return 0;
 }
 
 static void dm_unplug_all(struct request_queue *q)
@@ -977,7 +963,7 @@ static int dm_any_congested(void *conges
 	struct mapped_device *md = congested_data;
 	struct dm_table *map;
 
-	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
+	if (!test_bit(DMF_BLOCK_FOR_SUSPEND, &md->flags)) {
 		map = dm_get_table(md);
 		if (map) {
 			r = dm_table_any_congested(map, bdi_bits);
@@ -1413,29 +1399,32 @@ static int dm_wait_for_completion(struct
 static void dm_wq_work(struct work_struct *work)
 {
 	struct mapped_device *md = container_of(work, struct mapped_device,
 						work);
-	struct bio *c;
-
 	down_write(&md->io_lock);
-next_bio:
-	spin_lock_irq(&md->deferred_lock);
-	c = bio_list_pop(&md->deferred);
-	spin_unlock_irq(&md->deferred_lock);
+	while (!test_bit(DMF_BLOCK_FOR_SUSPEND, &md->flags)) {
+		struct bio *c;
 
-	if (c) {
-		__split_and_process_bio(md, c);
-		goto next_bio;
-	}
+		spin_lock_irq(&md->deferred_lock);
+		c = bio_list_pop(&md->deferred);
+		spin_unlock_irq(&md->deferred_lock);
 
-	clear_bit(DMF_BLOCK_IO, &md->flags);
+		up_write(&md->io_lock);
 
-	up_write(&md->io_lock);
+		if (!c) {
+			clear_bit(DMF_QUEUE_IO_FOR_THREAD, &md->flags);
+			break;
+		}
+
+		__split_and_process_bio(md, c);
+
+		down_write(&md->io_lock);
+	}
 }
 
 static void dm_queue_flush(struct mapped_device *md)
 {
+	clear_bit(DMF_BLOCK_FOR_SUSPEND, &md->flags);
 	queue_work(md->wq, &md->work);
-	flush_workqueue(md->wq);
 }
 
 /*
@@ -1541,16 +1530,19 @@ int dm_suspend(struct mapped_device *md,
 	}
 
 	/*
-	 * First we set the BLOCK_IO flag so no more ios will be mapped.
+	 * First we set the QUEUE_IO_FOR_THREAD flag so no more ios
+	 * will be mapped.
 	 */
 	down_write(&md->io_lock);
-	set_bit(DMF_BLOCK_IO, &md->flags);
+	set_bit(DMF_BLOCK_FOR_SUSPEND, &md->flags);
+	set_bit(DMF_QUEUE_IO_FOR_THREAD, &md->flags);
 	up_write(&md->io_lock);
 
 	/*
 	 * Wait for the already-mapped ios to complete.
 	 */
+	flush_workqueue(md->wq);
 	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
 
 	down_write(&md->io_lock);
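
Continuing the user-space sketch from above (again illustration only, not
the patch's code: suspend_sim() is an invented stand-in, and pthread_join()
plays the role of flush_workqueue() in the patch's dm_suspend() ordering):

/* dm_suspend() analogue; reuses struct md_sim from the earlier sketch. */
static void suspend_sim(struct md_sim *md, pthread_t worker_thread)
{
	pthread_mutex_lock(&md->lock);
	md->block_for_suspend = true;	/* DMF_BLOCK_FOR_SUSPEND */
	md->queue_io_for_thread = true;	/* DMF_QUEUE_IO_FOR_THREAD */
	pthread_mutex_unlock(&md->lock);

	/* flush_workqueue() analogue: wait until the worker has stopped,
	 * so nothing more reaches __split_and_process_bio(). */
	pthread_join(worker_thread, NULL);

	/* ...then wait for the already-mapped IO, which is the role
	 * dm_wait_for_completion() plays above. */
}

The ordering is the point: both bits are set first, so new IO is diverted
to the deferred list; the workqueue is flushed second, so the worker is no
longer submitting deferred IO; only then is the already-mapped IO waited
for. On resume, dm_queue_flush() undoes this by clearing
DMF_BLOCK_FOR_SUSPEND and re-queueing the work item so the deferred list
drains.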