===================================================================
@@ -162,6 +162,7 @@ struct mapped_device {
 
 	/* A pointer to the currently processing pre/post flush request */
 	struct request *flush_request;
+	atomic_t flush_pending;
 
 	/*
 	 * The current mapping.
@@ -777,10 +778,16 @@ static void store_barrier_error(struct m
  * the md may be freed in dm_put() at the end of this function.
  * Or do dm_get() before calling this function and dm_put() later.
  */
-static void rq_completed(struct mapped_device *md, int rw, int run_queue)
+static void rq_completed(struct mapped_device *md, int rw, int run_queue, bool is_flush)
 {
 	atomic_dec(&md->pending[rw]);
 
+	if (is_flush) {
+		atomic_dec(&md->flush_pending);
+		if (!atomic_read(&md->flush_pending))
+			wake_up(&md->wait);
+	}
+
 	/* nudge anyone waiting on suspend queue */
 	if (!md_in_flight(md))
 		wake_up(&md->wait);
@@ -837,7 +844,7 @@ static void dm_end_request(struct reques
 	} else
 		blk_end_request_all(rq, error);
 
-	rq_completed(md, rw, run_queue);
+	rq_completed(md, rw, run_queue, is_barrier);
 }
 
 static void dm_unprep_request(struct request *rq)
@@ -880,7 +887,7 @@ void dm_requeue_unmapped_request(struct
 	blk_requeue_request(q, rq);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	rq_completed(md, rw, 0);
+	rq_completed(md, rw, 0, false);
 }
 EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
 
@@ -1993,6 +2000,7 @@ static struct mapped_device *alloc_dev(i
 
 	atomic_set(&md->pending[0], 0);
 	atomic_set(&md->pending[1], 0);
+	atomic_set(&md->flush_pending, 0);
 	init_waitqueue_head(&md->wait);
 	INIT_WORK(&md->work, dm_wq_work);
 	INIT_WORK(&md->barrier_work, dm_rq_barrier_work);
@@ -2375,7 +2383,7 @@ void dm_put(struct mapped_device *md)
 }
 EXPORT_SYMBOL_GPL(dm_put);
 
-static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
+static int dm_wait_for_completion(struct mapped_device *md, int interruptible, bool for_flush)
 {
 	int r = 0;
 	DECLARE_WAITQUEUE(wait, current);
@@ -2388,6 +2396,8 @@ static int dm_wait_for_completion(struct
 		set_current_state(interruptible);
 
 		smp_mb();
+		if (for_flush && !atomic_read(&md->flush_pending))
+			break;
 		if (!md_in_flight(md))
 			break;
 
@@ -2408,14 +2418,14 @@ static int dm_wait_for_completion(struct
 
 static void dm_flush(struct mapped_device *md)
 {
-	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
+	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE, false);
 
 	bio_init(&md->barrier_bio);
 	md->barrier_bio.bi_bdev = md->bdev;
 	md->barrier_bio.bi_rw = WRITE_BARRIER;
 	__split_and_process_bio(md, &md->barrier_bio);
 
-	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
+	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE, false);
 }
 
 static void process_barrier(struct mapped_device *md, struct bio *bio)
@@ -2512,11 +2522,12 @@ static int dm_rq_barrier(struct mapped_d
 			clone = clone_rq(md->flush_request, md, GFP_NOIO);
 			dm_rq_set_target_request_nr(clone, j);
 			atomic_inc(&md->pending[rq_data_dir(clone)]);
+			atomic_inc(&md->flush_pending);
 			map_request(ti, clone, md);
 		}
 	}
 
-	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
+	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE, true);
 	dm_table_put(map);
 
 	return md->barrier_error;
@@ -2705,7 +2716,7 @@ int dm_suspend(struct mapped_device *md,
 	 * We call dm_wait_for_completion to wait for all existing requests
 	 * to finish.
 	 */
-	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
+	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE, false);
 
 	down_write(&md->io_lock);
 	if (noflush)
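
For reference, below is a minimal userspace sketch of the accounting pattern the patch introduces: the barrier path bumps a pending counter once per dispatched flush clone, each completion decrements it, and the waiter is released only when the counter reaches zero. This is an illustrative pthread model, not kernel code; the helper names dispatch_flush_clones() and complete_flush_clone() are invented for the example, and a mutex/condvar pair stands in for the atomic_t counter plus the md->wait waitqueue.

/*
 * Illustrative userspace model of the flush_pending accounting above;
 * not kernel code. A mutex/condvar replaces atomic_t + wake_up()/md->wait.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock    = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_cv = PTHREAD_COND_INITIALIZER;  /* stands in for md->wait */
static int flush_pending;                                   /* stands in for md->flush_pending */

/* Completion side: roughly what rq_completed(..., true) does for a flush clone. */
static void *complete_flush_clone(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	if (--flush_pending == 0)
		pthread_cond_broadcast(&wait_cv);           /* wake_up(&md->wait) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Submission side: roughly what dm_rq_barrier() does before it waits. */
static void dispatch_flush_clones(pthread_t *tids, int nr)
{
	pthread_mutex_lock(&lock);
	flush_pending += nr;                                /* one atomic_inc() per clone */
	pthread_mutex_unlock(&lock);

	for (int i = 0; i < nr; i++)
		pthread_create(&tids[i], NULL, complete_flush_clone, NULL);
}

int main(void)
{
	pthread_t tids[4];

	dispatch_flush_clones(tids, 4);

	/* dm_wait_for_completion(md, ..., true): wait only for the flush clones. */
	pthread_mutex_lock(&lock);
	while (flush_pending)
		pthread_cond_wait(&wait_cv, &lock);
	pthread_mutex_unlock(&lock);

	printf("all flush clones completed\n");

	for (int i = 0; i < 4; i++)
		pthread_join(tids[i], NULL);
	return 0;
}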