===================================================================
@@ -819,6 +819,11 @@ int dm_table_get_type(struct dm_table *t
DM_TYPE_BIO_BASED : DM_TYPE_REQUEST_BASED;
}
+int dm_table_bio_based(struct dm_table *t)
+{
+ return dm_table_get_type(t) == DM_TYPE_BIO_BASED;
+}
+
int dm_table_request_based(struct dm_table *t)
{
return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
===================================================================
@@ -1779,7 +1779,6 @@ static struct mapped_device *alloc_dev(i
md->queue->backing_dev_info.congested_fn = dm_any_congested;
md->queue->backing_dev_info.congested_data = md;
blk_queue_make_request(md->queue, dm_request);
- blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
md->queue->unplug_fn = dm_unplug_all;
blk_queue_merge_bvec(md->queue, dm_merge_bvec);
@@ -2206,6 +2205,16 @@ int dm_swap_table(struct mapped_device *
goto out;
}
+ /*
+ * It is enough that blk_queue_ordered() is called only once when
+ * the first bio-based table is bound.
+ *
+ * This setting should be moved to alloc_dev() when request-based dm
+ * supports barriers.
+ */
+ if (!md->map && dm_table_bio_based(table))
+ blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
+
__unbind(md);
r = __bind(md, table);
===================================================================
@@ -58,6 +58,7 @@ int dm_table_any_congested(struct dm_tab
int dm_table_any_busy_target(struct dm_table *t);
int dm_table_set_type(struct dm_table *t);
int dm_table_get_type(struct dm_table *t);
+int dm_table_bio_based(struct dm_table *t);
int dm_table_request_based(struct dm_table *t);
int dm_table_alloc_md_mempools(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
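
For readers tracing the control flow, below is a minimal user-space sketch of the pattern the patch introduces: a thin type predicate (dm_table_bio_based()) plus one-time lazy setup on the first table bind. The struct layouts and the set_queue_ordered() stand-in are simplified assumptions for illustration only, not the kernel's definitions; the real logic is in the dm.c hunks above.

/*
 * Illustrative stand-alone model (not kernel code) of the
 * "predicate helper + one-time lazy setup" pattern from the patch.
 */
#include <stdio.h>

enum dm_type { DM_TYPE_NONE, DM_TYPE_BIO_BASED, DM_TYPE_REQUEST_BASED };

struct dm_table { enum dm_type type; };

struct mapped_device {
	struct dm_table *map;	/* currently bound table, NULL before first bind */
	int ordered_set;	/* models blk_queue_ordered() having been called */
};

static enum dm_type dm_table_get_type(struct dm_table *t)
{
	return t->type;
}

/* mirrors the new helper added alongside dm_table_request_based() */
static int dm_table_bio_based(struct dm_table *t)
{
	return dm_table_get_type(t) == DM_TYPE_BIO_BASED;
}

/* stand-in for blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL) */
static void set_queue_ordered(struct mapped_device *md)
{
	md->ordered_set = 1;
}

static void swap_table(struct mapped_device *md, struct dm_table *table)
{
	/*
	 * Set up ordered mode only once, when the first bio-based table
	 * is bound: md->map is still NULL at that point, so later table
	 * swaps on the same device skip the call.
	 */
	if (!md->map && dm_table_bio_based(table))
		set_queue_ordered(md);

	md->map = table;	/* models __bind() */
}

int main(void)
{
	struct mapped_device md = { 0 };
	struct dm_table bio_table = { DM_TYPE_BIO_BASED };

	swap_table(&md, &bio_table);
	printf("ordered set after first bind: %d\n", md.ordered_set);
	return 0;
}

Keying the setup off !md->map works because the live table pointer is only assigned after the first successful bind, so repeated table swaps never re-run the setup; and, as the comment in the patch notes, the call can move back to alloc_dev() once request-based dm gains barrier support.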