@@ -152,6 +152,7 @@ static struct target_type linear_target = {
.ioctl = linear_ioctl,
.merge = linear_merge,
.iterate_devices = linear_iterate_devices,
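+ /* linear remaps bios 1:1, so discards pass straight through to the device */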
+ .features = DM_TARGET_SUPPORTS_DISCARDS,
};
int __init dm_linear_init(void)
@@ -54,6 +54,8 @@ struct dm_table {
sector_t *highs;
struct dm_target *targets;
+ unsigned discards_supported:1;
+
/*
* Indicates the rw permissions for the new logical
* device. This should be a combination of FMODE_READ
@@ -203,6 +205,7 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
INIT_LIST_HEAD(&t->devices);
atomic_set(&t->holders, 0);
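+ /* assume discard support until a target that lacks it is added */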
+ t->discards_supported = 1;
if (!num_targets)
num_targets = KEYS_PER_NODE;
@@ -770,6 +773,9 @@ int dm_table_add_target(struct dm_table *t, const char *type,
t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
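+ /* one target without discard support disables discards for the whole table */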
+ if (!(tgt->type->features & DM_TARGET_SUPPORTS_DISCARDS))
+ t->discards_supported = 0;
+
return 0;
bad:
@@ -905,6 +911,12 @@ int dm_table_complete(struct dm_table *t)
int r = 0;
unsigned int leaf_nodes;
+ /*
+ * We only support discards if there is exactly one underlying device.
+ */
+ if (!list_is_singular(&t->devices))
+ t->discards_supported = 0;
+
/* how many indexes will the btree have ? */
leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
@@ -1086,6 +1098,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
else
queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
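+ /* export discard support on the mapped device's request queue */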
+ if (dm_table_supports_discards(t))
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+
dm_table_set_integrity(t);
/*
@@ -1232,6 +1247,42 @@ struct mapped_device *dm_table_get_md(struct dm_table *t)
return t->md;
}
+static int device_discard_incapable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct block_device *bdev = dev->bdev;
+ struct request_queue *q = bdev_get_queue(bdev);
+
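+ /* a missing queue is unexpected; warn and report the device as discard-incapable */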
+ WARN_ON(!q);
+ return !q || !blk_queue_discard(q);
+}
+
+bool dm_table_supports_discards(struct dm_table *t)
+{
+ struct dm_target *ti;
+ unsigned i = 0;
+
+ if (!t->discards_supported)
+ return false;
+
+ /*
+ * All of the table's targets support discards, but do the
+ * underlying devices?
+ */
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+
+ if (!ti->type->iterate_devices)
+ return false; /* assume discard-incapable */
+
+ if (ti->type->iterate_devices(ti, device_discard_incapable,
+ NULL))
+ return false;
+ }
+
+ return true;
+}
+
EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
@@ -1198,6 +1198,48 @@ static int __clone_and_map_empty_barrier(struct clone_info *ci)
return 0;
}
+/*
+ * Perform all io with a single clone.
+ */
+static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
+{
+ struct bio *clone, *bio = ci->bio;
+ struct dm_target_io *tio;
+
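+ /* a single clone covers everything from ci->idx to the end of the bio */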
+ tio = alloc_tio(ci, ti);
+ clone = clone_bio(bio, ci->sector, ci->idx,
+ bio->bi_vcnt - ci->idx, ci->sector_count,
+ ci->md->bs);
+ __map_bio(ti, clone, tio);
+ ci->sector_count = 0;
+}
+
+static int __clone_and_map_discard(struct clone_info *ci)
+{
+ struct dm_target *ti;
+ sector_t max;
+
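+ /* fail early unless every target and underlying device supports discards */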
+ if (!dm_table_supports_discards(ci->map))
+ return -EOPNOTSUPP;
+
+ ti = dm_table_find_target(ci->map, ci->sector);
+ if (!dm_target_is_valid(ti))
+ return -EIO;
+
+ max = max_io_len(ci->md, ci->sector, ti);
+
+ if (ci->sector_count <= max) {
+ __clone_and_map_simple(ci, ti);
+ } else {
+ /*
+ * FIXME: Handle a discard that spans two or more targets.
+ */
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int __clone_and_map(struct clone_info *ci)
{
struct bio *clone, *bio = ci->bio;
@@ -1208,27 +1250,21 @@ static int __clone_and_map(struct clone_info *ci)
if (unlikely(bio_empty_barrier(bio)))
return __clone_and_map_empty_barrier(ci);
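+ /* map discards up front; they are cloned whole rather than split by bvec */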
+ if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD)))
+ return __clone_and_map_discard(ci);
+
ti = dm_table_find_target(ci->map, ci->sector);
if (!dm_target_is_valid(ti))
return -EIO;
max = max_io_len(ci->md, ci->sector, ti);
- /*
- * Allocate a target io object.
- */
- tio = alloc_tio(ci, ti);
-
if (ci->sector_count <= max) {
/*
* Optimise for the simple case where we can do all of
* the remaining io with a single clone.
*/
- clone = clone_bio(bio, ci->sector, ci->idx,
- bio->bi_vcnt - ci->idx, ci->sector_count,
- ci->md->bs);
- __map_bio(ti, clone, tio);
- ci->sector_count = 0;
+ __clone_and_map_simple(ci, ti);
} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
/*
@@ -1249,6 +1285,7 @@ static int __clone_and_map(struct clone_info *ci)
len += bv_len;
}
+ tio = alloc_tio(ci, ti);
clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
ci->md->bs);
__map_bio(ti, clone, tio);
@@ -1272,12 +1309,11 @@ static int __clone_and_map(struct clone_info *ci)
return -EIO;
max = max_io_len(ci->md, ci->sector, ti);
-
- tio = alloc_tio(ci, ti);
}
len = min(remaining, max);
+ tio = alloc_tio(ci, ti);
clone = split_bvec(bio, ci->sector, ci->idx,
bv->bv_offset + offset, len,
ci->md->bs);
@@ -62,6 +62,7 @@ int dm_table_any_busy_target(struct dm_table *t);
int dm_table_set_type(struct dm_table *t);
unsigned dm_table_get_type(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
+bool dm_table_supports_discards(struct dm_table *t);
int dm_table_alloc_md_mempools(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
@@ -130,6 +130,7 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d);
/*
* Target features
*/
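+/* The target's map method can handle bios flagged BIO_RW_DISCARD. */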
+#define DM_TARGET_SUPPORTS_DISCARDS 0x00000001
struct target_type {
uint64_t features;