@@ -1846,6 +1846,33 @@ static bool dm_table_supports_discards(struct dm_table *t)
return true;
}
+/*
+ * iterate_devices callback: returns nonzero when @dev does NOT support
+ * secure erase, so a nonzero result from iterate_devices() means at
+ * least one underlying device is not capable.
+ */
+static int device_not_secerase_capable(struct dm_target *ti,
+				       struct dm_dev *dev, sector_t start,
+				       sector_t len, void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	if (!q)
+		return 0;
+
+	return blk_queue_secure_erase(q) ? 0 : 1;
+}
+
+/*
+ * Returns true only if every target in @t can enumerate its devices and
+ * every underlying device's queue supports secure erase.  Used by
+ * dm_table_set_restrictions() to decide whether QUEUE_FLAG_SECERASE may
+ * be set on the mapped device's queue.
+ */
+static bool dm_targets_support_secure_erase(struct dm_table *t)
+{
+	struct dm_target *ti;
+	unsigned int i;
+
+	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+		ti = dm_table_get_target(t, i);
+
+		/*
+		 * A target with no iterate_devices method cannot prove its
+		 * devices are capable; a nonzero iterate_devices() result
+		 * means some device lacks secure erase.  Either disqualifies
+		 * the whole table.
+		 */
+		if (!ti->type->iterate_devices ||
+		    ti->type->iterate_devices(ti, device_not_secerase_capable,
+					      NULL))
+			return false;
+	}
+
+	return true;
+}
+
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
@@ -1867,6 +1894,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
} else
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+ if (dm_targets_support_secure_erase(t))
+ queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
+
if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
wc = true;
if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))