===================================================================
@@ -252,6 +252,7 @@ static void dm_hash_remove_all(int keep_
int i, dev_skipped, dev_removed;
struct hash_cell *hc;
struct list_head *tmp, *n;
+ struct mapped_device *md;
down_write(&_hash_lock);
@@ -260,13 +261,14 @@ retry:
for (i = 0; i < NUM_BUCKETS; i++) {
list_for_each_safe (tmp, n, _name_buckets + i) {
hc = list_entry(tmp, struct hash_cell, name_list);
+ md = hc->md;
- if (keep_open_devices &&
- dm_lock_for_deletion(hc->md)) {
+ if (keep_open_devices && dm_lock_for_deletion(md)) {
dev_skipped++;
continue;
}
__hash_remove(hc);
+ dm_destroy(md);
dev_removed = 1;
}
}
@@ -640,6 +642,7 @@ static int dev_create(struct dm_ioctl *p
r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md);
if (r) {
dm_put(md);
+ dm_destroy(md);
return r;
}
@@ -742,6 +745,7 @@ static int dev_remove(struct dm_ioctl *p
param->flags |= DM_UEVENT_GENERATED_FLAG;
dm_put(md);
+ dm_destroy(md);
return 0;
}
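
A minimal caller-side sketch (a hypothetical helper, not the actual dev_remove() ioctl path) of the pattern these dm-ioctl.c hunks establish: dm_put() now only drops the caller's own reference, and dm_destroy() is invoked afterwards to perform the real teardown.

	/* Hypothetical caller; illustrates only the new dm_put()/dm_destroy() pairing. */
	static void example_remove(struct mapped_device *md)
	{
		dm_put(md);	/* drop this path's reference			*/
		dm_destroy(md);	/* suspend targets, wait for holders, free md	*/
	}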
===================================================================
@@ -2175,6 +2175,7 @@ void dm_set_mdptr(struct mapped_device *
void dm_get(struct mapped_device *md)
{
atomic_inc(&md->holders);
+ BUG_ON(test_bit(DMF_FREEING, &md->flags));
}
const char *dm_device_name(struct mapped_device *md)
@@ -2183,27 +2184,41 @@ const char *dm_device_name(struct mapped
}
EXPORT_SYMBOL_GPL(dm_device_name);
-void dm_put(struct mapped_device *md)
+void dm_destroy(struct mapped_device *md)
{
struct dm_table *map;
- BUG_ON(test_bit(DMF_FREEING, &md->flags));
+ might_sleep();
- if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
- map = dm_get_live_table(md);
- idr_replace(&_minor_idr, MINOR_ALLOCED,
- MINOR(disk_devt(dm_disk(md))));
- set_bit(DMF_FREEING, &md->flags);
- spin_unlock(&_minor_lock);
- if (!dm_suspended_md(md)) {
- dm_table_presuspend_targets(map);
- dm_table_postsuspend_targets(map);
- }
- dm_sysfs_exit(md);
- dm_table_put(map);
- dm_table_destroy(__unbind(md));
- free_dev(md);
+ spin_lock(&_minor_lock);
+ map = dm_get_live_table(md);
+ idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
+ set_bit(DMF_FREEING, &md->flags);
+ spin_unlock(&_minor_lock);
+
+ if (!dm_suspended_md(md)) {
+ dm_table_presuspend_targets(map);
+ dm_table_postsuspend_targets(map);
}
+
+ /*
+ * Rare, but there may be I/O requests still going to complete,
+ * for example.  Wait for all references to disappear.
+ * No one should increment the reference count of the mapped_device,
+ * after the mapped_device state becomes DMF_FREEING.
+ */
+ while (atomic_read(&md->holders))
+ msleep(1);
+
+ dm_sysfs_exit(md);
+ dm_table_put(map);
+ dm_table_destroy(__unbind(md));
+ free_dev(md);
+}
+
+void dm_put(struct mapped_device *md)
+{
+ atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);
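
For illustration, a user-space model of the holders/DMF_FREEING handshake that the rewritten dm_get()/dm_put()/dm_destroy() implement. This is an assumption-laden sketch: struct md_model and the model_* functions are made-up stand-ins, not kernel code.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdlib.h>
	#include <unistd.h>

	struct md_model {				/* stand-in for struct mapped_device	*/
		atomic_int  holders;
		atomic_bool freeing;
	};

	static void model_get(struct md_model *md)	/* ~dm_get()				*/
	{
		atomic_fetch_add(&md->holders, 1);
		if (atomic_load(&md->freeing))		/* mirrors BUG_ON(DMF_FREEING)		*/
			abort();
	}

	static void model_put(struct md_model *md)	/* ~dm_put(): only drops a reference	*/
	{
		atomic_fetch_sub(&md->holders, 1);
	}

	static void model_destroy(struct md_model *md)	/* ~dm_destroy()			*/
	{
		atomic_store(&md->freeing, true);	/* new model_get() callers now abort	*/
		while (atomic_load(&md->holders))	/* wait for in-flight references	*/
			usleep(1000);			/* plays the role of msleep(1)		*/
		free(md);				/* actual teardown and free		*/
	}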
===================================================================
@@ -122,6 +122,10 @@ void dm_linear_exit(void);
int dm_stripe_init(void);
void dm_stripe_exit(void);
+/*
+ * mapped_device operations
+ */
+void dm_destroy(struct mapped_device *md);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md);