===================================================================
@@ -861,6 +861,43 @@ static void __invalidate_snapshot(struct
dm_table_event(s->ti->table);
}
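+/*
+ * Debug helper: verify that the chunk being copied for pending exception
+ * 'pe' is accounted for in every valid, active snapshot of this origin,
+ * either as a completed exception or as a pending exception linked to a
+ * primary pe.  Any other state suggests exception table corruption, so
+ * report it and BUG().
+ */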
+static void check_allocated_chunk(struct block_device *bdev, chunk_t chunk, struct dm_snap_pending_exception *pe, int line)
+{
+ struct dm_snapshot *snap;
+ int i = 0;
+ struct origin *o;
+ down_read(&_origins_lock);
+ o = __lookup_origin(bdev);
+ if (!o) {
+ printk(KERN_ALERT "line %d: origin not found\n", line);
+ BUG();
+ }
+ list_for_each_entry (snap, &o->snapshots, list) {
+ struct dm_snap_exception *e;
+ down_write(&snap->lock);
+ if (!snap->valid || !snap->active)
+ goto next_snapshot;
+ e = lookup_exception(&snap->complete, chunk);
+ if (e)
+ goto next_snapshot;
+ e = lookup_exception(&snap->pending, chunk);
+ if (e) {
+ struct dm_snap_pending_exception *pe2 = container_of(e, struct dm_snap_pending_exception, e);
+ /* a pending exception found here must already be linked to a primary pe */
+ if (!pe2->primary_pe) {
+ printk(KERN_ALERT "%d: no primary pe %Lx in snapshot %p(%d), copying snapshot %p, pe %p, pe->primary_pe %p, refcount %d\n",
+        line, (unsigned long long)chunk, snap, i, pe->snap, pe, pe->primary_pe, atomic_read(&pe->ref_count));
+ BUG();
+ }
+ goto next_snapshot;
+ }
+ printk(KERN_ALERT "%d: not allocated chunk %Lx in snapshot %p(%d), copying snapshot %p, pe %p, pe->primary_pe %p, refcount %d\n",
+        line, (unsigned long long)chunk, snap, i, pe->snap, pe, pe->primary_pe, atomic_read(&pe->ref_count));
+ BUG();
+next_snapshot:
+ up_write(&snap->lock);
+ i++;
+ }
+ up_read(&_origins_lock);
+}
+
static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
atomic_inc(&pe->ref_count);
@@ -917,6 +954,8 @@ static void pending_complete(struct dm_s
BUG_ON(pe->e.hash_list.next == LIST_POISON1);
BUG_ON(pe->e.hash_list.prev == LIST_POISON2);
+ check_allocated_chunk(s->origin->bdev, pe->e.old_chunk, pe, __LINE__);
+
if (!success) {
/* Read/write error - snapshot is unusable */
down_write(&s->lock);
@@ -1017,6 +1056,8 @@ static void copy_callback(int read_err,
BUG_ON(pe->e.hash_list.next == LIST_POISON1);
BUG_ON(pe->e.hash_list.prev == LIST_POISON2);
+ check_allocated_chunk(s->origin->bdev, pe->e.old_chunk, pe, __LINE__);
+
if (read_err || write_err) {
s->store.check_pending_exception(&s->store, pe, __LINE__);
pending_complete(pe, 0);
@@ -1056,6 +1097,8 @@ static void start_copy(struct dm_snap_pe
BUG_ON(pe->e.hash_list.next == LIST_POISON1);
BUG_ON(pe->e.hash_list.prev == LIST_POISON2);
+ check_allocated_chunk(bdev, pe->e.old_chunk, pe, __LINE__);
+
/* Hand over to kcopyd */
dm_kcopyd_copy(s->kcopyd_client,
&src, 1, &dest, 0, copy_callback, pe);
@@ -1155,6 +1198,11 @@ static int snapshot_map(struct dm_target
chunk_t chunk;
struct dm_snap_pending_exception *pe = NULL;
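+ /* debug check: this patch treats a write to the snapshot device itself as unsupported */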
+ if (bio_rw(bio) == WRITE) {
+ printk(KERN_ALERT "Writing to a snapshot --- not supported!\n");
+ BUG();
+ }
+
chunk = sector_to_chunk(s, bio->bi_sector);
/* Full snapshots are not usable */
@@ -1300,8 +1348,11 @@ static int __origin_write(struct list_he
goto next_snapshot;
/* Nothing to do if writing beyond end of snapshot */
- if (bio->bi_sector >= dm_table_get_size(snap->ti->table))
+ if (bio->bi_sector >= dm_table_get_size(snap->ti->table)) {
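+ /* under this patch a write past the snapshot end is a fatal error rather than a no-op */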
+ printk(KERN_ALERT "over snapshot end - not supported: %Lx >= %Lx\n",
+        (unsigned long long)bio->bi_sector, (unsigned long long)dm_table_get_size(snap->ti->table));
+ BUG();
goto next_snapshot;
+ }
/*
* Remember, different snapshots can have
@@ -1486,8 +1537,13 @@ static void origin_resume(struct dm_targ
down_read(&_origins_lock);
o = __lookup_origin(dev->bdev);
if (o)
- list_for_each_entry (snap, &o->snapshots, list)
+ list_for_each_entry (snap, &o->snapshots, list) {
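+ /* this patch requires every snapshot of the origin to share one chunk size */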
+ if (chunk_size && chunk_size != snap->chunk_size) {
+ printk(KERN_ALERT "Different chunk sizes - not supported!\n");
+ BUG();
+ }
chunk_size = min_not_zero(chunk_size, snap->chunk_size);
+ }
up_read(&_origins_lock);
ti->split_io = chunk_size;