===================================================================
@@ -131,6 +131,8 @@ struct pstore {
struct dm_io_client *io_client;
struct workqueue_struct *metadata_wq;
+
+ struct task_struct *process;
};
static unsigned sectors_to_pages(unsigned sectors)
@@ -577,6 +579,48 @@ static int persistent_prepare(struct exc
return 0;
}
+struct dm_snap_pending_exception {
+ struct dm_snap_exception e;
+
+ /*
+ * Origin bios that are waiting for this exception to
+ * complete are held in a bio list.
+ */
+ struct bio_list origin_bios;
+ struct bio_list snapshot_bios;
+
+ /*
+ * Short-term queue of pending exceptions prior to submission.
+ */
+ struct list_head list;
+
+ /*
+ * The primary pending_exception is the one that holds
+ * the ref_count and the list of origin_bios for a
+ * group of pending_exceptions. It is always last to get freed.
+ * These fields get set up when writing to the origin.
+ */
+ struct dm_snap_pending_exception *primary_pe;
+
+ /*
+ * Number of pending_exceptions processing this chunk.
+ * When this drops to zero we must complete the origin bios.
+ * If incrementing or decrementing this, hold pe->snap->lock for
+ * the sibling concerned and not pe->primary_pe->snap->lock unless
+ * they are the same.
+ */
+ atomic_t ref_count;
+
+ /* Pointer back to the owning snapshot context */
+ struct dm_snapshot *snap;
+
+ /*
+ * Set to 1 once this exception has been submitted to
+ * kcopyd.
+ */
+ int started;
+};
+
static void persistent_commit(struct exception_store *store,
struct dm_snap_exception *e,
void (*callback) (void *, int success),
@@ -586,6 +630,16 @@ static void persistent_commit(struct exc
struct pstore *ps = get_info(store);
struct disk_exception de;
struct commit_callback *cb;
+ struct dm_snap_pending_exception *pe;
+
+ if (!ps->process)
+ ps->process = current;
+
+ BUG_ON(ps->process != current);
+
+ pe = callback_context;
+ BUG_ON(pe->e.hash_list.next == LIST_POISON1);
+ BUG_ON(pe->e.hash_list.prev == LIST_POISON2);
de.old_chunk = e->old_chunk;
de.new_chunk = e->new_chunk;
@@ -601,13 +655,30 @@ static void persistent_commit(struct exc
cb->callback = callback;
cb->context = callback_context;
+ for (i = 0; i < ps->callback_count; i++) {
+ cb = ps->callbacks + i;
+ pe = cb->context;
+ BUG_ON(pe->e.hash_list.next == LIST_POISON1);
+ BUG_ON(pe->e.hash_list.prev == LIST_POISON2);
+ }
/*
* If there are exceptions in flight and we have not yet
* filled this metadata area there's nothing more to do.
*/
if (!atomic_dec_and_test(&ps->pending_count) &&
- (ps->current_committed != ps->exceptions_per_area))
+ (ps->current_committed != ps->exceptions_per_area)) {
+ pe = callback_context;
+ BUG_ON(pe->e.hash_list.next == LIST_POISON1);
+ BUG_ON(pe->e.hash_list.prev == LIST_POISON2);
return;
+ }
+ for (i = 0; i < ps->callback_count; i++) {
+ cb = ps->callbacks + i;
+ pe = cb->context;
+ BUG_ON(pe->e.hash_list.next == LIST_POISON1);
+ BUG_ON(pe->e.hash_list.prev == LIST_POISON2);
+ }
+
/*
* If we completely filled the current area, then wipe the next one.
@@ -633,6 +704,16 @@ static void persistent_commit(struct exc
for (i = 0; i < ps->callback_count; i++) {
cb = ps->callbacks + i;
+ pe = cb->context;
+ BUG_ON(pe->e.hash_list.next == LIST_POISON1);
+ BUG_ON(pe->e.hash_list.prev == LIST_POISON2);
+ }
+
+ for (i = 0; i < ps->callback_count; i++) {
+ cb = ps->callbacks + i;
+ pe = cb->context;
+ BUG_ON(pe->e.hash_list.next == LIST_POISON1);
+ BUG_ON(pe->e.hash_list.prev == LIST_POISON2);
cb->callback(cb->context, ps->valid);
}
@@ -668,6 +749,8 @@ int dm_create_persistent(struct exceptio
atomic_set(&ps->pending_count, 0);
ps->callbacks = NULL;
+ ps->process = NULL;
+
ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
if (!ps->metadata_wq) {
kfree(ps);