From patchwork Mon Oct 5 19:02:41 2009 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mike Snitzer X-Patchwork-Id: 51779 Received: from hormel.redhat.com (hormel1.redhat.com [209.132.177.33]) by demeter.kernel.org (8.14.2/8.14.2) with ESMTP id n95J2wCE016164 for ; Mon, 5 Oct 2009 19:02:58 GMT Received: from listman.util.phx.redhat.com (listman.util.phx.redhat.com [10.8.4.110]) by hormel.redhat.com (Postfix) with ESMTP id 85ECE8E08AE; Mon, 5 Oct 2009 15:02:58 -0400 (EDT) Received: from int-mx03.intmail.prod.int.phx2.redhat.com (nat-pool.util.phx.redhat.com [10.8.5.200]) by listman.util.phx.redhat.com (8.13.1/8.13.1) with ESMTP id n95J2qIZ009985 for ; Mon, 5 Oct 2009 15:02:52 -0400 Received: from localhost (dhcp-100-18-171.bos.redhat.com [10.16.18.171]) by int-mx03.intmail.prod.int.phx2.redhat.com (8.13.8/8.13.8) with ESMTP id n95J2pr5005969; Mon, 5 Oct 2009 15:02:51 -0400 From: Mike Snitzer To: dm-devel@redhat.com Date: Mon, 5 Oct 2009 15:02:41 -0400 Message-Id: <1254769367-12111-7-git-send-email-snitzer@redhat.com> In-Reply-To: <1254769367-12111-1-git-send-email-snitzer@redhat.com> References: <1254769367-12111-1-git-send-email-snitzer@redhat.com> X-Scanned-By: MIMEDefang 2.67 on 10.5.11.16 X-loop: dm-devel@redhat.com Cc: Mikulas Patocka Subject: [dm-devel] [PATCH 06/12] dm-snapshot-merge-process X-BeenThere: dm-devel@redhat.com X-Mailman-Version: 2.1.5 Precedence: junk Reply-To: device-mapper development List-Id: device-mapper development List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Sender: dm-devel-bounces@redhat.com Errors-To: dm-devel-bounces@redhat.com diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h index b293595..ac568c4 100644 --- a/drivers/md/dm-exception-store.h +++ b/drivers/md/dm-exception-store.h @@ -153,6 +153,13 @@ static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e) 
BUG_ON(!dm_consecutive_chunk_count(e)); } +static inline void dm_consecutive_chunk_count_dec(struct dm_snap_exception *e) +{ + BUG_ON(!dm_consecutive_chunk_count(e)); + + e->new_chunk -= (1ULL << DM_CHUNK_NUMBER_BITS); +} + # else # define DM_CHUNK_CONSECUTIVE_BITS 0 @@ -170,6 +177,10 @@ static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e) { } +static inline void dm_consecutive_chunk_count_dec(struct dm_snap_exception *e) +{ +} + # endif /* diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 2cb0636..b3a2543 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -101,6 +101,13 @@ struct dm_snapshot { mempool_t *tracked_chunk_pool; spinlock_t tracked_chunk_lock; struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE]; + + /* Merge operation is in progress */ + int merge_running; + + /* It is requested to shut down merging */ + /* Cleared back to 0 when the merging is stopped */ + int merge_shutdown; }; struct dm_dev *dm_snap_get_cow(struct dm_snapshot *s) @@ -633,6 +640,129 @@ static int init_hash_tables(struct dm_snapshot *s) return 0; } +static void merge_callback(int read_err, unsigned long write_err, + void *context); + +static void snapshot_merge_process(struct dm_snapshot *s) +{ + int r; + chunk_t old_chunk, new_chunk; + struct dm_snap_exception *e; + struct dm_io_region src, dest; + + BUG_ON(!s->merge_running); + if (s->merge_shutdown) + goto shut; + + if (!s->valid) { + DMERR("snapshot is invalid, can't merge"); + goto shut; + } + + if (!s->store->type->prepare_merge || + !s->store->type->commit_merge) { + DMERR("target store does not support merging"); + goto shut; + } + r = s->store->type->prepare_merge(s->store, &old_chunk, &new_chunk); + if (r <= 0) { + if (r < 0) + DMERR("Read error in exception store, " + "shutting down merge"); + goto shut; + } + + /* TODO: use larger I/O size once we verify that kcopyd handles it */ + + /* !!! 
FIXME: interlock writes to this chunk */ + down_write(&s->lock); + e = lookup_exception(&s->complete, old_chunk); + if (!e) { + DMERR("exception for block %llu is on disk but not in memory", + (unsigned long long)old_chunk); + up_write(&s->lock); + goto shut; + } + if (dm_consecutive_chunk_count(e)) { + if (old_chunk == e->old_chunk) { + e->old_chunk++; + e->new_chunk++; + } else if (old_chunk != e->old_chunk + + dm_consecutive_chunk_count(e)) { + DMERR("merge from the middle of a chunk range"); + up_write(&s->lock); + goto shut; + } + dm_consecutive_chunk_count_dec(e); + } else { + remove_exception(e); + free_exception(e); + } + up_write(&s->lock); + + dest.bdev = s->origin->bdev; + dest.sector = chunk_to_sector(s->store, old_chunk); + dest.count = min((sector_t)s->store->chunk_size, + get_dev_size(dest.bdev) - dest.sector); + + src.bdev = s->cow->bdev; + src.sector = chunk_to_sector(s->store, new_chunk); + src.count = dest.count; + + dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s); + return; + +shut: + s->merge_running = 0; +} + +static void merge_callback(int read_err, unsigned long write_err, void *context) +{ + int r; + struct dm_snapshot *s = context; + + if (read_err || write_err) { + if (read_err) + DMERR("Read error in data, shutting down merge"); + else + DMERR("Write error in data, shutting down merge"); + goto shut; + } + + r = s->store->type->commit_merge(s->store, 1); + if (r < 0) { + DMERR("Write error in exception store, shutting down merge"); + goto shut; + } + + snapshot_merge_process(s); + return; + +shut: + s->merge_running = 0; +} + +static void start_merge(struct dm_snapshot *merging) +{ + if (!merging->merge_running && !merging->merge_shutdown) { + merging->merge_running = 1; + snapshot_merge_process(merging); + } +} + +/* + * Stop the merging process and wait until it finishes. 
+ */ + +static void stop_merge(struct dm_snapshot *merging) +{ + while (merging->merge_running) { + merging->merge_shutdown = 1; + msleep(1); + } + merging->merge_shutdown = 0; +} + /* * Construct a snapshot mapping:

*/ @@ -700,6 +830,8 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) s->active = 0; atomic_set(&s->pending_exceptions_count, 0); s->handover = 0; + s->merge_running = 0; + s->merge_shutdown = 0; init_rwsem(&s->lock); spin_lock_init(&s->pe_lock); @@ -855,6 +987,9 @@ static void snapshot_dtr(struct dm_target *ti) up_write(&s->lock); up_write(&_origins_lock); + if (is_merge(ti)) + stop_merge(s); + /* Prevent further origin writes from using this snapshot. */ /* After this returns there can be no new kcopyd jobs. */ unregister_snapshot(s); @@ -1303,6 +1438,21 @@ static void snapshot_resume(struct dm_target *ti) up_write(&_origins_lock); } +static void snapshot_merge_resume(struct dm_target *ti) +{ + struct dm_snapshot *s = ti->private; + + snapshot_resume(ti); + start_merge(s); +} + +static void snapshot_merge_presuspend(struct dm_target *ti) +{ + struct dm_snapshot *s = ti->private; + + stop_merge(s); +} + static int snapshot_status(struct dm_target *ti, status_type_t type, char *result, unsigned int maxlen) { @@ -1607,7 +1757,8 @@ static struct target_type snapshot_merge_target = { .dtr = snapshot_dtr, .map = snapshot_merge_map, .end_io = snapshot_end_io, - .resume = snapshot_resume, + .presuspend = snapshot_merge_presuspend, + .resume = snapshot_merge_resume, .status = snapshot_status, .iterate_devices = snapshot_iterate_devices, };