@@ -34,6 +34,8 @@ typedef struct BackupBlockJob {
BlockDriverState *cbw;
BlockDriverState *source_bs;
BlockDriverState *target_bs;
+ BlockBackend *source_blk;
+ BlockBackend *target_blk;

BdrvDirtyBitmap *sync_bitmap;

@@ -102,7 +104,17 @@ static void backup_clean(Job *job)
{
BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
block_job_remove_all_bdrv(&s->common);
- bdrv_cbw_drop(s->cbw);
+ if (s->cbw) {
+ assert(!s->source_blk && !s->target_blk);
+ bdrv_cbw_drop(s->cbw);
+ } else {
+ block_copy_state_free(s->bcs);
+ s->bcs = NULL;
+ blk_unref(s->source_blk);
+ s->source_blk = NULL;
+ blk_unref(s->target_blk);
+ s->target_blk = NULL;
+ }
}

void backup_do_checkpoint(BlockJob *job, Error **errp)
@@ -368,6 +380,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
int64_t cluster_size;
BlockDriverState *cbw = NULL;
BlockCopyState *bcs = NULL;
+ BlockBackend *source_blk = NULL, *target_blk = NULL;

assert(bs);
assert(target);
@@ -450,9 +463,37 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
goto error;
}

- cbw = bdrv_cbw_append(bs, target, filter_node_name, compress, &bcs, errp);
- if (!cbw) {
- goto error;
+ source_blk = blk_new_with_bs(bs, BLK_PERM_CONSISTENT_READ,
+ BLK_PERM_WRITE_UNCHANGED |
+ BLK_PERM_CONSISTENT_READ, NULL);
+ if (source_blk) {
+ BdrvDirtyBitmap *copy_bitmap;
+
+ target_blk = blk_new_with_bs(target, BLK_PERM_WRITE,
+ BLK_PERM_CONSISTENT_READ, errp);
+ if (!target_blk) {
+ goto error;
+ }
+
+ bcs = block_copy_state_new(blk_root(source_blk), blk_root(target_blk),
+ false, compress, errp);
+ if (!bcs) {
+ goto error;
+ }
+
+ /*
+ * Initialize the bitmap the same way the copy-before-write filter
+ * does, so that the code path afterwards is the same in both cases.
+ */
+ copy_bitmap = block_copy_dirty_bitmap(bcs);
+ bdrv_set_dirty_bitmap(copy_bitmap, 0,
+ bdrv_dirty_bitmap_size(copy_bitmap));
+ } else {
+ cbw = bdrv_cbw_append(bs, target, filter_node_name, compress, &bcs,
+ errp);
+ if (!cbw) {
+ goto error;
+ }
}

cluster_size = block_copy_cluster_size(bcs);
@@ -464,7 +505,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
}

/* job->len is fixed, so we can't allow resize */
- job = block_job_create(job_id, &backup_job_driver, txn, cbw,
+ job = block_job_create(job_id, &backup_job_driver, txn, cbw ?: bs,
0, BLK_PERM_ALL,
speed, creation_flags, cb, opaque, errp);
if (!job) {
@@ -474,6 +515,8 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
job->cbw = cbw;
job->source_bs = bs;
job->target_bs = target;
+ job->source_blk = source_blk;
+ job->target_blk = target_blk;
job->on_source_error = on_source_error;
job->on_target_error = on_target_error;
job->sync_mode = sync_mode;
@@ -500,6 +543,8 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
if (cbw) {
bdrv_cbw_drop(cbw);
}
+ blk_unref(source_blk);
+ blk_unref(target_blk);

return NULL;
}
If the source is immutable and there are no writers on it, we don't
need to insert a filter, so detect that case and use simple blk's for
block-copy. Note that a user may still try to add writers to the
source during the backup; this will fail, since our source blk doesn't
share the write permission.

In the future we can add a tri-state source-mode parameter for the
backup job with the following values:

  immutable: go without a filter. The blockdev-backup command fails if
             there are writers on the source, and adding writers
             during the backup fails as well.

  filtered: insert the filter unconditionally. Writers are supported
            at start, and the user may add new writers above the
            copy-before-write filter during the backup [current
            behavior].

  auto: go "immutable" if there are no writers at start, go "filtered"
        otherwise.

"auto" would then be the default behavior. For now, just change the
default behavior so that the extra filter is not created when it's not
necessary.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
---
 block/backup.c | 55 +++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 50 insertions(+), 5 deletions(-)
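
For readers less familiar with the block-permission system this relies
on, here is a minimal annotated sketch of the detection idea. The
helper name is hypothetical and not part of the patch;
blk_new_with_bs() and the BLK_PERM_* constants are the existing QEMU
APIs that the new hunk in backup_job_create() calls directly:

#include "sysemu/block-backend.h"  /* blk_new_with_bs(), BLK_PERM_* */

/*
 * Illustration only: try to attach a BlockBackend to @bs without
 * sharing BLK_PERM_WRITE.  Permission negotiation fails (returning
 * NULL) if any existing user of @bs holds the write permission,
 * i.e. if the source currently has writers.
 */
static BlockBackend *try_immutable_source_blk(BlockDriverState *bs)
{
    /*
     * Request consistent reads; share only WRITE_UNCHANGED and
     * CONSISTENT_READ, thereby refusing to coexist with writers.
     * errp is NULL because failure is not an error here: it just
     * means we fall back to the copy-before-write filter.
     */
    return blk_new_with_bs(bs, BLK_PERM_CONSISTENT_READ,
                           BLK_PERM_WRITE_UNCHANGED |
                           BLK_PERM_CONSISTENT_READ, NULL);
}

The same negotiation keeps new writers out for as long as the backend
exists, which is why attempts to attach a writer during the backup
fail.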