@@ -52,10 +52,11 @@ typedef int (*precopy_policy_t)(struct precopy_stats, void *);
/* callbacks provided by xc_domain_save */
struct save_callbacks {
- /* Called after expiration of checkpoint interval,
+ /*
+ * Called after expiration of the checkpoint interval,
* to suspend the guest.
*/
- int (*suspend)(void* data);
+ int (*suspend)(void *data);
/*
* Called before and after every batch of page data sent during
@@ -79,7 +80,7 @@ struct save_callbacks {
* xc_domain_save then flushes the output buffer, while the
* guest continues to run.
*/
- int (*postcopy)(void* data);
+ int (*postcopy)(void *data);
/*
* Called after the memory checkpoint has been flushed
@@ -94,7 +95,7 @@ struct save_callbacks {
* 0: terminate checkpointing gracefully
* 1: take another checkpoint
*/
- int (*checkpoint)(void* data);
+ int (*checkpoint)(void *data);
/*
* Called after the checkpoint callback.
@@ -103,13 +104,13 @@ struct save_callbacks {
* 0: terminate checkpointing gracefully
* 1: take another checkpoint
*/
- int (*wait_checkpoint)(void* data);
+ int (*wait_checkpoint)(void *data);
/* Enable qemu-dm logging dirty pages to xen */
int (*switch_qemu_logdirty)(uint32_t domid, unsigned enable, void *data); /* HVM only */
/* to be provided as the last argument to each callback function */
- void* data;
+ void *data;
};
/* Type of stream. Plain, or using a continuous replication protocol? */
@@ -138,22 +139,24 @@ int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom,
/* callbacks provided by xc_domain_restore */
struct restore_callbacks {
- /* Called after a new checkpoint to suspend the guest.
- */
- int (*suspend)(void* data);
+ /* Called after a new checkpoint to suspend the guest. */
+ int (*suspend)(void *data);
- /* Called after the secondary vm is ready to resume.
+ /*
+ * Called after the secondary vm is ready to resume.
* Callback function resumes the guest & the device model,
* returns to xc_domain_restore.
*/
- int (*postcopy)(void* data);
+ int (*postcopy)(void *data);
- /* A checkpoint record has been found in the stream.
- * returns: */
+ /*
+ * A checkpoint record has been found in the stream.
+ * returns:
+ */
#define XGR_CHECKPOINT_ERROR 0 /* Terminate processing */
#define XGR_CHECKPOINT_SUCCESS 1 /* Continue reading more data from the stream */
#define XGR_CHECKPOINT_FAILOVER 2 /* Failover and resume VM */
- int (*checkpoint)(void* data);
+ int (*checkpoint)(void *data);
/*
* Called after the checkpoint callback.
@@ -162,7 +165,7 @@ struct restore_callbacks {
* 0: terminate checkpointing gracefully
* 1: take another checkpoint
*/
- int (*wait_checkpoint)(void* data);
+ int (*wait_checkpoint)(void *data);
/*
* callback to send store gfn and console gfn to xl
@@ -173,7 +176,7 @@ struct restore_callbacks {
void *data);
/* to be provided as the last argument to each callback function */
- void* data;
+ void *data;
};
/**
@@ -4,7 +4,7 @@
#include <xen-tools/libs.h>
-static const char *dhdr_types[] =
+static const char *const dhdr_types[] =
{
[DHDR_TYPE_X86_PV] = "x86 PV",
[DHDR_TYPE_X86_HVM] = "x86 HVM",
@@ -18,7 +18,7 @@ const char *dhdr_type_to_str(uint32_t type)
return "Reserved";
}
-static const char *mandatory_rec_types[] =
+static const char *const mandatory_rec_types[] =
{
[REC_TYPE_END] = "End",
[REC_TYPE_PAGE_DATA] = "Page data",
@@ -58,13 +58,12 @@ int write_split_record(struct xc_sr_context *ctx, struct xc_sr_record *rec,
xc_interface *xch = ctx->xch;
typeof(rec->length) combined_length = rec->length + sz;
size_t record_length = ROUNDUP(combined_length, REC_ALIGN_ORDER);
- struct iovec parts[] =
- {
+ struct iovec parts[] = {
{ &rec->type, sizeof(rec->type) },
{ &combined_length, sizeof(combined_length) },
{ rec->data, rec->length },
{ buf, sz },
- { (void*)zeroes, record_length - combined_length },
+ { (void *)zeroes, record_length - combined_length },
};
if ( record_length > REC_LENGTH_MAX )
@@ -228,7 +228,7 @@ struct xc_sr_context
struct precopy_stats stats;
xen_pfn_t *batch_pfns;
- unsigned nr_batch_pfns;
+ unsigned int nr_batch_pfns;
unsigned long *deferred_pages;
unsigned long nr_deferred_pages;
xc_hypercall_buffer_t dirty_bitmap_hbuf;
@@ -262,8 +262,8 @@ struct xc_sr_context
*/
#define DEFAULT_BUF_RECORDS 1024
struct xc_sr_record *buffered_records;
- unsigned allocated_rec_num;
- unsigned buffered_rec_num;
+ unsigned int allocated_rec_num;
+ unsigned int buffered_rec_num;
/*
* Xenstore and Console parameters.
@@ -333,7 +333,7 @@ struct xc_sr_context
{
struct xc_sr_blob basic, extd, xsave, msr;
} *vcpus;
- unsigned nr_vcpus;
+ unsigned int nr_vcpus;
} restore;
};
} x86_pv;
@@ -418,7 +418,7 @@ int read_record(struct xc_sr_context *ctx, int fd, struct xc_sr_record *rec);
* x86_pv_localise_page() if we receive pagetables frames ahead of the
* contents of the frames they point at.
*/
-int populate_pfns(struct xc_sr_context *ctx, unsigned count,
+int populate_pfns(struct xc_sr_context *ctx, unsigned int count,
const xen_pfn_t *original_pfns, const uint32_t *types);
#endif
@@ -4,11 +4,10 @@ int write_x86_tsc_info(struct xc_sr_context *ctx)
{
xc_interface *xch = ctx->xch;
struct xc_sr_rec_x86_tsc_info tsc = {};
- struct xc_sr_record rec =
- {
+ struct xc_sr_record rec = {
.type = REC_TYPE_X86_TSC_INFO,
.length = sizeof(tsc),
- .data = &tsc
+ .data = &tsc,
};
if ( xc_domain_get_tsc_info(xch, ctx->domid, &tsc.mode,
@@ -10,10 +10,10 @@ xen_pfn_t mfn_to_pfn(struct xc_sr_context *ctx, xen_pfn_t mfn)
bool mfn_in_pseudophysmap(struct xc_sr_context *ctx, xen_pfn_t mfn)
{
- return ( (mfn <= ctx->x86_pv.max_mfn) &&
- (mfn_to_pfn(ctx, mfn) <= ctx->x86_pv.max_pfn) &&
- (xc_pfn_to_mfn(mfn_to_pfn(ctx, mfn), ctx->x86_pv.p2m,
- ctx->x86_pv.width) == mfn) );
+ return ((mfn <= ctx->x86_pv.max_mfn) &&
+ (mfn_to_pfn(ctx, mfn) <= ctx->x86_pv.max_pfn) &&
+ (xc_pfn_to_mfn(mfn_to_pfn(ctx, mfn), ctx->x86_pv.p2m,
+ ctx->x86_pv.width) == mfn));
}
void dump_bad_pseudophysmap_entry(struct xc_sr_context *ctx, xen_pfn_t mfn)
@@ -157,7 +157,7 @@ int x86_pv_map_m2p(struct xc_sr_context *ctx)
{
struct xen_machphys_mfn_list xmml = {
.max_extents = 1,
- .extent_start = { &ctx->x86_pv.compat_m2p_mfn0 }
+ .extent_start = { &ctx->x86_pv.compat_m2p_mfn0 },
};
rc = do_memory_op(xch, XENMEM_machphys_compat_mfn_list,
@@ -175,7 +175,7 @@ int x86_pv_map_m2p(struct xc_sr_context *ctx)
rc = 0;
DPRINTF("max_mfn %#lx", ctx->x86_pv.max_mfn);
-err:
+ err:
free(entries);
free(extents_start);
@@ -134,13 +134,13 @@ static int pfn_set_populated(struct xc_sr_context *ctx, xen_pfn_t pfn)
* unpopulated subset. If types is NULL, no page type checking is performed
* and all unpopulated pfns are populated.
*/
-int populate_pfns(struct xc_sr_context *ctx, unsigned count,
+int populate_pfns(struct xc_sr_context *ctx, unsigned int count,
const xen_pfn_t *original_pfns, const uint32_t *types)
{
xc_interface *xch = ctx->xch;
xen_pfn_t *mfns = malloc(count * sizeof(*mfns)),
*pfns = malloc(count * sizeof(*pfns));
- unsigned i, nr_pfns = 0;
+ unsigned int i, nr_pfns = 0;
int rc = -1;
if ( !mfns || !pfns )
@@ -202,7 +202,7 @@ int populate_pfns(struct xc_sr_context *ctx, unsigned count,
* stream, populate and record their types, map the relevant subset and copy
* the data into the guest.
*/
-static int process_page_data(struct xc_sr_context *ctx, unsigned count,
+static int process_page_data(struct xc_sr_context *ctx, unsigned int count,
xen_pfn_t *pfns, uint32_t *types, void *page_data)
{
xc_interface *xch = ctx->xch;
@@ -210,8 +210,8 @@ static int process_page_data(struct xc_sr_context *ctx, unsigned count,
int *map_errs = malloc(count * sizeof(*map_errs));
int rc;
void *mapping = NULL, *guest_page = NULL;
- unsigned i, /* i indexes the pfns from the record. */
- j, /* j indexes the subset of pfns we decide to map. */
+ unsigned int i, /* i indexes the pfns from the record. */
+ j, /* j indexes the subset of pfns we decide to map. */
nr_pages = 0;
if ( !mfns || !map_errs )
@@ -258,8 +258,8 @@ static int process_page_data(struct xc_sr_context *ctx, unsigned count,
if ( nr_pages == 0 )
goto done;
- mapping = guest_page = xenforeignmemory_map(xch->fmem,
- ctx->domid, PROT_READ | PROT_WRITE,
+ mapping = guest_page = xenforeignmemory_map(
+ xch->fmem, ctx->domid, PROT_READ | PROT_WRITE,
nr_pages, mfns, map_errs);
if ( !mapping )
{
@@ -336,7 +336,7 @@ static int handle_page_data(struct xc_sr_context *ctx, struct xc_sr_record *rec)
{
xc_interface *xch = ctx->xch;
struct xc_sr_rec_page_data_header *pages = rec->data;
- unsigned i, pages_of_data = 0;
+ unsigned int i, pages_of_data = 0;
int rc = -1;
xen_pfn_t *pfns = NULL, pfn;
@@ -424,12 +424,11 @@ static int send_checkpoint_dirty_pfn_list(struct xc_sr_context *ctx)
{
xc_interface *xch = ctx->xch;
int rc = -1;
- unsigned count, written;
+ unsigned int count, written;
uint64_t i, *pfns = NULL;
struct iovec *iov = NULL;
xc_shadow_op_stats_t stats = { 0, ctx->restore.p2m_size };
- struct xc_sr_record rec =
- {
+ struct xc_sr_record rec = {
.type = REC_TYPE_CHECKPOINT_DIRTY_PFN_LIST,
};
DECLARE_HYPERCALL_BUFFER_SHADOW(unsigned long, dirty_bitmap,
@@ -510,7 +509,7 @@ static int handle_checkpoint(struct xc_sr_context *ctx)
{
xc_interface *xch = ctx->xch;
int rc = 0, ret;
- unsigned i;
+ unsigned int i;
if ( ctx->stream_type == XC_STREAM_PLAIN )
{
@@ -587,7 +586,7 @@ static int handle_checkpoint(struct xc_sr_context *ctx)
/* Wait for a new checkpoint */
ret = ctx->restore.callbacks->wait_checkpoint(
- ctx->restore.callbacks->data);
+ ctx->restore.callbacks->data);
HANDLE_CALLBACK_RETURN_VALUE(ret);
/* suspend secondary vm */
@@ -608,7 +607,7 @@ static int handle_checkpoint(struct xc_sr_context *ctx)
static int buffer_record(struct xc_sr_context *ctx, struct xc_sr_record *rec)
{
xc_interface *xch = ctx->xch;
- unsigned new_alloc_num;
+ unsigned int new_alloc_num;
struct xc_sr_record *p;
if ( ctx->restore.buffered_rec_num >= ctx->restore.allocated_rec_num )
@@ -675,8 +674,8 @@ static int setup(struct xc_sr_context *ctx)
if ( ctx->stream_type == XC_STREAM_COLO )
{
- dirty_bitmap = xc_hypercall_buffer_alloc_pages(xch, dirty_bitmap,
- NRPAGES(bitmap_size(ctx->restore.p2m_size)));
+ dirty_bitmap = xc_hypercall_buffer_alloc_pages(
+ xch, dirty_bitmap, NRPAGES(bitmap_size(ctx->restore.p2m_size)));
if ( !dirty_bitmap )
{
@@ -717,7 +716,7 @@ static int setup(struct xc_sr_context *ctx)
static void cleanup(struct xc_sr_context *ctx)
{
xc_interface *xch = ctx->xch;
- unsigned i;
+ unsigned int i;
DECLARE_HYPERCALL_BUFFER_SHADOW(unsigned long, dirty_bitmap,
&ctx->restore.dirty_bitmap_hbuf);
@@ -725,10 +724,12 @@ static void cleanup(struct xc_sr_context *ctx)
free(ctx->restore.buffered_records[i].data);
if ( ctx->stream_type == XC_STREAM_COLO )
- xc_hypercall_buffer_free_pages(xch, dirty_bitmap,
- NRPAGES(bitmap_size(ctx->restore.p2m_size)));
+ xc_hypercall_buffer_free_pages(
+ xch, dirty_bitmap, NRPAGES(bitmap_size(ctx->restore.p2m_size)));
+
free(ctx->restore.buffered_records);
free(ctx->restore.populated_pfns);
+
if ( ctx->restore.ops.cleanup(ctx) )
PERROR("Failed to clean up");
}
@@ -222,7 +222,7 @@ static int process_start_info(struct xc_sr_context *ctx,
rc = 0;
-err:
+ err:
if ( guest_start_info )
munmap(guest_start_info, PAGE_SIZE);
@@ -238,7 +238,7 @@ static int process_vcpu_basic(struct xc_sr_context *ctx,
xc_interface *xch = ctx->xch;
vcpu_guest_context_any_t *vcpu = ctx->x86_pv.restore.vcpus[vcpuid].basic.ptr;
xen_pfn_t pfn, mfn;
- unsigned i, gdt_count;
+ unsigned int i, gdt_count;
int rc = -1;
/* Vcpu 0 is special: Convert the suspend record to an mfn. */
@@ -474,7 +474,7 @@ static int update_vcpu_context(struct xc_sr_context *ctx)
{
xc_interface *xch = ctx->xch;
struct xc_sr_x86_pv_restore_vcpu *vcpu;
- unsigned i;
+ unsigned int i;
int rc = 0;
for ( i = 0; i < ctx->x86_pv.restore.nr_vcpus; ++i )
@@ -527,7 +527,7 @@ static int update_guest_p2m(struct xc_sr_context *ctx)
{
xc_interface *xch = ctx->xch;
xen_pfn_t mfn, pfn, *guest_p2m = NULL;
- unsigned i;
+ unsigned int i;
int rc = -1;
for ( i = 0; i < ctx->x86_pv.p2m_frames; ++i )
@@ -562,7 +562,7 @@ static int update_guest_p2m(struct xc_sr_context *ctx)
guest_p2m = xc_map_foreign_pages(xch, ctx->domid, PROT_WRITE,
ctx->x86_pv.p2m_pfns,
- ctx->x86_pv.p2m_frames );
+ ctx->x86_pv.p2m_frames);
if ( !guest_p2m )
{
PERROR("Failed to map p2m frames");
@@ -572,6 +572,7 @@ static int update_guest_p2m(struct xc_sr_context *ctx)
memcpy(guest_p2m, ctx->x86_pv.p2m,
(ctx->x86_pv.max_pfn + 1) * ctx->x86_pv.width);
rc = 0;
+
err:
if ( guest_p2m )
munmap(guest_p2m, ctx->x86_pv.p2m_frames * PAGE_SIZE);
@@ -675,7 +676,7 @@ static int handle_x86_pv_p2m_frames(struct xc_sr_context *ctx,
{
xc_interface *xch = ctx->xch;
struct xc_sr_rec_x86_pv_p2m_frames *data = rec->data;
- unsigned start, end, x, fpp = PAGE_SIZE / ctx->x86_pv.width;
+ unsigned int start, end, x, fpp = PAGE_SIZE / ctx->x86_pv.width;
int rc;
if ( !ctx->x86_pv.restore.seen_pv_info )
@@ -862,7 +863,7 @@ static int handle_shared_info(struct xc_sr_context *ctx,
struct xc_sr_record *rec)
{
xc_interface *xch = ctx->xch;
- unsigned i;
+ unsigned int i;
int rc = -1;
shared_info_any_t *guest_shinfo = NULL;
const shared_info_any_t *old_shinfo = rec->data;
@@ -904,8 +905,8 @@ static int handle_shared_info(struct xc_sr_context *ctx,
MEMSET_ARRAY_FIELD(guest_shinfo, evtchn_mask, 0xff, ctx->x86_pv.width);
rc = 0;
- err:
+ err:
if ( guest_shinfo )
munmap(guest_shinfo, PAGE_SIZE);
@@ -952,7 +953,7 @@ static int x86_pv_localise_page(struct xc_sr_context *ctx,
xc_interface *xch = ctx->xch;
uint64_t *table = page;
uint64_t pte;
- unsigned i, to_populate;
+ unsigned int i, to_populate;
xen_pfn_t pfns[(PAGE_SIZE / sizeof(uint64_t))];
type &= XEN_DOMCTL_PFINFO_LTABTYPE_MASK;
@@ -1134,7 +1135,7 @@ static int x86_pv_cleanup(struct xc_sr_context *ctx)
if ( ctx->x86_pv.restore.vcpus )
{
- unsigned i;
+ unsigned int i;
for ( i = 0; i < ctx->x86_pv.restore.nr_vcpus; ++i )
{
@@ -10,20 +10,18 @@ static int write_headers(struct xc_sr_context *ctx, uint16_t guest_type)
{
xc_interface *xch = ctx->xch;
int32_t xen_version = xc_version(xch, XENVER_version, NULL);
- struct xc_sr_ihdr ihdr =
- {
- .marker = IHDR_MARKER,
- .id = htonl(IHDR_ID),
- .version = htonl(IHDR_VERSION),
- .options = htons(IHDR_OPT_LITTLE_ENDIAN),
- };
- struct xc_sr_dhdr dhdr =
- {
- .type = guest_type,
- .page_shift = XC_PAGE_SHIFT,
- .xen_major = (xen_version >> 16) & 0xffff,
- .xen_minor = (xen_version) & 0xffff,
- };
+ struct xc_sr_ihdr ihdr = {
+ .marker = IHDR_MARKER,
+ .id = htonl(IHDR_ID),
+ .version = htonl(IHDR_VERSION),
+ .options = htons(IHDR_OPT_LITTLE_ENDIAN),
+ };
+ struct xc_sr_dhdr dhdr = {
+ .type = guest_type,
+ .page_shift = XC_PAGE_SHIFT,
+ .xen_major = (xen_version >> 16) & 0xffff,
+ .xen_minor = (xen_version) & 0xffff,
+ };
if ( xen_version < 0 )
{
@@ -51,7 +49,7 @@ static int write_headers(struct xc_sr_context *ctx, uint16_t guest_type)
*/
static int write_end_record(struct xc_sr_context *ctx)
{
- struct xc_sr_record end = { REC_TYPE_END, 0, NULL };
+ struct xc_sr_record end = { .type = REC_TYPE_END };
return write_record(ctx, &end);
}
@@ -61,7 +59,7 @@ static int write_end_record(struct xc_sr_context *ctx)
*/
static int write_checkpoint_record(struct xc_sr_context *ctx)
{
- struct xc_sr_record checkpoint = { REC_TYPE_CHECKPOINT, 0, NULL };
+ struct xc_sr_record checkpoint = { .type = REC_TYPE_CHECKPOINT };
return write_record(ctx, &checkpoint);
}
@@ -84,14 +82,13 @@ static int write_batch(struct xc_sr_context *ctx)
void **guest_data = NULL;
void **local_pages = NULL;
int *errors = NULL, rc = -1;
- unsigned i, p, nr_pages = 0, nr_pages_mapped = 0;
- unsigned nr_pfns = ctx->save.nr_batch_pfns;
+ unsigned int i, p, nr_pages = 0, nr_pages_mapped = 0;
+ unsigned int nr_pfns = ctx->save.nr_batch_pfns;
void *page, *orig_page;
uint64_t *rec_pfns = NULL;
struct iovec *iov = NULL; int iovcnt = 0;
struct xc_sr_rec_page_data_header hdr = { 0 };
- struct xc_sr_record rec =
- {
+ struct xc_sr_record rec = {
.type = REC_TYPE_PAGE_DATA,
};
@@ -153,8 +150,8 @@ static int write_batch(struct xc_sr_context *ctx)
if ( nr_pages > 0 )
{
- guest_mapping = xenforeignmemory_map(xch->fmem,
- ctx->domid, PROT_READ, nr_pages, mfns, errors);
+ guest_mapping = xenforeignmemory_map(
+ xch->fmem, ctx->domid, PROT_READ, nr_pages, mfns, errors);
if ( !guest_mapping )
{
PERROR("Failed to map guest pages");
@@ -481,7 +478,7 @@ static int update_progress_string(struct xc_sr_context *ctx, char **str)
static int simple_precopy_policy(struct precopy_stats stats, void *user)
{
return ((stats.dirty_count >= 0 &&
- stats.dirty_count < SPP_TARGET_DIRTY_COUNT) ||
+ stats.dirty_count < SPP_TARGET_DIRTY_COUNT) ||
stats.iteration >= SPP_MAX_ITERATIONS)
? XGS_POLICY_STOP_AND_COPY
: XGS_POLICY_CONTINUE_PRECOPY;
@@ -511,12 +508,13 @@ static int send_memory_live(struct xc_sr_context *ctx)
if ( rc )
goto out;
- ctx->save.stats = (struct precopy_stats)
- { .dirty_count = ctx->save.p2m_size };
+ ctx->save.stats = (struct precopy_stats){
+ .dirty_count = ctx->save.p2m_size,
+ };
policy_stats = &ctx->save.stats;
if ( precopy_policy == NULL )
- precopy_policy = simple_precopy_policy;
+ precopy_policy = simple_precopy_policy;
bitmap_set(dirty_bitmap, ctx->save.p2m_size);
@@ -546,7 +544,7 @@ static int send_memory_live(struct xc_sr_context *ctx)
policy_decision = precopy_policy(*policy_stats, data);
if ( policy_decision != XGS_POLICY_CONTINUE_PRECOPY )
- break;
+ break;
if ( xc_shadow_control(
xch, ctx->domid, XEN_DOMCTL_SHADOW_OP_CLEAN,
@@ -571,10 +569,10 @@ static int send_memory_live(struct xc_sr_context *ctx)
static int colo_merge_secondary_dirty_bitmap(struct xc_sr_context *ctx)
{
xc_interface *xch = ctx->xch;
- struct xc_sr_record rec = { 0, 0, NULL };
+ struct xc_sr_record rec = {};
uint64_t *pfns = NULL;
uint64_t pfn;
- unsigned count, i;
+ unsigned int count, i;
int rc;
DECLARE_HYPERCALL_BUFFER_SHADOW(unsigned long, dirty_bitmap,
&ctx->save.dirty_bitmap_hbuf);
@@ -585,14 +583,14 @@ static int colo_merge_secondary_dirty_bitmap(struct xc_sr_context *ctx)
if ( rec.type != REC_TYPE_CHECKPOINT_DIRTY_PFN_LIST )
{
- PERROR("Expect dirty bitmap record, but received %u", rec.type );
+ PERROR("Expect dirty bitmap record, but received %u", rec.type);
rc = -1;
goto err;
}
if ( rec.length % sizeof(*pfns) )
{
- PERROR("Invalid dirty pfn list record length %u", rec.length );
+ PERROR("Invalid dirty pfn list record length %u", rec.length);
rc = -1;
goto err;
}
@@ -603,7 +601,7 @@ static int colo_merge_secondary_dirty_bitmap(struct xc_sr_context *ctx)
for ( i = 0; i < count; i++ )
{
pfn = pfns[i];
- if (pfn > ctx->save.p2m_size)
+ if ( pfn > ctx->save.p2m_size )
{
PERROR("Invalid pfn 0x%" PRIx64, pfn);
rc = -1;
@@ -688,11 +686,7 @@ static int verify_frames(struct xc_sr_context *ctx)
xc_interface *xch = ctx->xch;
xc_shadow_op_stats_t stats = { 0, ctx->save.p2m_size };
int rc;
- struct xc_sr_record rec =
- {
- .type = REC_TYPE_VERIFY,
- .length = 0,
- };
+ struct xc_sr_record rec = { .type = REC_TYPE_VERIFY };
DPRINTF("Enabling verify mode");
@@ -748,7 +742,7 @@ static int send_domain_memory_live(struct xc_sr_context *ctx)
goto out;
}
- out:
+ out:
return rc;
}
@@ -795,7 +789,7 @@ static int setup(struct xc_sr_context *ctx)
goto err;
dirty_bitmap = xc_hypercall_buffer_alloc_pages(
- xch, dirty_bitmap, NRPAGES(bitmap_size(ctx->save.p2m_size)));
+ xch, dirty_bitmap, NRPAGES(bitmap_size(ctx->save.p2m_size)));
ctx->save.batch_pfns = malloc(MAX_BATCH_SIZE *
sizeof(*ctx->save.batch_pfns));
ctx->save.deferred_pages = calloc(1, bitmap_size(ctx->save.p2m_size));
@@ -966,7 +960,7 @@ static int save(struct xc_sr_context *ctx, uint16_t guest_type)
};
int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom,
- uint32_t flags, struct save_callbacks* callbacks,
+ uint32_t flags, struct save_callbacks *callbacks,
xc_stream_type_t stream_type, int recv_fd)
{
struct xc_sr_context ctx = {
@@ -11,8 +11,7 @@ static int write_hvm_context(struct xc_sr_context *ctx)
{
xc_interface *xch = ctx->xch;
int rc, hvm_buf_size;
- struct xc_sr_record hvm_rec =
- {
+ struct xc_sr_record hvm_rec = {
.type = REC_TYPE_HVM_CONTEXT,
};
@@ -134,7 +133,6 @@ static xen_pfn_t x86_hvm_pfn_to_gfn(const struct xc_sr_context *ctx,
static int x86_hvm_normalise_page(struct xc_sr_context *ctx,
xen_pfn_t type, void **page)
{
- /* no-op */
return 0;
}
@@ -174,19 +172,16 @@ static int x86_hvm_setup(struct xc_sr_context *ctx)
static int x86_hvm_start_of_stream(struct xc_sr_context *ctx)
{
- /* no-op */
return 0;
}
static int x86_hvm_start_of_checkpoint(struct xc_sr_context *ctx)
{
- /* no-op */
return 0;
}
static int x86_hvm_check_vm_state(struct xc_sr_context *ctx)
{
- /* no-op */
return 0;
}
@@ -80,7 +80,7 @@ static int map_p2m_leaves(struct xc_sr_context *ctx, xen_pfn_t *mfns,
size_t n_mfns)
{
xc_interface *xch = ctx->xch;
- unsigned x;
+ unsigned int x;
ctx->x86_pv.p2m = xc_map_foreign_pages(xch, ctx->domid, PROT_READ,
mfns, n_mfns);
@@ -133,7 +133,7 @@ static int map_p2m_tree(struct xc_sr_context *ctx)
*/
xc_interface *xch = ctx->xch;
int rc = -1;
- unsigned x, saved_x, fpp, fll_entries, fl_entries;
+ unsigned int x, saved_x, fpp, fll_entries, fl_entries;
xen_pfn_t fll_mfn, saved_mfn, max_pfn;
xen_pfn_t *local_fll = NULL;
@@ -260,8 +260,7 @@ static int map_p2m_tree(struct xc_sr_context *ctx)
/* Map the p2m leaves themselves. */
rc = map_p2m_leaves(ctx, local_fl, fl_entries);
-err:
-
+ err:
free(local_fl);
if ( guest_fl )
munmap(guest_fl, fll_entries * PAGE_SIZE);
@@ -318,7 +317,7 @@ static int map_p2m_list(struct xc_sr_context *ctx, uint64_t p2m_cr3)
xen_pfn_t p2m_mfn, mfn, saved_mfn, max_pfn;
uint64_t *ptes = NULL;
xen_pfn_t *mfns = NULL;
- unsigned fpp, n_pages, level, shift, idx_start, idx_end, idx, saved_idx;
+ unsigned int fpp, n_pages, level, shift, idx_start, idx_end, idx, saved_idx;
int rc = -1;
p2m_mfn = cr3_to_mfn(ctx, p2m_cr3);
@@ -450,7 +449,7 @@ static int map_p2m_list(struct xc_sr_context *ctx, uint64_t p2m_cr3)
/* Map the p2m leaves themselves. */
rc = map_p2m_leaves(ctx, mfns, idx_end - idx_start + 1);
-err:
+ err:
free(mfns);
if ( ptes )
munmap(ptes, n_pages * PAGE_SIZE);
@@ -483,15 +482,13 @@ static int write_one_vcpu_basic(struct xc_sr_context *ctx, uint32_t id)
{
xc_interface *xch = ctx->xch;
xen_pfn_t mfn, pfn;
- unsigned i, gdt_count;
+ unsigned int i, gdt_count;
int rc = -1;
vcpu_guest_context_any_t vcpu;
- struct xc_sr_rec_x86_pv_vcpu_hdr vhdr =
- {
+ struct xc_sr_rec_x86_pv_vcpu_hdr vhdr = {
.vcpu_id = id,
};
- struct xc_sr_record rec =
- {
+ struct xc_sr_record rec = {
.type = REC_TYPE_X86_PV_VCPU_BASIC,
.length = sizeof(vhdr),
.data = &vhdr,
@@ -586,18 +583,15 @@ static int write_one_vcpu_basic(struct xc_sr_context *ctx, uint32_t id)
static int write_one_vcpu_extended(struct xc_sr_context *ctx, uint32_t id)
{
xc_interface *xch = ctx->xch;
- struct xc_sr_rec_x86_pv_vcpu_hdr vhdr =
- {
+ struct xc_sr_rec_x86_pv_vcpu_hdr vhdr = {
.vcpu_id = id,
};
- struct xc_sr_record rec =
- {
+ struct xc_sr_record rec = {
.type = REC_TYPE_X86_PV_VCPU_EXTENDED,
.length = sizeof(vhdr),
.data = &vhdr,
};
- struct xen_domctl domctl =
- {
+ struct xen_domctl domctl = {
.cmd = XEN_DOMCTL_get_ext_vcpucontext,
.domain = ctx->domid,
.u.ext_vcpucontext.vcpu = id,
@@ -626,18 +620,15 @@ static int write_one_vcpu_xsave(struct xc_sr_context *ctx, uint32_t id)
xc_interface *xch = ctx->xch;
int rc = -1;
DECLARE_HYPERCALL_BUFFER(void, buffer);
- struct xc_sr_rec_x86_pv_vcpu_hdr vhdr =
- {
+ struct xc_sr_rec_x86_pv_vcpu_hdr vhdr = {
.vcpu_id = id,
};
- struct xc_sr_record rec =
- {
+ struct xc_sr_record rec = {
.type = REC_TYPE_X86_PV_VCPU_XSAVE,
.length = sizeof(vhdr),
.data = &vhdr,
};
- struct xen_domctl domctl =
- {
+ struct xen_domctl domctl = {
.cmd = XEN_DOMCTL_getvcpuextstate,
.domain = ctx->domid,
.u.vcpuextstate.vcpu = id,
@@ -695,18 +686,15 @@ static int write_one_vcpu_msrs(struct xc_sr_context *ctx, uint32_t id)
int rc = -1;
size_t buffersz;
DECLARE_HYPERCALL_BUFFER(void, buffer);
- struct xc_sr_rec_x86_pv_vcpu_hdr vhdr =
- {
+ struct xc_sr_rec_x86_pv_vcpu_hdr vhdr = {
.vcpu_id = id,
};
- struct xc_sr_record rec =
- {
+ struct xc_sr_record rec = {
.type = REC_TYPE_X86_PV_VCPU_MSRS,
.length = sizeof(vhdr),
.data = &vhdr,
};
- struct xen_domctl domctl =
- {
+ struct xen_domctl domctl = {
.cmd = XEN_DOMCTL_get_vcpu_msrs,
.domain = ctx->domid,
.u.vcpu_msrs.vcpu = id,
@@ -805,17 +793,15 @@ static int write_all_vcpu_information(struct xc_sr_context *ctx)
*/
static int write_x86_pv_info(struct xc_sr_context *ctx)
{
- struct xc_sr_rec_x86_pv_info info =
- {
- .guest_width = ctx->x86_pv.width,
- .pt_levels = ctx->x86_pv.levels,
- };
- struct xc_sr_record rec =
- {
- .type = REC_TYPE_X86_PV_INFO,
- .length = sizeof(info),
- .data = &info
- };
+ struct xc_sr_rec_x86_pv_info info = {
+ .guest_width = ctx->x86_pv.width,
+ .pt_levels = ctx->x86_pv.levels,
+ };
+ struct xc_sr_record rec = {
+ .type = REC_TYPE_X86_PV_INFO,
+ .length = sizeof(info),
+ .data = &info,
+ };
return write_record(ctx, &rec);
}
@@ -827,20 +813,17 @@ static int write_x86_pv_info(struct xc_sr_context *ctx)
static int write_x86_pv_p2m_frames(struct xc_sr_context *ctx)
{
xc_interface *xch = ctx->xch;
- int rc; unsigned i;
+ int rc; unsigned int i;
size_t datasz = ctx->x86_pv.p2m_frames * sizeof(uint64_t);
uint64_t *data = NULL;
- struct xc_sr_rec_x86_pv_p2m_frames hdr =
- {
- .start_pfn = 0,
- .end_pfn = ctx->x86_pv.max_pfn,
- };
- struct xc_sr_record rec =
- {
- .type = REC_TYPE_X86_PV_P2M_FRAMES,
- .length = sizeof(hdr),
- .data = &hdr,
- };
+ struct xc_sr_rec_x86_pv_p2m_frames hdr = {
+ .end_pfn = ctx->x86_pv.max_pfn,
+ };
+ struct xc_sr_record rec = {
+ .type = REC_TYPE_X86_PV_P2M_FRAMES,
+ .length = sizeof(hdr),
+ .data = &hdr,
+ };
/* No need to translate if sizeof(uint64_t) == sizeof(xen_pfn_t). */
if ( sizeof(uint64_t) != sizeof(*ctx->x86_pv.p2m_pfns) )
@@ -871,8 +854,7 @@ static int write_x86_pv_p2m_frames(struct xc_sr_context *ctx)
*/
static int write_shared_info(struct xc_sr_context *ctx)
{
- struct xc_sr_record rec =
- {
+ struct xc_sr_record rec = {
.type = REC_TYPE_SHARED_INFO,
.length = PAGE_SIZE,
.data = ctx->x86_pv.shinfo,
@@ -890,7 +872,7 @@ static int normalise_pagetable(struct xc_sr_context *ctx, const uint64_t *src,
{
xc_interface *xch = ctx->xch;
uint64_t pte;
- unsigned i, xen_first = -1, xen_last = -1; /* Indices of Xen mappings. */
+ unsigned int i, xen_first = -1, xen_last = -1; /* Indices of Xen mappings. */
type &= XEN_DOMCTL_PFINFO_LTABTYPE_MASK;
@@ -1004,7 +986,6 @@ static int normalise_pagetable(struct xc_sr_context *ctx, const uint64_t *src,
return 0;
}
-/* save_ops function. */
static xen_pfn_t x86_pv_pfn_to_gfn(const struct xc_sr_context *ctx,
xen_pfn_t pfn)
{
@@ -1040,7 +1021,7 @@ static int x86_pv_normalise_page(struct xc_sr_context *ctx, xen_pfn_t type,
rc = normalise_pagetable(ctx, *page, local_page, type);
*page = local_page;
- out:
+ out:
return rc;
}
@@ -1071,9 +1052,6 @@ static int x86_pv_setup(struct xc_sr_context *ctx)
return 0;
}
-/*
- * save_ops function. Writes PV header records into the stream.
- */
static int x86_pv_start_of_stream(struct xc_sr_context *ctx)
{
int rc;
@@ -1127,9 +1105,6 @@ static int x86_pv_check_vm_state(struct xc_sr_context *ctx)
return x86_pv_check_vm_state_p2m_list(ctx);
}
-/*
- * save_ops function. Cleanup.
- */
static int x86_pv_cleanup(struct xc_sr_context *ctx)
{
free(ctx->x86_pv.p2m_pfns);
@@ -257,8 +257,8 @@ def read_pv_extended_info(vm):
if so_far != total_length:
- raise StreamError("Overshot Extended Info size by %d bytes"
- % (so_far - total_length,))
+ raise StreamError("Overshot Extended Info size by %d bytes" %
+ (so_far - total_length, ))
def read_pv_p2m_frames(vm):
fpp = 4096 / vm.width
@@ -375,8 +375,8 @@ def read_chunks(vm):
elif marker > 0:
if marker > legacy.MAX_BATCH:
- raise StreamError("Page batch (%d) exceeded MAX_BATCH (%d)"
- % (marker, legacy.MAX_BATCH))
+ raise StreamError("Page batch (%d) exceeded MAX_BATCH (%d)" %
+ (marker, legacy.MAX_BATCH))
pfns = unpack_ulongs(marker)
# xc_domain_save() leaves many XEN_DOMCTL_PFINFO_XTAB records for
@@ -398,8 +398,8 @@ def read_chunks(vm):
max_id, = unpack_exact("i")
if max_id > legacy.MAX_VCPU_ID:
- raise StreamError("Vcpu max_id out of range: %d > %d"
- % (max_id, legacy.MAX_VCPU_ID))
+ raise StreamError("Vcpu max_id out of range: %d > %d" %
+ (max_id, legacy.MAX_VCPU_ID))
vm.max_vcpu_id = max_id
bitmap = unpack_exact("Q" * ((max_id/64) + 1))
@@ -414,8 +414,8 @@ def read_chunks(vm):
bit_idx += 1
word >>= 1
- info(" Vcpu info: max_id %d, online map %s"
- % (vm.max_vcpu_id, vm.online_vcpu_map))
+ info(" Vcpu info: max_id %d, online map %s" %
+ (vm.max_vcpu_id, vm.online_vcpu_map))
elif marker == legacy.CHUNK_hvm_ident_pt:
_, ident_pt = unpack_exact("=IQ")
@@ -512,7 +512,7 @@ def read_chunks(vm):
[public.HVM_PARAM_NR_IOREQ_SERVER_PAGES, nr_pages])
else:
- raise StreamError("Unrecognised chunk %d" % (marker,))
+ raise StreamError("Unrecognised chunk %d" % (marker, ))
def read_hvm_tail(vm):
@@ -579,7 +579,7 @@ def read_legacy_stream(vm):
try:
vm.p2m_size, = unpack_ulongs(1)
- info("P2M Size: 0x%x" % (vm.p2m_size,))
+ info("P2M Size: 0x%x" % (vm.p2m_size, ))
if vm.libxl:
write_libxl_hdr()
@@ -74,7 +74,7 @@
REC_TYPE_x86_pv_vcpu_msrs : "x86 PV vcpu msrs",
REC_TYPE_verify : "Verify",
REC_TYPE_checkpoint : "Checkpoint",
- REC_TYPE_checkpoint_dirty_pfn_list : "Checkpoint dirty pfn list"
+ REC_TYPE_checkpoint_dirty_pfn_list : "Checkpoint dirty pfn list",
}
# page_data
@@ -137,24 +137,25 @@ def verify_ihdr(self):
self.unpack_exact(IHDR_FORMAT)
if marker != IHDR_MARKER:
- raise StreamError("Bad image marker: Expected 0x%x, got 0x%x"
- % (IHDR_MARKER, marker))
+ raise StreamError("Bad image marker: Expected 0x%x, got 0x%x" %
+ (IHDR_MARKER, marker))
if ident != IHDR_IDENT:
- raise StreamError("Bad image id: Expected 0x%x, got 0x%x"
- % (IHDR_IDENT, ident))
+ raise StreamError("Bad image id: Expected 0x%x, got 0x%x" %
+ (IHDR_IDENT, ident))
if version != IHDR_VERSION:
- raise StreamError("Unknown image version: Expected %d, got %d"
- % (IHDR_VERSION, version))
+ raise StreamError("Unknown image version: Expected %d, got %d" %
+ (IHDR_VERSION, version))
if options & IHDR_OPT_RESZ_MASK:
- raise StreamError("Reserved bits set in image options field: 0x%x"
- % (options & IHDR_OPT_RESZ_MASK))
+ raise StreamError("Reserved bits set in image options field: 0x%x" %
+ (options & IHDR_OPT_RESZ_MASK))
if res1 != 0 or res2 != 0:
- raise StreamError("Reserved bits set in image header: 0x%04x:0x%08x"
- % (res1, res2))
+ raise StreamError(
+ "Reserved bits set in image header: 0x%04x:0x%08x" %
+ (res1, res2))
if ( (sys.byteorder == "little") and
((options & IHDR_OPT_BIT_ENDIAN) != IHDR_OPT_LE) ):
@@ -175,19 +176,19 @@ def verify_dhdr(self):
raise StreamError("Unrecognised domain type 0x%x" % (gtype, ))
if res1 != 0:
- raise StreamError("Reserved bits set in domain header 0x%04x"
- % (res1, ))
+ raise StreamError("Reserved bits set in domain header 0x%04x" %
+ (res1, ))
if page_shift != 12:
- raise StreamError("Page shift expected to be 12. Got %d"
- % (page_shift, ))
+ raise StreamError("Page shift expected to be 12. Got %d" %
+ (page_shift, ))
if major == 0:
- self.info("Domain Header: legacy converted %s"
- % (dhdr_type_to_str[gtype], ))
+ self.info("Domain Header: legacy converted %s" %
+ (dhdr_type_to_str[gtype], ))
else:
- self.info("Domain Header: %s from Xen %d.%d"
- % (dhdr_type_to_str[gtype], major, minor))
+ self.info("Domain Header: %s from Xen %d.%d" %
+ (dhdr_type_to_str[gtype], major, minor))
def verify_record(self):
@@ -204,12 +205,12 @@ def verify_record(self):
if rtype != REC_TYPE_page_data:
if self.squashed_pagedata_records > 0:
- self.info("Squashed %d Page Data records together"
- % (self.squashed_pagedata_records, ))
+ self.info("Squashed %d Page Data records together" %
+ (self.squashed_pagedata_records, ))
self.squashed_pagedata_records = 0
- self.info("Libxc Record: %s, length %d"
- % (rec_type_to_str[rtype], length))
+ self.info("Libxc Record: %s, length %d" %
+ (rec_type_to_str[rtype], length))
else:
self.squashed_pagedata_records += 1
@@ -219,8 +220,9 @@ def verify_record(self):
raise StreamError("Padding containing non0 bytes found")
if rtype not in record_verifiers:
- raise RuntimeError("No verification function for libxc record '%s'"
- % rec_type_to_str[rtype])
+ raise RuntimeError(
+ "No verification function for libxc record '%s'" %
+ rec_type_to_str[rtype])
else:
record_verifiers[rtype](self, content[:length])
@@ -239,32 +241,32 @@ def verify_record_page_data(self, content):
minsz = calcsize(PAGE_DATA_FORMAT)
if len(content) <= minsz:
- raise RecordError("PAGE_DATA record must be at least %d bytes long"
- % (minsz, ))
+ raise RecordError(
+ "PAGE_DATA record must be at least %d bytes long" % (minsz, ))
count, res1 = unpack(PAGE_DATA_FORMAT, content[:minsz])
if res1 != 0:
- raise StreamError("Reserved bits set in PAGE_DATA record 0x%04x"
- % (res1, ))
+ raise StreamError(
+ "Reserved bits set in PAGE_DATA record 0x%04x" % (res1, ))
pfnsz = count * 8
if (len(content) - minsz) < pfnsz:
- raise RecordError("PAGE_DATA record must contain a pfn record for "
- "each count")
+ raise RecordError(
+ "PAGE_DATA record must contain a pfn record for each count")
- pfns = list(unpack("=%dQ" % (count,), content[minsz:minsz + pfnsz]))
+ pfns = list(unpack("=%dQ" % (count, ), content[minsz:minsz + pfnsz]))
nr_pages = 0
for idx, pfn in enumerate(pfns):
if pfn & PAGE_DATA_PFN_RESZ_MASK:
- raise RecordError("Reserved bits set in pfn[%d]: 0x%016x",
- idx, pfn & PAGE_DATA_PFN_RESZ_MASK)
+ raise RecordError("Reserved bits set in pfn[%d]: 0x%016x" %
+ (idx, pfn & PAGE_DATA_PFN_RESZ_MASK))
if pfn >> PAGE_DATA_TYPE_SHIFT in (5, 6, 7, 8):
- raise RecordError("Invalid type value in pfn[%d]: 0x%016x",
- idx, pfn & PAGE_DATA_TYPE_LTAB_MASK)
+ raise RecordError("Invalid type value in pfn[%d]: 0x%016x" %
+ (idx, pfn & PAGE_DATA_TYPE_LTAB_MASK))
# We expect page data for each normal page or pagetable
if PAGE_DATA_TYPE_NOTAB <= (pfn & PAGE_DATA_TYPE_LTABTYPE_MASK) \
@@ -273,8 +275,8 @@ def verify_record_page_data(self, content):
pagesz = nr_pages * 4096
if len(content) != minsz + pfnsz + pagesz:
- raise RecordError("Expected %u + %u + %u, got %u"
- % (minsz, pfnsz, pagesz, len(content)))
+ raise RecordError("Expected %u + %u + %u, got %u" %
+ (minsz, pfnsz, pagesz, len(content)))
def verify_record_x86_pv_info(self, content):
@@ -282,8 +284,8 @@ def verify_record_x86_pv_info(self, content):
expectedsz = calcsize(X86_PV_INFO_FORMAT)
if len(content) != expectedsz:
- raise RecordError("x86_pv_info: expected length of %d, got %d"
- % (expectedsz, len(content)))
+ raise RecordError("x86_pv_info: expected length of %d, got %d" %
+ (expectedsz, len(content)))
width, levels, res1, res2 = unpack(X86_PV_INFO_FORMAT, content)
@@ -294,8 +296,9 @@ def verify_record_x86_pv_info(self, content):
raise RecordError("Expected levels of 3 or 4, got %d" % (levels, ))
if res1 != 0 or res2 != 0:
- raise StreamError("Reserved bits set in X86_PV_INFO: 0x%04x 0x%08x"
- % (res1, res2))
+ raise StreamError(
+ "Reserved bits set in X86_PV_INFO: 0x%04x 0x%08x" %
+ (res1, res2))
bitness = {4:32, 8:64}[width]
self.info(" %sbit guest, %d levels of pagetables" % (bitness, levels))
@@ -309,8 +312,8 @@ def verify_record_x86_pv_p2m_frames(self, content):
" least 8 bytes long")
if len(content) % 8 != 0:
- raise RecordError("Length expected to be a multiple of 8, not %d"
- % (len(content), ))
+ raise RecordError("Length expected to be a multiple of 8, not %d" %
+ (len(content), ))
start, end = unpack("=II", content[:8])
self.info(" Start pfn 0x%x, End 0x%x" % (start, end))
@@ -321,30 +324,32 @@ def verify_record_x86_pv_vcpu_generic(self, content, name):
minsz = calcsize(X86_PV_VCPU_HDR_FORMAT)
if len(content) < minsz:
- raise RecordError("X86_PV_VCPU_%s record length must be at least %d"
- " bytes long" % (name, minsz))
+ raise RecordError(
+ "X86_PV_VCPU_%s record length must be at least %d bytes long" %
+ (name, minsz))
if len(content) == minsz:
- self.info("Warning: X86_PV_VCPU_%s record with zero content"
- % (name, ))
+ self.info("Warning: X86_PV_VCPU_%s record with zero content" %
+ (name, ))
vcpuid, res1 = unpack(X86_PV_VCPU_HDR_FORMAT, content[:minsz])
if res1 != 0:
raise StreamError(
- "Reserved bits set in x86_pv_vcpu_%s record 0x%04x"
- % (name, res1))
+ "Reserved bits set in x86_pv_vcpu_%s record 0x%04x" %
+ (name, res1))
- self.info(" vcpu%d %s context, %d bytes"
- % (vcpuid, name, len(content) - minsz))
+ self.info(" vcpu%d %s context, %d bytes" %
+ (vcpuid, name, len(content) - minsz))
def verify_record_shared_info(self, content):
""" shared info record """
- if len(content) != 4096:
- raise RecordError("Length expected to be 4906 bytes, not %d"
- % (len(content), ))
+ contentsz = len(content)
+ if contentsz != 4096:
+ raise RecordError("Length expected to be 4906 bytes, not %d" %
+ (contentsz, ))
def verify_record_tsc_info(self, content):
@@ -358,11 +363,11 @@ def verify_record_tsc_info(self, content):
mode, khz, nsec, incarn, res1 = unpack(X86_TSC_INFO_FORMAT, content)
if res1 != 0:
- raise StreamError("Reserved bits set in X86_TSC_INFO: 0x%08x"
- % (res1, ))
+ raise StreamError("Reserved bits set in X86_TSC_INFO: 0x%08x" %
+ (res1, ))
- self.info(" Mode %u, %u kHz, %u ns, incarnation %d"
- % (mode, khz, nsec, incarn))
+ self.info(" Mode %u, %u kHz, %u ns, incarnation %d" %
+ (mode, khz, nsec, incarn))
def verify_record_hvm_context(self, content):
@@ -412,6 +417,7 @@ def verify_record_checkpoint(self, content):
if len(content) != 0:
raise RecordError("Checkpoint record with non-zero length")
+
def verify_record_checkpoint_dirty_pfn_list(self, content):
""" checkpoint dirty pfn list """
raise RecordError("Found checkpoint dirty pfn list record in stream")
@@ -45,7 +45,7 @@
REC_TYPE_emulator_xenstore_data : "Emulator xenstore data",
REC_TYPE_emulator_context : "Emulator context",
REC_TYPE_checkpoint_end : "Checkpoint end",
- REC_TYPE_checkpoint_state : "Checkpoint state"
+ REC_TYPE_checkpoint_state : "Checkpoint state",
}
# emulator_* header
@@ -90,16 +90,16 @@ def verify_hdr(self):
ident, version, options = self.unpack_exact(HDR_FORMAT)
if ident != HDR_IDENT:
- raise StreamError("Bad image id: Expected 0x%x, got 0x%x"
- % (HDR_IDENT, ident))
+ raise StreamError("Bad image id: Expected 0x%x, got 0x%x" %
+ (HDR_IDENT, ident))
if version != HDR_VERSION:
- raise StreamError("Unknown image version: Expected %d, got %d"
- % (HDR_VERSION, version))
+ raise StreamError("Unknown image version: Expected %d, got %d" %
+ (HDR_VERSION, version))
if options & HDR_OPT_RESZ_MASK:
- raise StreamError("Reserved bits set in image options field: 0x%x"
- % (options & HDR_OPT_RESZ_MASK))
+ raise StreamError("Reserved bits set in image options field: 0x%x" %
+ (options & HDR_OPT_RESZ_MASK))
if ( (sys.byteorder == "little") and
((options & HDR_OPT_BIT_ENDIAN) != HDR_OPT_LE) ):
@@ -121,8 +121,8 @@ def verify_record(self):
if rtype not in rec_type_to_str:
raise StreamError("Unrecognised record type %x" % (rtype, ))
- self.info("Libxl Record: %s, length %d"
- % (rec_type_to_str[rtype], length))
+ self.info("Libxl Record: %s, length %d" %
+ (rec_type_to_str[rtype], length))
contentsz = (length + 7) & ~7
content = self.rdexact(contentsz)
@@ -132,8 +132,9 @@ def verify_record(self):
raise StreamError("Padding containing non0 bytes found")
if rtype not in record_verifiers:
- raise RuntimeError("No verification function for libxl record '%s'"
- % rec_type_to_str[rtype])
+ raise RuntimeError(
+ "No verification function for libxl record '%s'" %
+ rec_type_to_str[rtype])
else:
record_verifiers[rtype](self, content[:length])
@@ -162,16 +163,16 @@ def verify_record_emulator_xenstore_data(self, content):
minsz = calcsize(EMULATOR_HEADER_FORMAT)
if len(content) < minsz:
- raise RecordError("Length must be at least %d bytes, got %d"
- % (minsz, len(content)))
+ raise RecordError("Length must be at least %d bytes, got %d" %
+ (minsz, len(content)))
emu_id, emu_idx = unpack(EMULATOR_HEADER_FORMAT, content[:minsz])
if emu_id not in emulator_id_to_str:
raise RecordError("Unrecognised emulator id 0x%x" % (emu_id, ))
- self.info("Emulator Xenstore Data (%s, idx %d)"
- % (emulator_id_to_str[emu_id], emu_idx))
+ self.info("Emulator Xenstore Data (%s, idx %d)" %
+ (emulator_id_to_str[emu_id], emu_idx))
# Chop off the emulator header
content = content[minsz:]
@@ -185,8 +186,8 @@ def verify_record_emulator_xenstore_data(self, content):
parts = content[:-1].split("\x00")
if (len(parts) % 2) != 0:
- raise RecordError("Expected an even number of strings, got %d"
- % (len(parts), ))
+ raise RecordError("Expected an even number of strings, got %d" %
+ (len(parts), ))
for key, val in zip(parts[0::2], parts[1::2]):
self.info(" '%s' = '%s'" % (key, val))
@@ -197,8 +198,8 @@ def verify_record_emulator_context(self, content):
minsz = calcsize(EMULATOR_HEADER_FORMAT)
if len(content) < minsz:
- raise RecordError("Length must be at least %d bytes, got %d"
- % (minsz, len(content)))
+ raise RecordError("Length must be at least %d bytes, got %d" %
+ (minsz, len(content)))
emu_id, emu_idx = unpack(EMULATOR_HEADER_FORMAT, content[:minsz])
The code has devating from the prevailing style in many ways. Adjust spacing, indentation, position of operators, layout of multiline comments, removal of superfluous comments, constness, trailing commas, and use of unqualified 'unsigned'. No functional change. Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com> --- CC: Ian Jackson <Ian.Jackson@citrix.com> CC: Wei Liu <wl@xen.org> --- tools/libxc/include/xenguest.h | 35 ++++---- tools/libxc/xc_sr_common.c | 9 +-- tools/libxc/xc_sr_common.h | 10 +-- tools/libxc/xc_sr_common_x86.c | 5 +- tools/libxc/xc_sr_common_x86_pv.c | 12 +-- tools/libxc/xc_sr_restore.c | 39 ++++----- tools/libxc/xc_sr_restore_x86_pv.c | 21 ++--- tools/libxc/xc_sr_save.c | 74 ++++++++--------- tools/libxc/xc_sr_save_x86_hvm.c | 7 +- tools/libxc/xc_sr_save_x86_pv.c | 101 +++++++++-------------- tools/python/scripts/convert-legacy-stream | 20 ++--- tools/python/xen/migration/libxc.py | 124 +++++++++++++++-------------- tools/python/xen/migration/libxl.py | 39 ++++----- 13 files changed, 235 insertions(+), 261 deletions(-)