@@ -233,6 +233,8 @@ struct sr_save_arrays {
     int errors[MAX_BATCH_SIZE];
     /* write_batch: iovec[] for writev(). */
     struct iovec iov[MAX_BATCH_SIZE + 4];
+    /* write_batch: 64 bit pfn list for the PAGE_DATA record. */
+    uint64_t rec_pfns[MAX_BATCH_SIZE];
 };
 
 struct sr_restore_arrays {
@@ -96,7 +96,7 @@ static int write_batch(struct xc_sr_context *ctx)
     unsigned int i, p, nr_pages = 0, nr_pages_mapped = 0;
     unsigned int nr_pfns = ctx->save.nr_batch_pfns;
     void *page, *orig_page;
-    uint64_t *rec_pfns = NULL;
+    uint64_t *rec_pfns = ctx->save.m->rec_pfns;
     struct iovec *iov = ctx->save.m->iov; int iovcnt = 0;
     struct xc_sr_rec_page_data_header hdr = { 0 };
     struct xc_sr_record rec = {
@@ -201,14 +201,6 @@ static int write_batch(struct xc_sr_context *ctx)
         }
     }
 
-    rec_pfns = malloc(nr_pfns * sizeof(*rec_pfns));
-    if ( !rec_pfns )
-    {
-        ERROR("Unable to allocate %zu bytes of memory for page data pfn list",
-              nr_pfns * sizeof(*rec_pfns));
-        goto err;
-    }
-
     hdr.count = nr_pfns;
 
     rec.length = sizeof(hdr);
@@ -259,7 +251,6 @@ static int write_batch(struct xc_sr_context *ctx)
     rc = ctx->save.nr_batch_pfns = 0;
 
  err:
-    free(rec_pfns);
     if ( guest_mapping )
         xenforeignmemory_unmap(xch->fmem, guest_mapping, nr_pages_mapped);
     for ( i = 0; local_pages && i < nr_pfns; ++i )
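
For readers skimming the diff: the change replaces the per-batch malloc()/free() of the rec_pfns buffer with an array embedded in the per-context sr_save_arrays allocation, so write_batch() only takes a pointer into memory that already exists. Below is a minimal, self-contained sketch of that pattern, not the libxenguest code itself; struct scratch_arrays, struct context and write_one_batch are hypothetical names, and the pfn/type packing is purely illustrative.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_BATCH_SIZE 1024

/* Hypothetical stand-in for sr_save_arrays: one allocation per context. */
struct scratch_arrays {
    uint64_t rec_pfns[MAX_BATCH_SIZE];    /* rewritten on every batch */
};

/* Hypothetical, heavily trimmed stand-in for the save context. */
struct context {
    struct scratch_arrays *m;             /* allocated once, reused per batch */
    unsigned int nr_batch_pfns;
    uint64_t batch_pfns[MAX_BATCH_SIZE];
    uint32_t types[MAX_BATCH_SIZE];
};

static int write_one_batch(struct context *ctx)
{
    /* Point at the preallocated buffer instead of calling malloc() here. */
    uint64_t *rec_pfns = ctx->m->rec_pfns;
    unsigned int i;

    /* Illustrative packing only: pfn in the low bits, type in the high bits. */
    for ( i = 0; i < ctx->nr_batch_pfns; ++i )
        rec_pfns[i] = ((uint64_t)ctx->types[i] << 32) | ctx->batch_pfns[i];

    /* The real code would writev() a record built from rec_pfns;
     * printing the packed values stands in for that here. */
    for ( i = 0; i < ctx->nr_batch_pfns; ++i )
        printf("entry %u: %#" PRIx64 "\n", i, rec_pfns[i]);

    ctx->nr_batch_pfns = 0;
    return 0;
}

int main(void)
{
    struct context ctx = { 0 };

    ctx.m = calloc(1, sizeof(*ctx.m));    /* one allocation for the whole run */
    if ( !ctx.m )
        return 1;

    ctx.batch_pfns[0] = 0x1000; ctx.types[0] = 0;
    ctx.batch_pfns[1] = 0x1001; ctx.types[1] = 0;
    ctx.nr_batch_pfns = 2;

    write_one_batch(&ctx);

    free(ctx.m);                          /* freed once at teardown, not per batch */
    return 0;
}

The trade-off is a larger, fixed per-context footprint (MAX_BATCH_SIZE entries are always reserved) in exchange for removing an allocation failure path and a malloc()/free() pair from the hot batch-writing loop.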