Message ID | 20210601161118.18986-9-olaf@aepfle.de
---|---
State | Superseded
Series | leftover from 2020
On 01.06.21 18:10, Olaf Hering wrote:
> Show how fast domU pages are transferred in each iteration.
>
> The relevant data is how fast the pfns travel, not so much how much
> protocol overhead exists. So the reported MiB/sec is just for pfns.
>
> Signed-off-by: Olaf Hering <olaf@aepfle.de>
> ---
>  tools/libs/saverestore/common.h |  2 ++
>  tools/libs/saverestore/save.c   | 47 +++++++++++++++++++++++++++++++++
>  2 files changed, 49 insertions(+)
>
> diff --git a/tools/libs/saverestore/common.h b/tools/libs/saverestore/common.h
> index 50a8479d39..f5fe23caad 100644
> --- a/tools/libs/saverestore/common.h
> +++ b/tools/libs/saverestore/common.h
> @@ -250,6 +250,8 @@ struct xc_sr_context
>      bool debug;
>
>      unsigned long p2m_size;
> +    size_t pages_sent;
> +    size_t overhead_sent;
>
>      struct precopy_stats stats;
>
> diff --git a/tools/libs/saverestore/save.c b/tools/libs/saverestore/save.c
> index bcff2d28f5..760ca04a84 100644
> --- a/tools/libs/saverestore/save.c
> +++ b/tools/libs/saverestore/save.c
> @@ -1,5 +1,6 @@
>  #include <assert.h>
>  #include <arpa/inet.h>
> +#include <time.h>
>
>  #include "common.h"
>
> @@ -238,6 +239,8 @@ static int write_batch(struct xc_sr_context *ctx)
>      iov[3].iov_len = nr_pfns * sizeof(*rec_pfns);
>
>      iovcnt = 4;
> +    ctx->save.pages_sent += nr_pages;
> +    ctx->save.overhead_sent += sizeof(rec) + sizeof(hdr) + nr_pfns * sizeof(*rec_pfns);
>
>      if ( nr_pages )
>      {
> @@ -357,6 +360,43 @@ static int suspend_domain(struct xc_sr_context *ctx)
>      return 0;
>  }
>
> +static void show_transfer_rate(struct xc_sr_context *ctx, struct timespec *start)
> +{
> +    xc_interface *xch = ctx->xch;
> +    struct timespec end = {}, diff = {};
> +    size_t ms, MiB_sec = ctx->save.pages_sent * PAGE_SIZE;

I'd rather not initialize MiB_sec here ...

> +
> +    if (!MiB_sec)

... and test for ctx->save.pages_sent to be non-zero here.

> +        return;
> +
> +    if ( clock_gettime(CLOCK_MONOTONIC, &end) )
> +        PERROR("clock_gettime");
> +
> +    if ( (end.tv_nsec - start->tv_nsec) < 0 )
> +    {
> +        diff.tv_sec = end.tv_sec - start->tv_sec - 1;
> +        diff.tv_nsec = end.tv_nsec - start->tv_nsec + (1000U*1000U*1000U);
> +    }
> +    else
> +    {
> +        diff.tv_sec = end.tv_sec - start->tv_sec;
> +        diff.tv_nsec = end.tv_nsec - start->tv_nsec;
> +    }
> +
> +    ms = (diff.tv_nsec / (1000U*1000U));
> +    if (!ms)
> +        ms = 1;

I'd move this ...

> +    ms += (diff.tv_sec * 1000U);

... below this.

> +
> +    MiB_sec *= 1000U;
> +    MiB_sec /= ms;
> +    MiB_sec /= 1024U*1024U;

Avoid MiB_sec holding bytes per second for some time and use:

    MiB_sec = ((ctx->save.pages_sent * PAGE_SIZE * 1000) / ms) /
              (1024U * 1024U);


Juergen
Am Wed, 2 Jun 2021 09:10:44 +0200
schrieb Juergen Gross <jgross@suse.com>:

> MiB_sec = ((ctx->save.pages_sent * PAGE_SIZE * 1000) / ms) /
>           (1024U * 1024U);

I'm not sure: how does this improve the patch?

Olaf
On 08.06.21 10:58, Olaf Hering wrote:
> Am Wed, 2 Jun 2021 09:10:44 +0200
> schrieb Juergen Gross <jgross@suse.com>:
>
>> MiB_sec = ((ctx->save.pages_sent * PAGE_SIZE * 1000) / ms) /
>>           (1024U * 1024U);
>
> I'm not sure: how does this improve the patch?

The scattered calculation makes it much harder to verify (at least for
me). And initializing a variable named "MiB_sec" with a value that is
clearly a byte count doesn't help.

Juergen
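For reference, this is roughly how show_transfer_rate() would look with all
three review suggestions folded in: test pages_sent instead of initializing
MiB_sec with a byte count, compute the full elapsed milliseconds before
clamping, and derive MiB_sec in a single expression. This is only a sketch
of the proposed shape, not a revision that was actually posted:

    static void show_transfer_rate(struct xc_sr_context *ctx, struct timespec *start)
    {
        xc_interface *xch = ctx->xch;
        struct timespec end = {}, diff = {};
        size_t ms, MiB_sec;

        /* Bail out early; nothing to report if no pages were sent. */
        if ( !ctx->save.pages_sent )
            return;

        if ( clock_gettime(CLOCK_MONOTONIC, &end) )
            PERROR("clock_gettime");

        /* Normalize: borrow one second if the nanosecond part would go negative. */
        if ( (end.tv_nsec - start->tv_nsec) < 0 )
        {
            diff.tv_sec = end.tv_sec - start->tv_sec - 1;
            diff.tv_nsec = end.tv_nsec - start->tv_nsec + (1000U*1000U*1000U);
        }
        else
        {
            diff.tv_sec = end.tv_sec - start->tv_sec;
            diff.tv_nsec = end.tv_nsec - start->tv_nsec;
        }

        /* Full elapsed time in ms first, then clamp to at least 1 ms so
         * the division below cannot divide by zero. */
        ms = (diff.tv_sec * 1000U) + (diff.tv_nsec / (1000U*1000U));
        if ( !ms )
            ms = 1;

        /* One expression: bytes scaled to bytes/sec, then to MiB/sec. */
        MiB_sec = ((ctx->save.pages_sent * PAGE_SIZE * 1000) / ms) /
                  (1024U * 1024U);

        errno = 0;
        IPRINTF("%s: %zu bytes + %zu pages in %ld.%09ld sec, %zu MiB/sec", __func__,
                ctx->save.overhead_sent, ctx->save.pages_sent, diff.tv_sec,
                diff.tv_nsec, MiB_sec);
    }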
diff --git a/tools/libs/saverestore/common.h b/tools/libs/saverestore/common.h
index 50a8479d39..f5fe23caad 100644
--- a/tools/libs/saverestore/common.h
+++ b/tools/libs/saverestore/common.h
@@ -250,6 +250,8 @@ struct xc_sr_context
     bool debug;
 
     unsigned long p2m_size;
+    size_t pages_sent;
+    size_t overhead_sent;
 
     struct precopy_stats stats;
 
diff --git a/tools/libs/saverestore/save.c b/tools/libs/saverestore/save.c
index bcff2d28f5..760ca04a84 100644
--- a/tools/libs/saverestore/save.c
+++ b/tools/libs/saverestore/save.c
@@ -1,5 +1,6 @@
 #include <assert.h>
 #include <arpa/inet.h>
+#include <time.h>
 
 #include "common.h"
 
@@ -238,6 +239,8 @@ static int write_batch(struct xc_sr_context *ctx)
     iov[3].iov_len = nr_pfns * sizeof(*rec_pfns);
 
     iovcnt = 4;
+    ctx->save.pages_sent += nr_pages;
+    ctx->save.overhead_sent += sizeof(rec) + sizeof(hdr) + nr_pfns * sizeof(*rec_pfns);
 
     if ( nr_pages )
     {
@@ -357,6 +360,43 @@ static int suspend_domain(struct xc_sr_context *ctx)
     return 0;
 }
 
+static void show_transfer_rate(struct xc_sr_context *ctx, struct timespec *start)
+{
+    xc_interface *xch = ctx->xch;
+    struct timespec end = {}, diff = {};
+    size_t ms, MiB_sec = ctx->save.pages_sent * PAGE_SIZE;
+
+    if (!MiB_sec)
+        return;
+
+    if ( clock_gettime(CLOCK_MONOTONIC, &end) )
+        PERROR("clock_gettime");
+
+    if ( (end.tv_nsec - start->tv_nsec) < 0 )
+    {
+        diff.tv_sec = end.tv_sec - start->tv_sec - 1;
+        diff.tv_nsec = end.tv_nsec - start->tv_nsec + (1000U*1000U*1000U);
+    }
+    else
+    {
+        diff.tv_sec = end.tv_sec - start->tv_sec;
+        diff.tv_nsec = end.tv_nsec - start->tv_nsec;
+    }
+
+    ms = (diff.tv_nsec / (1000U*1000U));
+    if (!ms)
+        ms = 1;
+    ms += (diff.tv_sec * 1000U);
+
+    MiB_sec *= 1000U;
+    MiB_sec /= ms;
+    MiB_sec /= 1024U*1024U;
+
+    errno = 0;
+    IPRINTF("%s: %zu bytes + %zu pages in %ld.%09ld sec, %zu MiB/sec", __func__,
+            ctx->save.overhead_sent, ctx->save.pages_sent, diff.tv_sec, diff.tv_nsec, MiB_sec);
+}
+
 /*
  * Send a subset of pages in the guests p2m, according to the dirty bitmap.
  * Used for each subsequent iteration of the live migration loop.
@@ -370,9 +410,15 @@ static int send_dirty_pages(struct xc_sr_context *ctx,
     xen_pfn_t p;
     unsigned long written;
     int rc;
+    struct timespec start = {};
     DECLARE_HYPERCALL_BUFFER_SHADOW(unsigned long, dirty_bitmap,
                                     &ctx->save.dirty_bitmap_hbuf);
 
+    ctx->save.pages_sent = 0;
+    ctx->save.overhead_sent = 0;
+    if ( clock_gettime(CLOCK_MONOTONIC, &start) )
+        PERROR("clock_gettime");
+
     for ( p = 0, written = 0; p < ctx->save.p2m_size; ++p )
     {
         if ( !test_bit(p, dirty_bitmap) )
@@ -396,6 +442,7 @@ static int send_dirty_pages(struct xc_sr_context *ctx,
     if ( written > entries )
         DPRINTF("Bitmap contained more entries than expected...");
 
+    show_transfer_rate(ctx, &start);
     xc_report_progress_step(xch, entries, entries);
 
     return ctx->save.ops.check_vm_state(ctx);
Show how fast domU pages are transferred in each iteration.

The relevant data is how fast the pfns travel, not so much how much
protocol overhead exists. So the reported MiB/sec is just for pfns.

Signed-off-by: Olaf Hering <olaf@aepfle.de>
---
 tools/libs/saverestore/common.h |  2 ++
 tools/libs/saverestore/save.c   | 47 +++++++++++++++++++++++++++++++++
 2 files changed, 49 insertions(+)
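The timing technique used by the patch is self-contained enough to try
outside of the Xen tree. Below is a minimal standalone sketch of the same
idea: CLOCK_MONOTONIC timestamps around a workload, a manual timespec
subtraction with a nanosecond borrow, and a MiB/sec figure derived from
the result. The page count and the 4096-byte page size are made-up
illustration values, and nanosleep() stands in for the actual transfer:

    #include <stdio.h>
    #include <time.h>

    #define DEMO_PAGE_SIZE 4096UL   /* illustration value, not Xen's PAGE_SIZE */

    int main(void)
    {
        struct timespec start, end, diff;
        size_t pages = 100000;      /* hypothetical number of pages "sent" */
        size_t ms, mib_sec;

        clock_gettime(CLOCK_MONOTONIC, &start);
        /* Stand-in for the actual page transfer: sleep for ~250 ms. */
        nanosleep(&(struct timespec){ .tv_nsec = 250 * 1000 * 1000 }, NULL);
        clock_gettime(CLOCK_MONOTONIC, &end);

        /* Same normalization as the patch: borrow one second when the
         * nanosecond part of the difference would be negative. */
        if ( (end.tv_nsec - start.tv_nsec) < 0 )
        {
            diff.tv_sec = end.tv_sec - start.tv_sec - 1;
            diff.tv_nsec = end.tv_nsec - start.tv_nsec + 1000000000L;
        }
        else
        {
            diff.tv_sec = end.tv_sec - start.tv_sec;
            diff.tv_nsec = end.tv_nsec - start.tv_nsec;
        }

        /* Milliseconds, clamped to 1 to avoid dividing by zero below. */
        ms = diff.tv_sec * 1000 + diff.tv_nsec / 1000000;
        if ( !ms )
            ms = 1;

        mib_sec = ((pages * DEMO_PAGE_SIZE * 1000) / ms) / (1024UL * 1024UL);
        printf("%zu pages in %ld.%09ld sec, %zu MiB/sec\n",
               pages, (long)diff.tv_sec, (long)diff.tv_nsec, mib_sec);
        return 0;
    }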