diff mbox

[RFC,05/20] libxc/xc_sr: factor out filter_pages()

Message ID 1490605592-12189-6-git-send-email-jtotto@uwaterloo.ca (mailing list archive)
State New, archived
Headers show

Commit Message

Joshua Otto March 27, 2017, 9:06 a.m. UTC
When processing a PAGE_DATA record, the restore side needs to set the
types of incoming pages using the appropriate restore op and filter the
list of pfns in the record to the subset that are 'backed' - ie.
accompanied by real backing data in the stream that needs to be filled
in.

Both of these steps are also required when processing postcopy records,
so factor them out into common helpers.

No functional change.

Signed-off-by: Joshua Otto <jtotto@uwaterloo.ca>
---
 tools/libxc/xc_sr_restore.c | 100 +++++++++++++++++++++++++++++++++-----------
 1 file changed, 75 insertions(+), 25 deletions(-)

Comments

Andrew Cooper March 28, 2017, 7:27 p.m. UTC | #1
On 27/03/17 10:06, Joshua Otto wrote:
> diff --git a/tools/libxc/xc_sr_restore.c b/tools/libxc/xc_sr_restore.c
> index 481a904..8574ee8 100644
> --- a/tools/libxc/xc_sr_restore.c
> +++ b/tools/libxc/xc_sr_restore.c
> @@ -194,6 +194,68 @@ int populate_pfns(struct xc_sr_context *ctx, unsigned count,
>      return rc;
>  }
>  
> +static void set_page_types(struct xc_sr_context *ctx, unsigned count,
> +                           xen_pfn_t *pfns, uint32_t *types)
> +{
> +    unsigned i;

Please use unsigned int rather than just "unsigned" throughout.

> +
> +    for ( i = 0; i < count; ++i )
> +        ctx->restore.ops.set_page_type(ctx, pfns[i], types[i]);
> +}
> +
> +/*
> + * Given count pfns and their types, allocate and fill in buffer bpfns with only
> + * those pfns that are 'backed' by real page data that needs to be migrated.
> + * The caller must later free() *bpfns.
> + *
> + * Returns 0 on success and non-0 on failure.  *bpfns can be free()ed even after
> + * failure.
> + */
> +static int filter_pages(struct xc_sr_context *ctx,
> +                        unsigned count,
> +                        xen_pfn_t *pfns,
> +                        uint32_t *types,
> +                        /* OUT */ unsigned *nr_pages,
> +                        /* OUT */ xen_pfn_t **bpfns)
> +{
> +    xc_interface *xch = ctx->xch;

Pointers to arrays are very easy to get wrong in C.  This code will be
less error-prone if you use

xen_pfn_t *_pfns;  (variable name subject to improvement)

> +    unsigned i;
> +
> +    *nr_pages = 0;
> +    *bpfns = malloc(count * sizeof(*bpfns));

_pfns = *bpfns = malloc(...).

Then use _pfns in place of (*bpfns) everywhere else.

However, your sizeof has the wrong indirection.  It works on x86
because xen_pfn_t is the same size as a pointer, but it will blow up on
32bit ARM, where a pointer is 4 bytes but xen_pfn_t is 8 bytes.

> +    if ( !(*bpfns) )
> +    {
> +        ERROR("Failed to allocate %zu bytes to process page data",
> +              count * (sizeof(*bpfns)));
> +        return -1;
> +    }
> +
> +    for ( i = 0; i < count; ++i )
> +    {
> +        switch ( types[i] )
> +        {
> +        case XEN_DOMCTL_PFINFO_NOTAB:
> +
> +        case XEN_DOMCTL_PFINFO_L1TAB:
> +        case XEN_DOMCTL_PFINFO_L1TAB | XEN_DOMCTL_PFINFO_LPINTAB:
> +
> +        case XEN_DOMCTL_PFINFO_L2TAB:
> +        case XEN_DOMCTL_PFINFO_L2TAB | XEN_DOMCTL_PFINFO_LPINTAB:
> +
> +        case XEN_DOMCTL_PFINFO_L3TAB:
> +        case XEN_DOMCTL_PFINFO_L3TAB | XEN_DOMCTL_PFINFO_LPINTAB:
> +
> +        case XEN_DOMCTL_PFINFO_L4TAB:
> +        case XEN_DOMCTL_PFINFO_L4TAB | XEN_DOMCTL_PFINFO_LPINTAB:
> +
> +            (*bpfns)[(*nr_pages)++] = pfns[i];
> +            break;
> +        }
> +    }
> +
> +    return 0;
> +}
> +
>  /*
>   * Given a list of pfns, their types, and a block of page data from the
>   * stream, populate and record their types, map the relevant subset and copy
> @@ -203,7 +265,7 @@ static int process_page_data(struct xc_sr_context *ctx, unsigned count,
>                               xen_pfn_t *pfns, uint32_t *types, void *page_data)
>  {
>      xc_interface *xch = ctx->xch;
> -    xen_pfn_t *mfns = malloc(count * sizeof(*mfns));
> +    xen_pfn_t *mfns = NULL;

This shows a naming bug, which is my fault.  This should be named gfns,
not mfns.  (It inherits its name from the legacy migration code, but
that was also wrong.)

Please correct it, either in this patch or another; the memory
management terms are hard enough, even when all the code is correct.

~Andrew

>      int *map_errs = malloc(count * sizeof(*map_errs));
>      int rc;
>      void *mapping = NULL, *guest_page = NULL;
> @@ -211,11 +273,11 @@ static int process_page_data(struct xc_sr_context *ctx, unsigned count,
>          j,         /* j indexes the subset of pfns we decide to map. */
>          nr_pages = 0;
>  
> -    if ( !mfns || !map_errs )
> +    if ( !map_errs )
>      {
>          rc = -1;
>          ERROR("Failed to allocate %zu bytes to process page data",
> -              count * (sizeof(*mfns) + sizeof(*map_errs)));
> +              count * sizeof(*map_errs));
>          goto err;
>      }
>  
>
Joshua Otto March 30, 2017, 4:42 a.m. UTC | #2
On Tue, Mar 28, 2017 at 08:27:48PM +0100, Andrew Cooper wrote:
> On 27/03/17 10:06, Joshua Otto wrote:
> > diff --git a/tools/libxc/xc_sr_restore.c b/tools/libxc/xc_sr_restore.c
> > index 481a904..8574ee8 100644
> > --- a/tools/libxc/xc_sr_restore.c
> > +++ b/tools/libxc/xc_sr_restore.c
> > @@ -194,6 +194,68 @@ int populate_pfns(struct xc_sr_context *ctx, unsigned count,
> >      return rc;
> >  }
> >  
> > +static void set_page_types(struct xc_sr_context *ctx, unsigned count,
> > +                           xen_pfn_t *pfns, uint32_t *types)
> > +{
> > +    unsigned i;
> 
> Please use unsigned int rather than just "unsigned" throughout.

Okay.  (For what it's worth, I chose plain "unsigned" here for consistency with
the rest of xc_sr_save/xc_sr_restore)

> > +
> > +    for ( i = 0; i < count; ++i )
> > +        ctx->restore.ops.set_page_type(ctx, pfns[i], types[i]);
> > +}
> > +
> > +/*
> > + * Given count pfns and their types, allocate and fill in buffer bpfns with only
> > + * those pfns that are 'backed' by real page data that needs to be migrated.
> > + * The caller must later free() *bpfns.
> > + *
> > + * Returns 0 on success and non-0 on failure.  *bpfns can be free()ed even after
> > + * failure.
> > + */
> > +static int filter_pages(struct xc_sr_context *ctx,
> > +                        unsigned count,
> > +                        xen_pfn_t *pfns,
> > +                        uint32_t *types,
> > +                        /* OUT */ unsigned *nr_pages,
> > +                        /* OUT */ xen_pfn_t **bpfns)
> > +{
> > +    xc_interface *xch = ctx->xch;
> 
> Pointers to arrays are very easy to get wrong in C.  This code will be
> less error-prone if you use
> 
> xen_pfn_t *_pfns;  (variable name subject to improvement)
> 
> > +    unsigned i;
> > +
> > +    *nr_pages = 0;
> > +    *bpfns = malloc(count * sizeof(*bpfns));
> 
> _pfns = *bpfns = malloc(...).
> 
> Then use _pfns in place of (*bpfns) everywhere else.
> 
> However,  your sizeof has the wrong indirection.  It works on x86
> because xen_pfn_t is the same size as a pointer, but it will blow up on
> 32bit ARM, where a pointer is 4 bytes but xen_pfn_t is 8 bytes.

Agh!  Oh dear.

> > +    if ( !(*bpfns) )
> > +    {
> > +        ERROR("Failed to allocate %zu bytes to process page data",
> > +              count * (sizeof(*bpfns)));
> > +        return -1;
> > +    }
> > +
> > +    for ( i = 0; i < count; ++i )
> > +    {
> > +        switch ( types[i] )
> > +        {
> > +        case XEN_DOMCTL_PFINFO_NOTAB:
> > +
> > +        case XEN_DOMCTL_PFINFO_L1TAB:
> > +        case XEN_DOMCTL_PFINFO_L1TAB | XEN_DOMCTL_PFINFO_LPINTAB:
> > +
> > +        case XEN_DOMCTL_PFINFO_L2TAB:
> > +        case XEN_DOMCTL_PFINFO_L2TAB | XEN_DOMCTL_PFINFO_LPINTAB:
> > +
> > +        case XEN_DOMCTL_PFINFO_L3TAB:
> > +        case XEN_DOMCTL_PFINFO_L3TAB | XEN_DOMCTL_PFINFO_LPINTAB:
> > +
> > +        case XEN_DOMCTL_PFINFO_L4TAB:
> > +        case XEN_DOMCTL_PFINFO_L4TAB | XEN_DOMCTL_PFINFO_LPINTAB:
> > +
> > +            (*bpfns)[(*nr_pages)++] = pfns[i];
> > +            break;
> > +        }
> > +    }
> > +
> > +    return 0;
> > +}
> > +
> >  /*
> >   * Given a list of pfns, their types, and a block of page data from the
> >   * stream, populate and record their types, map the relevant subset and copy
> > @@ -203,7 +265,7 @@ static int process_page_data(struct xc_sr_context *ctx, unsigned count,
> >                               xen_pfn_t *pfns, uint32_t *types, void *page_data)
> >  {
> >      xc_interface *xch = ctx->xch;
> > -    xen_pfn_t *mfns = malloc(count * sizeof(*mfns));
> > +    xen_pfn_t *mfns = NULL;
> 
> This shows a naming bug, which is my fault.  This should be named gfns,
> not mfns.  (It inherits its name from the legacy migration code, but
> that was also wrong.)
> 
> Please correct it, either in this patch or another; the memory
> management terms are hard enough, even when all the code is correct.

Ahhhhhhh - I actually found this desperately confusing when trying to grok the
code originally.  Thanks for clearing that up!

Josh
diff mbox

Patch

diff --git a/tools/libxc/xc_sr_restore.c b/tools/libxc/xc_sr_restore.c
index 481a904..8574ee8 100644
--- a/tools/libxc/xc_sr_restore.c
+++ b/tools/libxc/xc_sr_restore.c
@@ -194,6 +194,68 @@  int populate_pfns(struct xc_sr_context *ctx, unsigned count,
     return rc;
 }
 
+static void set_page_types(struct xc_sr_context *ctx, unsigned count,
+                           xen_pfn_t *pfns, uint32_t *types)
+{
+    unsigned i;
+
+    for ( i = 0; i < count; ++i )
+        ctx->restore.ops.set_page_type(ctx, pfns[i], types[i]);
+}
+
+/*
+ * Given count pfns and their types, allocate and fill in buffer bpfns with only
+ * those pfns that are 'backed' by real page data that needs to be migrated.
+ * The caller must later free() *bpfns.
+ *
+ * Returns 0 on success and non-0 on failure.  *bpfns can be free()ed even after
+ * failure.
+ */
+static int filter_pages(struct xc_sr_context *ctx,
+                        unsigned count,
+                        xen_pfn_t *pfns,
+                        uint32_t *types,
+                        /* OUT */ unsigned *nr_pages,
+                        /* OUT */ xen_pfn_t **bpfns)
+{
+    xc_interface *xch = ctx->xch;
+    unsigned i;
+
+    *nr_pages = 0;
+    *bpfns = malloc(count * sizeof(*bpfns));
+    if ( !(*bpfns) )
+    {
+        ERROR("Failed to allocate %zu bytes to process page data",
+              count * (sizeof(*bpfns)));
+        return -1;
+    }
+
+    for ( i = 0; i < count; ++i )
+    {
+        switch ( types[i] )
+        {
+        case XEN_DOMCTL_PFINFO_NOTAB:
+
+        case XEN_DOMCTL_PFINFO_L1TAB:
+        case XEN_DOMCTL_PFINFO_L1TAB | XEN_DOMCTL_PFINFO_LPINTAB:
+
+        case XEN_DOMCTL_PFINFO_L2TAB:
+        case XEN_DOMCTL_PFINFO_L2TAB | XEN_DOMCTL_PFINFO_LPINTAB:
+
+        case XEN_DOMCTL_PFINFO_L3TAB:
+        case XEN_DOMCTL_PFINFO_L3TAB | XEN_DOMCTL_PFINFO_LPINTAB:
+
+        case XEN_DOMCTL_PFINFO_L4TAB:
+        case XEN_DOMCTL_PFINFO_L4TAB | XEN_DOMCTL_PFINFO_LPINTAB:
+
+            (*bpfns)[(*nr_pages)++] = pfns[i];
+            break;
+        }
+    }
+
+    return 0;
+}
+
 /*
  * Given a list of pfns, their types, and a block of page data from the
  * stream, populate and record their types, map the relevant subset and copy
@@ -203,7 +265,7 @@  static int process_page_data(struct xc_sr_context *ctx, unsigned count,
                              xen_pfn_t *pfns, uint32_t *types, void *page_data)
 {
     xc_interface *xch = ctx->xch;
-    xen_pfn_t *mfns = malloc(count * sizeof(*mfns));
+    xen_pfn_t *mfns = NULL;
     int *map_errs = malloc(count * sizeof(*map_errs));
     int rc;
     void *mapping = NULL, *guest_page = NULL;
@@ -211,11 +273,11 @@  static int process_page_data(struct xc_sr_context *ctx, unsigned count,
         j,         /* j indexes the subset of pfns we decide to map. */
         nr_pages = 0;
 
-    if ( !mfns || !map_errs )
+    if ( !map_errs )
     {
         rc = -1;
         ERROR("Failed to allocate %zu bytes to process page data",
-              count * (sizeof(*mfns) + sizeof(*map_errs)));
+              count * sizeof(*map_errs));
         goto err;
     }
 
@@ -226,31 +288,19 @@  static int process_page_data(struct xc_sr_context *ctx, unsigned count,
         goto err;
     }
 
-    for ( i = 0; i < count; ++i )
-    {
-        ctx->restore.ops.set_page_type(ctx, pfns[i], types[i]);
-
-        switch ( types[i] )
-        {
-        case XEN_DOMCTL_PFINFO_NOTAB:
-
-        case XEN_DOMCTL_PFINFO_L1TAB:
-        case XEN_DOMCTL_PFINFO_L1TAB | XEN_DOMCTL_PFINFO_LPINTAB:
-
-        case XEN_DOMCTL_PFINFO_L2TAB:
-        case XEN_DOMCTL_PFINFO_L2TAB | XEN_DOMCTL_PFINFO_LPINTAB:
+    set_page_types(ctx, count, pfns, types);
 
-        case XEN_DOMCTL_PFINFO_L3TAB:
-        case XEN_DOMCTL_PFINFO_L3TAB | XEN_DOMCTL_PFINFO_LPINTAB:
-
-        case XEN_DOMCTL_PFINFO_L4TAB:
-        case XEN_DOMCTL_PFINFO_L4TAB | XEN_DOMCTL_PFINFO_LPINTAB:
-
-            mfns[nr_pages++] = ctx->restore.ops.pfn_to_gfn(ctx, pfns[i]);
-            break;
-        }
+    rc = filter_pages(ctx, count, pfns, types, &nr_pages, &mfns);
+    if ( rc )
+    {
+        ERROR("Failed to filter mfns for batch of %u pages", count);
+        goto err;
     }
 
+    /* Map physically-backed pfns ('bpfns') to their gmfns. */
+    for ( i = 0; i < nr_pages; ++i )
+        mfns[i] = ctx->restore.ops.pfn_to_gfn(ctx, mfns[i]);
+
     /* Nothing to do? */
     if ( nr_pages == 0 )
         goto done;