[v2,REPOST,09/12] x86/hvm/ioreq: simplify code and use consistent naming

Message ID 20170822145107.6877-10-paul.durrant@citrix.com (mailing list archive)
State New, archived

Commit Message

Paul Durrant Aug. 22, 2017, 2:51 p.m. UTC
This patch re-works much of the IOREQ server initialization and teardown
code:

- The hvm_map/unmap_ioreq_gfn() functions are expanded to call through
  to hvm_alloc/free_ioreq_gfn() rather than expecting them to be called
  separately by outer functions.
- Several functions now test the validity of the hvm_ioreq_page gfn value
  to determine whether they need to act. This means they can be safely called
  for the bufioreq page even when it is not used.
- hvm_add/remove_ioreq_gfn() simply return in the case of the default
  IOREQ server so callers no longer need to test before calling.
- hvm_ioreq_server_setup_pages() is renamed to hvm_ioreq_server_map_pages()
  to mirror the existing hvm_ioreq_server_unmap_pages().

All of this significantly shortens the code.
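
For example, the teardown path collapses to a pair of unconditional
calls (taken verbatim from the patch below):

  static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s)
  {
      hvm_unmap_ioreq_gfn(s, true);
      hvm_unmap_ioreq_gfn(s, false);
  }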

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
 xen/arch/x86/hvm/ioreq.c | 181 ++++++++++++++++++-----------------------------
 1 file changed, 69 insertions(+), 112 deletions(-)

Comments

Roger Pau Monne Aug. 24, 2017, 5:02 p.m. UTC | #1
On Tue, Aug 22, 2017 at 03:51:03PM +0100, Paul Durrant wrote:
> This patch re-works much of the IOREQ server initialization and teardown
> code:
> 
> - The hvm_map/unmap_ioreq_gfn() functions are expanded to call through
>   to hvm_alloc/free_ioreq_gfn() rather than expecting them to be called
>   separately by outer functions.
> - Several functions now test the validity of the hvm_ioreq_page gfn value
>   to determine whether they need to act. This means they can be safely called
>   for the bufioreq page even when it is not used.
> - hvm_add/remove_ioreq_gfn() simply return in the case of the default
>   IOREQ server so callers no longer need to test before calling.
> - hvm_ioreq_server_setup_pages() is renamed to hvm_ioreq_server_map_pages()
>   to mirror the existing hvm_ioreq_server_unmap_pages().
> 
> All of this significantly shortens the code.
> 
> Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
> ---
> Cc: Jan Beulich <jbeulich@suse.com>
> Cc: Andrew Cooper <andrew.cooper3@citrix.com>
> ---
>  xen/arch/x86/hvm/ioreq.c | 181 ++++++++++++++++++-----------------------------
>  1 file changed, 69 insertions(+), 112 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
> index 5737082238..edfb394c59 100644
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -181,63 +181,76 @@ bool handle_hvm_io_completion(struct vcpu *v)
>      return true;
>  }
>  
> -static int hvm_alloc_ioreq_gfn(struct domain *d, unsigned long *gfn)
> +static unsigned long hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)

gfn_t as the return type instead? I see that you are moving it, so I
won't insist (I assume there's also some other refactoring involved in
making this return gfn_t). I see there are also further uses of
unsigned long to store gfns, I'm not going to point those out.
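
For reference, a gfn_t flavour might look like the sketch below
(_gfn(), INVALID_GFN and gfn_eq() being the existing typesafe
helpers; this is illustrative only, not something in the patch):

  static gfn_t hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
  {
      struct domain *d = s->domain;
      unsigned int i;

      ASSERT(!s->is_default);

      for ( i = 0; i < sizeof(d->arch.hvm_domain.ioreq_gfn.mask) * 8; i++ )
      {
          if ( test_and_clear_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask) )
              return _gfn(d->arch.hvm_domain.ioreq_gfn.base + i);
      }

      /* Callers would then test with gfn_eq(gfn, INVALID_GFN). */
      return INVALID_GFN;
  }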

>  {
> +    struct domain *d = s->domain;
>      unsigned int i;
> -    int rc;
>  
> -    rc = -ENOMEM;
> +    ASSERT(!s->is_default);
> +
>      for ( i = 0; i < sizeof(d->arch.hvm_domain.ioreq_gfn.mask) * 8; i++ )
>      {
>          if ( test_and_clear_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask) )
>          {

The braces are not strictly needed anymore, but that's a question of
taste.
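
E.g. (style only):

  for ( i = 0; i < sizeof(d->arch.hvm_domain.ioreq_gfn.mask) * 8; i++ )
      if ( test_and_clear_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask) )
          return d->arch.hvm_domain.ioreq_gfn.base + i;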

> -            *gfn = d->arch.hvm_domain.ioreq_gfn.base + i;
> -            rc = 0;
> -            break;
> +            return d->arch.hvm_domain.ioreq_gfn.base + i;
>          }
>      }
>  
> -    return rc;
> +    return gfn_x(INVALID_GFN);
>  }
>  
> -static void hvm_free_ioreq_gfn(struct domain *d, unsigned long gfn)
> +static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s,
> +                               unsigned long gfn)
>  {
> +    struct domain *d = s->domain;
>      unsigned int i = gfn - d->arch.hvm_domain.ioreq_gfn.base;
>  
> -    if ( gfn != gfn_x(INVALID_GFN) )
> -        set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
> +    ASSERT(!s->is_default);

I would maybe add a gfn != gfn_x(INVALID_GFN) in the ASSERT.
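
I.e. something like:

  ASSERT(!s->is_default && gfn != gfn_x(INVALID_GFN));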

> +
> +    set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
>  }
>  
> -static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool buf)
> +static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)

I'm not sure if you need the buf parameter, it seems in all cases you
want to unmap everything when calling hvm_unmap_ioreq_gfn? (same
applies to hvm_remove_ioreq_gfn and hvm_add_ioreq_gfn)
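
(If the parameter were dropped, the buf-less shape could look like the
sketch below; hvm_unmap_one_ioreq_gfn() is a hypothetical helper name,
not anything in the patch.)

  static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s)
  {
      /* Hypothetical: always tear down both rings. */
      hvm_unmap_one_ioreq_gfn(s, true);   /* bufioreq */
      hvm_unmap_one_ioreq_gfn(s, false);  /* ioreq */
  }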

>  static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
> -                                      unsigned long ioreq_gfn,
> -                                      unsigned long bufioreq_gfn)
> -{
> -    int rc;
> -
> -    rc = hvm_map_ioreq_page(s, false, ioreq_gfn);
> -    if ( rc )
> -        return rc;
> -
> -    if ( bufioreq_gfn != gfn_x(INVALID_GFN) )
> -        rc = hvm_map_ioreq_page(s, true, bufioreq_gfn);
> -
> -    if ( rc )
> -        hvm_unmap_ioreq_page(s, false);
> -
> -    return rc;
> -}
> -
> -static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
> -                                        bool handle_bufioreq)
> +                                      bool handle_bufioreq)
>  {
> -    struct domain *d = s->domain;
> -    unsigned long ioreq_gfn = gfn_x(INVALID_GFN);
> -    unsigned long bufioreq_gfn = gfn_x(INVALID_GFN);
> -    int rc;
> -
> -    if ( s->is_default )
> -    {
> -        /*
> -         * The default ioreq server must handle buffered ioreqs, for
> -         * backwards compatibility.
> -         */
> -        ASSERT(handle_bufioreq);
> -        return hvm_ioreq_server_map_pages(s,
> -                   d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN],
> -                   d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN]);
> -    }
> +    int rc = -ENOMEM;

No need to set rc, you are just overwriting it in the next line.
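
I.e. simply:

  int rc;

  rc = hvm_map_ioreq_gfn(s, false);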

Roger.
Paul Durrant Aug. 25, 2017, 10:18 a.m. UTC | #2
> -----Original Message-----
> From: Roger Pau Monne
> Sent: 24 August 2017 18:03
> To: Paul Durrant <Paul.Durrant@citrix.com>
> Cc: xen-devel@lists.xenproject.org; Andrew Cooper
> <Andrew.Cooper3@citrix.com>; Jan Beulich <jbeulich@suse.com>
> Subject: Re: [Xen-devel] [PATCH v2 REPOST 09/12] x86/hvm/ioreq: simplify
> code and use consistent naming
> 
> On Tue, Aug 22, 2017 at 03:51:03PM +0100, Paul Durrant wrote:
> > This patch re-works much of the IOREQ server initialization and teardown
> > code:
> >
> > - The hvm_map/unmap_ioreq_gfn() functions are expanded to call through
> >   to hvm_alloc/free_ioreq_gfn() rather than expecting them to be called
> >   separately by outer functions.
> > - Several functions now test the validity of the hvm_ioreq_page gfn value
> >   to determine whether they need to act. This means they can be safely called
> >   for the bufioreq page even when it is not used.
> > - hvm_add/remove_ioreq_gfn() simply return in the case of the default
> >   IOREQ server so callers no longer need to test before calling.
> > - hvm_ioreq_server_setup_pages() is renamed to hvm_ioreq_server_map_pages()
> >   to mirror the existing hvm_ioreq_server_unmap_pages().
> >
> > All of this significantly shortens the code.
> >
> > Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
> > ---
> > Cc: Jan Beulich <jbeulich@suse.com>
> > Cc: Andrew Cooper <andrew.cooper3@citrix.com>
> > ---
> >  xen/arch/x86/hvm/ioreq.c | 181 ++++++++++++++++++-----------------------------
> >  1 file changed, 69 insertions(+), 112 deletions(-)
> >
> > diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
> > index 5737082238..edfb394c59 100644
> > --- a/xen/arch/x86/hvm/ioreq.c
> > +++ b/xen/arch/x86/hvm/ioreq.c
> > @@ -181,63 +181,76 @@ bool handle_hvm_io_completion(struct vcpu *v)
> >      return true;
> >  }
> >
> > -static int hvm_alloc_ioreq_gfn(struct domain *d, unsigned long *gfn)
> > +static unsigned long hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
> 
> gfn_t as the return type instead? I see that you are moving it, so I
> won't insist (I assume there's also some other refactoring involved in
> making this return gfn_t). I see there are also further uses of
> unsigned long to store gfns, I'm not going to point those out.
> 
> >  {
> > +    struct domain *d = s->domain;
> >      unsigned int i;
> > -    int rc;
> >
> > -    rc = -ENOMEM;
> > +    ASSERT(!s->is_default);
> > +
> >      for ( i = 0; i < sizeof(d->arch.hvm_domain.ioreq_gfn.mask) * 8; i++ )
> >      {
> >          if ( test_and_clear_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask) )
> >          {
> 
> The braces are not strictly needed anymore, but that's a question of
> taste.
> 
> > -            *gfn = d->arch.hvm_domain.ioreq_gfn.base + i;
> > -            rc = 0;
> > -            break;
> > +            return d->arch.hvm_domain.ioreq_gfn.base + i;
> >          }
> >      }
> >
> > -    return rc;
> > +    return gfn_x(INVALID_GFN);
> >  }
> >
> > -static void hvm_free_ioreq_gfn(struct domain *d, unsigned long gfn)
> > +static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s,
> > +                               unsigned long gfn)
> >  {
> > +    struct domain *d = s->domain;
> >      unsigned int i = gfn - d->arch.hvm_domain.ioreq_gfn.base;
> >
> > -    if ( gfn != gfn_x(INVALID_GFN) )
> > -        set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
> > +    ASSERT(!s->is_default);
> 
> I would maybe add a gfn != gfn_x(INVALID_GFN) in the ASSERT.
> 
> > +
> > +    set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
> >  }
> >
> > -static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool buf)
> > +static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
> 
> I'm not sure if you need the buf parameter, it seems in all cases you
> want to unmap everything when calling hvm_unmap_ioreq_gfn? (same
> applies to hvm_remove_ioreq_gfn and hvm_add_ioreq_gfn)

It's really just so map/unmap and add/remove mirror each other.
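
I.e. each per-ring operation keeps the same (s, buf) shape:

  hvm_map_ioreq_gfn(s, buf)  /  hvm_unmap_ioreq_gfn(s, buf)
  hvm_add_ioreq_gfn(s, buf)  /  hvm_remove_ioreq_gfn(s, buf)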

> 
> >  static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
> > -                                      unsigned long ioreq_gfn,
> > -                                      unsigned long bufioreq_gfn)
> > -{
> > -    int rc;
> > -
> > -    rc = hvm_map_ioreq_page(s, false, ioreq_gfn);
> > -    if ( rc )
> > -        return rc;
> > -
> > -    if ( bufioreq_gfn != gfn_x(INVALID_GFN) )
> > -        rc = hvm_map_ioreq_page(s, true, bufioreq_gfn);
> > -
> > -    if ( rc )
> > -        hvm_unmap_ioreq_page(s, false);
> > -
> > -    return rc;
> > -}
> > -
> > -static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
> > -                                        bool handle_bufioreq)
> > +                                      bool handle_bufioreq)
> >  {
> > -    struct domain *d = s->domain;
> > -    unsigned long ioreq_gfn = gfn_x(INVALID_GFN);
> > -    unsigned long bufioreq_gfn = gfn_x(INVALID_GFN);
> > -    int rc;
> > -
> > -    if ( s->is_default )
> > -    {
> > -        /*
> > -         * The default ioreq server must handle buffered ioreqs, for
> > -         * backwards compatibility.
> > -         */
> > -        ASSERT(handle_bufioreq);
> > -        return hvm_ioreq_server_map_pages(s,
> > -                   d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN],
> > -                   d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN]);
> > -    }
> > +    int rc = -ENOMEM;
> 
> No need to set rc, you are just overwriting it in the next line.
> 

Indeed.

Thanks,

  Paul

> Roger.

Patch

diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 5737082238..edfb394c59 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -181,63 +181,76 @@  bool handle_hvm_io_completion(struct vcpu *v)
     return true;
 }
 
-static int hvm_alloc_ioreq_gfn(struct domain *d, unsigned long *gfn)
+static unsigned long hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
 {
+    struct domain *d = s->domain;
     unsigned int i;
-    int rc;
 
-    rc = -ENOMEM;
+    ASSERT(!s->is_default);
+
     for ( i = 0; i < sizeof(d->arch.hvm_domain.ioreq_gfn.mask) * 8; i++ )
     {
         if ( test_and_clear_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask) )
         {
-            *gfn = d->arch.hvm_domain.ioreq_gfn.base + i;
-            rc = 0;
-            break;
+            return d->arch.hvm_domain.ioreq_gfn.base + i;
         }
     }
 
-    return rc;
+    return gfn_x(INVALID_GFN);
 }
 
-static void hvm_free_ioreq_gfn(struct domain *d, unsigned long gfn)
+static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s,
+                               unsigned long gfn)
 {
+    struct domain *d = s->domain;
     unsigned int i = gfn - d->arch.hvm_domain.ioreq_gfn.base;
 
-    if ( gfn != gfn_x(INVALID_GFN) )
-        set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
+    ASSERT(!s->is_default);
+
+    set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
 }
 
-static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool buf)
+static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
 {
     struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
 
+    if ( iorp->gfn == gfn_x(INVALID_GFN) )
+        return;
+
     destroy_ring_for_helper(&iorp->va, iorp->page);
+    iorp->page = NULL;
+
+    if ( !s->is_default )
+        hvm_free_ioreq_gfn(s, iorp->gfn);
+
+    iorp->gfn = gfn_x(INVALID_GFN);
 }
 
-static int hvm_map_ioreq_page(
-    struct hvm_ioreq_server *s, bool buf, unsigned long gfn)
+static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
 {
     struct domain *d = s->domain;
     struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
-    struct page_info *page;
-    void *va;
     int rc;
 
-    if ( (rc = prepare_ring_for_helper(d, gfn, &page, &va)) )
-        return rc;
-
-    if ( (iorp->va != NULL) || d->is_dying )
-    {
-        destroy_ring_for_helper(&va, page);
+    if ( d->is_dying )
         return -EINVAL;
-    }
 
-    iorp->va = va;
-    iorp->page = page;
-    iorp->gfn = gfn;
+    if ( s->is_default )
+        iorp->gfn = buf ?
+                    d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN] :
+                    d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN];
+    else
+        iorp->gfn = hvm_alloc_ioreq_gfn(s);
+
+    if ( iorp->gfn == gfn_x(INVALID_GFN) )
+        return -ENOMEM;
 
-    return 0;
+    rc = prepare_ring_for_helper(d, iorp->gfn, &iorp->page, &iorp->va);
+
+    if ( rc )
+        hvm_unmap_ioreq_gfn(s, buf);
+
+    return rc;
 }
 
 bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
@@ -251,8 +264,7 @@  bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
                           &d->arch.hvm_domain.ioreq_server.list,
                           list_entry )
     {
-        if ( (s->ioreq.va && s->ioreq.page == page) ||
-             (s->bufioreq.va && s->bufioreq.page == page) )
+        if ( (s->ioreq.page == page) || (s->bufioreq.page == page) )
         {
             found = true;
             break;
@@ -264,20 +276,30 @@  bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
     return found;
 }
 
-static void hvm_remove_ioreq_gfn(
-    struct domain *d, struct hvm_ioreq_page *iorp)
+static void hvm_remove_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
+
 {
+    struct domain *d = s->domain;
+    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
+
+    if ( s->is_default || iorp->gfn == gfn_x(INVALID_GFN) )
+        return;
+
     if ( guest_physmap_remove_page(d, _gfn(iorp->gfn),
                                    _mfn(page_to_mfn(iorp->page)), 0) )
         domain_crash(d);
     clear_page(iorp->va);
 }
 
-static int hvm_add_ioreq_gfn(
-    struct domain *d, struct hvm_ioreq_page *iorp)
+static int hvm_add_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
 {
+    struct domain *d = s->domain;
+    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
     int rc;
 
+    if ( s->is_default || iorp->gfn == gfn_x(INVALID_GFN) )
+        return 0;
+
     clear_page(iorp->va);
 
     rc = guest_physmap_add_page(d, _gfn(iorp->gfn),
@@ -412,78 +434,25 @@  static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
 }
 
 static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
-                                      unsigned long ioreq_gfn,
-                                      unsigned long bufioreq_gfn)
-{
-    int rc;
-
-    rc = hvm_map_ioreq_page(s, false, ioreq_gfn);
-    if ( rc )
-        return rc;
-
-    if ( bufioreq_gfn != gfn_x(INVALID_GFN) )
-        rc = hvm_map_ioreq_page(s, true, bufioreq_gfn);
-
-    if ( rc )
-        hvm_unmap_ioreq_page(s, false);
-
-    return rc;
-}
-
-static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
-                                        bool handle_bufioreq)
+                                      bool handle_bufioreq)
 {
-    struct domain *d = s->domain;
-    unsigned long ioreq_gfn = gfn_x(INVALID_GFN);
-    unsigned long bufioreq_gfn = gfn_x(INVALID_GFN);
-    int rc;
-
-    if ( s->is_default )
-    {
-        /*
-         * The default ioreq server must handle buffered ioreqs, for
-         * backwards compatibility.
-         */
-        ASSERT(handle_bufioreq);
-        return hvm_ioreq_server_map_pages(s,
-                   d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN],
-                   d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN]);
-    }
+    int rc = -ENOMEM;
 
-    rc = hvm_alloc_ioreq_gfn(d, &ioreq_gfn);
+    rc = hvm_map_ioreq_gfn(s, false);
 
     if ( !rc && handle_bufioreq )
-        rc = hvm_alloc_ioreq_gfn(d, &bufioreq_gfn);
-
-    if ( !rc )
-        rc = hvm_ioreq_server_map_pages(s, ioreq_gfn, bufioreq_gfn);
+        rc = hvm_map_ioreq_gfn(s, true);
 
     if ( rc )
-    {
-        hvm_free_ioreq_gfn(d, ioreq_gfn);
-        hvm_free_ioreq_gfn(d, bufioreq_gfn);
-    }
+        hvm_unmap_ioreq_gfn(s, false);
 
     return rc;
 }
 
 static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s)
 {
-    struct domain *d = s->domain;
-    bool handle_bufioreq = !!s->bufioreq.va;
-
-    if ( handle_bufioreq )
-        hvm_unmap_ioreq_page(s, true);
-
-    hvm_unmap_ioreq_page(s, false);
-
-    if ( !s->is_default )
-    {
-        if ( handle_bufioreq )
-            hvm_free_ioreq_gfn(d, s->bufioreq.gfn);
-
-        hvm_free_ioreq_gfn(d, s->ioreq.gfn);
-    }
+    hvm_unmap_ioreq_gfn(s, true);
+    hvm_unmap_ioreq_gfn(s, false);
 }
 
 static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s)
@@ -540,22 +509,15 @@  static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s)
 
 static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s)
 {
-    struct domain *d = s->domain;
     struct hvm_ioreq_vcpu *sv;
-    bool handle_bufioreq = !!s->bufioreq.va;
 
     spin_lock(&s->lock);
 
     if ( s->enabled )
         goto done;
 
-    if ( !s->is_default )
-    {
-        hvm_remove_ioreq_gfn(d, &s->ioreq);
-
-        if ( handle_bufioreq )
-            hvm_remove_ioreq_gfn(d, &s->bufioreq);
-    }
+    hvm_remove_ioreq_gfn(s, false);
+    hvm_remove_ioreq_gfn(s, true);
 
     s->enabled = true;
 
@@ -570,21 +532,13 @@  static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s)
 
 static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s)
 {
-    struct domain *d = s->domain;
-    bool handle_bufioreq = !!s->bufioreq.va;
-
     spin_lock(&s->lock);
 
     if ( !s->enabled )
         goto done;
 
-    if ( !s->is_default )
-    {
-        if ( handle_bufioreq )
-            hvm_add_ioreq_gfn(d, &s->bufioreq);
-
-        hvm_add_ioreq_gfn(d, &s->ioreq);
-    }
+    hvm_add_ioreq_gfn(s, true);
+    hvm_add_ioreq_gfn(s, false);
 
     s->enabled = false;
 
@@ -607,6 +561,9 @@  static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
     INIT_LIST_HEAD(&s->ioreq_vcpu_list);
     spin_lock_init(&s->bufioreq_lock);
 
+    s->ioreq.gfn = gfn_x(INVALID_GFN);
+    s->bufioreq.gfn = gfn_x(INVALID_GFN);
+
     rc = hvm_ioreq_server_alloc_rangesets(s);
     if ( rc )
         return rc;
@@ -614,7 +571,7 @@  static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
     if ( bufioreq_handling == HVM_IOREQSRV_BUFIOREQ_ATOMIC )
         s->bufioreq_atomic = true;
 
-    rc = hvm_ioreq_server_setup_pages(
+    rc = hvm_ioreq_server_map_pages(
              s, bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF);
     if ( rc )
         goto fail_map;