diff mbox series

[v3,04/10] ioreq: add internal ioreq initialization support

Message ID 20190930133238.49868-5-roger.pau@citrix.com (mailing list archive)
State New, archived
Headers show
Series ioreq: add support for internal servers | expand

Commit Message

Roger Pau Monné Sept. 30, 2019, 1:32 p.m. UTC
Add support for internal ioreq servers to initialization and
deinitialization routines, prevent some functions from being executed
against internal ioreq servers and add guards to only allow internal
callers to modify internal ioreq servers. External callers (i.e., from
hypercalls) are only allowed to deal with external ioreq servers.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Changes since v2:
 - Return early from hvm_ioreq_server_init and hvm_ioreq_server_deinit
   if server is internal.
 - hvm_destroy_ioreq_server, hvm_get_ioreq_server_info and
   hvm_map_mem_type_to_ioreq_server can only be used against external
   servers, hence add an assert to that effect.
 - Simplify ASSERT in hvm_create_ioreq_server.

Changes since v1:
 - Do not pass an 'internal' parameter to most functions, and instead
   use the id to key whether an ioreq server is internal or external.
 - Prevent enabling an internal server without a handler.
---
 xen/arch/x86/hvm/dm.c            |  17 ++++-
 xen/arch/x86/hvm/ioreq.c         | 119 ++++++++++++++++++++-----------
 xen/include/asm-x86/hvm/domain.h |   5 +-
 xen/include/asm-x86/hvm/ioreq.h  |   8 ++-
 4 files changed, 105 insertions(+), 44 deletions(-)

Comments

Andrew Cooper Oct. 1, 2019, 9:57 a.m. UTC | #1
On 30/09/2019 14:32, Roger Pau Monne wrote:
> diff --git a/xen/include/asm-x86/hvm/ioreq.h b/xen/include/asm-x86/hvm/ioreq.h
> index 65491c48d2..c3917aa74d 100644
> --- a/xen/include/asm-x86/hvm/ioreq.h
> +++ b/xen/include/asm-x86/hvm/ioreq.h
> @@ -54,6 +54,12 @@ unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered);
>  
>  void hvm_ioreq_init(struct domain *d);
>  
> +static inline bool hvm_ioreq_is_internal(unsigned int id)
> +{
> +    ASSERT(id < MAX_NR_IOREQ_SERVERS);
> +    return id >= MAX_NR_EXTERNAL_IOREQ_SERVERS;

You cannot ASSERT() here.  id is guest-controlled data in the dm_op() path.

~Andrew
Roger Pau Monné Oct. 1, 2019, 10:43 a.m. UTC | #2
On Tue, Oct 01, 2019 at 10:57:13AM +0100, Andrew Cooper wrote:
> On 30/09/2019 14:32, Roger Pau Monne wrote:
> > diff --git a/xen/include/asm-x86/hvm/ioreq.h b/xen/include/asm-x86/hvm/ioreq.h
> > index 65491c48d2..c3917aa74d 100644
> > --- a/xen/include/asm-x86/hvm/ioreq.h
> > +++ b/xen/include/asm-x86/hvm/ioreq.h
> > @@ -54,6 +54,12 @@ unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered);
> >  
> >  void hvm_ioreq_init(struct domain *d);
> >  
> > +static inline bool hvm_ioreq_is_internal(unsigned int id)
> > +{
> > +    ASSERT(id < MAX_NR_IOREQ_SERVERS);
> > +    return id >= MAX_NR_EXTERNAL_IOREQ_SERVERS;
> 
> You cannot ASSERT() here.  id is guest-controlled data in the dm_op() path.

Urg, right, thanks for noticing. There's no check prior to calling
hvm_ioreq_is_internal on the dm_op path.

I guess just returning true if id >= MAX_NR_EXTERNAL_IOREQ_SERVERS
would be OK; get_ioreq_server already copes with overflowing ids.

Thanks, Roger.
Jan Beulich Oct. 2, 2019, 2:47 p.m. UTC | #3
On 30.09.2019 15:32, Roger Pau Monne wrote:
> @@ -855,6 +884,8 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
>      struct hvm_ioreq_server *s;
>      int rc;
>  
> +    ASSERT(!hvm_ioreq_is_internal(id));

With this, ...

> @@ -871,13 +903,13 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
>  
>      p2m_set_ioreq_server(d, 0, id);
>  
> -    hvm_ioreq_server_disable(s);
> +    hvm_ioreq_server_disable(s, hvm_ioreq_is_internal(id));

... why not simply "false" here?

>      /*
>       * It is safe to call hvm_ioreq_server_deinit() prior to
>       * set_ioreq_server() since the target domain is paused.
>       */
> -    hvm_ioreq_server_deinit(s);
> +    hvm_ioreq_server_deinit(s, false);

The more that here you do so.

> @@ -900,6 +932,8 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
>      struct hvm_ioreq_server *s;
>      int rc;
>  
> +    ASSERT(!hvm_ioreq_is_internal(id));
> +
>      spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
>  
>      s = get_ioreq_server(d, id);
> @@ -909,6 +943,7 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
>          goto out;
>  
>      rc = -EPERM;
> +    /* NB: don't allow fetching information from internal ioreq servers. */
>      if ( s->emulator != current->domain )
>          goto out;

The comment doesn't really seem to be applicable to the code here:
->emulator lives in the "external" part of the union, and hence if
anywhere I think the comment should go next to the ASSERT() above.

> @@ -1010,7 +1045,7 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
>          goto out;
>  
>      rc = -EPERM;
> -    if ( s->emulator != current->domain )
> +    if ( !hvm_ioreq_is_internal(id) && s->emulator != current->domain )
>          goto out;
>  
>      switch ( type )
> @@ -1062,7 +1097,7 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
>          goto out;
>  
>      rc = -EPERM;
> -    if ( s->emulator != current->domain )
> +    if ( !hvm_ioreq_is_internal(id) && s->emulator != current->domain )
>          goto out;
>  
>      switch ( type )
> @@ -1108,6 +1143,8 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
>      struct hvm_ioreq_server *s;
>      int rc;
>  
> +    ASSERT(!hvm_ioreq_is_internal(id));
> +
>      if ( type != HVMMEM_ioreq_server )
>          return -EINVAL;

Taking just these three, things seem pretty inconsistent: Why ASSERT()
here but if() above? I think it would be better if dm.c was left
unchanged (not sure if I'm in opposition with this to prior review
comments by someone else), in particular making it unnecessary (as it
seems) to expose hvm_ioreq_is_internal() outside of this CU.

> @@ -1184,7 +1221,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
>  
>      spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
>  
> -    FOR_EACH_IOREQ_SERVER(d, id, s)
> +    FOR_EACH_EXTERNAL_IOREQ_SERVER(d, id, s)

Still remembering the error path fix you likely spotted as necessary
while doing this work (commit 215f2576b0): Don't you need to again
adjust this same error path here (MAX_NR_IOREQ_SERVERS ->
MAX_NR_EXTERNAL_IOREQ_SERVERS)?

Jan
diff mbox series

Patch

diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index c2fca9f729..6a3682e58c 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -417,7 +417,7 @@  static int dm_op(const struct dmop_args *op_args)
             break;
 
         rc = hvm_create_ioreq_server(d, data->handle_bufioreq,
-                                     &data->id);
+                                     &data->id, false);
         break;
     }
 
@@ -450,6 +450,9 @@  static int dm_op(const struct dmop_args *op_args)
         rc = -EINVAL;
         if ( data->pad )
             break;
+        rc = -EPERM;
+        if ( hvm_ioreq_is_internal(data->id) )
+            break;
 
         rc = hvm_map_io_range_to_ioreq_server(d, data->id, data->type,
                                               data->start, data->end);
@@ -464,6 +467,9 @@  static int dm_op(const struct dmop_args *op_args)
         rc = -EINVAL;
         if ( data->pad )
             break;
+        rc = -EPERM;
+        if ( hvm_ioreq_is_internal(data->id) )
+            break;
 
         rc = hvm_unmap_io_range_from_ioreq_server(d, data->id, data->type,
                                                   data->start, data->end);
@@ -481,6 +487,9 @@  static int dm_op(const struct dmop_args *op_args)
         rc = -EOPNOTSUPP;
         if ( !hap_enabled(d) )
             break;
+        rc = -EPERM;
+        if ( hvm_ioreq_is_internal(data->id) )
+            break;
 
         if ( first_gfn == 0 )
             rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
@@ -528,6 +537,9 @@  static int dm_op(const struct dmop_args *op_args)
         rc = -EINVAL;
         if ( data->pad )
             break;
+        rc = -EPERM;
+        if ( hvm_ioreq_is_internal(data->id) )
+            break;
 
         rc = hvm_set_ioreq_server_state(d, data->id, !!data->enabled);
         break;
@@ -541,6 +553,9 @@  static int dm_op(const struct dmop_args *op_args)
         rc = -EINVAL;
         if ( data->pad )
             break;
+        rc = -EPERM;
+        if ( hvm_ioreq_is_internal(data->id) )
+            break;
 
         rc = hvm_destroy_ioreq_server(d, data->id);
         break;
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index ed0142c4e1..cdbd4244a4 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -59,10 +59,11 @@  static struct hvm_ioreq_server *get_ioreq_server(const struct domain *d,
 /*
  * Iterate over all possible ioreq servers.
  *
- * NOTE: The iteration is backwards such that more recently created
- *       ioreq servers are favoured in hvm_select_ioreq_server().
- *       This is a semantic that previously existed when ioreq servers
- *       were held in a linked list.
+ * NOTE: The iteration is backwards such that internal and more recently
+ *       created external ioreq servers are favoured in
+ *       hvm_select_ioreq_server().
+ *       This is a semantic that previously existed for external servers when
+ *       ioreq servers were held in a linked list.
  */
 #define FOR_EACH_IOREQ_SERVER(d, id, s) \
     for ( (id) = MAX_NR_IOREQ_SERVERS; (id) != 0; ) \
@@ -70,6 +71,12 @@  static struct hvm_ioreq_server *get_ioreq_server(const struct domain *d,
             continue; \
         else
 
+#define FOR_EACH_EXTERNAL_IOREQ_SERVER(d, id, s) \
+    for ( (id) = MAX_NR_EXTERNAL_IOREQ_SERVERS; (id) != 0; ) \
+        if ( !(s = GET_IOREQ_SERVER(d, --(id))) ) \
+            continue; \
+        else
+
 static ioreq_t *get_ioreq(struct hvm_ioreq_server *s, struct vcpu *v)
 {
     shared_iopage_t *p = s->ioreq.va;
@@ -86,7 +93,7 @@  bool hvm_io_pending(struct vcpu *v)
     struct hvm_ioreq_server *s;
     unsigned int id;
 
-    FOR_EACH_IOREQ_SERVER(d, id, s)
+    FOR_EACH_EXTERNAL_IOREQ_SERVER(d, id, s)
     {
         struct hvm_ioreq_vcpu *sv;
 
@@ -190,7 +197,7 @@  bool handle_hvm_io_completion(struct vcpu *v)
         return false;
     }
 
-    FOR_EACH_IOREQ_SERVER(d, id, s)
+    FOR_EACH_EXTERNAL_IOREQ_SERVER(d, id, s)
     {
         struct hvm_ioreq_vcpu *sv;
 
@@ -430,7 +437,7 @@  bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
 
     spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
-    FOR_EACH_IOREQ_SERVER(d, id, s)
+    FOR_EACH_EXTERNAL_IOREQ_SERVER(d, id, s)
     {
         if ( (s->ioreq.page == page) || (s->bufioreq.page == page) )
         {
@@ -688,7 +695,7 @@  static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
     return rc;
 }
 
-static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s)
+static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s, bool internal)
 {
     struct hvm_ioreq_vcpu *sv;
 
@@ -697,29 +704,40 @@  static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s)
     if ( s->enabled )
         goto done;
 
-    hvm_remove_ioreq_gfn(s, false);
-    hvm_remove_ioreq_gfn(s, true);
+    if ( !internal )
+    {
+        hvm_remove_ioreq_gfn(s, false);
+        hvm_remove_ioreq_gfn(s, true);
 
-    s->enabled = true;
+        list_for_each_entry ( sv,
+                              &s->ioreq_vcpu_list,
+                              list_entry )
+            hvm_update_ioreq_evtchn(s, sv);
+    }
+    else if ( !s->handler )
+    {
+        ASSERT_UNREACHABLE();
+        goto done;
+    }
 
-    list_for_each_entry ( sv,
-                          &s->ioreq_vcpu_list,
-                          list_entry )
-        hvm_update_ioreq_evtchn(s, sv);
+    s->enabled = true;
 
   done:
     spin_unlock(&s->lock);
 }
 
-static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s)
+static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s, bool internal)
 {
     spin_lock(&s->lock);
 
     if ( !s->enabled )
         goto done;
 
-    hvm_add_ioreq_gfn(s, true);
-    hvm_add_ioreq_gfn(s, false);
+    if ( !internal )
+    {
+        hvm_add_ioreq_gfn(s, true);
+        hvm_add_ioreq_gfn(s, false);
+    }
 
     s->enabled = false;
 
@@ -736,21 +754,21 @@  static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
     int rc;
 
     s->target = d;
+    spin_lock_init(&s->lock);
+
+    rc = hvm_ioreq_server_alloc_rangesets(s, id);
+    if ( hvm_ioreq_is_internal(id) || rc )
+        return rc;
 
     get_knownalive_domain(currd);
-    s->emulator = currd;
 
-    spin_lock_init(&s->lock);
+    s->emulator = currd;
     INIT_LIST_HEAD(&s->ioreq_vcpu_list);
     spin_lock_init(&s->bufioreq_lock);
 
     s->ioreq.gfn = INVALID_GFN;
     s->bufioreq.gfn = INVALID_GFN;
 
-    rc = hvm_ioreq_server_alloc_rangesets(s, id);
-    if ( rc )
-        return rc;
-
     s->bufioreq_handling = bufioreq_handling;
 
     for_each_vcpu ( d, v )
@@ -763,6 +781,7 @@  static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
     return 0;
 
  fail_add:
+    ASSERT(!hvm_ioreq_is_internal(id));
     hvm_ioreq_server_remove_all_vcpus(s);
     hvm_ioreq_server_unmap_pages(s);
 
@@ -772,9 +791,15 @@  static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
     return rc;
 }
 
-static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s)
+static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s, bool internal)
 {
     ASSERT(!s->enabled);
+
+    hvm_ioreq_server_free_rangesets(s);
+
+    if ( internal )
+        return;
+
     hvm_ioreq_server_remove_all_vcpus(s);
 
     /*
@@ -789,13 +814,11 @@  static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s)
     hvm_ioreq_server_unmap_pages(s);
     hvm_ioreq_server_free_pages(s);
 
-    hvm_ioreq_server_free_rangesets(s);
-
     put_domain(s->emulator);
 }
 
 int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
-                            ioservid_t *id)
+                            ioservid_t *id, bool internal)
 {
     struct hvm_ioreq_server *s;
     unsigned int i;
@@ -811,7 +834,9 @@  int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
     domain_pause(d);
     spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
-    for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
+    for ( i = (internal ? MAX_NR_EXTERNAL_IOREQ_SERVERS : 0);
+          i < (internal ? MAX_NR_IOREQ_SERVERS : MAX_NR_EXTERNAL_IOREQ_SERVERS);
+          i++ )
     {
         if ( !GET_IOREQ_SERVER(d, i) )
             break;
@@ -821,6 +846,10 @@  int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
     if ( i >= MAX_NR_IOREQ_SERVERS )
         goto fail;
 
+    ASSERT(i < MAX_NR_EXTERNAL_IOREQ_SERVERS
+           ? !internal
+           : internal && i < MAX_NR_IOREQ_SERVERS);
+
     /*
      * It is safe to call set_ioreq_server() prior to
      * hvm_ioreq_server_init() since the target domain is paused.
@@ -855,6 +884,8 @@  int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
     struct hvm_ioreq_server *s;
     int rc;
 
+    ASSERT(!hvm_ioreq_is_internal(id));
+
     spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
@@ -864,6 +895,7 @@  int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
         goto out;
 
     rc = -EPERM;
+    /* NB: internal servers cannot be destroyed. */
     if ( s->emulator != current->domain )
         goto out;
 
@@ -871,13 +903,13 @@  int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
 
     p2m_set_ioreq_server(d, 0, id);
 
-    hvm_ioreq_server_disable(s);
+    hvm_ioreq_server_disable(s, hvm_ioreq_is_internal(id));
 
     /*
      * It is safe to call hvm_ioreq_server_deinit() prior to
      * set_ioreq_server() since the target domain is paused.
      */
-    hvm_ioreq_server_deinit(s);
+    hvm_ioreq_server_deinit(s, false);
     set_ioreq_server(d, id, NULL);
 
     domain_unpause(d);
@@ -900,6 +932,8 @@  int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
     struct hvm_ioreq_server *s;
     int rc;
 
+    ASSERT(!hvm_ioreq_is_internal(id));
+
     spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
@@ -909,6 +943,7 @@  int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
         goto out;
 
     rc = -EPERM;
+    /* NB: don't allow fetching information from internal ioreq servers. */
     if ( s->emulator != current->domain )
         goto out;
 
@@ -956,7 +991,7 @@  int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
         goto out;
 
     rc = -EPERM;
-    if ( s->emulator != current->domain )
+    if ( hvm_ioreq_is_internal(id) || s->emulator != current->domain )
         goto out;
 
     rc = hvm_ioreq_server_alloc_pages(s);
@@ -1010,7 +1045,7 @@  int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
         goto out;
 
     rc = -EPERM;
-    if ( s->emulator != current->domain )
+    if ( !hvm_ioreq_is_internal(id) && s->emulator != current->domain )
         goto out;
 
     switch ( type )
@@ -1062,7 +1097,7 @@  int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
         goto out;
 
     rc = -EPERM;
-    if ( s->emulator != current->domain )
+    if ( !hvm_ioreq_is_internal(id) && s->emulator != current->domain )
         goto out;
 
     switch ( type )
@@ -1108,6 +1143,8 @@  int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
     struct hvm_ioreq_server *s;
     int rc;
 
+    ASSERT(!hvm_ioreq_is_internal(id));
+
     if ( type != HVMMEM_ioreq_server )
         return -EINVAL;
 
@@ -1157,15 +1194,15 @@  int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
         goto out;
 
     rc = -EPERM;
-    if ( s->emulator != current->domain )
+    if ( !hvm_ioreq_is_internal(id) && s->emulator != current->domain )
         goto out;
 
     domain_pause(d);
 
     if ( enabled )
-        hvm_ioreq_server_enable(s);
+        hvm_ioreq_server_enable(s, hvm_ioreq_is_internal(id));
     else
-        hvm_ioreq_server_disable(s);
+        hvm_ioreq_server_disable(s, hvm_ioreq_is_internal(id));
 
     domain_unpause(d);
 
@@ -1184,7 +1221,7 @@  int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
 
     spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
-    FOR_EACH_IOREQ_SERVER(d, id, s)
+    FOR_EACH_EXTERNAL_IOREQ_SERVER(d, id, s)
     {
         rc = hvm_ioreq_server_add_vcpu(s, v);
         if ( rc )
@@ -1218,7 +1255,7 @@  void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
 
     spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
-    FOR_EACH_IOREQ_SERVER(d, id, s)
+    FOR_EACH_EXTERNAL_IOREQ_SERVER(d, id, s)
         hvm_ioreq_server_remove_vcpu(s, v);
 
     spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
@@ -1235,13 +1272,13 @@  void hvm_destroy_all_ioreq_servers(struct domain *d)
 
     FOR_EACH_IOREQ_SERVER(d, id, s)
     {
-        hvm_ioreq_server_disable(s);
+        hvm_ioreq_server_disable(s, hvm_ioreq_is_internal(id));
 
         /*
          * It is safe to call hvm_ioreq_server_deinit() prior to
          * set_ioreq_server() since the target domain is being destroyed.
          */
-        hvm_ioreq_server_deinit(s);
+        hvm_ioreq_server_deinit(s, hvm_ioreq_is_internal(id));
         set_ioreq_server(d, id, NULL);
 
         xfree(s);
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 56a32e3e35..f09ce9b417 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -97,7 +97,10 @@  struct hvm_pi_ops {
     void (*vcpu_block)(struct vcpu *);
 };
 
-#define MAX_NR_IOREQ_SERVERS 8
+#define MAX_NR_EXTERNAL_IOREQ_SERVERS 8
+#define MAX_NR_INTERNAL_IOREQ_SERVERS 1
+#define MAX_NR_IOREQ_SERVERS \
+    (MAX_NR_EXTERNAL_IOREQ_SERVERS + MAX_NR_INTERNAL_IOREQ_SERVERS)
 
 struct hvm_domain {
     /* Guest page range used for non-default ioreq servers */
diff --git a/xen/include/asm-x86/hvm/ioreq.h b/xen/include/asm-x86/hvm/ioreq.h
index 65491c48d2..c3917aa74d 100644
--- a/xen/include/asm-x86/hvm/ioreq.h
+++ b/xen/include/asm-x86/hvm/ioreq.h
@@ -24,7 +24,7 @@  bool handle_hvm_io_completion(struct vcpu *v);
 bool is_ioreq_server_page(struct domain *d, const struct page_info *page);
 
 int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
-                            ioservid_t *id);
+                            ioservid_t *id, bool internal);
 int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id);
 int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
                               unsigned long *ioreq_gfn,
@@ -54,6 +54,12 @@  unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered);
 
 void hvm_ioreq_init(struct domain *d);
 
+static inline bool hvm_ioreq_is_internal(unsigned int id)
+{
+    ASSERT(id < MAX_NR_IOREQ_SERVERS);
+    return id >= MAX_NR_EXTERNAL_IOREQ_SERVERS;
+}
+
 #endif /* __ASM_X86_HVM_IOREQ_H__ */
 
 /*