[v3,08/12] x86/hvm/ioreq: maintain an array of ioreq servers rather than a list

Message ID 20170831093605.2757-9-paul.durrant@citrix.com

Commit Message

Paul Durrant Aug. 31, 2017, 9:36 a.m. UTC
A subsequent patch will remove the current implicit limitation on creation
of ioreq servers which is due to the allocation of gfns for the ioreq
structures and buffered ioreq ring.

It will therefore be necessary to introduce an explicit limit and, since
this limit should be small, it simplifies the code to maintain an array of
that size rather than using a list.

Also, by reserving an array slot for the default server and populating
array slots early in create, the need to pass an 'is_default' boolean
to sub-functions can be avoided.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>

v3:
 - New patch (replacing "move is_default into struct hvm_ioreq_server") in
   response to review comments.
---
 xen/arch/x86/hvm/ioreq.c         | 507 +++++++++++++++++++--------------------
 xen/include/asm-x86/hvm/domain.h |  11 +-
 2 files changed, 247 insertions(+), 271 deletions(-)

Comments

Roger Pau Monne Sept. 4, 2017, 1:40 p.m. UTC | #1
On Thu, Aug 31, 2017 at 10:36:01AM +0100, Paul Durrant wrote:
> A subsequent patch will remove the current implicit limitation on creation
> of ioreq servers which is due to the allocation of gfns for the ioreq
> structures and buffered ioreq ring.
> 
> It will therefore be necessary to introduce an explicit limit and, since
> this limit should be small, it simplifies the code to maintain an array of
> that size rather than using a list.
> 
> Also, by reserving an array slot for the default server and populating
> array slots early in create, the need to pass an 'is_default' boolean
> to sub-functions can be avoided.
> 
> Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
> ---
> Cc: Jan Beulich <jbeulich@suse.com>
> Cc: Andrew Cooper <andrew.cooper3@citrix.com>
> 
> v3:
>  - New patch (replacing "move is_default into struct hvm_ioreq_server") in
>    response to review comments.
> ---
>  xen/arch/x86/hvm/ioreq.c         | 507 +++++++++++++++++++--------------------
>  xen/include/asm-x86/hvm/domain.h |  11 +-
>  2 files changed, 247 insertions(+), 271 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
> index 5e01e1a6d2..0e92763384 100644
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -46,14 +46,18 @@ static ioreq_t *get_ioreq(struct hvm_ioreq_server *s, struct vcpu *v)
>  bool hvm_io_pending(struct vcpu *v)
>  {
>      struct domain *d = v->domain;
> -    struct hvm_ioreq_server *s;
> +    unsigned int id;
>  
> -    list_for_each_entry ( s,
> -                          &d->arch.hvm_domain.ioreq_server.list,
> -                          list_entry )
> +    for ( id = 0; id < MAX_NR_IOREQ_SERVERS; id++ )
>      {
> +        struct hvm_ioreq_server *s;
>          struct hvm_ioreq_vcpu *sv;
>  
> +        s = d->arch.hvm_domain.ioreq_server.server[id];
> +

No need for the extra newline IMHO (here and below). You could also
do the initialization together with the definition, but I guess that's
going to exceed the line char limit?

Or even you could do something like this AFAICT:

for ( id = 0, s = d->arch.hvm_domain.ioreq_server.server[0];
      id < MAX_NR_IOREQ_SERVERS;
      id++, s = d->arch.hvm_domain.ioreq_server.server[id] )
{
     ....

I would make this a macro (FOREACH_IOREQ_SERVER or similar), since the
pattern seems to be repeated in quite a lot of places.
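
Something like this would do, I think (an untested sketch; the trailing
else is what lets the block following the macro act as the loop body
while NULL slots are skipped):

#define FOREACH_IOREQ_SERVER(d, id, s) \
    for ( (id) = 0; (id) < MAX_NR_IOREQ_SERVERS; (id)++ ) \
        if ( !((s) = (d)->arch.hvm_domain.ioreq_server.server[(id)]) ) \
            continue; \
        else

so that the repeated pattern collapses to:

FOREACH_IOREQ_SERVER(d, id, s)
{
    ...
}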

> +#define IS_DEFAULT(s) \
> +    (s == s->domain->arch.hvm_domain.ioreq_server.server[DEFAULT_IOSERVID])

Parentheses around the instances of s please.
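
I.e.:

#define IS_DEFAULT(s) \
    ((s) == (s)->domain->arch.hvm_domain.ioreq_server.server[DEFAULT_IOSERVID])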

>  static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
> -                                            bool is_default)
> +                                            ioservid_t id)

You could get the id by doing some arithmetic with the array and s,
but I don't think that's worth it.

>  int hvm_create_ioreq_server(struct domain *d, domid_t domid,
> @@ -685,52 +667,66 @@ int hvm_create_ioreq_server(struct domain *d, domid_t domid,
>                              ioservid_t *id)
>  {
>      struct hvm_ioreq_server *s;
> +    unsigned int i;
>      int rc;
>  
>      if ( bufioreq_handling > HVM_IOREQSRV_BUFIOREQ_ATOMIC )
>          return -EINVAL;
>  
> -    rc = -ENOMEM;
>      s = xzalloc(struct hvm_ioreq_server);
>      if ( !s )
> -        goto fail1;
> +        return -ENOMEM;
>  
>      domain_pause(d);
>      spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
>  
> -    rc = -EEXIST;
> -    if ( is_default && d->arch.hvm_domain.default_ioreq_server != NULL )
> -        goto fail2;
> -
> -    rc = hvm_ioreq_server_init(s, d, domid, is_default, bufioreq_handling,
> -                               next_ioservid(d));
> -    if ( rc )
> -        goto fail3;
> -
> -    list_add(&s->list_entry,
> -             &d->arch.hvm_domain.ioreq_server.list);
> -
>      if ( is_default )
>      {
> -        d->arch.hvm_domain.default_ioreq_server = s;
> -        hvm_ioreq_server_enable(s, true);
> +        i = DEFAULT_IOSERVID;
> +
> +        rc = -EEXIST;
> +        if ( d->arch.hvm_domain.ioreq_server.server[i] )
> +            goto fail;
> +    }
> +    else
> +    {
> +        for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
> +        {
> +            if ( i != DEFAULT_IOSERVID &&
> +                 !d->arch.hvm_domain.ioreq_server.server[i] )
> +                break;
> +        }
> +
> +        rc = -ENOSPC;
> +        if ( i >= MAX_NR_IOREQ_SERVERS )
> +            goto fail;
>      }
>  
> +    d->arch.hvm_domain.ioreq_server.server[i] = s;
> +
> +    rc = hvm_ioreq_server_init(s, d, domid, bufioreq_handling, i);
> +    if ( rc )
> +        goto fail;
> +
> +    if ( IS_DEFAULT(s) )
> +        hvm_ioreq_server_enable(s);
> +
>      if ( id )
> -        *id = s->id;
> +        *id = i;
> +
> +    d->arch.hvm_domain.ioreq_server.count++;
>  
>      spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
>      domain_unpause(d);
>  
>      return 0;
>  
> - fail3:
> - fail2:
> + fail:
>      spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
>      domain_unpause(d);
>  
> +    d->arch.hvm_domain.ioreq_server.server[i] = NULL;

Shouldn't this be done while holding the ioreq_server lock?

>      xfree(s);
> - fail1:
>      return rc;
>  }
>  
> @@ -741,35 +737,30 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
>  
>      spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
>  
> -    rc = -ENOENT;
> -    list_for_each_entry ( s,
> -                          &d->arch.hvm_domain.ioreq_server.list,
> -                          list_entry )
> -    {
> -        if ( s == d->arch.hvm_domain.default_ioreq_server )
> -            continue;
> +    s = d->arch.hvm_domain.ioreq_server.server[id];
>  
> -        if ( s->id != id )
> -            continue;
> -
> -        domain_pause(d);
> +    rc = -ENOENT;
> +    if ( id >= MAX_NR_IOREQ_SERVERS || !s || IS_DEFAULT(s) )

The id >= MAX_NR_IOREQ_SERVERS check should be done before fetching the
element IMHO, even before taking the lock.

Also, I don't like the:

rc = ...
if ( ... )
    goto error

construct; I think it's easy to make a mistake and end up returning an
error code on the successful path (or forgetting to set an error when
needed). This is, however, widely used in Xen, so I'm not going to
complain any more.

>  void hvm_destroy_all_ioreq_servers(struct domain *d)
>  {
> -    struct hvm_ioreq_server *s, *next;
> +    unsigned int id;
>  
>      spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
>  
>      /* No need to domain_pause() as the domain is being torn down */
>  
> -    list_for_each_entry_safe ( s,
> -                               next,
> -                               &d->arch.hvm_domain.ioreq_server.list,
> -                               list_entry )
> +    for ( id = 0; id < MAX_NR_IOREQ_SERVERS; id++ )
>      {
> -        bool is_default = (s == d->arch.hvm_domain.default_ioreq_server);
> +        struct hvm_ioreq_server *s;
>  
> -        hvm_ioreq_server_disable(s, is_default);
> +        s = d->arch.hvm_domain.ioreq_server.server[id];
>  
> -        if ( is_default )
> -            d->arch.hvm_domain.default_ioreq_server = NULL;
> +        if ( !s )
> +            continue;
>  
> -        list_del(&s->list_entry);
> +        hvm_ioreq_server_disable(s);
> +        hvm_ioreq_server_deinit(s);
>  
> -        hvm_ioreq_server_deinit(s, is_default);
> +        ASSERT(d->arch.hvm_domain.ioreq_server.count);
> +        --d->arch.hvm_domain.ioreq_server.count;

It seems more common to use d->arch.hvm_domain.ioreq_server.count--,
unless there's a reason for prefixing the decrement.
  
> +#define MAX_NR_IOREQ_SERVERS 8
> +#define DEFAULT_IOSERVID 0

I would rather write it as DEFAULT_IOREQ_ID or DEFAULT_IOSERVER_ID; I
don't think there's any need to shorten SERVER here (especially when
it's not shortened in MAX_NR_IOREQ_SERVERS).

Thanks, Roger.
Jan Beulich Sept. 4, 2017, 2:39 p.m. UTC | #2
>>> On 04.09.17 at 15:40, <roger.pau@citrix.com> wrote:
> On Thu, Aug 31, 2017 at 10:36:01AM +0100, Paul Durrant wrote:
>>  void hvm_destroy_all_ioreq_servers(struct domain *d)
>>  {
>> -    struct hvm_ioreq_server *s, *next;
>> +    unsigned int id;
>>  
>>      spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
>>  
>>      /* No need to domain_pause() as the domain is being torn down */
>>  
>> -    list_for_each_entry_safe ( s,
>> -                               next,
>> -                               &d->arch.hvm_domain.ioreq_server.list,
>> -                               list_entry )
>> +    for ( id = 0; id < MAX_NR_IOREQ_SERVERS; id++ )
>>      {
>> -        bool is_default = (s == d->arch.hvm_domain.default_ioreq_server);
>> +        struct hvm_ioreq_server *s;
>>  
>> -        hvm_ioreq_server_disable(s, is_default);
>> +        s = d->arch.hvm_domain.ioreq_server.server[id];
>>  
>> -        if ( is_default )
>> -            d->arch.hvm_domain.default_ioreq_server = NULL;
>> +        if ( !s )
>> +            continue;
>>  
>> -        list_del(&s->list_entry);
>> +        hvm_ioreq_server_disable(s);
>> +        hvm_ioreq_server_deinit(s);
>>  
>> -        hvm_ioreq_server_deinit(s, is_default);
>> +        ASSERT(d->arch.hvm_domain.ioreq_server.count);
>> +        --d->arch.hvm_domain.ioreq_server.count;
> 
> It seems more common to use d->arch.hvm_domain.ioreq_server.count--,
> unless there's a reason for prefixing the decrement.

At least for people who also write C++ code every now and then, it is
certainly more natural to use the prefix operator.

Jan
Paul Durrant Sept. 5, 2017, 9:06 a.m. UTC | #3
> -----Original Message-----
> From: Roger Pau Monne
> Sent: 04 September 2017 14:41
> To: Paul Durrant <Paul.Durrant@citrix.com>
> Cc: xen-devel@lists.xenproject.org; Andrew Cooper
> <Andrew.Cooper3@citrix.com>; Jan Beulich <jbeulich@suse.com>
> Subject: Re: [Xen-devel] [PATCH v3 08/12] x86/hvm/ioreq: maintain an array
> of ioreq servers rather than a list
> 
> On Thu, Aug 31, 2017 at 10:36:01AM +0100, Paul Durrant wrote:
> > A subsequent patch will remove the current implicit limitation on creation
> > of ioreq servers which is due to the allocation of gfns for the ioreq
> > structures and buffered ioreq ring.
> >
> > It will therefore be necessary to introduce an explicit limit and, since
> > this limit should be small, it simplifies the code to maintain an array of
> > that size rather than using a list.
> >
> > Also, by reserving an array slot for the default server and populating
> > array slots early in create, the need to pass an 'is_default' boolean
> > to sub-functions can be avoided.
> >
> > Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
> > ---
> > Cc: Jan Beulich <jbeulich@suse.com>
> > Cc: Andrew Cooper <andrew.cooper3@citrix.com>
> >
> > v3:
> >  - New patch (replacing "move is_default into struct hvm_ioreq_server") in
> >    response to review comments.
> > ---
> >  xen/arch/x86/hvm/ioreq.c         | 507 +++++++++++++++++++--------------------
> >  xen/include/asm-x86/hvm/domain.h |  11 +-
> >  2 files changed, 247 insertions(+), 271 deletions(-)
> >
> > diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
> > index 5e01e1a6d2..0e92763384 100644
> > --- a/xen/arch/x86/hvm/ioreq.c
> > +++ b/xen/arch/x86/hvm/ioreq.c
> > @@ -46,14 +46,18 @@ static ioreq_t *get_ioreq(struct hvm_ioreq_server *s, struct vcpu *v)
> >  bool hvm_io_pending(struct vcpu *v)
> >  {
> >      struct domain *d = v->domain;
> > -    struct hvm_ioreq_server *s;
> > +    unsigned int id;
> >
> > -    list_for_each_entry ( s,
> > -                          &d->arch.hvm_domain.ioreq_server.list,
> > -                          list_entry )
> > +    for ( id = 0; id < MAX_NR_IOREQ_SERVERS; id++ )
> >      {
> > +        struct hvm_ioreq_server *s;
> >          struct hvm_ioreq_vcpu *sv;
> >
> > +        s = d->arch.hvm_domain.ioreq_server.server[id];
> > +
> 
> No need for the extra newline IMHO (here and below). You could also
> do the initialization together with the definition, but I guess that's
> going to exceed the line char limit?
> 
> Or even you could do something like this AFAICT:
> 
> for ( id = 0, s = d->arch.hvm_domain.ioreq_server.server[0];
>       id < MAX_NR_IOREQ_SERVERS;
>       id++, s = d->arch.hvm_domain.ioreq_server.server[id] )
> {
>      ....
> 
> I would make this a macro (FOREACH_IOREQ_SERVER or similar), since the
> pattern seems to be repeated in quite a lot of places.

Yes, that's probably a good plan. I'll look at doing that.

> 
> > +#define IS_DEFAULT(s) \
> > +    (s == s->domain->arch.hvm_domain.ioreq_server.server[DEFAULT_IOSERVID])
> 
> Parentheses around the instances of s please.

Indeed. Missed that.

> 
> >  static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
> > -                                            bool is_default)
> > +                                            ioservid_t id)
> 
> You could get the id by doing some arithmetic with the array and s,
> but I don't think that's worth it.
> 

No, I can't anyway, because the array contains pointers, not the structs themselves.
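
The best I could do is a linear scan for the slot holding s, something
like:

for ( id = 0; id < MAX_NR_IOREQ_SERVERS; id++ )
    if ( d->arch.hvm_domain.ioreq_server.server[id] == s )
        break;

which is no cheaper than simply passing the id down.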

> >  int hvm_create_ioreq_server(struct domain *d, domid_t domid,
> > @@ -685,52 +667,66 @@ int hvm_create_ioreq_server(struct domain *d, domid_t domid,
> >                              ioservid_t *id)
> >  {
> >      struct hvm_ioreq_server *s;
> > +    unsigned int i;
> >      int rc;
> >
> >      if ( bufioreq_handling > HVM_IOREQSRV_BUFIOREQ_ATOMIC )
> >          return -EINVAL;
> >
> > -    rc = -ENOMEM;
> >      s = xzalloc(struct hvm_ioreq_server);
> >      if ( !s )
> > -        goto fail1;
> > +        return -ENOMEM;
> >
> >      domain_pause(d);
> >      spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
> >
> > -    rc = -EEXIST;
> > -    if ( is_default && d->arch.hvm_domain.default_ioreq_server != NULL )
> > -        goto fail2;
> > -
> > -    rc = hvm_ioreq_server_init(s, d, domid, is_default, bufioreq_handling,
> > -                               next_ioservid(d));
> > -    if ( rc )
> > -        goto fail3;
> > -
> > -    list_add(&s->list_entry,
> > -             &d->arch.hvm_domain.ioreq_server.list);
> > -
> >      if ( is_default )
> >      {
> > -        d->arch.hvm_domain.default_ioreq_server = s;
> > -        hvm_ioreq_server_enable(s, true);
> > +        i = DEFAULT_IOSERVID;
> > +
> > +        rc = -EEXIST;
> > +        if ( d->arch.hvm_domain.ioreq_server.server[i] )
> > +            goto fail;
> > +    }
> > +    else
> > +    {
> > +        for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
> > +        {
> > +            if ( i != DEFAULT_IOSERVID &&
> > +                 !d->arch.hvm_domain.ioreq_server.server[i] )
> > +                break;
> > +        }
> > +
> > +        rc = -ENOSPC;
> > +        if ( i >= MAX_NR_IOREQ_SERVERS )
> > +            goto fail;
> >      }
> >
> > +    d->arch.hvm_domain.ioreq_server.server[i] = s;
> > +
> > +    rc = hvm_ioreq_server_init(s, d, domid, bufioreq_handling, i);
> > +    if ( rc )
> > +        goto fail;
> > +
> > +    if ( IS_DEFAULT(s) )
> > +        hvm_ioreq_server_enable(s);
> > +
> >      if ( id )
> > -        *id = s->id;
> > +        *id = i;
> > +
> > +    d->arch.hvm_domain.ioreq_server.count++;
> >
> >      spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
> >      domain_unpause(d);
> >
> >      return 0;
> >
> > - fail3:
> > - fail2:
> > + fail:
> >      spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
> >      domain_unpause(d);
> >
> > +    d->arch.hvm_domain.ioreq_server.server[i] = NULL;
> 
> Shouldn't this be done while holding the ioreq_server lock?
> 

Yes, it should.
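
I.e. something like this at the end of hvm_create_ioreq_server() (just a
sketch; note the clear has to be skipped on the -EEXIST/-ENOSPC paths,
where the slot was never claimed):

 fail:
    if ( i < MAX_NR_IOREQ_SERVERS &&
         d->arch.hvm_domain.ioreq_server.server[i] == s )
        d->arch.hvm_domain.ioreq_server.server[i] = NULL;

    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
    domain_unpause(d);

    xfree(s);
    return rc;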

> >      xfree(s);
> > - fail1:
> >      return rc;
> >  }
> >
> > @@ -741,35 +737,30 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
> >
> >      spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
> >
> > -    rc = -ENOENT;
> > -    list_for_each_entry ( s,
> > -                          &d->arch.hvm_domain.ioreq_server.list,
> > -                          list_entry )
> > -    {
> > -        if ( s == d->arch.hvm_domain.default_ioreq_server )
> > -            continue;
> > +    s = d->arch.hvm_domain.ioreq_server.server[id];
> >
> > -        if ( s->id != id )
> > -            continue;
> > -
> > -        domain_pause(d);
> > +    rc = -ENOENT;
> > +    if ( id >= MAX_NR_IOREQ_SERVERS || !s || IS_DEFAULT(s) )
> 
> The id >= MAX_NR_IOREQ_SERVERS check should be done before fetching the
> element IMHO, even before taking the lock.

Ok, if you prefer.

> 
> Also, I don't like the:
> 
> rc = ...
> if ( ... )
>     goto error
> 
> construct; I think it's easy to make a mistake and end up returning an
> error code on the successful path (or forgetting to set an error when
> needed). This is, however, widely used in Xen, so I'm not going to
> complain any more.

It's a construct I tend to use because, in my experience (although I have not checked with recent versions of gcc), it leads to smaller code. If you use:

if (theres-an-error)
{
  rc = -errno;
  goto error;
}

then this will probably generate (in pseudo-assembly):

test theres-an-error
jump-if false 1f
move -errno, rc
jump error
1: ...

whereas using:

rc = -errno;
if (theres-an-error)
  goto error;

will probably generate something like:

move -errno, rc
test theres-an-error
jump-if true error

which IMO is more efficient and easier to read.

> 
> >  void hvm_destroy_all_ioreq_servers(struct domain *d)
> >  {
> > -    struct hvm_ioreq_server *s, *next;
> > +    unsigned int id;
> >
> >      spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
> >
> >      /* No need to domain_pause() as the domain is being torn down */
> >
> > -    list_for_each_entry_safe ( s,
> > -                               next,
> > -                               &d->arch.hvm_domain.ioreq_server.list,
> > -                               list_entry )
> > +    for ( id = 0; id < MAX_NR_IOREQ_SERVERS; id++ )
> >      {
> > -        bool is_default = (s == d->arch.hvm_domain.default_ioreq_server);
> > +        struct hvm_ioreq_server *s;
> >
> > -        hvm_ioreq_server_disable(s, is_default);
> > +        s = d->arch.hvm_domain.ioreq_server.server[id];
> >
> > -        if ( is_default )
> > -            d->arch.hvm_domain.default_ioreq_server = NULL;
> > +        if ( !s )
> > +            continue;
> >
> > -        list_del(&s->list_entry);
> > +        hvm_ioreq_server_disable(s);
> > +        hvm_ioreq_server_deinit(s);
> >
> > -        hvm_ioreq_server_deinit(s, is_default);
> > +        ASSERT(d->arch.hvm_domain.ioreq_server.count);
> > +        --d->arch.hvm_domain.ioreq_server.count;
> 
> It seems more common to use d->arch.hvm_domain.ioreq_server.count--,
> unless there's a reason for prefixing the decrement.

That's a habit from the days of the 68000, which had a predecrement-and-indirect addressing mode, so using predecrement generally yielded smaller code :-) I still prefer it; postdecrement just looks odd to me.

> > +#define MAX_NR_IOREQ_SERVERS 8
> > +#define DEFAULT_IOSERVID 0
> 
> I would rather write it as DEFAULT_IOREQ_ID or DEFAULT_IOSERVER_ID; I
> don't think there's any need to shorten SERVER here (especially when
> it's not shortened in MAX_NR_IOREQ_SERVERS).
> 

I named it after the ioservid_t type. I'd prefer to keep it that way.

Cheers,

  Paul

> Thanks, Roger.

Patch

diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 5e01e1a6d2..0e92763384 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -46,14 +46,18 @@  static ioreq_t *get_ioreq(struct hvm_ioreq_server *s, struct vcpu *v)
 bool hvm_io_pending(struct vcpu *v)
 {
     struct domain *d = v->domain;
-    struct hvm_ioreq_server *s;
+    unsigned int id;
 
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
+    for ( id = 0; id < MAX_NR_IOREQ_SERVERS; id++ )
     {
+        struct hvm_ioreq_server *s;
         struct hvm_ioreq_vcpu *sv;
 
+        s = d->arch.hvm_domain.ioreq_server.server[id];
+
+        if ( !s )
+            continue;
+
         list_for_each_entry ( sv,
                               &s->ioreq_vcpu_list,
                               list_entry )
@@ -125,15 +129,19 @@  bool handle_hvm_io_completion(struct vcpu *v)
 {
     struct domain *d = v->domain;
     struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
-    struct hvm_ioreq_server *s;
     enum hvm_io_completion io_completion;
+    unsigned int id;
 
-      list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
+    for ( id = 0; id < MAX_NR_IOREQ_SERVERS; id++ )
     {
+        struct hvm_ioreq_server *s;
         struct hvm_ioreq_vcpu *sv;
 
+        s = d->arch.hvm_domain.ioreq_server.server[id];
+
+        if ( !s )
+            continue;
+
         list_for_each_entry ( sv,
                               &s->ioreq_vcpu_list,
                               list_entry )
@@ -242,15 +250,20 @@  static int hvm_map_ioreq_page(
 
 bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
 {
-    const struct hvm_ioreq_server *s;
+    unsigned int id;
     bool found = false;
 
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
+    for ( id = 0; id < MAX_NR_IOREQ_SERVERS; id++ )
     {
+        struct hvm_ioreq_server *s;
+
+        s = d->arch.hvm_domain.ioreq_server.server[id];
+
+        if ( !s )
+            continue;
+
         if ( (s->ioreq.va && s->ioreq.page == page) ||
              (s->bufioreq.va && s->bufioreq.page == page) )
         {
@@ -301,8 +314,11 @@  static void hvm_update_ioreq_evtchn(struct hvm_ioreq_server *s,
     }
 }
 
+#define IS_DEFAULT(s) \
+    (s == s->domain->arch.hvm_domain.ioreq_server.server[DEFAULT_IOSERVID])
+
 static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
-                                     bool is_default, struct vcpu *v)
+                                     struct vcpu *v)
 {
     struct hvm_ioreq_vcpu *sv;
     int rc;
@@ -331,7 +347,7 @@  static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
             goto fail3;
 
         s->bufioreq_evtchn = rc;
-        if ( is_default )
+        if ( IS_DEFAULT(s) )
             d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN] =
                 s->bufioreq_evtchn;
     }
@@ -431,7 +447,6 @@  static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
 }
 
 static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
-                                        bool is_default,
                                         bool handle_bufioreq)
 {
     struct domain *d = s->domain;
@@ -439,7 +454,7 @@  static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
     unsigned long bufioreq_gfn = gfn_x(INVALID_GFN);
     int rc;
 
-    if ( is_default )
+    if ( IS_DEFAULT(s) )
     {
         /*
          * The default ioreq server must handle buffered ioreqs, for
@@ -468,8 +483,7 @@  static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
     return rc;
 }
 
-static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
-                                         bool is_default)
+static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s)
 {
     struct domain *d = s->domain;
     bool handle_bufioreq = !!s->bufioreq.va;
@@ -479,7 +493,7 @@  static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
 
     hvm_unmap_ioreq_page(s, false);
 
-    if ( !is_default )
+    if ( !IS_DEFAULT(s) )
     {
         if ( handle_bufioreq )
             hvm_free_ioreq_gfn(d, s->bufioreq.gfn);
@@ -488,12 +502,11 @@  static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
     }
 }
 
-static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s,
-                                            bool is_default)
+static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s)
 {
     unsigned int i;
 
-    if ( is_default )
+    if ( IS_DEFAULT(s) )
         return;
 
     for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
@@ -501,19 +514,19 @@  static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s,
 }
 
 static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
-                                            bool is_default)
+                                            ioservid_t id)
 {
     unsigned int i;
     int rc;
 
-    if ( is_default )
+    if ( IS_DEFAULT(s) )
         goto done;
 
     for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
     {
         char *name;
 
-        rc = asprintf(&name, "ioreq_server %d %s", s->id,
+        rc = asprintf(&name, "ioreq_server %d %s", id,
                       (i == XEN_DMOP_IO_RANGE_PORT) ? "port" :
                       (i == XEN_DMOP_IO_RANGE_MEMORY) ? "memory" :
                       (i == XEN_DMOP_IO_RANGE_PCI) ? "pci" :
@@ -537,13 +550,12 @@  static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
     return 0;
 
  fail:
-    hvm_ioreq_server_free_rangesets(s, false);
+    hvm_ioreq_server_free_rangesets(s);
 
     return rc;
 }
 
-static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
-                                    bool is_default)
+static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s)
 {
     struct domain *d = s->domain;
     struct hvm_ioreq_vcpu *sv;
@@ -554,7 +566,7 @@  static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
     if ( s->enabled )
         goto done;
 
-    if ( !is_default )
+    if ( !IS_DEFAULT(s) )
     {
         hvm_remove_ioreq_gfn(d, &s->ioreq);
 
@@ -573,8 +585,7 @@  static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
     spin_unlock(&s->lock);
 }
 
-static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
-                                     bool is_default)
+static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s)
 {
     struct domain *d = s->domain;
     bool handle_bufioreq = !!s->bufioreq.va;
@@ -584,7 +595,7 @@  static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
     if ( !s->enabled )
         goto done;
 
-    if ( !is_default )
+    if ( !IS_DEFAULT(s) )
     {
         if ( handle_bufioreq )
             hvm_add_ioreq_gfn(d, &s->bufioreq);
@@ -600,13 +611,11 @@  static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
 
 static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
                                  struct domain *d, domid_t domid,
-                                 bool is_default, int bufioreq_handling,
-                                 ioservid_t id)
+                                 int bufioreq_handling, ioservid_t id)
 {
     struct vcpu *v;
     int rc;
 
-    s->id = id;
     s->domain = d;
     s->domid = domid;
 
@@ -614,7 +623,7 @@  static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
     INIT_LIST_HEAD(&s->ioreq_vcpu_list);
     spin_lock_init(&s->bufioreq_lock);
 
-    rc = hvm_ioreq_server_alloc_rangesets(s, is_default);
+    rc = hvm_ioreq_server_alloc_rangesets(s, id);
     if ( rc )
         return rc;
 
@@ -622,13 +631,13 @@  static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
         s->bufioreq_atomic = true;
 
     rc = hvm_ioreq_server_setup_pages(
-             s, is_default, bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF);
+             s, bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF);
     if ( rc )
         goto fail_map;
 
     for_each_vcpu ( d, v )
     {
-        rc = hvm_ioreq_server_add_vcpu(s, is_default, v);
+        rc = hvm_ioreq_server_add_vcpu(s, v);
         if ( rc )
             goto fail_add;
     }
@@ -637,47 +646,20 @@  static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
 
  fail_add:
     hvm_ioreq_server_remove_all_vcpus(s);
-    hvm_ioreq_server_unmap_pages(s, is_default);
+    hvm_ioreq_server_unmap_pages(s);
 
  fail_map:
-    hvm_ioreq_server_free_rangesets(s, is_default);
+    hvm_ioreq_server_free_rangesets(s);
 
     return rc;
 }
 
-static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s,
-                                    bool is_default)
+static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s)
 {
     ASSERT(!s->enabled);
     hvm_ioreq_server_remove_all_vcpus(s);
-    hvm_ioreq_server_unmap_pages(s, is_default);
-    hvm_ioreq_server_free_rangesets(s, is_default);
-}
-
-static ioservid_t next_ioservid(struct domain *d)
-{
-    struct hvm_ioreq_server *s;
-    ioservid_t id;
-
-    ASSERT(spin_is_locked(&d->arch.hvm_domain.ioreq_server.lock));
-
-    id = d->arch.hvm_domain.ioreq_server.id;
-
- again:
-    id++;
-
-    /* Check for uniqueness */
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
-    {
-        if ( id == s->id )
-            goto again;
-    }
-
-    d->arch.hvm_domain.ioreq_server.id = id;
-
-    return id;
+    hvm_ioreq_server_unmap_pages(s);
+    hvm_ioreq_server_free_rangesets(s);
 }
 
 int hvm_create_ioreq_server(struct domain *d, domid_t domid,
@@ -685,52 +667,66 @@  int hvm_create_ioreq_server(struct domain *d, domid_t domid,
                             ioservid_t *id)
 {
     struct hvm_ioreq_server *s;
+    unsigned int i;
     int rc;
 
     if ( bufioreq_handling > HVM_IOREQSRV_BUFIOREQ_ATOMIC )
         return -EINVAL;
 
-    rc = -ENOMEM;
     s = xzalloc(struct hvm_ioreq_server);
     if ( !s )
-        goto fail1;
+        return -ENOMEM;
 
     domain_pause(d);
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
-    rc = -EEXIST;
-    if ( is_default && d->arch.hvm_domain.default_ioreq_server != NULL )
-        goto fail2;
-
-    rc = hvm_ioreq_server_init(s, d, domid, is_default, bufioreq_handling,
-                               next_ioservid(d));
-    if ( rc )
-        goto fail3;
-
-    list_add(&s->list_entry,
-             &d->arch.hvm_domain.ioreq_server.list);
-
     if ( is_default )
     {
-        d->arch.hvm_domain.default_ioreq_server = s;
-        hvm_ioreq_server_enable(s, true);
+        i = DEFAULT_IOSERVID;
+
+        rc = -EEXIST;
+        if ( d->arch.hvm_domain.ioreq_server.server[i] )
+            goto fail;
+    }
+    else
+    {
+        for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
+        {
+            if ( i != DEFAULT_IOSERVID &&
+                 !d->arch.hvm_domain.ioreq_server.server[i] )
+                break;
+        }
+
+        rc = -ENOSPC;
+        if ( i >= MAX_NR_IOREQ_SERVERS )
+            goto fail;
     }
 
+    d->arch.hvm_domain.ioreq_server.server[i] = s;
+
+    rc = hvm_ioreq_server_init(s, d, domid, bufioreq_handling, i);
+    if ( rc )
+        goto fail;
+
+    if ( IS_DEFAULT(s) )
+        hvm_ioreq_server_enable(s);
+
     if ( id )
-        *id = s->id;
+        *id = i;
+
+    d->arch.hvm_domain.ioreq_server.count++;
 
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
     domain_unpause(d);
 
     return 0;
 
- fail3:
- fail2:
+ fail:
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
     domain_unpause(d);
 
+    d->arch.hvm_domain.ioreq_server.server[i] = NULL;
     xfree(s);
- fail1:
     return rc;
 }
 
@@ -741,35 +737,30 @@  int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
 
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
-    rc = -ENOENT;
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
-    {
-        if ( s == d->arch.hvm_domain.default_ioreq_server )
-            continue;
+    s = d->arch.hvm_domain.ioreq_server.server[id];
 
-        if ( s->id != id )
-            continue;
-
-        domain_pause(d);
+    rc = -ENOENT;
+    if ( id >= MAX_NR_IOREQ_SERVERS || !s || IS_DEFAULT(s) )
+        goto out;
 
-        p2m_set_ioreq_server(d, 0, s);
+    domain_pause(d);
 
-        hvm_ioreq_server_disable(s, false);
+    p2m_set_ioreq_server(d, 0, s);
 
-        list_del(&s->list_entry);
+    hvm_ioreq_server_disable(s);
+    hvm_ioreq_server_deinit(s);
 
-        hvm_ioreq_server_deinit(s, false);
+    domain_unpause(d);
 
-        domain_unpause(d);
+    ASSERT(d->arch.hvm_domain.ioreq_server.count);
+    --d->arch.hvm_domain.ioreq_server.count;
 
-        xfree(s);
+    d->arch.hvm_domain.ioreq_server.server[id] = NULL;
+    xfree(s);
 
-        rc = 0;
-        break;
-    }
+    rc = 0;
 
+ out:
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
@@ -785,29 +776,23 @@  int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
 
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
-    rc = -ENOENT;
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
-    {
-        if ( s == d->arch.hvm_domain.default_ioreq_server )
-            continue;
+    s = d->arch.hvm_domain.ioreq_server.server[id];
 
-        if ( s->id != id )
-            continue;
+    rc = -ENOENT;
+    if ( id >= MAX_NR_IOREQ_SERVERS || !s || IS_DEFAULT(s) )
+        goto out;
 
-        *ioreq_gfn = s->ioreq.gfn;
+    *ioreq_gfn = s->ioreq.gfn;
 
-        if ( s->bufioreq.va != NULL )
-        {
-            *bufioreq_gfn = s->bufioreq.gfn;
-            *bufioreq_port = s->bufioreq_evtchn;
-        }
-
-        rc = 0;
-        break;
+    if ( s->bufioreq.va != NULL )
+    {
+        *bufioreq_gfn = s->bufioreq.gfn;
+        *bufioreq_port = s->bufioreq_evtchn;
     }
 
+    rc = 0;
+
+ out:
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
@@ -818,48 +803,41 @@  int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
                                      uint64_t end)
 {
     struct hvm_ioreq_server *s;
+    struct rangeset *r;
     int rc;
 
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
-    rc = -ENOENT;
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
-    {
-        if ( s == d->arch.hvm_domain.default_ioreq_server )
-            continue;
+    s = d->arch.hvm_domain.ioreq_server.server[id];
 
-        if ( s->id == id )
-        {
-            struct rangeset *r;
+    rc = -ENOENT;
+    if ( id >= MAX_NR_IOREQ_SERVERS || !s || IS_DEFAULT(s) )
+        goto out;
 
-            switch ( type )
-            {
-            case XEN_DMOP_IO_RANGE_PORT:
-            case XEN_DMOP_IO_RANGE_MEMORY:
-            case XEN_DMOP_IO_RANGE_PCI:
-                r = s->range[type];
-                break;
+    switch ( type )
+    {
+    case XEN_DMOP_IO_RANGE_PORT:
+    case XEN_DMOP_IO_RANGE_MEMORY:
+    case XEN_DMOP_IO_RANGE_PCI:
+        r = s->range[type];
+        break;
 
-            default:
-                r = NULL;
-                break;
-            }
+    default:
+        r = NULL;
+        break;
+    }
 
-            rc = -EINVAL;
-            if ( !r )
-                break;
+    rc = -EINVAL;
+    if ( !r )
+        goto out;
 
-            rc = -EEXIST;
-            if ( rangeset_overlaps_range(r, start, end) )
-                break;
+    rc = -EEXIST;
+    if ( rangeset_overlaps_range(r, start, end) )
+        goto out;
 
-            rc = rangeset_add_range(r, start, end);
-            break;
-        }
-    }
+    rc = rangeset_add_range(r, start, end);
 
+ out:
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
@@ -870,48 +848,41 @@  int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
                                          uint64_t end)
 {
     struct hvm_ioreq_server *s;
+    struct rangeset *r;
     int rc;
 
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
-    rc = -ENOENT;
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
-    {
-        if ( s == d->arch.hvm_domain.default_ioreq_server )
-            continue;
+    s = d->arch.hvm_domain.ioreq_server.server[id];
 
-        if ( s->id == id )
-        {
-            struct rangeset *r;
+    rc = -ENOENT;
+    if ( id >= MAX_NR_IOREQ_SERVERS || !s || IS_DEFAULT(s) )
+        goto out;
 
-            switch ( type )
-            {
-            case XEN_DMOP_IO_RANGE_PORT:
-            case XEN_DMOP_IO_RANGE_MEMORY:
-            case XEN_DMOP_IO_RANGE_PCI:
-                r = s->range[type];
-                break;
+    switch ( type )
+    {
+    case XEN_DMOP_IO_RANGE_PORT:
+    case XEN_DMOP_IO_RANGE_MEMORY:
+    case XEN_DMOP_IO_RANGE_PCI:
+        r = s->range[type];
+        break;
 
-            default:
-                r = NULL;
-                break;
-            }
+    default:
+        r = NULL;
+        break;
+    }
 
-            rc = -EINVAL;
-            if ( !r )
-                break;
+    rc = -EINVAL;
+    if ( !r )
+        goto out;
 
-            rc = -ENOENT;
-            if ( !rangeset_contains_range(r, start, end) )
-                break;
+    rc = -ENOENT;
+    if ( !rangeset_contains_range(r, start, end) )
+        goto out;
 
-            rc = rangeset_remove_range(r, start, end);
-            break;
-        }
-    }
+    rc = rangeset_remove_range(r, start, end);
 
+ out:
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
@@ -939,20 +910,12 @@  int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
 
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
-    rc = -ENOENT;
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
-    {
-        if ( s == d->arch.hvm_domain.default_ioreq_server )
-            continue;
+    s = d->arch.hvm_domain.ioreq_server.server[id];
 
-        if ( s->id == id )
-        {
-            rc = p2m_set_ioreq_server(d, flags, s);
-            break;
-        }
-    }
+    if ( id >= MAX_NR_IOREQ_SERVERS || !s || IS_DEFAULT(s) )
+        rc = -ENOENT;
+    else
+        rc = p2m_set_ioreq_server(d, flags, s);
 
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
@@ -970,56 +933,50 @@  int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
 int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
                                bool enabled)
 {
-    struct list_head *entry;
+    struct hvm_ioreq_server *s;
     int rc;
 
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
-    rc = -ENOENT;
-    list_for_each ( entry,
-                    &d->arch.hvm_domain.ioreq_server.list )
-    {
-        struct hvm_ioreq_server *s = list_entry(entry,
-                                                struct hvm_ioreq_server,
-                                                list_entry);
-
-        if ( s == d->arch.hvm_domain.default_ioreq_server )
-            continue;
+    s = d->arch.hvm_domain.ioreq_server.server[id];
 
-        if ( s->id != id )
-            continue;
+    rc = -ENOENT;
+    if ( id >= MAX_NR_IOREQ_SERVERS || !s || IS_DEFAULT(s) )
+        goto out;
 
-        domain_pause(d);
+    domain_pause(d);
 
-        if ( enabled )
-            hvm_ioreq_server_enable(s, false);
-        else
-            hvm_ioreq_server_disable(s, false);
+    if ( enabled )
+        hvm_ioreq_server_enable(s);
+    else
+        hvm_ioreq_server_disable(s);
 
-        domain_unpause(d);
+    domain_unpause(d);
 
-        rc = 0;
-        break;
-    }
+    rc = 0;
 
+ out:
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
     return rc;
 }
 
 int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
 {
-    struct hvm_ioreq_server *s;
+    unsigned int id;
     int rc;
 
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
+    for ( id = 0; id < MAX_NR_IOREQ_SERVERS; id++ )
     {
-        bool is_default = (s == d->arch.hvm_domain.default_ioreq_server);
+        struct hvm_ioreq_server *s;
+
+        s = d->arch.hvm_domain.ioreq_server.server[id];
 
-        rc = hvm_ioreq_server_add_vcpu(s, is_default, v);
+        if ( !s )
+            continue;
+
+        rc = hvm_ioreq_server_add_vcpu(s, v);
         if ( rc )
             goto fail;
     }
@@ -1029,10 +986,17 @@  int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
     return 0;
 
  fail:
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
+    while ( id-- != 0 )
+    {
+        struct hvm_ioreq_server *s;
+
+        s = d->arch.hvm_domain.ioreq_server.server[id];
+
+        if ( !s )
+            continue;
+
         hvm_ioreq_server_remove_vcpu(s, v);
+    }
 
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
@@ -1041,44 +1005,52 @@  int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
 
 void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
 {
-    struct hvm_ioreq_server *s;
+    unsigned int id;
 
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
+    for ( id = 0; id < MAX_NR_IOREQ_SERVERS; id++ )
+    {
+        struct hvm_ioreq_server *s;
+
+        s = d->arch.hvm_domain.ioreq_server.server[id];
+
+        if ( !s )
+            continue;
+
         hvm_ioreq_server_remove_vcpu(s, v);
+    }
 
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 }
 
 void hvm_destroy_all_ioreq_servers(struct domain *d)
 {
-    struct hvm_ioreq_server *s, *next;
+    unsigned int id;
 
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     /* No need to domain_pause() as the domain is being torn down */
 
-    list_for_each_entry_safe ( s,
-                               next,
-                               &d->arch.hvm_domain.ioreq_server.list,
-                               list_entry )
+    for ( id = 0; id < MAX_NR_IOREQ_SERVERS; id++ )
     {
-        bool is_default = (s == d->arch.hvm_domain.default_ioreq_server);
+        struct hvm_ioreq_server *s;
 
-        hvm_ioreq_server_disable(s, is_default);
+        s = d->arch.hvm_domain.ioreq_server.server[id];
 
-        if ( is_default )
-            d->arch.hvm_domain.default_ioreq_server = NULL;
+        if ( !s )
+            continue;
 
-        list_del(&s->list_entry);
+        hvm_ioreq_server_disable(s);
+        hvm_ioreq_server_deinit(s);
 
-        hvm_ioreq_server_deinit(s, is_default);
+        ASSERT(d->arch.hvm_domain.ioreq_server.count);
+        --d->arch.hvm_domain.ioreq_server.count;
 
+        d->arch.hvm_domain.ioreq_server.server[id] = NULL;
         xfree(s);
     }
+    ASSERT(!d->arch.hvm_domain.ioreq_server.count);
 
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 }
@@ -1111,7 +1083,7 @@  int hvm_set_dm_domain(struct domain *d, domid_t domid)
      * still be set and thus, when the server is created, it will have
      * the correct domid.
      */
-    s = d->arch.hvm_domain.default_ioreq_server;
+    s = d->arch.hvm_domain.ioreq_server.server[DEFAULT_IOSERVID];
     if ( !s )
         goto done;
 
@@ -1160,16 +1132,16 @@  int hvm_set_dm_domain(struct domain *d, domid_t domid)
 struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
                                                  ioreq_t *p)
 {
-    struct hvm_ioreq_server *s;
     uint32_t cf8;
     uint8_t type;
     uint64_t addr;
+    unsigned int id;
 
-    if ( list_empty(&d->arch.hvm_domain.ioreq_server.list) )
+    if ( !d->arch.hvm_domain.ioreq_server.count )
         return NULL;
 
     if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
-        return d->arch.hvm_domain.default_ioreq_server;
+        return d->arch.hvm_domain.ioreq_server.server[DEFAULT_IOSERVID];
 
     cf8 = d->arch.hvm_domain.pci_cf8;
 
@@ -1211,16 +1183,14 @@  struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
         addr = p->addr;
     }
 
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
+    for ( id = 0; id < MAX_NR_IOREQ_SERVERS; id++ )
     {
+        struct hvm_ioreq_server *s;
         struct rangeset *r;
 
-        if ( s == d->arch.hvm_domain.default_ioreq_server )
-            continue;
+        s = d->arch.hvm_domain.ioreq_server.server[id];
 
-        if ( !s->enabled )
+        if ( !s || IS_DEFAULT(s) )
             continue;
 
         r = s->range[type];
@@ -1253,7 +1223,7 @@  struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
         }
     }
 
-    return d->arch.hvm_domain.default_ioreq_server;
+    return d->arch.hvm_domain.ioreq_server.server[DEFAULT_IOSERVID];
 }
 
 static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
@@ -1411,14 +1381,20 @@  int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
 unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
 {
     struct domain *d = current->domain;
-    struct hvm_ioreq_server *s;
-    unsigned int failed = 0;
+    unsigned int id, failed = 0;
+
+    for ( id = 0; id < MAX_NR_IOREQ_SERVERS; id++ )
+    {
+        struct hvm_ioreq_server *s;
+
+        s = d->arch.hvm_domain.ioreq_server.server[id];
+
+        if ( !s )
+            continue;
 
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
         if ( hvm_send_ioreq(s, p, buffered) == X86EMUL_UNHANDLEABLE )
             failed++;
+    }
 
     return failed;
 }
@@ -1438,7 +1414,6 @@  static int hvm_access_cf8(
 void hvm_ioreq_init(struct domain *d)
 {
     spin_lock_init(&d->arch.hvm_domain.ioreq_server.lock);
-    INIT_LIST_HEAD(&d->arch.hvm_domain.ioreq_server.list);
 
     register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
 }
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 7f128c05ff..01fe8a72d8 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -60,7 +60,6 @@  struct hvm_ioreq_server {
 
     /* Domain id of emulating domain */
     domid_t                domid;
-    ioservid_t             id;
     struct hvm_ioreq_page  ioreq;
     struct list_head       ioreq_vcpu_list;
     struct hvm_ioreq_page  bufioreq;
@@ -100,6 +99,9 @@  struct hvm_pi_ops {
     void (*do_resume)(struct vcpu *v);
 };
 
+#define MAX_NR_IOREQ_SERVERS 8
+#define DEFAULT_IOSERVID 0
+
 struct hvm_domain {
     /* Guest page range used for non-default ioreq servers */
     struct {
@@ -109,11 +111,10 @@  struct hvm_domain {
 
     /* Lock protects all other values in the sub-struct and the default */
     struct {
-        spinlock_t       lock;
-        ioservid_t       id;
-        struct list_head list;
+        spinlock_t              lock;
+        struct hvm_ioreq_server *server[MAX_NR_IOREQ_SERVERS];
+        unsigned int            count;
     } ioreq_server;
-    struct hvm_ioreq_server *default_ioreq_server;
 
     /* Cached CF8 for guest PCI config cycles */
     uint32_t                pci_cf8;