@@ -33,6 +33,32 @@
#include <public/hvm/ioreq.h>
+static void set_ioreq_server(struct domain *d, unsigned int id,
+ struct hvm_ioreq_server *s)
+{
+ ASSERT(id < MAX_NR_IOREQ_SERVERS);
+ ASSERT(!s || !d->arch.hvm_domain.ioreq_server.server[id]);
+
+ d->arch.hvm_domain.ioreq_server.server[id] = s;
+}
+
+static struct hvm_ioreq_server *get_ioreq_server(struct domain *d,
+ unsigned int id)
+{
+ if ( id >= MAX_NR_IOREQ_SERVERS )
+ return NULL;
+
+ return d->arch.hvm_domain.ioreq_server.server[id];
+}
+
+#define IS_DEFAULT(s) \
+ ((s) == get_ioreq_server((s)->domain, DEFAULT_IOSERVID))
+
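+/*
+ * NOTE: FOR_EACH_IOREQ_SERVER() visits every slot in the array, including
+ * vacant ones, so callers must check for and skip NULL entries.
+ */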
+#define FOR_EACH_IOREQ_SERVER(d, id, s) \
+ for ( (id) = 0, (s) = get_ioreq_server((d), (id)); \
+ (id) < MAX_NR_IOREQ_SERVERS; \
+ (s) = get_ioreq_server((d), ++(id)) )
+
static ioreq_t *get_ioreq(struct hvm_ioreq_server *s, struct vcpu *v)
{
shared_iopage_t *p = s->ioreq.va;
@@ -47,13 +73,15 @@ bool hvm_io_pending(struct vcpu *v)
{
struct domain *d = v->domain;
struct hvm_ioreq_server *s;
+ unsigned int id;
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
+ FOR_EACH_IOREQ_SERVER(d, id, s)
{
struct hvm_ioreq_vcpu *sv;
+ if ( !s )
+ continue;
+
list_for_each_entry ( sv,
&s->ioreq_vcpu_list,
list_entry )
@@ -127,13 +155,15 @@ bool handle_hvm_io_completion(struct vcpu *v)
struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
struct hvm_ioreq_server *s;
enum hvm_io_completion io_completion;
+ unsigned int id;
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
+ FOR_EACH_IOREQ_SERVER(d, id, s)
{
struct hvm_ioreq_vcpu *sv;
+ if ( !s )
+ continue;
+
list_for_each_entry ( sv,
&s->ioreq_vcpu_list,
list_entry )
@@ -243,14 +273,16 @@ static int hvm_map_ioreq_page(
bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
{
const struct hvm_ioreq_server *s;
+ unsigned int id;
bool found = false;
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
+ FOR_EACH_IOREQ_SERVER(d, id, s)
{
+ if ( !s )
+ continue;
+
if ( (s->ioreq.va && s->ioreq.page == page) ||
(s->bufioreq.va && s->bufioreq.page == page) )
{
@@ -301,8 +333,9 @@ static void hvm_update_ioreq_evtchn(struct hvm_ioreq_server *s,
}
}
+
static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
- bool is_default, struct vcpu *v)
+ struct vcpu *v)
{
struct hvm_ioreq_vcpu *sv;
int rc;
@@ -331,7 +364,7 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
goto fail3;
s->bufioreq_evtchn = rc;
- if ( is_default )
+ if ( IS_DEFAULT(s) )
d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN] =
s->bufioreq_evtchn;
}
@@ -431,7 +464,6 @@ static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
}
static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
- bool is_default,
bool handle_bufioreq)
{
struct domain *d = s->domain;
@@ -439,7 +471,7 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
unsigned long bufioreq_gfn = gfn_x(INVALID_GFN);
int rc;
- if ( is_default )
+ if ( IS_DEFAULT(s) )
{
/*
* The default ioreq server must handle buffered ioreqs, for
@@ -468,8 +500,7 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
return rc;
}
-static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
- bool is_default)
+static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s)
{
struct domain *d = s->domain;
bool handle_bufioreq = !!s->bufioreq.va;
@@ -479,7 +510,7 @@ static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
hvm_unmap_ioreq_page(s, false);
- if ( !is_default )
+ if ( !IS_DEFAULT(s) )
{
if ( handle_bufioreq )
hvm_free_ioreq_gfn(d, s->bufioreq.gfn);
@@ -488,12 +519,11 @@ static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
}
}
-static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s,
- bool is_default)
+static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s)
{
unsigned int i;
- if ( is_default )
+ if ( IS_DEFAULT(s) )
return;
for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
@@ -501,19 +531,19 @@ static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s,
}
static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
- bool is_default)
+ ioservid_t id)
{
unsigned int i;
int rc;
- if ( is_default )
+ if ( IS_DEFAULT(s) )
goto done;
for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
{
char *name;
- rc = asprintf(&name, "ioreq_server %d %s", s->id,
+ rc = asprintf(&name, "ioreq_server %d %s", id,
(i == XEN_DMOP_IO_RANGE_PORT) ? "port" :
(i == XEN_DMOP_IO_RANGE_MEMORY) ? "memory" :
(i == XEN_DMOP_IO_RANGE_PCI) ? "pci" :
@@ -537,13 +567,12 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
return 0;
fail:
- hvm_ioreq_server_free_rangesets(s, false);
+ hvm_ioreq_server_free_rangesets(s);
return rc;
}
-static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
- bool is_default)
+static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s)
{
struct domain *d = s->domain;
struct hvm_ioreq_vcpu *sv;
@@ -554,7 +583,7 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
if ( s->enabled )
goto done;
- if ( !is_default )
+ if ( !IS_DEFAULT(s) )
{
hvm_remove_ioreq_gfn(d, &s->ioreq);
@@ -573,8 +602,7 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
spin_unlock(&s->lock);
}
-static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
- bool is_default)
+static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s)
{
struct domain *d = s->domain;
bool handle_bufioreq = !!s->bufioreq.va;
@@ -584,7 +612,7 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
if ( !s->enabled )
goto done;
- if ( !is_default )
+ if ( !IS_DEFAULT(s) )
{
if ( handle_bufioreq )
hvm_add_ioreq_gfn(d, &s->bufioreq);
@@ -600,13 +628,11 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
struct domain *d, domid_t domid,
- bool is_default, int bufioreq_handling,
- ioservid_t id)
+ int bufioreq_handling, ioservid_t id)
{
struct vcpu *v;
int rc;
- s->id = id;
s->domain = d;
s->domid = domid;
@@ -614,7 +640,7 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
INIT_LIST_HEAD(&s->ioreq_vcpu_list);
spin_lock_init(&s->bufioreq_lock);
- rc = hvm_ioreq_server_alloc_rangesets(s, is_default);
+ rc = hvm_ioreq_server_alloc_rangesets(s, id);
if ( rc )
return rc;
@@ -622,13 +648,13 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
s->bufioreq_atomic = true;
rc = hvm_ioreq_server_setup_pages(
- s, is_default, bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF);
+ s, bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF);
if ( rc )
goto fail_map;
for_each_vcpu ( d, v )
{
- rc = hvm_ioreq_server_add_vcpu(s, is_default, v);
+ rc = hvm_ioreq_server_add_vcpu(s, v);
if ( rc )
goto fail_add;
}
@@ -637,47 +663,20 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
fail_add:
hvm_ioreq_server_remove_all_vcpus(s);
- hvm_ioreq_server_unmap_pages(s, is_default);
+ hvm_ioreq_server_unmap_pages(s);
fail_map:
- hvm_ioreq_server_free_rangesets(s, is_default);
+ hvm_ioreq_server_free_rangesets(s);
return rc;
}
-static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s,
- bool is_default)
+static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s)
{
ASSERT(!s->enabled);
hvm_ioreq_server_remove_all_vcpus(s);
- hvm_ioreq_server_unmap_pages(s, is_default);
- hvm_ioreq_server_free_rangesets(s, is_default);
-}
-
-static ioservid_t next_ioservid(struct domain *d)
-{
- struct hvm_ioreq_server *s;
- ioservid_t id;
-
- ASSERT(spin_is_locked(&d->arch.hvm_domain.ioreq_server.lock));
-
- id = d->arch.hvm_domain.ioreq_server.id;
-
- again:
- id++;
-
- /* Check for uniqueness */
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
- {
- if ( id == s->id )
- goto again;
- }
-
- d->arch.hvm_domain.ioreq_server.id = id;
-
- return id;
+ hvm_ioreq_server_unmap_pages(s);
+ hvm_ioreq_server_free_rangesets(s);
}
int hvm_create_ioreq_server(struct domain *d, domid_t domid,
@@ -685,52 +684,66 @@ int hvm_create_ioreq_server(struct domain *d, domid_t domid,
ioservid_t *id)
{
struct hvm_ioreq_server *s;
+ unsigned int i;
int rc;
if ( bufioreq_handling > HVM_IOREQSRV_BUFIOREQ_ATOMIC )
return -EINVAL;
- rc = -ENOMEM;
s = xzalloc(struct hvm_ioreq_server);
if ( !s )
- goto fail1;
+ return -ENOMEM;
domain_pause(d);
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
- rc = -EEXIST;
- if ( is_default && d->arch.hvm_domain.default_ioreq_server != NULL )
- goto fail2;
-
- rc = hvm_ioreq_server_init(s, d, domid, is_default, bufioreq_handling,
- next_ioservid(d));
- if ( rc )
- goto fail3;
-
- list_add(&s->list_entry,
- &d->arch.hvm_domain.ioreq_server.list);
-
if ( is_default )
{
- d->arch.hvm_domain.default_ioreq_server = s;
- hvm_ioreq_server_enable(s, true);
+ i = DEFAULT_IOSERVID;
+
+ rc = -EEXIST;
+ if ( get_ioreq_server(d, i) )
+ goto fail;
+ }
+ else
+ {
+ for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
+ {
+ if ( i != DEFAULT_IOSERVID && !get_ioreq_server(d, i) )
+ break;
+ }
+
+ rc = -ENOSPC;
+ if ( i >= MAX_NR_IOREQ_SERVERS )
+ goto fail;
}
+ set_ioreq_server(d, i, s);
+
+ rc = hvm_ioreq_server_init(s, d, domid, bufioreq_handling, i);
+ if ( rc )
+ {
+ set_ioreq_server(d, i, NULL);
+ goto fail;
+ }
+
+ if ( IS_DEFAULT(s) )
+ hvm_ioreq_server_enable(s);
+
if ( id )
- *id = s->id;
+ *id = i;
+
+ d->arch.hvm_domain.ioreq_server.count++;
spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
domain_unpause(d);
return 0;
- fail3:
- fail2:
+ fail:
spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
domain_unpause(d);
xfree(s);
- fail1:
return rc;
}
@@ -741,35 +754,34 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
- rc = -ENOENT;
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
- {
- if ( s == d->arch.hvm_domain.default_ioreq_server )
- continue;
+ s = get_ioreq_server(d, id);
- if ( s->id != id )
- continue;
+ rc = -ENOENT;
+ if ( !s )
+ goto out;
- domain_pause(d);
+ rc = -EPERM;
+ if ( IS_DEFAULT(s) )
+ goto out;
- p2m_set_ioreq_server(d, 0, s);
+ domain_pause(d);
- hvm_ioreq_server_disable(s, false);
+ p2m_set_ioreq_server(d, 0, s);
- list_del(&s->list_entry);
+ hvm_ioreq_server_disable(s);
+ hvm_ioreq_server_deinit(s);
- hvm_ioreq_server_deinit(s, false);
+ domain_unpause(d);
- domain_unpause(d);
+ ASSERT(d->arch.hvm_domain.ioreq_server.count);
+ --d->arch.hvm_domain.ioreq_server.count;
- xfree(s);
+ set_ioreq_server(d, id, NULL);
+ xfree(s);
- rc = 0;
- break;
- }
+ rc = 0;
+ out:
spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
return rc;
@@ -785,29 +797,27 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
- rc = -ENOENT;
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
- {
- if ( s == d->arch.hvm_domain.default_ioreq_server )
- continue;
+ s = get_ioreq_server(d, id);
- if ( s->id != id )
- continue;
+ rc = -ENOENT;
+ if ( !s )
+ goto out;
- *ioreq_gfn = s->ioreq.gfn;
+ rc = -EOPNOTSUPP;
+ if ( IS_DEFAULT(s) )
+ goto out;
- if ( s->bufioreq.va != NULL )
- {
- *bufioreq_gfn = s->bufioreq.gfn;
- *bufioreq_port = s->bufioreq_evtchn;
- }
+ *ioreq_gfn = s->ioreq.gfn;
- rc = 0;
- break;
+ if ( s->bufioreq.va != NULL )
+ {
+ *bufioreq_gfn = s->bufioreq.gfn;
+ *bufioreq_port = s->bufioreq_evtchn;
}
+ rc = 0;
+
+ out:
spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
return rc;
@@ -818,48 +828,45 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
uint64_t end)
{
struct hvm_ioreq_server *s;
+ struct rangeset *r;
int rc;
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ s = get_ioreq_server(d, id);
+
rc = -ENOENT;
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
- {
- if ( s == d->arch.hvm_domain.default_ioreq_server )
- continue;
+ if ( !s )
+ goto out;
- if ( s->id == id )
- {
- struct rangeset *r;
+ rc = -EOPNOTSUPP;
+ if ( IS_DEFAULT(s) )
+ goto out;
- switch ( type )
- {
- case XEN_DMOP_IO_RANGE_PORT:
- case XEN_DMOP_IO_RANGE_MEMORY:
- case XEN_DMOP_IO_RANGE_PCI:
- r = s->range[type];
- break;
+ switch ( type )
+ {
+ case XEN_DMOP_IO_RANGE_PORT:
+ case XEN_DMOP_IO_RANGE_MEMORY:
+ case XEN_DMOP_IO_RANGE_PCI:
+ r = s->range[type];
+ break;
- default:
- r = NULL;
- break;
- }
+ default:
+ r = NULL;
+ break;
+ }
- rc = -EINVAL;
- if ( !r )
- break;
+ rc = -EINVAL;
+ if ( !r )
+ goto out;
- rc = -EEXIST;
- if ( rangeset_overlaps_range(r, start, end) )
- break;
+ rc = -EEXIST;
+ if ( rangeset_overlaps_range(r, start, end) )
+ goto out;
- rc = rangeset_add_range(r, start, end);
- break;
- }
- }
+ rc = rangeset_add_range(r, start, end);
+ out:
spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
return rc;
@@ -870,48 +877,45 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
uint64_t end)
{
struct hvm_ioreq_server *s;
+ struct rangeset *r;
int rc;
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ s = get_ioreq_server(d, id);
+
rc = -ENOENT;
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
- {
- if ( s == d->arch.hvm_domain.default_ioreq_server )
- continue;
+ if ( !s )
+ goto out;
- if ( s->id == id )
- {
- struct rangeset *r;
+ rc = -EOPNOTSUPP;
+ if ( IS_DEFAULT(s) )
+ goto out;
- switch ( type )
- {
- case XEN_DMOP_IO_RANGE_PORT:
- case XEN_DMOP_IO_RANGE_MEMORY:
- case XEN_DMOP_IO_RANGE_PCI:
- r = s->range[type];
- break;
+ switch ( type )
+ {
+ case XEN_DMOP_IO_RANGE_PORT:
+ case XEN_DMOP_IO_RANGE_MEMORY:
+ case XEN_DMOP_IO_RANGE_PCI:
+ r = s->range[type];
+ break;
- default:
- r = NULL;
- break;
- }
+ default:
+ r = NULL;
+ break;
+ }
- rc = -EINVAL;
- if ( !r )
- break;
+ rc = -EINVAL;
+ if ( !r )
+ goto out;
- rc = -ENOENT;
- if ( !rangeset_contains_range(r, start, end) )
- break;
+ rc = -ENOENT;
+ if ( !rangeset_contains_range(r, start, end) )
+ goto out;
- rc = rangeset_remove_range(r, start, end);
- break;
- }
- }
+ rc = rangeset_remove_range(r, start, end);
+ out:
spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
return rc;
@@ -939,20 +943,14 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
- rc = -ENOENT;
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
- {
- if ( s == d->arch.hvm_domain.default_ioreq_server )
- continue;
+ s = get_ioreq_server(d, id);
- if ( s->id == id )
- {
- rc = p2m_set_ioreq_server(d, flags, s);
- break;
- }
- }
+ if ( !s )
+ rc = -ENOENT;
+ else if ( IS_DEFAULT(s) )
+ rc = -EOPNOTSUPP;
+ else
+ rc = p2m_set_ioreq_server(d, flags, s);
spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
@@ -970,38 +968,33 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
bool enabled)
{
- struct list_head *entry;
+ struct hvm_ioreq_server *s;
int rc;
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
- rc = -ENOENT;
- list_for_each ( entry,
- &d->arch.hvm_domain.ioreq_server.list )
- {
- struct hvm_ioreq_server *s = list_entry(entry,
- struct hvm_ioreq_server,
- list_entry);
+ s = get_ioreq_server(d, id);
- if ( s == d->arch.hvm_domain.default_ioreq_server )
- continue;
+ rc = -ENOENT;
+ if ( !s )
+ goto out;
- if ( s->id != id )
- continue;
+ rc = -EOPNOTSUPP;
+ if ( IS_DEFAULT(s) )
+ goto out;
- domain_pause(d);
+ domain_pause(d);
- if ( enabled )
- hvm_ioreq_server_enable(s, false);
- else
- hvm_ioreq_server_disable(s, false);
+ if ( enabled )
+ hvm_ioreq_server_enable(s);
+ else
+ hvm_ioreq_server_disable(s);
- domain_unpause(d);
+ domain_unpause(d);
- rc = 0;
- break;
- }
+ rc = 0;
+ out:
spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
return rc;
}
@@ -1009,17 +1002,17 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
{
struct hvm_ioreq_server *s;
+ unsigned int id;
int rc;
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
+ FOR_EACH_IOREQ_SERVER(d, id, s)
{
- bool is_default = (s == d->arch.hvm_domain.default_ioreq_server);
+ if ( !s )
+ continue;
- rc = hvm_ioreq_server_add_vcpu(s, is_default, v);
+ rc = hvm_ioreq_server_add_vcpu(s, v);
if ( rc )
goto fail;
}
@@ -1029,10 +1022,15 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
return 0;
fail:
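+ /* Unwind: remove the vcpu from the servers in slots already processed. */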
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
+ while ( id-- != 0 )
+ {
+ s = get_ioreq_server(d, id);
+
+ if ( !s )
+ continue;
+
hvm_ioreq_server_remove_vcpu(s, v);
+ }
spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
@@ -1042,43 +1040,45 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
{
struct hvm_ioreq_server *s;
+ unsigned int id;
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
+ FOR_EACH_IOREQ_SERVER(d, id, s)
+ {
+ if ( !s )
+ continue;
+
hvm_ioreq_server_remove_vcpu(s, v);
+ }
spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
}
void hvm_destroy_all_ioreq_servers(struct domain *d)
{
- struct hvm_ioreq_server *s, *next;
+ struct hvm_ioreq_server *s;
+ unsigned int id;
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
/* No need to domain_pause() as the domain is being torn down */
- list_for_each_entry_safe ( s,
- next,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
+ FOR_EACH_IOREQ_SERVER(d, id, s)
{
- bool is_default = (s == d->arch.hvm_domain.default_ioreq_server);
-
- hvm_ioreq_server_disable(s, is_default);
-
- if ( is_default )
- d->arch.hvm_domain.default_ioreq_server = NULL;
+ if ( !s )
+ continue;
- list_del(&s->list_entry);
+ hvm_ioreq_server_disable(s);
+ hvm_ioreq_server_deinit(s);
- hvm_ioreq_server_deinit(s, is_default);
+ ASSERT(d->arch.hvm_domain.ioreq_server.count);
+ --d->arch.hvm_domain.ioreq_server.count;
+ set_ioreq_server(d, id, NULL);
xfree(s);
}
+ ASSERT(!d->arch.hvm_domain.ioreq_server.count);
spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
}
@@ -1111,7 +1111,7 @@ int hvm_set_dm_domain(struct domain *d, domid_t domid)
* still be set and thus, when the server is created, it will have
* the correct domid.
*/
- s = d->arch.hvm_domain.default_ioreq_server;
+ s = get_ioreq_server(d, DEFAULT_IOSERVID);
if ( !s )
goto done;
@@ -1164,12 +1164,13 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
uint32_t cf8;
uint8_t type;
uint64_t addr;
+ unsigned int id;
- if ( list_empty(&d->arch.hvm_domain.ioreq_server.list) )
+ if ( !d->arch.hvm_domain.ioreq_server.count )
return NULL;
if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
- return d->arch.hvm_domain.default_ioreq_server;
+ return get_ioreq_server(d, DEFAULT_IOSERVID);
cf8 = d->arch.hvm_domain.pci_cf8;
@@ -1209,16 +1210,11 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
addr = p->addr;
}
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
+ FOR_EACH_IOREQ_SERVER(d, id, s)
{
struct rangeset *r;
- if ( s == d->arch.hvm_domain.default_ioreq_server )
- continue;
-
- if ( !s->enabled )
+ if ( !s || IS_DEFAULT(s) || !s->enabled )
continue;
r = s->range[type];
@@ -1251,7 +1247,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
}
}
- return d->arch.hvm_domain.default_ioreq_server;
+ return get_ioreq_server(d, DEFAULT_IOSERVID);
}
static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
@@ -1410,13 +1406,16 @@ unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
{
struct domain *d = current->domain;
struct hvm_ioreq_server *s;
- unsigned int failed = 0;
+ unsigned int id, failed = 0;
+
+ FOR_EACH_IOREQ_SERVER(d, id, s)
+ {
+ if ( !s )
+ continue;
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
if ( hvm_send_ioreq(s, p, buffered) == X86EMUL_UNHANDLEABLE )
failed++;
+ }
return failed;
}
@@ -1436,7 +1435,6 @@ static int hvm_access_cf8(
void hvm_ioreq_init(struct domain *d)
{
spin_lock_init(&d->arch.hvm_domain.ioreq_server.lock);
- INIT_LIST_HEAD(&d->arch.hvm_domain.ioreq_server.list);
register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
}
@@ -60,7 +60,6 @@ struct hvm_ioreq_server {
/* Domain id of emulating domain */
domid_t domid;
- ioservid_t id;
struct hvm_ioreq_page ioreq;
struct list_head ioreq_vcpu_list;
struct hvm_ioreq_page bufioreq;
@@ -100,6 +99,9 @@ struct hvm_pi_ops {
void (*do_resume)(struct vcpu *v);
};
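+/*
+ * Slot 0 of the ioreq server array is reserved for the default (catch-all)
+ * ioreq server; the remaining slots hold explicitly created servers.
+ */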
+#define MAX_NR_IOREQ_SERVERS 8
+#define DEFAULT_IOSERVID 0
+
struct hvm_domain {
/* Guest page range used for non-default ioreq servers */
struct {
@@ -109,11 +111,10 @@ struct hvm_domain {
- /* Lock protects all other values in the sub-struct and the default */
+ /* Lock protects all other values in the sub-struct */
struct {
- spinlock_t lock;
- ioservid_t id;
- struct list_head list;
+ spinlock_t lock;
+ struct hvm_ioreq_server *server[MAX_NR_IOREQ_SERVERS];
+ unsigned int count;
} ioreq_server;
- struct hvm_ioreq_server *default_ioreq_server;
/* Cached CF8 for guest PCI config cycles */
uint32_t pci_cf8;