@@ -33,6 +33,37 @@
#include <public/hvm/ioreq.h>
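+/*
+ * Install or clear the array entry for ioreq server 'id'. A new server
+ * may only be installed into a currently vacant slot.
+ */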
+static void set_ioreq_server(struct domain *d, unsigned int id,
+ struct hvm_ioreq_server *s)
+{
+ ASSERT(id < MAX_NR_IOREQ_SERVERS);
+ ASSERT(!s || !d->arch.hvm_domain.ioreq_server.server[id]);
+
+ d->arch.hvm_domain.ioreq_server.server[id] = s;
+}
+
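+/*
+ * GET_IOREQ_SERVER() reads a slot directly and assumes 'id' is in range;
+ * get_ioreq_server() below is the bounds-checked variant for use with a
+ * caller-supplied ioservid_t.
+ */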
+#define GET_IOREQ_SERVER(d, id) \
+ (d)->arch.hvm_domain.ioreq_server.server[id]
+
+static struct hvm_ioreq_server *get_ioreq_server(const struct domain *d,
+ unsigned int id)
+{
+ if ( id >= MAX_NR_IOREQ_SERVERS )
+ return NULL;
+
+ return GET_IOREQ_SERVER(d, id);
+}
+
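+/* True iff 's' occupies the slot reserved for the default ioreq server */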
+#define IS_DEFAULT(s) \
+ ((s) && (s) == GET_IOREQ_SERVER((s)->domain, DEFAULT_IOSERVID))
+
+/* Iterate over all possible ioreq servers */
+#define FOR_EACH_IOREQ_SERVER(d, id, s) \
+    for ( (id) = 0; (id) < MAX_NR_IOREQ_SERVERS; (id)++ ) \
+        if ( !(s = GET_IOREQ_SERVER(d, id)) ) \
+            continue; \
+        else
+
static ioreq_t *get_ioreq(struct hvm_ioreq_server *s, struct vcpu *v)
{
shared_iopage_t *p = s->ioreq.va;
@@ -47,10 +78,9 @@ bool hvm_io_pending(struct vcpu *v)
{
struct domain *d = v->domain;
struct hvm_ioreq_server *s;
+ unsigned int id;
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
+ FOR_EACH_IOREQ_SERVER(d, id, s)
{
struct hvm_ioreq_vcpu *sv;
@@ -127,10 +157,9 @@ bool handle_hvm_io_completion(struct vcpu *v)
struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
struct hvm_ioreq_server *s;
enum hvm_io_completion io_completion;
+ unsigned int id;
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
+ FOR_EACH_IOREQ_SERVER(d, id, s)
{
struct hvm_ioreq_vcpu *sv;
@@ -243,13 +272,12 @@ static int hvm_map_ioreq_page(
bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
{
const struct hvm_ioreq_server *s;
+ unsigned int id;
bool found = false;
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
+ FOR_EACH_IOREQ_SERVER(d, id, s)
{
if ( (s->ioreq.va && s->ioreq.page == page) ||
(s->bufioreq.va && s->bufioreq.page == page) )
@@ -302,7 +330,7 @@ static void hvm_update_ioreq_evtchn(struct hvm_ioreq_server *s,
}
static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
- bool is_default, struct vcpu *v)
+ struct vcpu *v)
{
struct hvm_ioreq_vcpu *sv;
int rc;
@@ -331,7 +359,7 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
goto fail3;
s->bufioreq_evtchn = rc;
- if ( is_default )
+ if ( IS_DEFAULT(s) )
d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN] =
s->bufioreq_evtchn;
}
@@ -431,7 +459,6 @@ static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
}
static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
- bool is_default,
bool handle_bufioreq)
{
struct domain *d = s->domain;
@@ -439,7 +466,7 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
unsigned long bufioreq_gfn = gfn_x(INVALID_GFN);
int rc;
- if ( is_default )
+ if ( IS_DEFAULT(s) )
{
/*
* The default ioreq server must handle buffered ioreqs, for
@@ -468,8 +495,7 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
return rc;
}
-static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
- bool is_default)
+static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s)
{
struct domain *d = s->domain;
bool handle_bufioreq = !!s->bufioreq.va;
@@ -479,7 +505,7 @@ static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
hvm_unmap_ioreq_page(s, false);
- if ( !is_default )
+ if ( !IS_DEFAULT(s) )
{
if ( handle_bufioreq )
hvm_free_ioreq_gfn(d, s->bufioreq.gfn);
@@ -488,12 +514,11 @@ static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
}
}
-static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s,
- bool is_default)
+static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s)
{
unsigned int i;
- if ( is_default )
+ if ( IS_DEFAULT(s) )
return;
for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
@@ -501,19 +526,21 @@ static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s,
}
static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
- bool is_default)
+ ioservid_t id)
{
unsigned int i;
int rc;
- if ( is_default )
+ if ( id == DEFAULT_IOSERVID )
goto done;
+ ASSERT(!IS_DEFAULT(s));
+
for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
{
char *name;
- rc = asprintf(&name, "ioreq_server %d %s", s->id,
+ rc = asprintf(&name, "ioreq_server %d %s", id,
(i == XEN_DMOP_IO_RANGE_PORT) ? "port" :
(i == XEN_DMOP_IO_RANGE_MEMORY) ? "memory" :
(i == XEN_DMOP_IO_RANGE_PCI) ? "pci" :
@@ -537,13 +564,12 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
return 0;
fail:
- hvm_ioreq_server_free_rangesets(s, false);
+ hvm_ioreq_server_free_rangesets(s);
return rc;
}
-static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
- bool is_default)
+static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s)
{
struct domain *d = s->domain;
struct hvm_ioreq_vcpu *sv;
@@ -554,7 +580,7 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
if ( s->enabled )
goto done;
- if ( !is_default )
+ if ( !IS_DEFAULT(s) )
{
hvm_remove_ioreq_gfn(d, &s->ioreq);
@@ -573,8 +599,7 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
spin_unlock(&s->lock);
}
-static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
- bool is_default)
+static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s)
{
struct domain *d = s->domain;
bool handle_bufioreq = !!s->bufioreq.va;
@@ -584,7 +609,7 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
if ( !s->enabled )
goto done;
- if ( !is_default )
+ if ( !IS_DEFAULT(s) )
{
if ( handle_bufioreq )
hvm_add_ioreq_gfn(d, &s->bufioreq);
@@ -600,13 +625,11 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
struct domain *d, domid_t domid,
- bool is_default, int bufioreq_handling,
- ioservid_t id)
+ int bufioreq_handling, ioservid_t id)
{
struct vcpu *v;
int rc;
- s->id = id;
s->domain = d;
s->domid = domid;
@@ -614,7 +637,7 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
INIT_LIST_HEAD(&s->ioreq_vcpu_list);
spin_lock_init(&s->bufioreq_lock);
- rc = hvm_ioreq_server_alloc_rangesets(s, is_default);
+ rc = hvm_ioreq_server_alloc_rangesets(s, id);
if ( rc )
return rc;
@@ -622,13 +645,13 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
s->bufioreq_atomic = true;
rc = hvm_ioreq_server_setup_pages(
- s, is_default, bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF);
+ s, bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF);
if ( rc )
goto fail_map;
for_each_vcpu ( d, v )
{
- rc = hvm_ioreq_server_add_vcpu(s, is_default, v);
+ rc = hvm_ioreq_server_add_vcpu(s, v);
if ( rc )
goto fail_add;
}
@@ -637,47 +660,20 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
fail_add:
hvm_ioreq_server_remove_all_vcpus(s);
- hvm_ioreq_server_unmap_pages(s, is_default);
+ hvm_ioreq_server_unmap_pages(s);
fail_map:
- hvm_ioreq_server_free_rangesets(s, is_default);
+ hvm_ioreq_server_free_rangesets(s);
return rc;
}
-static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s,
- bool is_default)
+static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s)
{
ASSERT(!s->enabled);
hvm_ioreq_server_remove_all_vcpus(s);
- hvm_ioreq_server_unmap_pages(s, is_default);
- hvm_ioreq_server_free_rangesets(s, is_default);
-}
-
-static ioservid_t next_ioservid(struct domain *d)
-{
- struct hvm_ioreq_server *s;
- ioservid_t id;
-
- ASSERT(spin_is_locked(&d->arch.hvm_domain.ioreq_server.lock));
-
- id = d->arch.hvm_domain.ioreq_server.id;
-
- again:
- id++;
-
- /* Check for uniqueness */
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
- {
- if ( id == s->id )
- goto again;
- }
-
- d->arch.hvm_domain.ioreq_server.id = id;
-
- return id;
+ hvm_ioreq_server_unmap_pages(s);
+ hvm_ioreq_server_free_rangesets(s);
}
int hvm_create_ioreq_server(struct domain *d, domid_t domid,
@@ -685,52 +681,64 @@ int hvm_create_ioreq_server(struct domain *d, domid_t domid,
ioservid_t *id)
{
struct hvm_ioreq_server *s;
+ unsigned int i;
int rc;
if ( bufioreq_handling > HVM_IOREQSRV_BUFIOREQ_ATOMIC )
return -EINVAL;
- rc = -ENOMEM;
s = xzalloc(struct hvm_ioreq_server);
if ( !s )
- goto fail1;
+ return -ENOMEM;
domain_pause(d);
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
- rc = -EEXIST;
- if ( is_default && d->arch.hvm_domain.default_ioreq_server != NULL )
- goto fail2;
-
- rc = hvm_ioreq_server_init(s, d, domid, is_default, bufioreq_handling,
- next_ioservid(d));
- if ( rc )
- goto fail3;
-
- list_add(&s->list_entry,
- &d->arch.hvm_domain.ioreq_server.list);
-
if ( is_default )
{
- d->arch.hvm_domain.default_ioreq_server = s;
- hvm_ioreq_server_enable(s, true);
+ i = DEFAULT_IOSERVID;
+
+ rc = -EEXIST;
+ if ( GET_IOREQ_SERVER(d, i) )
+ goto fail;
+ }
+ else
+ {
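+ /* Find a free slot, skipping the one reserved for the default server */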
+ for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
+ {
+ if ( i != DEFAULT_IOSERVID && !GET_IOREQ_SERVER(d, i) )
+ break;
+ }
+
+ rc = -ENOSPC;
+ if ( i >= MAX_NR_IOREQ_SERVERS )
+ goto fail;
}
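+
+ /*
+ * Populating the slot before hvm_ioreq_server_init() completes is safe
+ * because the domain is paused, so its vcpus cannot observe a partially
+ * initialized server.
+ */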
+ set_ioreq_server(d, i, s);
+
+ rc = hvm_ioreq_server_init(s, d, domid, bufioreq_handling, i);
+ if ( rc )
+ {
+     set_ioreq_server(d, i, NULL);
+     goto fail;
+ }
+
+ if ( i == DEFAULT_IOSERVID )
+ hvm_ioreq_server_enable(s);
+
if ( id )
- *id = s->id;
+ *id = i;
spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
domain_unpause(d);
return 0;
- fail3:
- fail2:
+ fail:
spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
domain_unpause(d);
xfree(s);
- fail1:
return rc;
}
@@ -739,37 +747,34 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
struct hvm_ioreq_server *s;
int rc;
- spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ if ( id == DEFAULT_IOSERVID )
+ return -EPERM;
- rc = -ENOENT;
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
- {
- if ( s == d->arch.hvm_domain.default_ioreq_server )
- continue;
+ spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
- if ( s->id != id )
- continue;
+ s = get_ioreq_server(d, id);
- domain_pause(d);
+ rc = -ENOENT;
+ if ( !s )
+ goto out;
- p2m_set_ioreq_server(d, 0, s);
+ ASSERT(!IS_DEFAULT(s));
- hvm_ioreq_server_disable(s, false);
+ domain_pause(d);
- list_del(&s->list_entry);
+ p2m_set_ioreq_server(d, 0, s);
- hvm_ioreq_server_deinit(s, false);
+ hvm_ioreq_server_disable(s);
+ hvm_ioreq_server_deinit(s);
- domain_unpause(d);
+ domain_unpause(d);
- xfree(s);
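+ /*
+ * Clear the array slot only after disable/deinit: both paths use
+ * IS_DEFAULT(), which looks the server up via the array.
+ */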
+ set_ioreq_server(d, id, NULL);
+ xfree(s);
- rc = 0;
- break;
- }
+ rc = 0;
+ out:
spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
return rc;
@@ -783,31 +788,30 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
struct hvm_ioreq_server *s;
int rc;
+ if ( id == DEFAULT_IOSERVID )
+ return -EOPNOTSUPP;
+
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
- rc = -ENOENT;
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
- {
- if ( s == d->arch.hvm_domain.default_ioreq_server )
- continue;
+ s = get_ioreq_server(d, id);
- if ( s->id != id )
- continue;
+ rc = -ENOENT;
+ if ( !s )
+ goto out;
- *ioreq_gfn = s->ioreq.gfn;
+ ASSERT(!IS_DEFAULT(s));
- if ( s->bufioreq.va != NULL )
- {
- *bufioreq_gfn = s->bufioreq.gfn;
- *bufioreq_port = s->bufioreq_evtchn;
- }
+ *ioreq_gfn = s->ioreq.gfn;
- rc = 0;
- break;
+ if ( s->bufioreq.va != NULL )
+ {
+ *bufioreq_gfn = s->bufioreq.gfn;
+ *bufioreq_port = s->bufioreq_evtchn;
}
+ rc = 0;
+
+ out:
spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
return rc;
@@ -818,51 +822,49 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
uint64_t end)
{
struct hvm_ioreq_server *s;
+ struct rangeset *r;
int rc;
if ( start > end )
return -EINVAL;
+ if ( id == DEFAULT_IOSERVID )
+ return -EOPNOTSUPP;
+
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ s = get_ioreq_server(d, id);
+
rc = -ENOENT;
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
- {
- if ( s == d->arch.hvm_domain.default_ioreq_server )
- continue;
+ if ( !s )
+ goto out;
- if ( s->id == id )
- {
- struct rangeset *r;
+ ASSERT(!IS_DEFAULT(s));
- switch ( type )
- {
- case XEN_DMOP_IO_RANGE_PORT:
- case XEN_DMOP_IO_RANGE_MEMORY:
- case XEN_DMOP_IO_RANGE_PCI:
- r = s->range[type];
- break;
+ switch ( type )
+ {
+ case XEN_DMOP_IO_RANGE_PORT:
+ case XEN_DMOP_IO_RANGE_MEMORY:
+ case XEN_DMOP_IO_RANGE_PCI:
+ r = s->range[type];
+ break;
- default:
- r = NULL;
- break;
- }
+ default:
+ r = NULL;
+ break;
+ }
- rc = -EINVAL;
- if ( !r )
- break;
+ rc = -EINVAL;
+ if ( !r )
+ goto out;
- rc = -EEXIST;
- if ( rangeset_overlaps_range(r, start, end) )
- break;
+ rc = -EEXIST;
+ if ( rangeset_overlaps_range(r, start, end) )
+ goto out;
- rc = rangeset_add_range(r, start, end);
- break;
- }
- }
+ rc = rangeset_add_range(r, start, end);
+ out:
spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
return rc;
@@ -873,51 +875,49 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
uint64_t end)
{
struct hvm_ioreq_server *s;
+ struct rangeset *r;
int rc;
if ( start > end )
return -EINVAL;
+ if ( id == DEFAULT_IOSERVID )
+ return -EOPNOTSUPP;
+
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ s = get_ioreq_server(d, id);
+
rc = -ENOENT;
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
- {
- if ( s == d->arch.hvm_domain.default_ioreq_server )
- continue;
+ if ( !s )
+ goto out;
- if ( s->id == id )
- {
- struct rangeset *r;
+ ASSERT(!IS_DEFAULT(s));
- switch ( type )
- {
- case XEN_DMOP_IO_RANGE_PORT:
- case XEN_DMOP_IO_RANGE_MEMORY:
- case XEN_DMOP_IO_RANGE_PCI:
- r = s->range[type];
- break;
+ switch ( type )
+ {
+ case XEN_DMOP_IO_RANGE_PORT:
+ case XEN_DMOP_IO_RANGE_MEMORY:
+ case XEN_DMOP_IO_RANGE_PCI:
+ r = s->range[type];
+ break;
- default:
- r = NULL;
- break;
- }
+ default:
+ r = NULL;
+ break;
+ }
- rc = -EINVAL;
- if ( !r )
- break;
+ rc = -EINVAL;
+ if ( !r )
+ goto out;
- rc = -ENOENT;
- if ( !rangeset_contains_range(r, start, end) )
- break;
+ rc = -ENOENT;
+ if ( !rangeset_contains_range(r, start, end) )
+ goto out;
- rc = rangeset_remove_range(r, start, end);
- break;
- }
- }
+ rc = rangeset_remove_range(r, start, end);
+ out:
spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
return rc;
@@ -937,6 +937,9 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
struct hvm_ioreq_server *s;
int rc;
+ if ( id == DEFAULT_IOSERVID )
+ return -EOPNOTSUPP;
+
if ( type != HVMMEM_ioreq_server )
return -EINVAL;
@@ -945,19 +948,14 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
- rc = -ENOENT;
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
- {
- if ( s == d->arch.hvm_domain.default_ioreq_server )
- continue;
+ s = get_ioreq_server(d, id);
- if ( s->id == id )
- {
- rc = p2m_set_ioreq_server(d, flags, s);
- break;
- }
+ if ( !s )
+ rc = -ENOENT;
+ else
+ {
+ ASSERT(!IS_DEFAULT(s));
+ rc = p2m_set_ioreq_server(d, flags, s);
}
spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
@@ -976,38 +974,34 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
bool enabled)
{
- struct list_head *entry;
+ struct hvm_ioreq_server *s;
int rc;
+ if ( id == DEFAULT_IOSERVID )
+ return -EOPNOTSUPP;
+
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
- rc = -ENOENT;
- list_for_each ( entry,
- &d->arch.hvm_domain.ioreq_server.list )
- {
- struct hvm_ioreq_server *s = list_entry(entry,
- struct hvm_ioreq_server,
- list_entry);
+ s = get_ioreq_server(d, id);
- if ( s == d->arch.hvm_domain.default_ioreq_server )
- continue;
+ rc = -ENOENT;
+ if ( !s )
+ goto out;
- if ( s->id != id )
- continue;
+ ASSERT(!IS_DEFAULT(s));
- domain_pause(d);
+ domain_pause(d);
- if ( enabled )
- hvm_ioreq_server_enable(s, false);
- else
- hvm_ioreq_server_disable(s, false);
+ if ( enabled )
+ hvm_ioreq_server_enable(s);
+ else
+ hvm_ioreq_server_disable(s);
- domain_unpause(d);
+ domain_unpause(d);
- rc = 0;
- break;
- }
+ rc = 0;
+ out:
spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
return rc;
}
@@ -1015,17 +1009,14 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
{
struct hvm_ioreq_server *s;
+ unsigned int id;
int rc;
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
+ FOR_EACH_IOREQ_SERVER(d, id, s)
{
- bool is_default = (s == d->arch.hvm_domain.default_ioreq_server);
-
- rc = hvm_ioreq_server_add_vcpu(s, is_default, v);
+ rc = hvm_ioreq_server_add_vcpu(s, v);
if ( rc )
goto fail;
}
@@ -1035,10 +1026,15 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
return 0;
fail:
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
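+ /* Remove the vcpu from the servers that were successfully updated */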
+ while ( id-- != 0 )
+ {
+ s = GET_IOREQ_SERVER(d, id);
+
+ if ( !s )
+ continue;
+
hvm_ioreq_server_remove_vcpu(s, v);
+ }
spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
@@ -1048,12 +1044,11 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
{
struct hvm_ioreq_server *s;
+ unsigned int id;
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
+ FOR_EACH_IOREQ_SERVER(d, id, s)
hvm_ioreq_server_remove_vcpu(s, v);
spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
@@ -1061,28 +1056,19 @@ void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
void hvm_destroy_all_ioreq_servers(struct domain *d)
{
- struct hvm_ioreq_server *s, *next;
+ struct hvm_ioreq_server *s;
+ unsigned int id;
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
/* No need to domain_pause() as the domain is being torn down */
- list_for_each_entry_safe ( s,
- next,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
+ FOR_EACH_IOREQ_SERVER(d, id, s)
{
- bool is_default = (s == d->arch.hvm_domain.default_ioreq_server);
-
- hvm_ioreq_server_disable(s, is_default);
-
- if ( is_default )
- d->arch.hvm_domain.default_ioreq_server = NULL;
-
- list_del(&s->list_entry);
-
- hvm_ioreq_server_deinit(s, is_default);
+ hvm_ioreq_server_disable(s);
+ hvm_ioreq_server_deinit(s);
+ set_ioreq_server(d, id, NULL);
xfree(s);
}
@@ -1117,7 +1103,7 @@ int hvm_set_dm_domain(struct domain *d, domid_t domid)
* still be set and thus, when the server is created, it will have
* the correct domid.
*/
- s = d->arch.hvm_domain.default_ioreq_server;
+ s = GET_IOREQ_SERVER(d, DEFAULT_IOSERVID);
if ( !s )
goto done;
@@ -1170,12 +1156,10 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
uint32_t cf8;
uint8_t type;
uint64_t addr;
-
- if ( list_empty(&d->arch.hvm_domain.ioreq_server.list) )
- return NULL;
+ unsigned int id;
if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
- return d->arch.hvm_domain.default_ioreq_server;
+ return GET_IOREQ_SERVER(d, DEFAULT_IOSERVID);
cf8 = d->arch.hvm_domain.pci_cf8;
@@ -1215,16 +1199,11 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
addr = p->addr;
}
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
+ FOR_EACH_IOREQ_SERVER(d, id, s)
{
struct rangeset *r;
- if ( s == d->arch.hvm_domain.default_ioreq_server )
- continue;
-
- if ( !s->enabled )
+ if ( IS_DEFAULT(s) || !s->enabled )
continue;
r = s->range[type];
@@ -1257,7 +1236,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
}
}
- return d->arch.hvm_domain.default_ioreq_server;
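+ /* No matching range: fall back to the default server (may be NULL) */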
+ return GET_IOREQ_SERVER(d, DEFAULT_IOSERVID);
}
static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
@@ -1416,13 +1395,13 @@ unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
{
struct domain *d = current->domain;
struct hvm_ioreq_server *s;
- unsigned int failed = 0;
+ unsigned int id, failed = 0;
- list_for_each_entry ( s,
- &d->arch.hvm_domain.ioreq_server.list,
- list_entry )
+ FOR_EACH_IOREQ_SERVER(d, id, s)
+ {
if ( hvm_send_ioreq(s, p, buffered) == X86EMUL_UNHANDLEABLE )
failed++;
+ }
return failed;
}
@@ -1442,7 +1421,6 @@ static int hvm_access_cf8(
void hvm_ioreq_init(struct domain *d)
{
spin_lock_init(&d->arch.hvm_domain.ioreq_server.lock);
- INIT_LIST_HEAD(&d->arch.hvm_domain.ioreq_server.list);
register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
}
@@ -60,7 +60,6 @@ struct hvm_ioreq_server {
/* Domain id of emulating domain */
domid_t domid;
- ioservid_t id;
struct hvm_ioreq_page ioreq;
struct list_head ioreq_vcpu_list;
struct hvm_ioreq_page bufioreq;
@@ -100,6 +99,9 @@ struct hvm_pi_ops {
void (*do_resume)(struct vcpu *v);
};
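+/*
+ * Slot DEFAULT_IOSERVID of the ioreq server array is reserved for the
+ * default (catch-all) ioreq server.
+ */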
+#define MAX_NR_IOREQ_SERVERS 8
+#define DEFAULT_IOSERVID 0
+
struct hvm_domain {
/* Guest page range used for non-default ioreq servers */
struct {
@@ -109,11 +111,9 @@ struct hvm_domain {
- /* Lock protects all other values in the sub-struct and the default */
+ /* Lock protects all other values in the sub-struct */
struct {
- spinlock_t lock;
- ioservid_t id;
- struct list_head list;
+ spinlock_t lock;
+ struct hvm_ioreq_server *server[MAX_NR_IOREQ_SERVERS];
} ioreq_server;
- struct hvm_ioreq_server *default_ioreq_server;
/* Cached CF8 for guest PCI config cycles */
uint32_t pci_cf8;