@@ -38,13 +38,13 @@ static void set_ioreq_server(struct domain *d, unsigned int id,
struct ioreq_server *s)
{
ASSERT(id < MAX_NR_IOREQ_SERVERS);
- ASSERT(!s || !d->arch.hvm.ioreq_server.server[id]);
+ ASSERT(!s || !d->ioreq_server.server[id]);
- d->arch.hvm.ioreq_server.server[id] = s;
+ d->ioreq_server.server[id] = s;
}
#define GET_IOREQ_SERVER(d, id) \
- (d)->arch.hvm.ioreq_server.server[id]
+ (d)->ioreq_server.server[id]
static struct ioreq_server *get_ioreq_server(const struct domain *d,
unsigned int id)
@@ -285,7 +285,7 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
unsigned int id;
bool found = false;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
FOR_EACH_IOREQ_SERVER(d, id, s)
{
@@ -296,7 +296,7 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
}
}
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return found;
}
@@ -606,7 +606,7 @@ int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
return -ENOMEM;
domain_pause(d);
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
{
@@ -634,13 +634,13 @@ int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
if ( id )
*id = i;
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
domain_unpause(d);
return 0;
fail:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
domain_unpause(d);
xfree(s);
@@ -652,7 +652,7 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
struct ioreq_server *s;
int rc;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
@@ -684,7 +684,7 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
rc = 0;
out:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return rc;
}
@@ -697,7 +697,7 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
struct ioreq_server *s;
int rc;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
@@ -731,7 +731,7 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
rc = 0;
out:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return rc;
}
@@ -744,7 +744,7 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
ASSERT(is_hvm_domain(d));
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
@@ -782,7 +782,7 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
}
out:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return rc;
}
@@ -798,7 +798,7 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
if ( start > end )
return -EINVAL;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
@@ -834,7 +834,7 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
rc = rangeset_add_range(r, start, end);
out:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return rc;
}
@@ -850,7 +850,7 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
if ( start > end )
return -EINVAL;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
@@ -886,7 +886,7 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
rc = rangeset_remove_range(r, start, end);
out:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return rc;
}
@@ -911,7 +911,7 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
if ( flags & ~XEN_DMOP_IOREQ_MEM_ACCESS_WRITE )
return -EINVAL;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
@@ -926,7 +926,7 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
rc = arch_ioreq_server_map_mem_type(d, s, flags);
out:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
if ( rc == 0 )
arch_ioreq_server_map_mem_type_completed(d, s, flags);
@@ -940,7 +940,7 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
struct ioreq_server *s;
int rc;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
@@ -964,7 +964,7 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
rc = 0;
out:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return rc;
}
@@ -974,7 +974,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
unsigned int id;
int rc;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
FOR_EACH_IOREQ_SERVER(d, id, s)
{
@@ -983,7 +983,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
goto fail;
}
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return 0;
@@ -998,7 +998,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
hvm_ioreq_server_remove_vcpu(s, v);
}
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return rc;
}
@@ -1008,12 +1008,12 @@ void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
struct ioreq_server *s;
unsigned int id;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
FOR_EACH_IOREQ_SERVER(d, id, s)
hvm_ioreq_server_remove_vcpu(s, v);
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
}
void hvm_destroy_all_ioreq_servers(struct domain *d)
@@ -1024,7 +1024,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
if ( !arch_ioreq_server_destroy_all(d) )
return;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
/* No need to domain_pause() as the domain is being torn down */
@@ -1042,7 +1042,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
xfree(s);
}
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
}
struct ioreq_server *hvm_select_ioreq_server(struct domain *d,
@@ -1274,7 +1274,7 @@ unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
void hvm_ioreq_init(struct domain *d)
{
- spin_lock_init(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_init(&d->ioreq_server.lock);
arch_ioreq_domain_init(d);
}
@@ -63,8 +63,6 @@ struct hvm_pi_ops {
void (*vcpu_block)(struct vcpu *);
};
-#define MAX_NR_IOREQ_SERVERS 8
-
struct hvm_domain {
/* Guest page range used for non-default ioreq servers */
struct {
@@ -73,12 +71,6 @@ struct hvm_domain {
unsigned long legacy_mask; /* indexed by HVM param number */
} ioreq_gfn;
- /* Lock protects all other values in the sub-struct and the default */
- struct {
- spinlock_t lock;
- struct ioreq_server *server[MAX_NR_IOREQ_SERVERS];
- } ioreq_server;
-
/* Cached CF8 for guest PCI config cycles */
uint32_t pci_cf8;
@@ -318,6 +318,8 @@ struct sched_unit {
struct evtchn_port_ops;
+#define MAX_NR_IOREQ_SERVERS 8
+
struct domain
{
domid_t domain_id;
@@ -533,6 +535,14 @@ struct domain
struct {
unsigned int val;
} teardown;
+
+#ifdef CONFIG_IOREQ_SERVER
+ /* Lock protects all other values in the sub-struct */
+ struct {
+ spinlock_t lock;
+ struct ioreq_server *server[MAX_NR_IOREQ_SERVERS];
+ } ioreq_server;
+#endif
};
static inline struct page_list_head *page_to_list(
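
For reference, a minimal standalone sketch (not Xen code) of the layout the patch moves into the common struct domain: a fixed-size array of ioreq server pointers guarded by one per-domain lock, mirroring set_ioreq_server() and get_ioreq_server() in the hunks above. All demo_* names are hypothetical, and a plain pthread mutex stands in for Xen's recursive spinlock; build with cc -pthread.

/* Simplified model of the per-domain ioreq server bookkeeping. */
#include <assert.h>
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_NR_IOREQ_SERVERS 8

struct demo_ioreq_server {
    unsigned int id;
};

struct demo_domain {
    /* Lock protects all other values in the sub-struct. */
    struct {
        pthread_mutex_t lock;   /* plain mutex in place of Xen's recursive spinlock */
        struct demo_ioreq_server *server[MAX_NR_IOREQ_SERVERS];
    } ioreq_server;
};

/* Bind a server to a slot; a slot may only be populated while it is empty. */
static void demo_set_ioreq_server(struct demo_domain *d, unsigned int id,
                                  struct demo_ioreq_server *s)
{
    assert(id < MAX_NR_IOREQ_SERVERS);
    assert(!s || !d->ioreq_server.server[id]);

    d->ioreq_server.server[id] = s;
}

/* Look a server up by id, returning NULL for out-of-range or empty slots. */
static struct demo_ioreq_server *demo_get_ioreq_server(struct demo_domain *d,
                                                       unsigned int id)
{
    return id < MAX_NR_IOREQ_SERVERS ? d->ioreq_server.server[id] : NULL;
}

int main(void)
{
    struct demo_domain d = { 0 };
    struct demo_ioreq_server s = { .id = 0 };

    pthread_mutex_init(&d.ioreq_server.lock, NULL);

    /* Take the per-domain lock around any access to the server array. */
    pthread_mutex_lock(&d.ioreq_server.lock);
    demo_set_ioreq_server(&d, 0, &s);
    printf("slot 0 -> server %u\n", demo_get_ioreq_server(&d, 0)->id);
    pthread_mutex_unlock(&d.ioreq_server.lock);

    return 0;
}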