@@ -188,6 +188,14 @@ int xendevicemodel_get_ioreq_server_info(
data->id = id;
+ /*
+ * If the caller is not requesting gfn values then instruct the
+ * hypercall not to retrieve them, since retrieving them may cause
+ * the pages to be mapped.
+ */
+ if (!ioreq_gfn && !bufioreq_gfn)
+ data->flags |= XEN_DMOP_no_gfns;
+
rc = xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
if (rc)
return rc;
@@ -61,11 +61,11 @@ int xendevicemodel_create_ioreq_server(
* @parm domid the domain id to be serviced
* @parm id the IOREQ Server id.
* @parm ioreq_gfn pointer to a xen_pfn_t to receive the synchronous ioreq
- * gfn
+ * gfn. (May be NULL if not required)
* @parm bufioreq_gfn pointer to a xen_pfn_t to receive the buffered ioreq
- * gfn
+ * gfn. (May be NULL if not required)
* @parm bufioreq_port pointer to a evtchn_port_t to receive the buffered
- * ioreq event channel
+ * ioreq event channel. (May be NULL if not required)
* @return 0 on success, -1 on failure.
*/
int xendevicemodel_get_ioreq_server_info(
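To illustrate the NULL-pointer convention documented above, a caller that only needs the buffered ioreq event channel (for instance because it obtains the ioreq pages by some other means) might invoke the call roughly as sketched below; the dmod handle, domid and id are assumed to have been set up beforehand:

    evtchn_port_t bufioreq_port;
    int rc;

    /*
     * Passing NULL for both gfn pointers makes the library set
     * XEN_DMOP_no_gfns, so the hypervisor will neither map nor
     * report the ioreq gfns.
     */
    rc = xendevicemodel_get_ioreq_server_info(dmod, domid, id,
                                              NULL, NULL,
                                              &bufioreq_port);
    if (rc < 0)
        return rc;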
@@ -418,16 +418,19 @@ static int dm_op(const struct dmop_args *op_args)
{
struct xen_dm_op_get_ioreq_server_info *data =
&op.u.get_ioreq_server_info;
+ const uint16_t valid_flags = XEN_DMOP_no_gfns;
const_op = false;
rc = -EINVAL;
- if ( data->pad )
+ if ( data->flags & ~valid_flags )
break;
rc = hvm_get_ioreq_server_info(d, data->id,
- &data->ioreq_gfn,
- &data->bufioreq_gfn,
+ (data->flags & XEN_DMOP_no_gfns) ?
+ NULL : &data->ioreq_gfn,
+ (data->flags & XEN_DMOP_no_gfns) ?
+ NULL : &data->bufioreq_gfn,
&data->bufioreq_port);
break;
}
@@ -354,6 +354,9 @@ static void hvm_update_ioreq_evtchn(struct hvm_ioreq_server *s,
}
}
+#define HANDLE_BUFIOREQ(s) \
+    ((s)->bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF)
+
static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
struct vcpu *v)
{
@@ -375,7 +378,7 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
sv->ioreq_evtchn = rc;
- if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
+ if ( v->vcpu_id == 0 && HANDLE_BUFIOREQ(s) )
{
struct domain *d = s->domain;
@@ -426,7 +429,7 @@ static void hvm_ioreq_server_remove_vcpu(struct hvm_ioreq_server *s,
list_del(&sv->list_entry);
- if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
+ if ( v->vcpu_id == 0 && HANDLE_BUFIOREQ(s) )
free_xen_event_channel(v->domain, s->bufioreq_evtchn);
free_xen_event_channel(v->domain, sv->ioreq_evtchn);
@@ -453,7 +456,7 @@ static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
list_del(&sv->list_entry);
- if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
+ if ( v->vcpu_id == 0 && HANDLE_BUFIOREQ(s) )
free_xen_event_channel(v->domain, s->bufioreq_evtchn);
free_xen_event_channel(v->domain, sv->ioreq_evtchn);
@@ -464,14 +467,13 @@ static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
spin_unlock(&s->lock);
}
-static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
- bool handle_bufioreq)
+static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s)
{
int rc;
rc = hvm_map_ioreq_gfn(s, false);
- if ( !rc && handle_bufioreq )
+ if ( !rc && HANDLE_BUFIOREQ(s) )
rc = hvm_map_ioreq_gfn(s, true);
if ( rc )
@@ -599,13 +601,7 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
if ( rc )
return rc;
- if ( bufioreq_handling == HVM_IOREQSRV_BUFIOREQ_ATOMIC )
- s->bufioreq_atomic = true;
-
- rc = hvm_ioreq_server_map_pages(
- s, bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF);
- if ( rc )
- goto fail_map;
+ s->bufioreq_handling = bufioreq_handling;
for_each_vcpu ( d, v )
{
@@ -620,9 +616,6 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
hvm_ioreq_server_remove_all_vcpus(s);
hvm_ioreq_server_unmap_pages(s);
- fail_map:
- hvm_ioreq_server_free_rangesets(s);
-
return rc;
}
@@ -762,11 +755,20 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
if ( IS_DEFAULT(s) )
goto out;
+ if ( ioreq_gfn || bufioreq_gfn )
+ {
+ rc = hvm_ioreq_server_map_pages(s);
+ if ( rc )
+ goto out;
+ }
+
-    *ioreq_gfn = gfn_x(s->ioreq.gfn);
+    if ( ioreq_gfn )
+        *ioreq_gfn = gfn_x(s->ioreq.gfn);
- if ( s->bufioreq.va != NULL )
+ if ( HANDLE_BUFIOREQ(s) )
{
- *bufioreq_gfn = gfn_x(s->bufioreq.gfn);
+ if ( bufioreq_gfn )
+ *bufioreq_gfn = gfn_x(s->bufioreq.gfn);
+
*bufioreq_port = s->bufioreq_evtchn;
}
@@ -1280,7 +1282,8 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
pg->ptrs.write_pointer += qw ? 2 : 1;
/* Canonicalize read/write pointers to prevent their overflow. */
- while ( s->bufioreq_atomic && qw++ < IOREQ_BUFFER_SLOT_NUM &&
+ while ( (s->bufioreq_handling == HVM_IOREQSRV_BUFIOREQ_ATOMIC) &&
+ qw++ < IOREQ_BUFFER_SLOT_NUM &&
pg->ptrs.read_pointer >= IOREQ_BUFFER_SLOT_NUM )
{
union bufioreq_pointers old = pg->ptrs, new;
@@ -68,8 +68,8 @@ struct hvm_ioreq_server {
spinlock_t bufioreq_lock;
evtchn_port_t bufioreq_evtchn;
struct rangeset *range[NR_IO_RANGE_TYPES];
+ int bufioreq_handling;
bool enabled;
- bool bufioreq_atomic;
};
/*
@@ -79,28 +79,34 @@ struct xen_dm_op_create_ioreq_server {
* XEN_DMOP_get_ioreq_server_info: Get all the information necessary to
* access IOREQ Server <id>.
*
- * The emulator needs to map the synchronous ioreq structures and buffered
- * ioreq ring (if it exists) that Xen uses to request emulation. These are
- * hosted in the target domain's gmfns <ioreq_gfn> and <bufioreq_gfn>
- * respectively. In addition, if the IOREQ Server is handling buffered
- * emulation requests, the emulator needs to bind to event channel
- * <bufioreq_port> to listen for them. (The event channels used for
- * synchronous emulation requests are specified in the per-CPU ioreq
- * structures in <ioreq_gfn>).
- * If the IOREQ Server is not handling buffered emulation requests then the
- * values handed back in <bufioreq_gfn> and <bufioreq_port> will both be 0.
+ * If the IOREQ Server is handling buffered emulation requests, the
+ * emulator needs to bind to event channel <bufioreq_port> to listen for
+ * them. (The event channels used for synchronous emulation requests are
+ * specified in the per-CPU ioreq structures).
+ * In addition, if the XENMEM_acquire_resource memory op cannot be used,
+ * the emulator will need to map the synchronous ioreq structures and
+ * buffered ioreq ring (if it exists) from guest memory. If <flags> does
+ * not contain XEN_DMOP_no_gfns then these pages will be made available and
+ * the frame numbers passed back in gfns <ioreq_gfn> and <bufioreq_gfn>
+ * respectively. (If the IOREQ Server is not handling buffered emulation
+ * requests then only <ioreq_gfn> will be valid).
*/
#define XEN_DMOP_get_ioreq_server_info 2
struct xen_dm_op_get_ioreq_server_info {
/* IN - server id */
ioservid_t id;
- uint16_t pad;
+ /* IN - flags */
+ uint16_t flags;
+
+#define _XEN_DMOP_no_gfns 0
+#define XEN_DMOP_no_gfns (1u << _XEN_DMOP_no_gfns)
+
/* OUT - buffered ioreq port */
evtchn_port_t bufioreq_port;
- /* OUT - sync ioreq gfn */
+ /* OUT - sync ioreq gfn (see block comment above) */
uint64_aligned_t ioreq_gfn;
- /* OUT - buffered ioreq gfn */
+    /* OUT - buffered ioreq gfn (see block comment above) */
uint64_aligned_t bufioreq_gfn;
};
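As a rough sketch (not part of this patch) of how an emulator issuing the raw DMOP might use the new flag: assuming <id> holds the server id returned by XEN_DMOP_create_ioreq_server and that the emulator acquires the ioreq pages via XENMEM_acquire_resource instead, it could fill in the op as below; the hypercall invocation itself is elided:

    struct xen_dm_op op = {
        .op = XEN_DMOP_get_ioreq_server_info,
    };
    struct xen_dm_op_get_ioreq_server_info *info =
        &op.u.get_ioreq_server_info;

    info->id = id;
    /* Do not map or report the ioreq gfns; only the event channel is wanted. */
    info->flags |= XEN_DMOP_no_gfns;

    /* ... issue the dm_op hypercall carrying <op> ... */

    /* On success info->bufioreq_port is valid; the gfn fields are not. */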