@@ -188,6 +188,14 @@ int xendevicemodel_get_ioreq_server_info(
data->id = id;
+ /*
+ * If the caller is not requesting gfn values then instruct the
+ * hypercall not to retrieve them as this may cause them to be
+ * mapped.
+ */
+ if (!ioreq_gfn && !bufioreq_gfn)
+ data->flags |= XEN_DMOP_no_gfns;
+
rc = xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
if (rc)
return rc;
@@ -61,11 +61,11 @@ int xendevicemodel_create_ioreq_server(
* @parm domid the domain id to be serviced
* @parm id the IOREQ Server id.
* @parm ioreq_gfn pointer to a xen_pfn_t to receive the synchronous ioreq
- * gfn
+ * gfn. (May be NULL if not required)
* @parm bufioreq_gfn pointer to a xen_pfn_t to receive the buffered ioreq
- * gfn
+ * gfn. (May be NULL if not required)
* @parm bufioreq_port pointer to a evtchn_port_t to receive the buffered
- * ioreq event channel
+ * ioreq event channel. (May be NULL if not required)
* @return 0 on success, -1 on failure.
*/
int xendevicemodel_get_ioreq_server_info(
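A minimal caller-side sketch (illustrative only, not part of this patch) of how an emulator that maps the ioreq pages via XENMEM_acquire_resource might now call this function: both gfn pointers are passed as NULL so the pages are never mapped into the guest's gfn space, and only the buffered ioreq event channel is returned. The variables dmod, domid and id are assumed to come from earlier xendevicemodel_open() / xendevicemodel_create_ioreq_server() calls.

    evtchn_port_t bufioreq_port;
    int rc;

    rc = xendevicemodel_get_ioreq_server_info(dmod, domid, id,
                                               NULL, /* ioreq_gfn not wanted */
                                               NULL, /* bufioreq_gfn not wanted */
                                               &bufioreq_port);
    if (rc)
        return rc; /* -1 on failure, per the comment above */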
@@ -416,16 +416,19 @@ static int dm_op(const struct dmop_args *op_args)
{
struct xen_dm_op_get_ioreq_server_info *data =
&op.u.get_ioreq_server_info;
+ const uint16_t valid_flags = XEN_DMOP_no_gfns;
const_op = false;
rc = -EINVAL;
- if ( data->pad )
+ if ( data->flags & ~valid_flags )
break;
rc = hvm_get_ioreq_server_info(d, data->id,
- &data->ioreq_gfn,
- &data->bufioreq_gfn,
+ (data->flags & XEN_DMOP_no_gfns) ?
+ NULL : &data->ioreq_gfn,
+ (data->flags & XEN_DMOP_no_gfns) ?
+ NULL : &data->bufioreq_gfn,
&data->bufioreq_port);
break;
}
@@ -350,6 +350,9 @@ static void hvm_update_ioreq_evtchn(struct hvm_ioreq_server *s,
}
}
+#define HANDLE_BUFIOREQ(s) \
+ ((s)->bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF)
+
static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
struct vcpu *v)
{
@@ -371,7 +374,7 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
sv->ioreq_evtchn = rc;
- if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
+ if ( v->vcpu_id == 0 && HANDLE_BUFIOREQ(s) )
{
struct domain *d = s->domain;
@@ -422,7 +425,7 @@ static void hvm_ioreq_server_remove_vcpu(struct hvm_ioreq_server *s,
list_del(&sv->list_entry);
- if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
+ if ( v->vcpu_id == 0 && HANDLE_BUFIOREQ(s) )
free_xen_event_channel(v->domain, s->bufioreq_evtchn);
free_xen_event_channel(v->domain, sv->ioreq_evtchn);
@@ -449,7 +452,7 @@ static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
list_del(&sv->list_entry);
- if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
+ if ( v->vcpu_id == 0 && HANDLE_BUFIOREQ(s) )
free_xen_event_channel(v->domain, s->bufioreq_evtchn);
free_xen_event_channel(v->domain, sv->ioreq_evtchn);
@@ -460,14 +463,13 @@ static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
spin_unlock(&s->lock);
}
-static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
- bool handle_bufioreq)
+static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s)
{
int rc;
rc = hvm_map_ioreq_gfn(s, false);
- if ( !rc && handle_bufioreq )
+ if ( !rc && HANDLE_BUFIOREQ(s) )
rc = hvm_map_ioreq_gfn(s, true);
if ( rc )
@@ -597,13 +599,7 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
if ( rc )
return rc;
- if ( bufioreq_handling == HVM_IOREQSRV_BUFIOREQ_ATOMIC )
- s->bufioreq_atomic = true;
-
- rc = hvm_ioreq_server_map_pages(
- s, bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF);
- if ( rc )
- goto fail_map;
+ s->bufioreq_handling = bufioreq_handling;
for_each_vcpu ( d, v )
{
@@ -618,9 +614,6 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
hvm_ioreq_server_remove_all_vcpus(s);
hvm_ioreq_server_unmap_pages(s);
- fail_map:
- hvm_ioreq_server_free_rangesets(s);
-
return rc;
}
@@ -757,12 +750,23 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
ASSERT(!IS_DEFAULT(s));
- *ioreq_gfn = gfn_x(s->ioreq.gfn);
+ if ( ioreq_gfn || bufioreq_gfn )
+ {
+ rc = hvm_ioreq_server_map_pages(s);
+ if ( rc )
+ goto out;
+ }
- if ( s->bufioreq.va != NULL )
+ if ( ioreq_gfn )
+ *ioreq_gfn = gfn_x(s->ioreq.gfn);
+
+ if ( HANDLE_BUFIOREQ(s) )
{
- *bufioreq_gfn = gfn_x(s->bufioreq.gfn);
- *bufioreq_port = s->bufioreq_evtchn;
+ if ( bufioreq_gfn )
+ *bufioreq_gfn = gfn_x(s->bufioreq.gfn);
+
+ if ( bufioreq_port )
+ *bufioreq_port = s->bufioreq_evtchn;
}
rc = 0;
@@ -1264,7 +1268,8 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
pg->ptrs.write_pointer += qw ? 2 : 1;
/* Canonicalize read/write pointers to prevent their overflow. */
- while ( s->bufioreq_atomic && qw++ < IOREQ_BUFFER_SLOT_NUM &&
+ while ( (s->bufioreq_handling == HVM_IOREQSRV_BUFIOREQ_ATOMIC) &&
+ qw++ < IOREQ_BUFFER_SLOT_NUM &&
pg->ptrs.read_pointer >= IOREQ_BUFFER_SLOT_NUM )
{
union bufioreq_pointers old = pg->ptrs, new;
@@ -69,7 +69,7 @@ struct hvm_ioreq_server {
evtchn_port_t bufioreq_evtchn;
struct rangeset *range[NR_IO_RANGE_TYPES];
bool enabled;
- bool bufioreq_atomic;
+ uint8_t bufioreq_handling;
};
/*
@@ -79,28 +79,34 @@ struct xen_dm_op_create_ioreq_server {
* XEN_DMOP_get_ioreq_server_info: Get all the information necessary to
* access IOREQ Server <id>.
*
- * The emulator needs to map the synchronous ioreq structures and buffered
- * ioreq ring (if it exists) that Xen uses to request emulation. These are
- * hosted in the target domain's gmfns <ioreq_gfn> and <bufioreq_gfn>
- * respectively. In addition, if the IOREQ Server is handling buffered
- * emulation requests, the emulator needs to bind to event channel
- * <bufioreq_port> to listen for them. (The event channels used for
- * synchronous emulation requests are specified in the per-CPU ioreq
- * structures in <ioreq_gfn>).
- * If the IOREQ Server is not handling buffered emulation requests then the
- * values handed back in <bufioreq_gfn> and <bufioreq_port> will both be 0.
+ * If the IOREQ Server is handling buffered emulation requests, the
+ * emulator needs to bind to event channel <bufioreq_port> to listen for
+ * them. (The event channels used for synchronous emulation requests are
+ * specified in the per-CPU ioreq structures).
+ * In addition, if the XENMEM_acquire_resource memory op cannot be used,
+ * the emulator will need to map the synchronous ioreq structures and
+ * buffered ioreq ring (if it exists) from guest memory. If <flags> does
+ * not contain XEN_DMOP_no_gfns then these pages will be made available and
+ * the frame numbers passed back in gfns <ioreq_gfn> and <bufioreq_gfn>
+ * respectively. (If the IOREQ Server is not handling buffered emulation
+ * requests, only <ioreq_gfn> will be valid.)
*/
#define XEN_DMOP_get_ioreq_server_info 2
struct xen_dm_op_get_ioreq_server_info {
/* IN - server id */
ioservid_t id;
- uint16_t pad;
+ /* IN - flags */
+ uint16_t flags;
+
+#define _XEN_DMOP_no_gfns 0
+#define XEN_DMOP_no_gfns (1u << _XEN_DMOP_no_gfns)
+
/* OUT - buffered ioreq port */
evtchn_port_t bufioreq_port;
- /* OUT - sync ioreq gfn */
+ /* OUT - sync ioreq gfn (see block comment above) */
uint64_aligned_t ioreq_gfn;
- /* OUT - buffered ioreq gfn */
+ /* OUT - buffered ioreq gfn (see block comment above) */
uint64_aligned_t bufioreq_gfn;
};