@@ -260,6 +260,19 @@ static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
int rc;
+ if ( iorp->page )
+ {
+ /*
+ * If a page has already been allocated (which will happen on
+ * demand if hvm_get_ioreq_server_frame() is called), then
+ * mapping a guest frame is not permitted.
+ */
+ if ( gfn_eq(iorp->gfn, INVALID_GFN) )
+ return -EPERM;
+
+ return 0;
+ }
+
if ( d->is_dying )
return -EINVAL;
@@ -282,6 +295,61 @@ static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
return rc;
}
+static int hvm_alloc_ioreq_mfn(struct hvm_ioreq_server *s, bool buf)
+{
+ struct domain *currd = current->domain;
+ struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
+
+ if ( iorp->page )
+ {
+ /*
+ * If a guest frame has already been mapped (which may happen
+ * on demand if hvm_get_ioreq_server_info() is called), then
+ * allocating a page is not permitted.
+ */
+ if ( !gfn_eq(iorp->gfn, INVALID_GFN) )
+ return -EPERM;
+
+ return 0;
+ }
+
+ /*
+ * Allocated IOREQ server pages are assigned to the emulating
+ * domain, not the target domain, since the emulator is likely to
+ * be destroyed after the target domain has been torn down.
+ * MEMF_no_refcount is used so that the allocation does not fail
+ * merely because the emulating domain has already reached its
+ * maximum allocation.
+ */
+ iorp->page = alloc_domheap_page(currd, MEMF_no_refcount);
+ if ( !iorp->page )
+ return -ENOMEM;
+
+ iorp->va = __map_domain_page_global(iorp->page);
+ if ( !iorp->va )
+ {
+ /* Don't leak the freshly allocated page if the mapping fails. */
+ put_page(iorp->page);
+ iorp->page = NULL;
+ return -ENOMEM;
+ }
+
+ clear_page(iorp->va);
+ return 0;
+}
+
+static void hvm_free_ioreq_mfn(struct hvm_ioreq_server *s, bool buf)
+{
+ struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
+
+ if ( !iorp->page )
+ return;
+
+ unmap_domain_page_global(iorp->va);
+ iorp->va = NULL;
+
+ put_page(iorp->page);
+ iorp->page = NULL;
+}
+
bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
{
const struct hvm_ioreq_server *s;
@@ -488,6 +556,27 @@ static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s)
hvm_unmap_ioreq_gfn(s, false);
}
+static int hvm_ioreq_server_alloc_pages(struct hvm_ioreq_server *s)
+{
+ int rc;
+
+ rc = hvm_alloc_ioreq_mfn(s, false);
+
+ if ( !rc && (s->bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF) )
+ rc = hvm_alloc_ioreq_mfn(s, true);
+
+ if ( rc )
+ hvm_free_ioreq_mfn(s, false);
+
+ return rc;
+}
+
+static void hvm_ioreq_server_free_pages(struct hvm_ioreq_server *s)
+{
+ hvm_free_ioreq_mfn(s, true);
+ hvm_free_ioreq_mfn(s, false);
+}
+
static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s)
{
unsigned int i;
@@ -614,7 +703,18 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
fail_add:
hvm_ioreq_server_remove_all_vcpus(s);
+
+ /*
+ * NOTE: It is safe to call both hvm_ioreq_server_unmap_pages() and
+ * hvm_ioreq_server_free_pages() in that order.
+ * This is because the former will do nothing if the pages
+ * are not mapped, leaving the pages to be freed by the latter.
+ * However, if the pages are mapped then the former will set
+ * the page_info pointer to NULL, meaning the latter will do
+ * nothing.
+ */
hvm_ioreq_server_unmap_pages(s);
+ hvm_ioreq_server_free_pages(s);
return rc;
}
@@ -624,6 +724,7 @@ static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s)
ASSERT(!s->enabled);
hvm_ioreq_server_remove_all_vcpus(s);
hvm_ioreq_server_unmap_pages(s);
+ hvm_ioreq_server_free_pages(s);
hvm_ioreq_server_free_rangesets(s);
}
@@ -762,7 +863,8 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
goto out;
}
- *ioreq_gfn = gfn_x(s->ioreq.gfn);
+ if ( ioreq_gfn )
+ *ioreq_gfn = gfn_x(s->ioreq.gfn);
if ( HANDLE_BUFIOREQ(s) )
{
@@ -780,6 +882,33 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
return rc;
}
+mfn_t hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
+ unsigned int idx)
+{
+ struct hvm_ioreq_server *s;
+ mfn_t mfn = INVALID_MFN;
+
+ spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+
+ if ( id >= MAX_NR_IOREQ_SERVERS )
+ goto out;
+
+ s = get_ioreq_server(d, id);
+
+ if ( !s || IS_DEFAULT(s) )
+ goto out;
+
+ if ( hvm_ioreq_server_alloc_pages(s) )
+ goto out;
+
+ /* idx 0 is the buffered frame (if any), idx 1 the synchronous frame. */
+ if ( idx == 0 && HANDLE_BUFIOREQ(s) )
+ mfn = _mfn(page_to_mfn(s->bufioreq.page));
+ else if ( idx == 1 )
+ mfn = _mfn(page_to_mfn(s->ioreq.page));
+
+ out:
+ spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+
+ return mfn;
+}
+
int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
uint32_t type, uint64_t start,
uint64_t end)
@@ -122,6 +122,7 @@
#include <asm/fixmap.h>
#include <asm/io_apic.h>
#include <asm/pci.h>
+#include <asm/hvm/ioreq.h>
#include <asm/hvm/grant_table.h>
#include <asm/pv/grant_table.h>
@@ -4795,6 +4796,27 @@ static int xenmem_acquire_grant_table(struct domain *d,
return 0;
}
+static int xenmem_acquire_ioreq_server(struct domain *d,
+ unsigned int id,
+ unsigned long frame,
+ unsigned long nr_frames,
+ unsigned long mfn_list[])
+{
+ unsigned int i;
+
+ for ( i = 0; i < nr_frames; i++ )
+ {
+ mfn_t mfn = hvm_get_ioreq_server_frame(d, id, frame + i);
+
+ if ( mfn_eq(mfn, INVALID_MFN) )
+ return -EINVAL;
+
+ mfn_list[i] = mfn_x(mfn);
+ }
+
+ return 0;
+}
+
static int xenmem_acquire_resource(xen_mem_acquire_resource_t *xmar)
{
struct domain *d, *currd = current->domain;
@@ -4829,6 +4851,11 @@ static int xenmem_acquire_resource(xen_mem_acquire_resource_t *xmar)
mfn_list);
break;
+ case XENMEM_resource_ioreq_server:
+ rc = xenmem_acquire_ioreq_server(d, xmar->id, xmar->frame,
+ xmar->nr_frames, mfn_list);
+ break;
+
default:
rc = -EOPNOTSUPP;
break;
@@ -31,6 +31,12 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
unsigned long *ioreq_gfn,
unsigned long *bufioreq_gfn,
evtchn_port_t *bufioreq_port);
+/*
+ * Get the mfn of either the buffered or synchronous ioreq frame.
+ * (idx == 0 -> buffered, idx == 1 -> synchronous).
+ */
+mfn_t hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
+ unsigned int idx);
int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
uint32_t type, uint64_t start,
uint64_t end);
@@ -90,6 +90,10 @@ struct xen_dm_op_create_ioreq_server {
* the frame numbers passed back in gfns <ioreq_gfn> and <bufioreq_gfn>
* respectively. (If the IOREQ Server is not handling buffered emulation
* only <ioreq_gfn> will be valid).
+ *
+ * NOTE: To access the synchronous ioreq structures and buffered ioreq
+ * ring, it is preferable to use the XENMEM_acquire_resource memory
+ * op specifying resource type XENMEM_resource_ioreq_server.
*/
#define XEN_DMOP_get_ioreq_server_info 2
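As a rough sketch of the alternative recommended in the NOTE above (illustrative only, not part of this patch): an emulator holding the ioreq server id returned by XEN_DMOP_create_ioreq_server could populate xen_mem_acquire_resource itself rather than mapping the gfns reported by XEN_DMOP_get_ioreq_server_info. The domid and ioservid variables and the issue_memory_op() wrapper are placeholders; setting up the frame_list guest handle is toolstack-specific and elided.

    xen_pfn_t frames[2];                       /* filled in on completion */
    xen_mem_acquire_resource_t xmar = {
        .domid = domid,                        /* target (guest) domain */
        .type = XENMEM_resource_ioreq_server,
        .id = ioservid,                        /* ioreq server id */
        .frame = 0,                            /* first frame of the resource */
        .nr_frames = 2,                        /* buffered + synchronous pages */
    };

    /*
     * Placeholder: bounce 'frames' into a guest handle, assign it to
     * xmar.frame_list, then issue XENMEM_acquire_resource, e.g.
     * rc = issue_memory_op(XENMEM_acquire_resource, &xmar);
     */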
@@ -664,10 +664,13 @@ struct xen_mem_acquire_resource {
uint16_t type;
#define XENMEM_resource_grant_table 0
+#define XENMEM_resource_ioreq_server 1
/*
* IN - a type-specific resource identifier, which must be zero
* unless stated otherwise.
+ *
+ * type == XENMEM_resource_ioreq_server -> id == ioreq server id
*/
uint32_t id;
/* IN - number of (4K) frames of the resource to be mapped */
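For this resource type, the frames follow the idx convention of hvm_get_ioreq_server_frame() above: frame 0 is the buffered ioreq page (when the server handles buffered requests) and frame 1 is the synchronous ioreq page. A consumer might name these indices itself; the constants below are illustrative only and are not defined by this interface.

    /* Illustrative only; not part of this header. */
    #define IOREQ_SERVER_FRAME_BUFIOREQ 0
    #define IOREQ_SERVER_FRAME_IOREQ    1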