@@ -259,6 +259,19 @@ static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
int rc;
+ if ( iorp->page )
+ {
+ /*
+ * If a page has already been allocated (which will happen on
+ * demand if hvm_get_ioreq_server_frame() is called), then
+ * mapping a guest frame is not permitted.
+ */
+ if ( gfn_eq(iorp->gfn, INVALID_GFN) )
+ return -EPERM;
+
+ return 0;
+ }
+
if ( d->is_dying )
return -EINVAL;
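
For illustration (not part of the patch): after this change a struct hvm_ioreq_page is in one of two mutually exclusive states, which the check above and its mirror in hvm_alloc_ioreq_mfn() below enforce. A minimal sketch of those states, using hypothetical helper names:

    /* Sketch only: helper names are illustrative, not part of the patch. */
    static inline bool ioreq_page_is_xen_allocated(const struct hvm_ioreq_page *iorp)
    {
        /* Allocated on demand by hvm_get_ioreq_server_frame(): page set, no gfn. */
        return iorp->page && gfn_eq(iorp->gfn, INVALID_GFN);
    }

    static inline bool ioreq_page_is_guest_mapped(const struct hvm_ioreq_page *iorp)
    {
        /* Mapped from a guest frame via the legacy path: page set, gfn valid. */
        return iorp->page && !gfn_eq(iorp->gfn, INVALID_GFN);
    }
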
@@ -281,6 +294,69 @@ static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
return rc;
}
+static int hvm_alloc_ioreq_mfn(struct hvm_ioreq_server *s, bool buf)
+{
+ struct domain *currd = current->domain;
+ struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
+
+ if ( iorp->page )
+ {
+ /*
+ * If a guest frame has already been mapped (which may happen
+ * on demand if hvm_get_ioreq_server_info() is called), then
+ * allocating a page is not permitted.
+ */
+ if ( !gfn_eq(iorp->gfn, INVALID_GFN) )
+ return -EPERM;
+
+ return 0;
+ }
+
+ /*
+ * Allocated IOREQ server pages are assigned to the emulating
+ * domain, not the target domain, because the emulator is likely to
+ * be destroyed after the target domain has been torn down.
+ *
+ * MEMF_no_refcount is passed so that the allocation does not fail
+ * simply because the emulating domain has already reached its
+ * maximum allocation.
+ */
+ iorp->page = alloc_domheap_page(currd, MEMF_no_refcount);
+ if ( !iorp->page )
+ return -ENOMEM;
+
+ if ( !get_page_type(iorp->page, PGT_writable_page) )
+ {
+ put_page(iorp->page);
+ iorp->page = NULL;
+ return -ENOMEM;
+ }
+
+ iorp->va = __map_domain_page_global(iorp->page);
+ if ( !iorp->va )
+ {
+ put_page_and_type(iorp->page);
+ iorp->page = NULL;
+ return -ENOMEM;
+ }
+
+ clear_page(iorp->va);
+ return 0;
+}
+
+static void hvm_free_ioreq_mfn(struct hvm_ioreq_server *s, bool buf)
+{
+ struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
+
+ if ( !iorp->page )
+ return;
+
+ unmap_domain_page_global(iorp->va);
+ iorp->va = NULL;
+
+ put_page_and_type(iorp->page);
+ iorp->page = NULL;
+}
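
The setup and teardown above mirror each other; the following summary (illustrative only, not part of the patch) shows how each step taken in hvm_alloc_ioreq_mfn() is undone:

    /*
     * hvm_alloc_ioreq_mfn():  alloc_domheap_page(currd, MEMF_no_refcount)
     *                         get_page_type(page, PGT_writable_page)
     *                         __map_domain_page_global(page)
     *
     * hvm_free_ioreq_mfn():   unmap_domain_page_global(va)
     *                         put_page_and_type(page)
     *
     * The error paths in hvm_alloc_ioreq_mfn() unwind only the steps
     * already taken: put_page() if acquiring the writable type fails,
     * put_page_and_type() if the global mapping fails.
     */
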
+
bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
{
const struct hvm_ioreq_server *s;
@@ -484,6 +560,27 @@ static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s)
hvm_unmap_ioreq_gfn(s, false);
}
+static int hvm_ioreq_server_alloc_pages(struct hvm_ioreq_server *s)
+{
+ int rc;
+
+ rc = hvm_alloc_ioreq_mfn(s, false);
+
+ if ( !rc && (s->bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF) )
+ rc = hvm_alloc_ioreq_mfn(s, true);
+
+ if ( rc )
+ hvm_free_ioreq_mfn(s, false);
+
+ return rc;
+}
+
+static void hvm_ioreq_server_free_pages(struct hvm_ioreq_server *s)
+{
+ hvm_free_ioreq_mfn(s, true);
+ hvm_free_ioreq_mfn(s, false);
+}
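
A short call-flow summary (illustrative only, not part of the patch) of how the two helpers above are reached:

    /*
     * hvm_get_ioreq_server_frame()          <- XENMEM_acquire_resource
     *     hvm_ioreq_server_alloc_pages()    (on demand; a repeat call is a
     *         hvm_alloc_ioreq_mfn()          no-op because the pages exist)
     *
     * hvm_ioreq_server_deinit()
     * hvm_ioreq_server_init() failure path
     *     hvm_ioreq_server_free_pages()     (safe even if nothing was allocated)
     *         hvm_free_ioreq_mfn()
     */
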
+
static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s)
{
unsigned int i;
@@ -612,7 +709,18 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
fail_add:
hvm_ioreq_server_remove_all_vcpus(s);
+
+ /*
+ * NOTE: It is safe to call both hvm_ioreq_server_unmap_pages() and
+ * hvm_ioreq_server_free_pages() in that order.
+ * This is because the former will do nothing if the pages
+ * are not mapped, leaving the page to be freed by the latter.
+ * However if the pages are mapped then the former will set
+ * the page_info pointer to NULL, meaning the latter will do
+ * nothing.
+ */
hvm_ioreq_server_unmap_pages(s);
+ hvm_ioreq_server_free_pages(s);
return rc;
}
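
The two cases covered by the NOTE above, restated as a table (illustrative only, not part of the patch):

    /*
     * state at fail_add        hvm_ioreq_server_unmap_pages()   hvm_ioreq_server_free_pages()
     * -----------------------  -------------------------------  -----------------------------
     * guest frames mapped      removes the mappings and clears  sees page == NULL,
     *                          the page_info pointers           so does nothing
     * pages allocated, no gfn  nothing is mapped,               frees the pages
     *                          so does nothing
     */
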
@@ -622,6 +730,7 @@ static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s)
ASSERT(!s->enabled);
hvm_ioreq_server_remove_all_vcpus(s);
hvm_ioreq_server_unmap_pages(s);
+ hvm_ioreq_server_free_pages(s);
hvm_ioreq_server_free_rangesets(s);
}
@@ -777,6 +886,51 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
return rc;
}
+int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
+ unsigned long idx, mfn_t *mfn)
+{
+ struct hvm_ioreq_server *s;
+ int rc;
+
+ spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+
+ if ( id == DEFAULT_IOSERVID )
+ {
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ s = get_ioreq_server(d, id);
+
+ ASSERT(!IS_DEFAULT(s));
+
+ rc = -ENOENT;
+ if ( !s )
+ goto out;
+
+ rc = hvm_ioreq_server_alloc_pages(s);
+ if ( rc )
+ goto out;
+
+ switch ( idx )
+ {
+ case XENMEM_resource_ioreq_server_frame_bufioreq:
+ rc = -ENOENT;
+ if ( !HANDLE_BUFIOREQ(s) )
+ goto out;
+
+ *mfn = _mfn(page_to_mfn(s->bufioreq.page));
+ rc = 0;
+ break;
+
+ case XENMEM_resource_ioreq_server_frame_ioreq:
+ *mfn = _mfn(page_to_mfn(s->ioreq.page));
+ break;
+
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ out:
+ spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+
+ return rc;
+}
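
Illustrative only: given the XENMEM_resource_ioreq_server_frame_* values defined in the public header below, an emulator would typically request both frames when the server handles buffered requests, and only the synchronous page otherwise (the buffered frame yields -ENOENT in that case):

    /* Sketch: have_bufioreq is a hypothetical flag known to the emulator. */
    unsigned long frame = have_bufioreq
                          ? XENMEM_resource_ioreq_server_frame_bufioreq  /* 0 */
                          : XENMEM_resource_ioreq_server_frame_ioreq;    /* 1 */
    unsigned long nr_frames = have_bufioreq ? 2 : 1;
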
+
int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
uint32_t type, uint64_t start,
uint64_t end)
@@ -122,6 +122,7 @@
#include <asm/fixmap.h>
#include <asm/io_apic.h>
#include <asm/pci.h>
+#include <asm/hvm/ioreq.h>
#include <asm/hvm/grant_table.h>
#include <asm/pv/grant_table.h>
@@ -3866,6 +3867,27 @@ int xenmem_add_to_physmap_one(
return rc;
}
+int xenmem_acquire_ioreq_server(struct domain *d, unsigned int id,
+ unsigned long frame,
+ unsigned long nr_frames,
+ unsigned long mfn_list[])
+{
+ unsigned int i;
+
+ for ( i = 0; i < nr_frames; i++ )
+ {
+ mfn_t mfn;
+ int rc = hvm_get_ioreq_server_frame(d, id, frame + i, &mfn);
+
+ if ( rc )
+ return rc;
+
+ mfn_list[i] = mfn_x(mfn);
+ }
+
+ return 0;
+}
+
long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
{
int rc;
@@ -987,6 +987,11 @@ static int acquire_resource(const xen_mem_acquire_resource_t *xmar)
switch ( xmar->type )
{
+ case XENMEM_resource_ioreq_server:
+ rc = xenmem_acquire_ioreq_server(d, xmar->id, xmar->frame,
+ xmar->nr_frames, mfn_list);
+ break;
+
default:
rc = -EOPNOTSUPP;
break;
@@ -31,6 +31,8 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
unsigned long *ioreq_gfn,
unsigned long *bufioreq_gfn,
evtchn_port_t *bufioreq_port);
+int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
+ unsigned long idx, mfn_t *mfn);
int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
uint32_t type, uint64_t start,
uint64_t end);
@@ -615,4 +615,9 @@ static inline bool arch_mfn_in_directmap(unsigned long mfn)
return mfn <= (virt_to_mfn(eva - 1) + 1);
}
+int xenmem_acquire_ioreq_server(struct domain *d, unsigned int id,
+ unsigned long frame,
+ unsigned long nr_frames,
+ unsigned long mfn_list[]);
+
#endif /* __ASM_X86_MM_H__ */
@@ -90,6 +90,10 @@ struct xen_dm_op_create_ioreq_server {
* the frame numbers passed back in gfns <ioreq_gfn> and <bufioreq_gfn>
* respectively. (If the IOREQ Server is not handling buffered emulation
* only <ioreq_gfn> will be valid).
+ *
+ * NOTE: To access the synchronous ioreq structures and buffered ioreq
+ * ring, it is preferable to use the XENMEM_acquire_resource memory
+ * op specifying resource type XENMEM_resource_ioreq_server.
*/
#define XEN_DMOP_get_ioreq_server_info 2
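
To illustrate the NOTE above, a minimal sketch (not part of the patch) of how a kernel-level caller in the tools domain might use the new resource type, assuming a HYPERVISOR_memory_op()-style wrapper and the struct xen_mem_acquire_resource layout added to memory.h later in this patch (the handle field is named gmfn_list per the comments there; domid and ioservid are whatever the caller already holds):

    /* Sketch only; error handling trimmed, wrapper names are assumptions. */
    xen_pfn_t mfns[2];
    struct xen_mem_acquire_resource xmar = {
        .domid = domid,                 /* target domain */
        .type = XENMEM_resource_ioreq_server,
        .id = ioservid,                 /* from XEN_DMOP_create_ioreq_server */
        .frame = XENMEM_resource_ioreq_server_frame_bufioreq,
        .nr_frames = 2,                 /* bufioreq page + ioreq page */
    };
    int rc;

    set_xen_guest_handle(xmar.gmfn_list, mfns);

    rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xmar);
    if ( !rc )
    {
        /* For a PV tools domain, mfns[0] and mfns[1] now hold the MFNs of
         * the buffered and synchronous ioreq pages respectively. */
    }
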
@@ -609,9 +609,14 @@ struct xen_mem_acquire_resource {
domid_t domid;
/* IN - the type of resource */
uint16_t type;
+
+#define XENMEM_resource_ioreq_server 0
+
/*
* IN - a type-specific resource identifier, which must be zero
* unless stated otherwise.
+ *
+ * type == XENMEM_resource_ioreq_server -> id == ioreq server id
*/
uint32_t id;
/* IN - number of (4K) frames of the resource to be mapped */
@@ -619,6 +624,10 @@ struct xen_mem_acquire_resource {
uint32_t pad;
/* IN - the index of the initial frame to be mapped */
uint64_aligned_t frame;
+
+#define XENMEM_resource_ioreq_server_frame_bufioreq 0
+#define XENMEM_resource_ioreq_server_frame_ioreq 1
+
/* IN/OUT - If the tools domain is PV then, upon return, gmfn_list
* will be populated with the MFNs of the resource.
* If the tools domain is HVM then it is expected that, on