[10/11] hvmctl: convert HVMOP_*ioreq_server*

Message ID 5768046B02000078000F6C02@prv-mh.provo.novell.com (mailing list archive)
State New, archived

Commit Message

Jan Beulich June 20, 2016, 12:57 p.m. UTC
Note that we can't adjust HVM_IOREQSRV_BUFIOREQ_* to properly obey
name space rules, as these constants are in use by callers of the libxc
interface.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
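
For reference: the DECLARE_HVMCTL() macro used throughout the libxc changes
below is introduced by an earlier patch in this series and is not shown here.
A minimal sketch of its assumed shape, building a stack-allocated struct
xen_hvmctl whose trailing arguments become designated initializers of the
per-op union member (the name of the sub-command field is an assumption):

    /* Sketch only - the real definition lives in an earlier series patch. */
    #define DECLARE_HVMCTL(op, dom, ...)                       \
        struct xen_hvmctl hvmctl = {                           \
            .interface_version = XEN_HVMCTL_INTERFACE_VERSION, \
            .domain = (dom),                                   \
            .cmd = XEN_HVMCTL_##op,    /* field name assumed */\
            .u.op = { __VA_ARGS__ },                           \
        }

This is why the wrappers below can simply issue do_hvmctl(xch, &hvmctl) and
then read results back out of e.g. hvmctl.u.create_ioreq_server.id.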

--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -41,6 +41,7 @@
 #include <xen/sched.h>
 #include <xen/memory.h>
 #include <xen/grant_table.h>
+#include <xen/hvm/control.h>
 #include <xen/hvm/params.h>
 #include <xen/xsm/flask_op.h>
 #include <xen/tmem.h>
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1416,23 +1416,14 @@ int xc_hvm_create_ioreq_server(xc_interf
                                int handle_bufioreq,
                                ioservid_t *id)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_create_ioreq_server_t, arg);
+    DECLARE_HVMCTL(create_ioreq_server, domid,
+                   .handle_bufioreq = handle_bufioreq);
     int rc;
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->domid = domid;
-    arg->handle_bufioreq = handle_bufioreq;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_create_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    rc = do_hvmctl(xch, &hvmctl);
 
-    *id = arg->id;
+    *id = hvmctl.u.create_ioreq_server.id;
 
-    xc_hypercall_buffer_free(xch, arg);
     return rc;
 }
 
@@ -1443,84 +1434,52 @@ int xc_hvm_get_ioreq_server_info(xc_inte
                                  xen_pfn_t *bufioreq_pfn,
                                  evtchn_port_t *bufioreq_port)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_get_ioreq_server_info_t, arg);
+    DECLARE_HVMCTL(get_ioreq_server_info, domid,
+                   .id = id);
     int rc;
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->domid = domid;
-    arg->id = id;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_get_ioreq_server_info,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    rc = do_hvmctl(xch, &hvmctl);
     if ( rc != 0 )
-        goto done;
+        return rc;
 
     if ( ioreq_pfn )
-        *ioreq_pfn = arg->ioreq_pfn;
+        *ioreq_pfn = hvmctl.u.get_ioreq_server_info.ioreq_pfn;
 
     if ( bufioreq_pfn )
-        *bufioreq_pfn = arg->bufioreq_pfn;
+        *bufioreq_pfn = hvmctl.u.get_ioreq_server_info.bufioreq_pfn;
 
     if ( bufioreq_port )
-        *bufioreq_port = arg->bufioreq_port;
+        *bufioreq_port = hvmctl.u.get_ioreq_server_info.bufioreq_port;
 
-done:
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return 0;
 }
 
 int xc_hvm_map_io_range_to_ioreq_server(xc_interface *xch, domid_t domid,
                                         ioservid_t id, int is_mmio,
                                         uint64_t start, uint64_t end)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = is_mmio ? HVMOP_IO_RANGE_MEMORY : HVMOP_IO_RANGE_PORT;
-    arg->start = start;
-    arg->end = end;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_map_io_range_to_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    DECLARE_HVMCTL(map_io_range_to_ioreq_server, domid,
+                   .id = id,
+                   .type = is_mmio ? XEN_HVMCTL_IO_RANGE_MEMORY
+                                   : XEN_HVMCTL_IO_RANGE_PORT,
+                   .start = start,
+                   .end = end);
 
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_unmap_io_range_from_ioreq_server(xc_interface *xch, domid_t domid,
                                             ioservid_t id, int is_mmio,
                                             uint64_t start, uint64_t end)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    DECLARE_HVMCTL(unmap_io_range_from_ioreq_server, domid,
+                   .id = id,
+                   .type = is_mmio ? XEN_HVMCTL_IO_RANGE_MEMORY
+                                   : XEN_HVMCTL_IO_RANGE_PORT,
+                   .start = start,
+                   .end = end);
 
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = is_mmio ? HVMOP_IO_RANGE_MEMORY : HVMOP_IO_RANGE_PORT;
-    arg->start = start;
-    arg->end = end;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_unmap_io_range_from_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_map_pcidev_to_ioreq_server(xc_interface *xch, domid_t domid,
@@ -1528,37 +1487,23 @@ int xc_hvm_map_pcidev_to_ioreq_server(xc
                                       uint8_t bus, uint8_t device,
                                       uint8_t function)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
+    /*
+     * The underlying hypercall will deal with ranges of PCI SBDF
+     * but, for simplicity, the API only uses singletons.
+     */
+    uint32_t sbdf = XEN_HVMCTL_PCI_SBDF(segment, bus, device, function);
+    DECLARE_HVMCTL(map_io_range_to_ioreq_server, domid,
+                   .id = id,
+                   .type = XEN_HVMCTL_IO_RANGE_PCI,
+                   .start = sbdf,
+                   .end = sbdf);
 
     if (device > 0x1f || function > 0x7) {
         errno = EINVAL;
         return -1;
     }
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = HVMOP_IO_RANGE_PCI;
-
-    /*
-     * The underlying hypercall will deal with ranges of PCI SBDF
-     * but, for simplicity, the API only uses singletons.
-     */
-    arg->start = arg->end = HVMOP_PCI_SBDF((uint64_t)segment,
-                                           (uint64_t)bus,
-                                           (uint64_t)device,
-                                           (uint64_t)function);
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_map_io_range_to_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_unmap_pcidev_from_ioreq_server(xc_interface *xch, domid_t domid,
@@ -1566,54 +1511,29 @@ int xc_hvm_unmap_pcidev_from_ioreq_serve
                                           uint8_t bus, uint8_t device,
                                           uint8_t function)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
+    uint32_t sbdf = XEN_HVMCTL_PCI_SBDF(segment, bus, device, function);
+    DECLARE_HVMCTL(unmap_io_range_from_ioreq_server, domid,
+                   .id = id,
+                   .type = XEN_HVMCTL_IO_RANGE_PCI,
+                   .start = sbdf,
+                   .end = sbdf);
 
     if (device > 0x1f || function > 0x7) {
         errno = EINVAL;
         return -1;
     }
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = HVMOP_IO_RANGE_PCI;
-    arg->start = arg->end = HVMOP_PCI_SBDF((uint64_t)segment,
-                                           (uint64_t)bus,
-                                           (uint64_t)device,
-                                           (uint64_t)function);
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_unmap_io_range_from_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_destroy_ioreq_server(xc_interface *xch,
                                 domid_t domid,
                                 ioservid_t id)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_destroy_ioreq_server_t, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    DECLARE_HVMCTL(destroy_ioreq_server, domid,
+                   .id = id);
 
-    arg->domid = domid;
-    arg->id = id;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_destroy_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_set_ioreq_server_state(xc_interface *xch,
@@ -1621,23 +1541,11 @@ int xc_hvm_set_ioreq_server_state(xc_int
                                   ioservid_t id,
                                   int enabled)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_set_ioreq_server_state_t, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    DECLARE_HVMCTL(set_ioreq_server_state, domid,
+                   .id = id,
+                   .enabled = !!enabled);
 
-    arg->domid = domid;
-    arg->id = id;
-    arg->enabled = !!enabled;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_set_ioreq_server_state,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_domain_setdebugging(xc_interface *xch,
--- a/tools/libxc/xc_private.h
+++ b/tools/libxc/xc_private.h
@@ -34,8 +34,6 @@
 #define XC_INTERNAL_COMPAT_MAP_FOREIGN_API
 #include "xenctrl.h"
 
-#include <xen/hvm/control.h>
-
 #include <xencall.h>
 #include <xenforeignmemory.h>
 
--- a/xen/arch/x86/hvm/control.c
+++ b/xen/arch/x86/hvm/control.c
@@ -20,6 +20,7 @@
 #include <xen/sched.h>
 #include <asm/hap.h>
 #include <asm/shadow.h>
+#include <asm/hvm/ioreq.h>
 #include <xsm/xsm.h>
 
 static int set_pci_intx_level(struct domain *d,
@@ -299,6 +300,50 @@ long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xe
         rc = hvm_inject_msi(d, op.u.inject_msi.addr, op.u.inject_msi.data);
         break;
 
+    case XEN_HVMCTL_create_ioreq_server:
+        rc = -EINVAL;
+        if ( op.u.create_ioreq_server.rsvd )
+            break;
+        rc = hvm_create_ioreq_server(d, current->domain->domain_id, 0,
+                                     op.u.create_ioreq_server.handle_bufioreq,
+                                     &op.u.create_ioreq_server.id);
+        if ( rc == 0 && copy_field_to_guest(u_hvmctl, &op,
+                                            u.create_ioreq_server.id) )
+            rc = -EFAULT;
+        break;
+
+    case XEN_HVMCTL_get_ioreq_server_info:
+        rc = -EINVAL;
+        if ( op.u.get_ioreq_server_info.rsvd )
+            break;
+        rc = hvm_get_ioreq_server_info(d, &op.u.get_ioreq_server_info);
+        if ( rc == 0 && copy_field_to_guest(u_hvmctl, &op,
+                                            u.get_ioreq_server_info) )
+            rc = -EFAULT;
+        break;
+
+    case XEN_HVMCTL_map_io_range_to_ioreq_server:
+        rc = hvm_map_io_range_to_ioreq_server(
+                 d, &op.u.map_io_range_to_ioreq_server);
+        break;
+
+    case XEN_HVMCTL_unmap_io_range_from_ioreq_server:
+        rc = hvm_unmap_io_range_from_ioreq_server(
+                 d, &op.u.unmap_io_range_from_ioreq_server);
+        break;
+
+    case XEN_HVMCTL_destroy_ioreq_server:
+        rc = hvm_destroy_ioreq_server(d, op.u.destroy_ioreq_server.id);
+        break;
+
+    case XEN_HVMCTL_set_ioreq_server_state:
+        rc = -EINVAL;
+        if ( op.u.set_ioreq_server_state.rsvd )
+            break;
+        rc = hvm_set_ioreq_server_state(d, op.u.set_ioreq_server_state.id,
+                                        !!op.u.set_ioreq_server_state.enabled);
+        break;
+
     default:
         rc = -EOPNOTSUPP;
         break;
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4487,195 +4487,6 @@ static int hvmop_flush_tlb_all(void)
     return 0;
 }
 
-static int hvmop_create_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_create_ioreq_server_t) uop)
-{
-    struct domain *curr_d = current->domain;
-    xen_hvm_create_ioreq_server_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_create_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_create_ioreq_server(d, curr_d->domain_id, 0,
-                                 op.handle_bufioreq, &op.id);
-    if ( rc != 0 )
-        goto out;
-
-    rc = copy_to_guest(uop, &op, 1) ? -EFAULT : 0;
-    
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_get_ioreq_server_info(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_get_ioreq_server_info_t) uop)
-{
-    xen_hvm_get_ioreq_server_info_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_get_ioreq_server_info);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_get_ioreq_server_info(d, op.id,
-                                   &op.ioreq_pfn,
-                                   &op.bufioreq_pfn, 
-                                   &op.bufioreq_port);
-    if ( rc != 0 )
-        goto out;
-
-    rc = copy_to_guest(uop, &op, 1) ? -EFAULT : 0;
-    
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_map_io_range_to_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_io_range_t) uop)
-{
-    xen_hvm_io_range_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_map_io_range_to_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_map_io_range_to_ioreq_server(d, op.id, op.type,
-                                          op.start, op.end);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_unmap_io_range_from_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_io_range_t) uop)
-{
-    xen_hvm_io_range_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_unmap_io_range_from_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_unmap_io_range_from_ioreq_server(d, op.id, op.type,
-                                              op.start, op.end);
-    
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_set_ioreq_server_state(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_set_ioreq_server_state_t) uop)
-{
-    xen_hvm_set_ioreq_server_state_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_set_ioreq_server_state);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_set_ioreq_server_state(d, op.id, !!op.enabled);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_destroy_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_destroy_ioreq_server_t) uop)
-{
-    xen_hvm_destroy_ioreq_server_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_destroy_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_destroy_ioreq_server(d, op.id);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
 static int hvmop_set_evtchn_upcall_vector(
     XEN_GUEST_HANDLE_PARAM(xen_hvm_evtchn_upcall_vector_t) uop)
 {
@@ -5192,36 +5003,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
 
     switch ( op )
     {
-    case HVMOP_create_ioreq_server:
-        rc = hvmop_create_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_create_ioreq_server_t));
-        break;
-    
-    case HVMOP_get_ioreq_server_info:
-        rc = hvmop_get_ioreq_server_info(
-            guest_handle_cast(arg, xen_hvm_get_ioreq_server_info_t));
-        break;
-    
-    case HVMOP_map_io_range_to_ioreq_server:
-        rc = hvmop_map_io_range_to_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_io_range_t));
-        break;
-    
-    case HVMOP_unmap_io_range_from_ioreq_server:
-        rc = hvmop_unmap_io_range_from_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_io_range_t));
-        break;
-
-    case HVMOP_set_ioreq_server_state:
-        rc = hvmop_set_ioreq_server_state(
-            guest_handle_cast(arg, xen_hvm_set_ioreq_server_state_t));
-        break;
-    
-    case HVMOP_destroy_ioreq_server:
-        rc = hvmop_destroy_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_destroy_ioreq_server_t));
-        break;
-    
     case HVMOP_set_evtchn_upcall_vector:
         rc = hvmop_set_evtchn_upcall_vector(
             guest_handle_cast(arg, xen_hvm_evtchn_upcall_vector_t));
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -513,9 +513,9 @@ static int hvm_ioreq_server_alloc_ranges
         char *name;
 
         rc = asprintf(&name, "ioreq_server %d %s", s->id,
-                      (i == HVMOP_IO_RANGE_PORT) ? "port" :
-                      (i == HVMOP_IO_RANGE_MEMORY) ? "memory" :
-                      (i == HVMOP_IO_RANGE_PCI) ? "pci" :
+                      (i == XEN_HVMCTL_IO_RANGE_PORT) ? "port" :
+                      (i == XEN_HVMCTL_IO_RANGE_MEMORY) ? "memory" :
+                      (i == XEN_HVMCTL_IO_RANGE_PCI) ? "pci" :
                       "");
         if ( rc )
             goto fail;
@@ -686,7 +686,8 @@ int hvm_create_ioreq_server(struct domai
     struct hvm_ioreq_server *s;
     int rc;
 
-    if ( bufioreq_handling > HVM_IOREQSRV_BUFIOREQ_ATOMIC )
+    if ( !is_hvm_domain(d) ||
+         bufioreq_handling > HVM_IOREQSRV_BUFIOREQ_ATOMIC )
         return -EINVAL;
 
     rc = -ENOMEM;
@@ -738,6 +739,9 @@ int hvm_destroy_ioreq_server(struct doma
     struct hvm_ioreq_server *s;
     int rc;
 
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
@@ -772,14 +776,15 @@ int hvm_destroy_ioreq_server(struct doma
     return rc;
 }
 
-int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
-                              unsigned long *ioreq_pfn,
-                              unsigned long *bufioreq_pfn,
-                              evtchn_port_t *bufioreq_port)
+int hvm_get_ioreq_server_info(struct domain *d,
+                              struct xen_hvm_get_ioreq_server_info *info)
 {
     struct hvm_ioreq_server *s;
     int rc;
 
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
@@ -790,15 +795,15 @@ int hvm_get_ioreq_server_info(struct dom
         if ( s == d->arch.hvm_domain.default_ioreq_server )
             continue;
 
-        if ( s->id != id )
+        if ( s->id != info->id )
             continue;
 
-        *ioreq_pfn = s->ioreq.gmfn;
+        info->ioreq_pfn = s->ioreq.gmfn;
 
         if ( s->bufioreq.va != NULL )
         {
-            *bufioreq_pfn = s->bufioreq.gmfn;
-            *bufioreq_port = s->bufioreq_evtchn;
+            info->bufioreq_pfn = s->bufioreq.gmfn;
+            info->bufioreq_port = s->bufioreq_evtchn;
         }
 
         rc = 0;
@@ -810,13 +815,15 @@ int hvm_get_ioreq_server_info(struct dom
     return rc;
 }
 
-int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
-                                     uint32_t type, uint64_t start,
-                                     uint64_t end)
+int hvm_map_io_range_to_ioreq_server(struct domain *d,
+                                     const struct xen_hvm_io_range *ior)
 {
     struct hvm_ioreq_server *s;
     int rc;
 
+    if ( ior->rsvd || !is_hvm_domain(d) )
+        return -EINVAL;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
@@ -827,16 +834,16 @@ int hvm_map_io_range_to_ioreq_server(str
         if ( s == d->arch.hvm_domain.default_ioreq_server )
             continue;
 
-        if ( s->id == id )
+        if ( s->id == ior->id )
         {
             struct rangeset *r;
 
-            switch ( type )
+            switch ( ior->type )
             {
-            case HVMOP_IO_RANGE_PORT:
-            case HVMOP_IO_RANGE_MEMORY:
-            case HVMOP_IO_RANGE_PCI:
-                r = s->range[type];
+            case XEN_HVMCTL_IO_RANGE_PORT:
+            case XEN_HVMCTL_IO_RANGE_MEMORY:
+            case XEN_HVMCTL_IO_RANGE_PCI:
+                r = s->range[ior->type];
                 break;
 
             default:
@@ -849,10 +856,10 @@ int hvm_map_io_range_to_ioreq_server(str
                 break;
 
             rc = -EEXIST;
-            if ( rangeset_overlaps_range(r, start, end) )
+            if ( rangeset_overlaps_range(r, ior->start, ior->end) )
                 break;
 
-            rc = rangeset_add_range(r, start, end);
+            rc = rangeset_add_range(r, ior->start, ior->end);
             break;
         }
     }
@@ -862,13 +869,15 @@ int hvm_map_io_range_to_ioreq_server(str
     return rc;
 }
 
-int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
-                                         uint32_t type, uint64_t start,
-                                         uint64_t end)
+int hvm_unmap_io_range_from_ioreq_server(struct domain *d,
+                                         const struct xen_hvm_io_range *ior)
 {
     struct hvm_ioreq_server *s;
     int rc;
 
+    if ( ior->rsvd || !is_hvm_domain(d) )
+        return -EINVAL;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
@@ -879,16 +888,16 @@ int hvm_unmap_io_range_from_ioreq_server
         if ( s == d->arch.hvm_domain.default_ioreq_server )
             continue;
 
-        if ( s->id == id )
+        if ( s->id == ior->id )
         {
             struct rangeset *r;
 
-            switch ( type )
+            switch ( ior->type )
             {
-            case HVMOP_IO_RANGE_PORT:
-            case HVMOP_IO_RANGE_MEMORY:
-            case HVMOP_IO_RANGE_PCI:
-                r = s->range[type];
+            case XEN_HVMCTL_IO_RANGE_PORT:
+            case XEN_HVMCTL_IO_RANGE_MEMORY:
+            case XEN_HVMCTL_IO_RANGE_PCI:
+                r = s->range[ior->type];
                 break;
 
             default:
@@ -901,10 +910,10 @@ int hvm_unmap_io_range_from_ioreq_server
                 break;
 
             rc = -ENOENT;
-            if ( !rangeset_contains_range(r, start, end) )
+            if ( !rangeset_contains_range(r, ior->start, ior->end) )
                 break;
 
-            rc = rangeset_remove_range(r, start, end);
+            rc = rangeset_remove_range(r, ior->start, ior->end);
             break;
         }
     }
@@ -920,6 +929,9 @@ int hvm_set_ioreq_server_state(struct do
     struct list_head *entry;
     int rc;
 
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
@@ -1128,12 +1140,12 @@ struct hvm_ioreq_server *hvm_select_iore
 
         /* PCI config data cycle */
 
-        sbdf = HVMOP_PCI_SBDF(0,
-                              PCI_BUS(CF8_BDF(cf8)),
-                              PCI_SLOT(CF8_BDF(cf8)),
-                              PCI_FUNC(CF8_BDF(cf8)));
+        sbdf = XEN_HVMCTL_PCI_SBDF(0,
+                                   PCI_BUS(CF8_BDF(cf8)),
+                                   PCI_SLOT(CF8_BDF(cf8)),
+                                   PCI_FUNC(CF8_BDF(cf8)));
 
-        type = HVMOP_IO_RANGE_PCI;
+        type = XEN_HVMCTL_IO_RANGE_PCI;
         addr = ((uint64_t)sbdf << 32) |
                CF8_ADDR_LO(cf8) |
                (p->addr & 3);
@@ -1152,7 +1164,7 @@ struct hvm_ioreq_server *hvm_select_iore
     else
     {
         type = (p->type == IOREQ_TYPE_PIO) ?
-                HVMOP_IO_RANGE_PORT : HVMOP_IO_RANGE_MEMORY;
+                XEN_HVMCTL_IO_RANGE_PORT : XEN_HVMCTL_IO_RANGE_MEMORY;
         addr = p->addr;
     }
 
@@ -1174,19 +1186,19 @@ struct hvm_ioreq_server *hvm_select_iore
         {
             unsigned long end;
 
-        case HVMOP_IO_RANGE_PORT:
+        case XEN_HVMCTL_IO_RANGE_PORT:
             end = addr + p->size - 1;
             if ( rangeset_contains_range(r, addr, end) )
                 return s;
 
             break;
-        case HVMOP_IO_RANGE_MEMORY:
+        case XEN_HVMCTL_IO_RANGE_MEMORY:
             end = addr + (p->size * p->count) - 1;
             if ( rangeset_contains_range(r, addr, end) )
                 return s;
 
             break;
-        case HVMOP_IO_RANGE_PCI:
+        case XEN_HVMCTL_IO_RANGE_PCI:
             if ( rangeset_contains_singleton(r, addr >> 32) )
             {
                 p->type = IOREQ_TYPE_PCI_CONFIG;
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -30,6 +30,7 @@
 #include <asm/hvm/vmx/vmcs.h>
 #include <asm/hvm/svm/vmcb.h>
 #include <public/grant_table.h>
+#include <public/hvm/control.h>
 #include <public/hvm/params.h>
 #include <public/hvm/save.h>
 #include <public/hvm/hvm_op.h>
@@ -47,7 +48,7 @@ struct hvm_ioreq_vcpu {
     bool_t           pending;
 };
 
-#define NR_IO_RANGE_TYPES (HVMOP_IO_RANGE_PCI + 1)
+#define NR_IO_RANGE_TYPES (XEN_HVMCTL_IO_RANGE_PCI + 1)
 #define MAX_NR_IO_RANGES  256
 
 struct hvm_ioreq_server {
--- a/xen/include/asm-x86/hvm/ioreq.h
+++ b/xen/include/asm-x86/hvm/ioreq.h
@@ -19,6 +19,8 @@
 #ifndef __ASM_X86_HVM_IOREQ_H__
 #define __ASM_X86_HVM_IOREQ_H__
 
+#include <public/hvm/control.h>
+
 bool_t hvm_io_pending(struct vcpu *v);
 bool_t handle_hvm_io_completion(struct vcpu *v);
 bool_t is_ioreq_server_page(struct domain *d, const struct page_info *page);
@@ -27,16 +29,12 @@ int hvm_create_ioreq_server(struct domai
                             bool_t is_default, int bufioreq_handling,
                             ioservid_t *id);
 int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id);
-int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
-                              unsigned long *ioreq_pfn,
-                              unsigned long *bufioreq_pfn,
-                              evtchn_port_t *bufioreq_port);
-int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
-                                     uint32_t type, uint64_t start,
-                                     uint64_t end);
-int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
-                                         uint32_t type, uint64_t start,
-                                         uint64_t end);
+int hvm_get_ioreq_server_info(struct domain *d,
+                              struct xen_hvm_get_ioreq_server_info *info);
+int hvm_map_io_range_to_ioreq_server(struct domain *d,
+                                     const struct xen_hvm_io_range *r);
+int hvm_unmap_io_range_from_ioreq_server(struct domain *d,
+                                         const struct xen_hvm_io_range *r);
 int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
                                bool_t enabled);
 
--- a/xen/include/public/hvm/control.h
+++ b/xen/include/public/hvm/control.h
@@ -26,6 +26,7 @@
 #endif
 
 #include "../xen.h"
+#include "../event_channel.h"
 
 #define XEN_HVMCTL_INTERFACE_VERSION 0x00000001
 
@@ -130,6 +131,131 @@ struct xen_hvm_inject_msi {
     uint64_t  addr;
 };
 
+/*
+ * IOREQ Servers
+ *
+ * The interface between an I/O emulator and Xen is called an IOREQ Server.
+ * A domain supports a single 'legacy' IOREQ Server which is instantiated if
+ * parameter...
+ *
+ * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
+ * ioreq structures), or...
+ * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
+ * ioreq ring), or...
+ * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
+ * to request buffered I/O emulation).
+ *
+ * The following hypercalls facilitate the creation of IOREQ Servers for
+ * 'secondary' emulators which are invoked to implement port I/O, memory, or
+ * PCI config space ranges which they explicitly register.
+ */
+
+typedef uint16_t ioservid_t;
+
+/*
+ * XEN_HVMCTL_create_ioreq_server: Instantiate a new IOREQ Server for a
+ *                                 secondary emulator servicing domain
+ *                                 <domid>.
+ *
+ * The <id> handed back is unique for <domid>. If <handle_bufioreq> is zero
+ * the buffered ioreq ring will not be allocated and hence all emulation
+ * requests to this server will be synchronous.
+ */
+struct xen_hvm_create_ioreq_server {
+#define HVM_IOREQSRV_BUFIOREQ_OFF    0
+#define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
+/*
+ * Use this when read_pointer gets updated atomically and
+ * the pointer pair gets read atomically:
+ */
+#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
+    uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
+    uint8_t rsvd;            /* IN - must be zero */
+    ioservid_t id;           /* OUT - server id */
+};
+
+/*
+ * XEN_HVMCTL_get_ioreq_server_info: Get all the information necessary to
+ *                                   access IOREQ Server <id>.
+ *
+ * The emulator needs to map the synchronous ioreq structures and buffered
+ * ioreq ring (if it exists) that Xen uses to request emulation. These are
+ * hosted in domain <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn>
+ * respectively. In addition, if the IOREQ Server is handling buffered
+ * emulation requests, the emulator needs to bind to event channel
+ * <bufioreq_port> to listen for them. (The event channels used for
+ * synchronous emulation requests are specified in the per-CPU ioreq
+ * structures in <ioreq_pfn>).
+ * If the IOREQ Server is not handling buffered emulation requests then the
+ * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
+ */
+struct xen_hvm_get_ioreq_server_info {
+    ioservid_t id;                 /* IN - server id */
+    uint16_t rsvd;                 /* IN - must be zero */
+    evtchn_port_t bufioreq_port;   /* OUT - buffered ioreq port */
+    uint64_aligned_t ioreq_pfn;    /* OUT - sync ioreq pfn */
+    uint64_aligned_t bufioreq_pfn; /* OUT - buffered ioreq pfn */
+};
+
+/*
+ * XEN_HVMCTL_map_io_range_to_ioreq_server: Register an I/O range of domain
+ *                                          <domid> for emulation by the
+ *                                          client of IOREQ Server <id>
+ * XEN_HVMCTL_unmap_io_range_from_ioreq_server: Deregister an I/O range of
+ *                                              <domid> for emulation by the
+ *                                              client of IOREQ Server <id>
+ *
+ * There are three types of I/O that can be emulated: port I/O, memory accesses
+ * and PCI config space accesses. The <type> field denotes which type of range
+ * the <start> and <end> (inclusive) fields are specifying.
+ * PCI config space ranges are specified by segment/bus/device/function values
+ * which should be encoded using the XEN_HVMCTL_PCI_SBDF helper macro below.
+ *
+ * NOTE: unless an emulation request falls entirely within a range mapped
+ * by a secondary emulator, it will not be passed to that emulator.
+ */
+struct xen_hvm_io_range {
+    ioservid_t id;               /* IN - server id */
+    uint16_t type;               /* IN - type of range */
+    uint32_t rsvd;               /* IN - must be zero */
+#define XEN_HVMCTL_IO_RANGE_PORT   0 /* I/O port range */
+#define XEN_HVMCTL_IO_RANGE_MEMORY 1 /* MMIO range */
+#define XEN_HVMCTL_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
+    uint64_aligned_t start, end; /* IN - inclusive start and end of range */
+};
+
+#define XEN_HVMCTL_PCI_SBDF(s, b, d, f) \
+	((((s) & 0xffff) << 16) | \
+	 (((b) & 0xff) << 8) | \
+	 (((d) & 0x1f) << 3) | \
+	 ((f) & 0x07))
+
+/*
+ * XEN_HVMCTL_destroy_ioreq_server: Destroy the IOREQ Server <id> servicing
+ *                                  domain <domid>.
+ *
+ * Any registered I/O ranges will be automatically deregistered.
+ */
+struct xen_hvm_destroy_ioreq_server {
+    ioservid_t id; /* IN - server id */
+};
+
+/*
+ * XEN_HVMCTL_set_ioreq_server_state: Enable or disable the IOREQ Server <id>
+ *                                    servicing domain <domid>.
+ *
+ * The IOREQ Server will not be passed any emulation requests until it is in
+ * the enabled state.
+ * Note that the contents of the ioreq_pfn and bufioreq_pfn (see
+ * XEN_HVMCTL_get_ioreq_server_info) are not meaningful until the IOREQ Server
+ * is in the enabled state.
+ */
+struct xen_hvm_set_ioreq_server_state {
+    ioservid_t id;   /* IN - server id */
+    uint8_t enabled; /* IN - enabled? */
+    uint8_t rsvd;    /* IN - must be zero */
+};
+
 struct xen_hvmctl {
     uint16_t interface_version;    /* XEN_HVMCTL_INTERFACE_VERSION */
     domid_t domain;
@@ -142,6 +268,12 @@ struct xen_hvmctl {
 #define XEN_HVMCTL_set_mem_type                  6
 #define XEN_HVMCTL_inject_trap                   7
 #define XEN_HVMCTL_inject_msi                    8
+#define XEN_HVMCTL_create_ioreq_server           9
+#define XEN_HVMCTL_get_ioreq_server_info        10
+#define XEN_HVMCTL_map_io_range_to_ioreq_server 11
+#define XEN_HVMCTL_unmap_io_range_from_ioreq_server 12
+#define XEN_HVMCTL_destroy_ioreq_server         13
+#define XEN_HVMCTL_set_ioreq_server_state       14
     uint16_t opaque;               /* Must be zero on initial invocation. */
     union {
         struct xen_hvm_set_pci_intx_level set_pci_intx_level;
@@ -152,6 +284,12 @@ struct xen_hvmctl {
         struct xen_hvm_set_mem_type set_mem_type;
         struct xen_hvm_inject_trap inject_trap;
         struct xen_hvm_inject_msi inject_msi;
+        struct xen_hvm_create_ioreq_server create_ioreq_server;
+        struct xen_hvm_get_ioreq_server_info get_ioreq_server_info;
+        struct xen_hvm_io_range map_io_range_to_ioreq_server;
+        struct xen_hvm_io_range unmap_io_range_from_ioreq_server;
+        struct xen_hvm_destroy_ioreq_server destroy_ioreq_server;
+        struct xen_hvm_set_ioreq_server_state set_ioreq_server_state;
         uint8_t pad[120];
     } u;
 };
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -25,7 +25,6 @@
 
 #include "../xen.h"
 #include "../trace.h"
-#include "../event_channel.h"
 
 /* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
 #define HVMOP_set_param           0
@@ -137,152 +136,6 @@ struct xen_hvm_get_mem_type {
 typedef struct xen_hvm_get_mem_type xen_hvm_get_mem_type_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);
 
-/* Following tools-only interfaces may change in future. */
-#if defined(__XEN__) || defined(__XEN_TOOLS__)
-
-/*
- * IOREQ Servers
- *
- * The interface between an I/O emulator an Xen is called an IOREQ Server.
- * A domain supports a single 'legacy' IOREQ Server which is instantiated if
- * parameter...
- *
- * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
- * ioreq structures), or...
- * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
- * ioreq ring), or...
- * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
- * to request buffered I/O emulation).
- * 
- * The following hypercalls facilitate the creation of IOREQ Servers for
- * 'secondary' emulators which are invoked to implement port I/O, memory, or
- * PCI config space ranges which they explicitly register.
- */
-
-typedef uint16_t ioservid_t;
-
-/*
- * HVMOP_create_ioreq_server: Instantiate a new IOREQ Server for a secondary
- *                            emulator servicing domain <domid>.
- *
- * The <id> handed back is unique for <domid>. If <handle_bufioreq> is zero
- * the buffered ioreq ring will not be allocated and hence all emulation
- * requestes to this server will be synchronous.
- */
-#define HVMOP_create_ioreq_server 17
-struct xen_hvm_create_ioreq_server {
-    domid_t domid;           /* IN - domain to be serviced */
-#define HVM_IOREQSRV_BUFIOREQ_OFF    0
-#define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
-/*
- * Use this when read_pointer gets updated atomically and
- * the pointer pair gets read atomically:
- */
-#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
-    uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
-    ioservid_t id;           /* OUT - server id */
-};
-typedef struct xen_hvm_create_ioreq_server xen_hvm_create_ioreq_server_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_create_ioreq_server_t);
-
-/*
- * HVMOP_get_ioreq_server_info: Get all the information necessary to access
- *                              IOREQ Server <id>. 
- *
- * The emulator needs to map the synchronous ioreq structures and buffered
- * ioreq ring (if it exists) that Xen uses to request emulation. These are
- * hosted in domain <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn>
- * respectively. In addition, if the IOREQ Server is handling buffered
- * emulation requests, the emulator needs to bind to event channel
- * <bufioreq_port> to listen for them. (The event channels used for
- * synchronous emulation requests are specified in the per-CPU ioreq
- * structures in <ioreq_pfn>).
- * If the IOREQ Server is not handling buffered emulation requests then the
- * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
- */
-#define HVMOP_get_ioreq_server_info 18
-struct xen_hvm_get_ioreq_server_info {
-    domid_t domid;                 /* IN - domain to be serviced */
-    ioservid_t id;                 /* IN - server id */
-    evtchn_port_t bufioreq_port;   /* OUT - buffered ioreq port */
-    uint64_aligned_t ioreq_pfn;    /* OUT - sync ioreq pfn */
-    uint64_aligned_t bufioreq_pfn; /* OUT - buffered ioreq pfn */
-};
-typedef struct xen_hvm_get_ioreq_server_info xen_hvm_get_ioreq_server_info_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_ioreq_server_info_t);
-
-/*
- * HVM_map_io_range_to_ioreq_server: Register an I/O range of domain <domid>
- *                                   for emulation by the client of IOREQ
- *                                   Server <id>
- * HVM_unmap_io_range_from_ioreq_server: Deregister an I/O range of <domid>
- *                                       for emulation by the client of IOREQ
- *                                       Server <id>
- *
- * There are three types of I/O that can be emulated: port I/O, memory accesses
- * and PCI config space accesses. The <type> field denotes which type of range
- * the <start> and <end> (inclusive) fields are specifying.
- * PCI config space ranges are specified by segment/bus/device/function values
- * which should be encoded using the HVMOP_PCI_SBDF helper macro below.
- *
- * NOTE: unless an emulation request falls entirely within a range mapped
- * by a secondary emulator, it will not be passed to that emulator.
- */
-#define HVMOP_map_io_range_to_ioreq_server 19
-#define HVMOP_unmap_io_range_from_ioreq_server 20
-struct xen_hvm_io_range {
-    domid_t domid;               /* IN - domain to be serviced */
-    ioservid_t id;               /* IN - server id */
-    uint32_t type;               /* IN - type of range */
-# define HVMOP_IO_RANGE_PORT   0 /* I/O port range */
-# define HVMOP_IO_RANGE_MEMORY 1 /* MMIO range */
-# define HVMOP_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
-    uint64_aligned_t start, end; /* IN - inclusive start and end of range */
-};
-typedef struct xen_hvm_io_range xen_hvm_io_range_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_io_range_t);
-
-#define HVMOP_PCI_SBDF(s,b,d,f)                 \
-	((((s) & 0xffff) << 16) |                   \
-	 (((b) & 0xff) << 8) |                      \
-	 (((d) & 0x1f) << 3) |                      \
-	 ((f) & 0x07))
-
-/*
- * HVMOP_destroy_ioreq_server: Destroy the IOREQ Server <id> servicing domain
- *                             <domid>.
- *
- * Any registered I/O ranges will be automatically deregistered.
- */
-#define HVMOP_destroy_ioreq_server 21
-struct xen_hvm_destroy_ioreq_server {
-    domid_t domid; /* IN - domain to be serviced */
-    ioservid_t id; /* IN - server id */
-};
-typedef struct xen_hvm_destroy_ioreq_server xen_hvm_destroy_ioreq_server_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_destroy_ioreq_server_t);
-
-/*
- * HVMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id> servicing
- *                               domain <domid>.
- *
- * The IOREQ Server will not be passed any emulation requests until it is in the
- * enabled state.
- * Note that the contents of the ioreq_pfn and bufioreq_fn (see
- * HVMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server is in
- * the enabled state.
- */
-#define HVMOP_set_ioreq_server_state 22
-struct xen_hvm_set_ioreq_server_state {
-    domid_t domid;   /* IN - domain to be serviced */
-    ioservid_t id;   /* IN - server id */
-    uint8_t enabled; /* IN - enabled? */    
-};
-typedef struct xen_hvm_set_ioreq_server_state xen_hvm_set_ioreq_server_state_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_ioreq_server_state_t);
-
-#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
-
 #if defined(__i386__) || defined(__x86_64__)
 
 /*
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -609,12 +609,6 @@ static XSM_INLINE int xsm_shadow_control
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_hvm_ioreq_server(XSM_DEFAULT_ARG struct domain *d, int op)
-{
-    XSM_ASSERT_ACTION(XSM_DM_PRIV);
-    return xsm_default_action(action, current->domain, d);
-}
-
 static XSM_INLINE int xsm_mem_sharing_op(XSM_DEFAULT_ARG struct domain *d, struct domain *cd, int op)
 {
     XSM_ASSERT_ACTION(XSM_DM_PRIV);
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -174,7 +174,6 @@ struct xsm_operations {
     int (*do_mca) (void);
     int (*shadow_control) (struct domain *d, uint32_t op);
     int (*hvm_set_pci_link_route) (struct domain *d);
-    int (*hvm_ioreq_server) (struct domain *d, int op);
     int (*mem_sharing_op) (struct domain *d, struct domain *cd, int op);
     int (*apic) (struct domain *d, int cmd);
     int (*memtype) (uint32_t access);
@@ -648,11 +647,6 @@ static inline int xsm_hvm_set_pci_link_r
     return xsm_ops->hvm_set_pci_link_route(d);
 }
 
-static inline int xsm_hvm_ioreq_server (xsm_default_t def, struct domain *d, int op)
-{
-    return xsm_ops->hvm_ioreq_server(d, op);
-}
-
 static inline int xsm_mem_sharing_op (xsm_default_t def, struct domain *d, struct domain *cd, int op)
 {
     return xsm_ops->mem_sharing_op(d, cd, op);
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -145,7 +145,6 @@ void xsm_fixup_ops (struct xsm_operation
 #ifdef CONFIG_X86
     set_to_dummy_if_null(ops, do_mca);
     set_to_dummy_if_null(ops, shadow_control);
-    set_to_dummy_if_null(ops, hvm_ioreq_server);
     set_to_dummy_if_null(ops, mem_sharing_op);
     set_to_dummy_if_null(ops, apic);
     set_to_dummy_if_null(ops, machine_memory_map);
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1526,11 +1526,6 @@ static int flask_ioport_mapping(struct d
     return flask_ioport_permission(d, start, end, access);
 }
 
-static int flask_hvm_ioreq_server(struct domain *d, int op)
-{
-    return current_has_perm(d, SECCLASS_HVM, HVM__HVMCTL);
-}
-
 static int flask_mem_sharing_op(struct domain *d, struct domain *cd, int op)
 {
     int rc = current_has_perm(cd, SECCLASS_HVM, HVM__MEM_SHARING);
@@ -1799,7 +1794,6 @@ static struct xsm_operations flask_ops =
 #ifdef CONFIG_X86
     .do_mca = flask_do_mca,
     .shadow_control = flask_shadow_control,
-    .hvm_ioreq_server = flask_hvm_ioreq_server,
     .mem_sharing_op = flask_mem_sharing_op,
     .apic = flask_apic,
     .machine_memory_map = flask_machine_memory_map,
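
Taken together, a secondary emulator would use the converted libxc interface
roughly as follows. This is a hedged sketch, assuming an already-open
xc_interface *xch and a target domid, with error handling elided; the
signatures match the xc_domain.c changes above:

    ioservid_t id;
    xen_pfn_t ioreq_pfn, bufioreq_pfn;
    evtchn_port_t bufioreq_port;

    /* Instantiate a server with an atomically updated buffered ioreq ring. */
    xc_hvm_create_ioreq_server(xch, domid, HVM_IOREQSRV_BUFIOREQ_ATOMIC, &id);

    /* Learn which gmfns to map and which event channel to bind. */
    xc_hvm_get_ioreq_server_info(xch, domid, id, &ioreq_pfn,
                                 &bufioreq_pfn, &bufioreq_port);

    /* Register ranges to emulate: I/O ports 0x3f8-0x3ff (is_mmio == 0
     * selects XEN_HVMCTL_IO_RANGE_PORT) plus a single PCI device. */
    xc_hvm_map_io_range_to_ioreq_server(xch, domid, id, 0, 0x3f8, 0x3ff);
    xc_hvm_map_pcidev_to_ioreq_server(xch, domid, id, 0, 3, 0x1f, 0);

    /* No emulation requests are delivered until the server is enabled. */
    xc_hvm_set_ioreq_server_state(xch, domid, id, 1);

    /* ... service requests ... */

    xc_hvm_destroy_ioreq_server(xch, domid, id);

Note how the pcidev wrappers encode their singleton range: with the
XEN_HVMCTL_PCI_SBDF() helper above, segment 0, bus 3, device 0x1f,
function 0 encodes to (3 << 8) | (0x1f << 3) | 0 = 0x3f8, used as both
<start> and <end> of a XEN_HVMCTL_IO_RANGE_PCI range.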

Comments

Wei Liu June 21, 2016, 10:14 a.m. UTC | #1
On Mon, Jun 20, 2016 at 06:57:47AM -0600, Jan Beulich wrote:
> Note that we can't adjust HVM_IOREQSRV_BUFIOREQ_* to properly obey
> name space rules, as these constants are in use by callers of the libxc
> interface.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 

Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Paul Durrant June 21, 2016, 12:44 p.m. UTC | #2
> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 20 June 2016 13:58
> To: xen-devel
> Cc: Andrew Cooper; Paul Durrant; Wei Liu; George Dunlap; Ian Jackson;
> Stefano Stabellini; dgdegra@tycho.nsa.gov; Tim (Xen.org)
> Subject: [PATCH 10/11] hvmctl: convert HVMOP_*ioreq_server*
> 
> Note that we can't adjust HVM_IOREQSRV_BUFIOREQ_* to properly obey
> name space rules, as these constants are in use by callers of the libxc
> interface.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Paul Durrant <paul.durrant@citrix.com>

> 
> --- a/tools/libxc/include/xenctrl.h
> +++ b/tools/libxc/include/xenctrl.h
> @@ -41,6 +41,7 @@
>  #include <xen/sched.h>
>  #include <xen/memory.h>
>  #include <xen/grant_table.h>
> +#include <xen/hvm/control.h>
>  #include <xen/hvm/params.h>
>  #include <xen/xsm/flask_op.h>
>  #include <xen/tmem.h>
> --- a/tools/libxc/xc_domain.c
> +++ b/tools/libxc/xc_domain.c
> @@ -1416,23 +1416,14 @@ int xc_hvm_create_ioreq_server(xc_interf
>                                 int handle_bufioreq,
>                                 ioservid_t *id)
>  {
> -    DECLARE_HYPERCALL_BUFFER(xen_hvm_create_ioreq_server_t, arg);
> +    DECLARE_HVMCTL(create_ioreq_server, domid,
> +                   .handle_bufioreq = handle_bufioreq);
>      int rc;
> 
> -    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
> -    if ( arg == NULL )
> -        return -1;
> -
> -    arg->domid = domid;
> -    arg->handle_bufioreq = handle_bufioreq;
> -
> -    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
> -                  HVMOP_create_ioreq_server,
> -                  HYPERCALL_BUFFER_AS_ARG(arg));
> +    rc = do_hvmctl(xch, &hvmctl);
> 
> -    *id = arg->id;
> +    *id = hvmctl.u.create_ioreq_server.id;
> 
> -    xc_hypercall_buffer_free(xch, arg);
>      return rc;
>  }
> 
> @@ -1443,84 +1434,52 @@ int xc_hvm_get_ioreq_server_info(xc_inte
>                                   xen_pfn_t *bufioreq_pfn,
>                                   evtchn_port_t *bufioreq_port)
>  {
> -    DECLARE_HYPERCALL_BUFFER(xen_hvm_get_ioreq_server_info_t, arg);
> +    DECLARE_HVMCTL(get_ioreq_server_info, domid,
> +                   .id = id);
>      int rc;
> 
> -    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
> -    if ( arg == NULL )
> -        return -1;
> -
> -    arg->domid = domid;
> -    arg->id = id;
> -
> -    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
> -                  HVMOP_get_ioreq_server_info,
> -                  HYPERCALL_BUFFER_AS_ARG(arg));
> +    rc = do_hvmctl(xch, &hvmctl);
>      if ( rc != 0 )
> -        goto done;
> +        return rc;
> 
>      if ( ioreq_pfn )
> -        *ioreq_pfn = arg->ioreq_pfn;
> +        *ioreq_pfn = hvmctl.u.get_ioreq_server_info.ioreq_pfn;
> 
>      if ( bufioreq_pfn )
> -        *bufioreq_pfn = arg->bufioreq_pfn;
> +        *bufioreq_pfn = hvmctl.u.get_ioreq_server_info.bufioreq_pfn;
> 
>      if ( bufioreq_port )
> -        *bufioreq_port = arg->bufioreq_port;
> +        *bufioreq_port = hvmctl.u.get_ioreq_server_info.bufioreq_port;
> 
> -done:
> -    xc_hypercall_buffer_free(xch, arg);
> -    return rc;
> +    return 0;
>  }
> 
>  int xc_hvm_map_io_range_to_ioreq_server(xc_interface *xch, domid_t
> domid,
>                                          ioservid_t id, int is_mmio,
>                                          uint64_t start, uint64_t end)
>  {
> -    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
> -    int rc;
> -
> -    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
> -    if ( arg == NULL )
> -        return -1;
> -
> -    arg->domid = domid;
> -    arg->id = id;
> -    arg->type = is_mmio ? HVMOP_IO_RANGE_MEMORY :
> HVMOP_IO_RANGE_PORT;
> -    arg->start = start;
> -    arg->end = end;
> -
> -    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
> -                  HVMOP_map_io_range_to_ioreq_server,
> -                  HYPERCALL_BUFFER_AS_ARG(arg));
> +    DECLARE_HVMCTL(map_io_range_to_ioreq_server, domid,
> +                   .id = id,
> +                   .type = is_mmio ? XEN_HVMCTL_IO_RANGE_MEMORY
> +                                   : XEN_HVMCTL_IO_RANGE_PORT,
> +                   .start = start,
> +                   .end = end);
> 
> -    xc_hypercall_buffer_free(xch, arg);
> -    return rc;
> +    return do_hvmctl(xch, &hvmctl);
>  }
> 
>  int xc_hvm_unmap_io_range_from_ioreq_server(xc_interface *xch,
> domid_t domid,
>                                              ioservid_t id, int is_mmio,
>                                              uint64_t start, uint64_t end)
>  {
> -    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
> -    int rc;
> -
> -    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
> -    if ( arg == NULL )
> -        return -1;
> +    DECLARE_HVMCTL(unmap_io_range_from_ioreq_server, domid,
> +                   .id = id,
> +                   .type = is_mmio ? XEN_HVMCTL_IO_RANGE_MEMORY
> +                                   : XEN_HVMCTL_IO_RANGE_PORT,
> +                   .start = start,
> +                   .end = end);
> 
> -    arg->domid = domid;
> -    arg->id = id;
> -    arg->type = is_mmio ? HVMOP_IO_RANGE_MEMORY :
> HVMOP_IO_RANGE_PORT;
> -    arg->start = start;
> -    arg->end = end;
> -
> -    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
> -                  HVMOP_unmap_io_range_from_ioreq_server,
> -                  HYPERCALL_BUFFER_AS_ARG(arg));
> -
> -    xc_hypercall_buffer_free(xch, arg);
> -    return rc;
> +    return do_hvmctl(xch, &hvmctl);
>  }
> 
>  int xc_hvm_map_pcidev_to_ioreq_server(xc_interface *xch, domid_t
> domid,
> @@ -1528,37 +1487,23 @@ int xc_hvm_map_pcidev_to_ioreq_server(xc
>                                        uint8_t bus, uint8_t device,
>                                        uint8_t function)
>  {
> -    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
> -    int rc;
> +    /*
> +     * The underlying hypercall will deal with ranges of PCI SBDF
> +     * but, for simplicity, the API only uses singletons.
> +     */
> +    uint32_t sbdf = XEN_HVMCTL_PCI_SBDF(segment, bus, device, function);
> +    DECLARE_HVMCTL(map_io_range_to_ioreq_server, domid,
> +                   .id = id,
> +                   .type = XEN_HVMCTL_IO_RANGE_PCI,
> +                   .start = sbdf,
> +                   .end = sbdf);
> 
>      if (device > 0x1f || function > 0x7) {
>          errno = EINVAL;
>          return -1;
>      }
> 
> -    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
> -    if ( arg == NULL )
> -        return -1;
> -
> -    arg->domid = domid;
> -    arg->id = id;
> -    arg->type = HVMOP_IO_RANGE_PCI;
> -
> -    /*
> -     * The underlying hypercall will deal with ranges of PCI SBDF
> -     * but, for simplicity, the API only uses singletons.
> -     */
> -    arg->start = arg->end = HVMOP_PCI_SBDF((uint64_t)segment,
> -                                           (uint64_t)bus,
> -                                           (uint64_t)device,
> -                                           (uint64_t)function);
> -
> -    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
> -                  HVMOP_map_io_range_to_ioreq_server,
> -                  HYPERCALL_BUFFER_AS_ARG(arg));
> -
> -    xc_hypercall_buffer_free(xch, arg);
> -    return rc;
> +    return do_hvmctl(xch, &hvmctl);
>  }
> 
>  int xc_hvm_unmap_pcidev_from_ioreq_server(xc_interface *xch, domid_t
> domid,
> @@ -1566,54 +1511,29 @@ int xc_hvm_unmap_pcidev_from_ioreq_serve
>                                            uint8_t bus, uint8_t device,
>                                            uint8_t function)
>  {
> -    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
> -    int rc;
> +    uint32_t sbdf = XEN_HVMCTL_PCI_SBDF(segment, bus, device, function);
> +    DECLARE_HVMCTL(unmap_io_range_from_ioreq_server, domid,
> +                   .id = id,
> +                   .type = XEN_HVMCTL_IO_RANGE_PCI,
> +                   .start = sbdf,
> +                   .end = sbdf);
> 
>      if (device > 0x1f || function > 0x7) {
>          errno = EINVAL;
>          return -1;
>      }
> 
> -    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
> -    if ( arg == NULL )
> -        return -1;
> -
> -    arg->domid = domid;
> -    arg->id = id;
> -    arg->type = HVMOP_IO_RANGE_PCI;
> -    arg->start = arg->end = HVMOP_PCI_SBDF((uint64_t)segment,
> -                                           (uint64_t)bus,
> -                                           (uint64_t)device,
> -                                           (uint64_t)function);
> -
> -    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
> -                  HVMOP_unmap_io_range_from_ioreq_server,
> -                  HYPERCALL_BUFFER_AS_ARG(arg));
> -
> -    xc_hypercall_buffer_free(xch, arg);
> -    return rc;
> +    return do_hvmctl(xch, &hvmctl);
>  }
> 
>  int xc_hvm_destroy_ioreq_server(xc_interface *xch,
>                                  domid_t domid,
>                                  ioservid_t id)
>  {
> -    DECLARE_HYPERCALL_BUFFER(xen_hvm_destroy_ioreq_server_t, arg);
> -    int rc;
> -
> -    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
> -    if ( arg == NULL )
> -        return -1;
> +    DECLARE_HVMCTL(destroy_ioreq_server, domid,
> +                   .id = id);
> 
> -    arg->domid = domid;
> -    arg->id = id;
> -
> -    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
> -                  HVMOP_destroy_ioreq_server,
> -                  HYPERCALL_BUFFER_AS_ARG(arg));
> -
> -    xc_hypercall_buffer_free(xch, arg);
> -    return rc;
> +    return do_hvmctl(xch, &hvmctl);
>  }
> 
>  int xc_hvm_set_ioreq_server_state(xc_interface *xch,
> @@ -1621,23 +1541,11 @@ int xc_hvm_set_ioreq_server_state(xc_int
>                                    ioservid_t id,
>                                    int enabled)
>  {
> -    DECLARE_HYPERCALL_BUFFER(xen_hvm_set_ioreq_server_state_t, arg);
> -    int rc;
> -
> -    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
> -    if ( arg == NULL )
> -        return -1;
> +    DECLARE_HVMCTL(set_ioreq_server_state, domid,
> +                   .id = id,
> +                   .enabled = !!enabled);
> 
> -    arg->domid = domid;
> -    arg->id = id;
> -    arg->enabled = !!enabled;
> -
> -    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
> -                  HVMOP_set_ioreq_server_state,
> -                  HYPERCALL_BUFFER_AS_ARG(arg));
> -
> -    xc_hypercall_buffer_free(xch, arg);
> -    return rc;
> +    return do_hvmctl(xch, &hvmctl);
>  }
> 
>  int xc_domain_setdebugging(xc_interface *xch,
> --- a/tools/libxc/xc_private.h
> +++ b/tools/libxc/xc_private.h
> @@ -34,8 +34,6 @@
>  #define XC_INTERNAL_COMPAT_MAP_FOREIGN_API
>  #include "xenctrl.h"
> 
> -#include <xen/hvm/control.h>
> -
>  #include <xencall.h>
>  #include <xenforeignmemory.h>
> 
> --- a/xen/arch/x86/hvm/control.c
> +++ b/xen/arch/x86/hvm/control.c
> @@ -20,6 +20,7 @@
>  #include <xen/sched.h>
>  #include <asm/hap.h>
>  #include <asm/shadow.h>
> +#include <asm/hvm/ioreq.h>
>  #include <xsm/xsm.h>
> 
>  static int set_pci_intx_level(struct domain *d,
> @@ -299,6 +300,50 @@ long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xe
>          rc = hvm_inject_msi(d, op.u.inject_msi.addr, op.u.inject_msi.data);
>          break;
> 
> +    case XEN_HVMCTL_create_ioreq_server:
> +        rc = -EINVAL;
> +        if ( op.u.create_ioreq_server.rsvd )
> +            break;
> +        rc = hvm_create_ioreq_server(d, current->domain->domain_id, 0,
> +                                     op.u.create_ioreq_server.handle_bufioreq,
> +                                     &op.u.create_ioreq_server.id);
> +        if ( rc == 0 && copy_field_to_guest(u_hvmctl, &op,
> +                                            u.create_ioreq_server.id) )
> +            rc = -EFAULT;
> +        break;
> +
> +    case XEN_HVMCTL_get_ioreq_server_info:
> +        rc = -EINVAL;
> +        if ( op.u.get_ioreq_server_info.rsvd )
> +            break;
> +        rc = hvm_get_ioreq_server_info(d, &op.u.get_ioreq_server_info);
> +        if ( rc == 0 && copy_field_to_guest(u_hvmctl, &op,
> +                                            u.get_ioreq_server_info) )
> +            rc = -EFAULT;
> +        break;
> +
> +    case XEN_HVMCTL_map_io_range_to_ioreq_server:
> +        rc = hvm_map_io_range_to_ioreq_server(
> +                 d, &op.u.map_io_range_to_ioreq_server);
> +        break;
> +
> +    case XEN_HVMCTL_unmap_io_range_from_ioreq_server:
> +        rc = hvm_unmap_io_range_from_ioreq_server(
> +                 d, &op.u.unmap_io_range_from_ioreq_server);
> +        break;
> +
> +    case XEN_HVMCTL_destroy_ioreq_server:
> +        rc = hvm_destroy_ioreq_server(d, op.u.destroy_ioreq_server.id);
> +        break;
> +
> +    case XEN_HVMCTL_set_ioreq_server_state:
> +        rc = -EINVAL;
> +        if ( op.u.set_ioreq_server_state.rsvd )
> +            break;
> +        rc = hvm_set_ioreq_server_state(d, op.u.set_ioreq_server_state.id,
> +                                        !!op.u.set_ioreq_server_state.enabled);
> +        break;
> +
>      default:
>          rc = -EOPNOTSUPP;
>          break;
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -4487,195 +4487,6 @@ static int hvmop_flush_tlb_all(void)
>      return 0;
>  }
> 
> -static int hvmop_create_ioreq_server(
> -    XEN_GUEST_HANDLE_PARAM(xen_hvm_create_ioreq_server_t) uop)
> -{
> -    struct domain *curr_d = current->domain;
> -    xen_hvm_create_ioreq_server_t op;
> -    struct domain *d;
> -    int rc;
> -
> -    if ( copy_from_guest(&op, uop, 1) )
> -        return -EFAULT;
> -
> -    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
> -    if ( rc != 0 )
> -        return rc;
> -
> -    rc = -EINVAL;
> -    if ( !is_hvm_domain(d) )
> -        goto out;
> -
> -    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_create_ioreq_server);
> -    if ( rc != 0 )
> -        goto out;
> -
> -    rc = hvm_create_ioreq_server(d, curr_d->domain_id, 0,
> -                                 op.handle_bufioreq, &op.id);
> -    if ( rc != 0 )
> -        goto out;
> -
> -    rc = copy_to_guest(uop, &op, 1) ? -EFAULT : 0;
> -
> - out:
> -    rcu_unlock_domain(d);
> -    return rc;
> -}
> -
> -static int hvmop_get_ioreq_server_info(
> -    XEN_GUEST_HANDLE_PARAM(xen_hvm_get_ioreq_server_info_t) uop)
> -{
> -    xen_hvm_get_ioreq_server_info_t op;
> -    struct domain *d;
> -    int rc;
> -
> -    if ( copy_from_guest(&op, uop, 1) )
> -        return -EFAULT;
> -
> -    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
> -    if ( rc != 0 )
> -        return rc;
> -
> -    rc = -EINVAL;
> -    if ( !is_hvm_domain(d) )
> -        goto out;
> -
> -    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_get_ioreq_server_info);
> -    if ( rc != 0 )
> -        goto out;
> -
> -    rc = hvm_get_ioreq_server_info(d, op.id,
> -                                   &op.ioreq_pfn,
> -                                   &op.bufioreq_pfn,
> -                                   &op.bufioreq_port);
> -    if ( rc != 0 )
> -        goto out;
> -
> -    rc = copy_to_guest(uop, &op, 1) ? -EFAULT : 0;
> -
> - out:
> -    rcu_unlock_domain(d);
> -    return rc;
> -}
> -
> -static int hvmop_map_io_range_to_ioreq_server(
> -    XEN_GUEST_HANDLE_PARAM(xen_hvm_io_range_t) uop)
> -{
> -    xen_hvm_io_range_t op;
> -    struct domain *d;
> -    int rc;
> -
> -    if ( copy_from_guest(&op, uop, 1) )
> -        return -EFAULT;
> -
> -    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
> -    if ( rc != 0 )
> -        return rc;
> -
> -    rc = -EINVAL;
> -    if ( !is_hvm_domain(d) )
> -        goto out;
> -
> -    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_map_io_range_to_ioreq_server);
> -    if ( rc != 0 )
> -        goto out;
> -
> -    rc = hvm_map_io_range_to_ioreq_server(d, op.id, op.type,
> -                                          op.start, op.end);
> -
> - out:
> -    rcu_unlock_domain(d);
> -    return rc;
> -}
> -
> -static int hvmop_unmap_io_range_from_ioreq_server(
> -    XEN_GUEST_HANDLE_PARAM(xen_hvm_io_range_t) uop)
> -{
> -    xen_hvm_io_range_t op;
> -    struct domain *d;
> -    int rc;
> -
> -    if ( copy_from_guest(&op, uop, 1) )
> -        return -EFAULT;
> -
> -    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
> -    if ( rc != 0 )
> -        return rc;
> -
> -    rc = -EINVAL;
> -    if ( !is_hvm_domain(d) )
> -        goto out;
> -
> -    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_unmap_io_range_from_ioreq_server);
> -    if ( rc != 0 )
> -        goto out;
> -
> -    rc = hvm_unmap_io_range_from_ioreq_server(d, op.id, op.type,
> -                                              op.start, op.end);
> -
> - out:
> -    rcu_unlock_domain(d);
> -    return rc;
> -}
> -
> -static int hvmop_set_ioreq_server_state(
> -    XEN_GUEST_HANDLE_PARAM(xen_hvm_set_ioreq_server_state_t) uop)
> -{
> -    xen_hvm_set_ioreq_server_state_t op;
> -    struct domain *d;
> -    int rc;
> -
> -    if ( copy_from_guest(&op, uop, 1) )
> -        return -EFAULT;
> -
> -    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
> -    if ( rc != 0 )
> -        return rc;
> -
> -    rc = -EINVAL;
> -    if ( !is_hvm_domain(d) )
> -        goto out;
> -
> -    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_set_ioreq_server_state);
> -    if ( rc != 0 )
> -        goto out;
> -
> -    rc = hvm_set_ioreq_server_state(d, op.id, !!op.enabled);
> -
> - out:
> -    rcu_unlock_domain(d);
> -    return rc;
> -}
> -
> -static int hvmop_destroy_ioreq_server(
> -    XEN_GUEST_HANDLE_PARAM(xen_hvm_destroy_ioreq_server_t) uop)
> -{
> -    xen_hvm_destroy_ioreq_server_t op;
> -    struct domain *d;
> -    int rc;
> -
> -    if ( copy_from_guest(&op, uop, 1) )
> -        return -EFAULT;
> -
> -    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
> -    if ( rc != 0 )
> -        return rc;
> -
> -    rc = -EINVAL;
> -    if ( !is_hvm_domain(d) )
> -        goto out;
> -
> -    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_destroy_ioreq_server);
> -    if ( rc != 0 )
> -        goto out;
> -
> -    rc = hvm_destroy_ioreq_server(d, op.id);
> -
> - out:
> -    rcu_unlock_domain(d);
> -    return rc;
> -}
> -
>  static int hvmop_set_evtchn_upcall_vector(
>      XEN_GUEST_HANDLE_PARAM(xen_hvm_evtchn_upcall_vector_t) uop)
>  {
> @@ -5192,36 +5003,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
> 
>      switch ( op )
>      {
> -    case HVMOP_create_ioreq_server:
> -        rc = hvmop_create_ioreq_server(
> -            guest_handle_cast(arg, xen_hvm_create_ioreq_server_t));
> -        break;
> -
> -    case HVMOP_get_ioreq_server_info:
> -        rc = hvmop_get_ioreq_server_info(
> -            guest_handle_cast(arg, xen_hvm_get_ioreq_server_info_t));
> -        break;
> -
> -    case HVMOP_map_io_range_to_ioreq_server:
> -        rc = hvmop_map_io_range_to_ioreq_server(
> -            guest_handle_cast(arg, xen_hvm_io_range_t));
> -        break;
> -
> -    case HVMOP_unmap_io_range_from_ioreq_server:
> -        rc = hvmop_unmap_io_range_from_ioreq_server(
> -            guest_handle_cast(arg, xen_hvm_io_range_t));
> -        break;
> -
> -    case HVMOP_set_ioreq_server_state:
> -        rc = hvmop_set_ioreq_server_state(
> -            guest_handle_cast(arg, xen_hvm_set_ioreq_server_state_t));
> -        break;
> -
> -    case HVMOP_destroy_ioreq_server:
> -        rc = hvmop_destroy_ioreq_server(
> -            guest_handle_cast(arg, xen_hvm_destroy_ioreq_server_t));
> -        break;
> -
>      case HVMOP_set_evtchn_upcall_vector:
>          rc = hvmop_set_evtchn_upcall_vector(
>              guest_handle_cast(arg, xen_hvm_evtchn_upcall_vector_t));
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -513,9 +513,9 @@ static int hvm_ioreq_server_alloc_ranges
>          char *name;
> 
>          rc = asprintf(&name, "ioreq_server %d %s", s->id,
> -                      (i == HVMOP_IO_RANGE_PORT) ? "port" :
> -                      (i == HVMOP_IO_RANGE_MEMORY) ? "memory" :
> -                      (i == HVMOP_IO_RANGE_PCI) ? "pci" :
> +                      (i == XEN_HVMCTL_IO_RANGE_PORT) ? "port" :
> +                      (i == XEN_HVMCTL_IO_RANGE_MEMORY) ? "memory" :
> +                      (i == XEN_HVMCTL_IO_RANGE_PCI) ? "pci" :
>                        "");
>          if ( rc )
>              goto fail;
> @@ -686,7 +686,8 @@ int hvm_create_ioreq_server(struct domai
>      struct hvm_ioreq_server *s;
>      int rc;
> 
> -    if ( bufioreq_handling > HVM_IOREQSRV_BUFIOREQ_ATOMIC )
> +    if ( !is_hvm_domain(d) ||
> +         bufioreq_handling > HVM_IOREQSRV_BUFIOREQ_ATOMIC )
>          return -EINVAL;
> 
>      rc = -ENOMEM;
> @@ -738,6 +739,9 @@ int hvm_destroy_ioreq_server(struct doma
>      struct hvm_ioreq_server *s;
>      int rc;
> 
> +    if ( !is_hvm_domain(d) )
> +        return -EINVAL;
> +
>      spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
> 
>      rc = -ENOENT;
> @@ -772,14 +776,15 @@ int hvm_destroy_ioreq_server(struct doma
>      return rc;
>  }
> 
> -int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
> -                              unsigned long *ioreq_pfn,
> -                              unsigned long *bufioreq_pfn,
> -                              evtchn_port_t *bufioreq_port)
> +int hvm_get_ioreq_server_info(struct domain *d,
> +                              struct xen_hvm_get_ioreq_server_info *info)
>  {
>      struct hvm_ioreq_server *s;
>      int rc;
> 
> +    if ( !is_hvm_domain(d) )
> +        return -EINVAL;
> +
>      spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
> 
>      rc = -ENOENT;
> @@ -790,15 +795,15 @@ int hvm_get_ioreq_server_info(struct dom
>          if ( s == d->arch.hvm_domain.default_ioreq_server )
>              continue;
> 
> -        if ( s->id != id )
> +        if ( s->id != info->id )
>              continue;
> 
> -        *ioreq_pfn = s->ioreq.gmfn;
> +        info->ioreq_pfn = s->ioreq.gmfn;
> 
>          if ( s->bufioreq.va != NULL )
>          {
> -            *bufioreq_pfn = s->bufioreq.gmfn;
> -            *bufioreq_port = s->bufioreq_evtchn;
> +            info->bufioreq_pfn = s->bufioreq.gmfn;
> +            info->bufioreq_port = s->bufioreq_evtchn;
>          }
> 
>          rc = 0;
> @@ -810,13 +815,15 @@ int hvm_get_ioreq_server_info(struct dom
>      return rc;
>  }
> 
> -int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
> -                                     uint32_t type, uint64_t start,
> -                                     uint64_t end)
> +int hvm_map_io_range_to_ioreq_server(struct domain *d,
> +                                     const struct xen_hvm_io_range *ior)
>  {
>      struct hvm_ioreq_server *s;
>      int rc;
> 
> +    if ( ior->rsvd || !is_hvm_domain(d) )
> +        return -EINVAL;
> +
>      spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
> 
>      rc = -ENOENT;
> @@ -827,16 +834,16 @@ int hvm_map_io_range_to_ioreq_server(str
>          if ( s == d->arch.hvm_domain.default_ioreq_server )
>              continue;
> 
> -        if ( s->id == id )
> +        if ( s->id == ior->id )
>          {
>              struct rangeset *r;
> 
> -            switch ( type )
> +            switch ( ior->type )
>              {
> -            case HVMOP_IO_RANGE_PORT:
> -            case HVMOP_IO_RANGE_MEMORY:
> -            case HVMOP_IO_RANGE_PCI:
> -                r = s->range[type];
> +            case XEN_HVMCTL_IO_RANGE_PORT:
> +            case XEN_HVMCTL_IO_RANGE_MEMORY:
> +            case XEN_HVMCTL_IO_RANGE_PCI:
> +                r = s->range[ior->type];
>                  break;
> 
>              default:
> @@ -849,10 +856,10 @@ int hvm_map_io_range_to_ioreq_server(str
>                  break;
> 
>              rc = -EEXIST;
> -            if ( rangeset_overlaps_range(r, start, end) )
> +            if ( rangeset_overlaps_range(r, ior->start, ior->end) )
>                  break;
> 
> -            rc = rangeset_add_range(r, start, end);
> +            rc = rangeset_add_range(r, ior->start, ior->end);
>              break;
>          }
>      }
> @@ -862,13 +869,15 @@ int hvm_map_io_range_to_ioreq_server(str
>      return rc;
>  }
> 
> -int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
> -                                         uint32_t type, uint64_t start,
> -                                         uint64_t end)
> +int hvm_unmap_io_range_from_ioreq_server(struct domain *d,
> +                                         const struct xen_hvm_io_range *ior)
>  {
>      struct hvm_ioreq_server *s;
>      int rc;
> 
> +    if ( ior->rsvd || !is_hvm_domain(d) )
> +        return -EINVAL;
> +
>      spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
> 
>      rc = -ENOENT;
> @@ -879,16 +888,16 @@ int hvm_unmap_io_range_from_ioreq_server
>          if ( s == d->arch.hvm_domain.default_ioreq_server )
>              continue;
> 
> -        if ( s->id == id )
> +        if ( s->id == ior->id )
>          {
>              struct rangeset *r;
> 
> -            switch ( type )
> +            switch ( ior->type )
>              {
> -            case HVMOP_IO_RANGE_PORT:
> -            case HVMOP_IO_RANGE_MEMORY:
> -            case HVMOP_IO_RANGE_PCI:
> -                r = s->range[type];
> +            case XEN_HVMCTL_IO_RANGE_PORT:
> +            case XEN_HVMCTL_IO_RANGE_MEMORY:
> +            case XEN_HVMCTL_IO_RANGE_PCI:
> +                r = s->range[ior->type];
>                  break;
> 
>              default:
> @@ -901,10 +910,10 @@ int hvm_unmap_io_range_from_ioreq_server
>                  break;
> 
>              rc = -ENOENT;
> -            if ( !rangeset_contains_range(r, start, end) )
> +            if ( !rangeset_contains_range(r, ior->start, ior->end) )
>                  break;
> 
> -            rc = rangeset_remove_range(r, start, end);
> +            rc = rangeset_remove_range(r, ior->start, ior->end);
>              break;
>          }
>      }
> @@ -920,6 +929,9 @@ int hvm_set_ioreq_server_state(struct do
>      struct list_head *entry;
>      int rc;
> 
> +    if ( !is_hvm_domain(d) )
> +        return -EINVAL;
> +
>      spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
> 
>      rc = -ENOENT;
> @@ -1128,12 +1140,12 @@ struct hvm_ioreq_server *hvm_select_iore
> 
>          /* PCI config data cycle */
> 
> -        sbdf = HVMOP_PCI_SBDF(0,
> -                              PCI_BUS(CF8_BDF(cf8)),
> -                              PCI_SLOT(CF8_BDF(cf8)),
> -                              PCI_FUNC(CF8_BDF(cf8)));
> +        sbdf = XEN_HVMCTL_PCI_SBDF(0,
> +                                   PCI_BUS(CF8_BDF(cf8)),
> +                                   PCI_SLOT(CF8_BDF(cf8)),
> +                                   PCI_FUNC(CF8_BDF(cf8)));
> 
> -        type = HVMOP_IO_RANGE_PCI;
> +        type = XEN_HVMCTL_IO_RANGE_PCI;
>          addr = ((uint64_t)sbdf << 32) |
>                 CF8_ADDR_LO(cf8) |
>                 (p->addr & 3);
> @@ -1152,7 +1164,7 @@ struct hvm_ioreq_server *hvm_select_iore
>      else
>      {
>          type = (p->type == IOREQ_TYPE_PIO) ?
> -                HVMOP_IO_RANGE_PORT : HVMOP_IO_RANGE_MEMORY;
> +                XEN_HVMCTL_IO_RANGE_PORT : XEN_HVMCTL_IO_RANGE_MEMORY;
>          addr = p->addr;
>      }
> 
> @@ -1174,19 +1186,19 @@ struct hvm_ioreq_server *hvm_select_iore
>          {
>              unsigned long end;
> 
> -        case HVMOP_IO_RANGE_PORT:
> +        case XEN_HVMCTL_IO_RANGE_PORT:
>              end = addr + p->size - 1;
>              if ( rangeset_contains_range(r, addr, end) )
>                  return s;
> 
>              break;
> -        case HVMOP_IO_RANGE_MEMORY:
> +        case XEN_HVMCTL_IO_RANGE_MEMORY:
>              end = addr + (p->size * p->count) - 1;
>              if ( rangeset_contains_range(r, addr, end) )
>                  return s;
> 
>              break;
> -        case HVMOP_IO_RANGE_PCI:
> +        case XEN_HVMCTL_IO_RANGE_PCI:
>              if ( rangeset_contains_singleton(r, addr >> 32) )
>              {
>                  p->type = IOREQ_TYPE_PCI_CONFIG;
> --- a/xen/include/asm-x86/hvm/domain.h
> +++ b/xen/include/asm-x86/hvm/domain.h
> @@ -30,6 +30,7 @@
>  #include <asm/hvm/vmx/vmcs.h>
>  #include <asm/hvm/svm/vmcb.h>
>  #include <public/grant_table.h>
> +#include <public/hvm/control.h>
>  #include <public/hvm/params.h>
>  #include <public/hvm/save.h>
>  #include <public/hvm/hvm_op.h>
> @@ -47,7 +48,7 @@ struct hvm_ioreq_vcpu {
>      bool_t           pending;
>  };
> 
> -#define NR_IO_RANGE_TYPES (HVMOP_IO_RANGE_PCI + 1)
> +#define NR_IO_RANGE_TYPES (XEN_HVMCTL_IO_RANGE_PCI + 1)
>  #define MAX_NR_IO_RANGES  256
> 
>  struct hvm_ioreq_server {
> --- a/xen/include/asm-x86/hvm/ioreq.h
> +++ b/xen/include/asm-x86/hvm/ioreq.h
> @@ -19,6 +19,8 @@
>  #ifndef __ASM_X86_HVM_IOREQ_H__
>  #define __ASM_X86_HVM_IOREQ_H__
> 
> +#include <public/hvm/control.h>
> +
>  bool_t hvm_io_pending(struct vcpu *v);
>  bool_t handle_hvm_io_completion(struct vcpu *v);
>  bool_t is_ioreq_server_page(struct domain *d, const struct page_info *page);
> @@ -27,16 +29,12 @@ int hvm_create_ioreq_server(struct domai
>                              bool_t is_default, int bufioreq_handling,
>                              ioservid_t *id);
>  int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id);
> -int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
> -                              unsigned long *ioreq_pfn,
> -                              unsigned long *bufioreq_pfn,
> -                              evtchn_port_t *bufioreq_port);
> -int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
> -                                     uint32_t type, uint64_t start,
> -                                     uint64_t end);
> -int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
> -                                         uint32_t type, uint64_t start,
> -                                         uint64_t end);
> +int hvm_get_ioreq_server_info(struct domain *d,
> +                              struct xen_hvm_get_ioreq_server_info *info);
> +int hvm_map_io_range_to_ioreq_server(struct domain *d,
> +                                     const struct xen_hvm_io_range *r);
> +int hvm_unmap_io_range_from_ioreq_server(struct domain *d,
> +                                         const struct xen_hvm_io_range *r);
>  int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
>                                 bool_t enabled);
> 
> --- a/xen/include/public/hvm/control.h
> +++ b/xen/include/public/hvm/control.h
> @@ -26,6 +26,7 @@
>  #endif
> 
>  #include "../xen.h"
> +#include "../event_channel.h"
> 
>  #define XEN_HVMCTL_INTERFACE_VERSION 0x00000001
> 
> @@ -130,6 +131,131 @@ struct xen_hvm_inject_msi {
>      uint64_t  addr;
>  };
> 
> +/*
> + * IOREQ Servers
> + *
> + * The interface between an I/O emulator an Xen is called an IOREQ Server.
> + * A domain supports a single 'legacy' IOREQ Server which is instantiated if
> + * parameter...
> + *
> + * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
> + * ioreq structures), or...
> + * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
> + * ioreq ring), or...
> + * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
> + * to request buffered I/O emulation).
> + *
> + * The following hypercalls facilitate the creation of IOREQ Servers for
> + * 'secondary' emulators which are invoked to implement port I/O, memory, or
> + * PCI config space ranges which they explicitly register.
> + */
> +
> +typedef uint16_t ioservid_t;
> +
> +/*
> + * XEN_HVMCTL_create_ioreq_server: Instantiate a new IOREQ Server for a
> + *                                 secondary emulator servicing domain
> + *                                 <domid>.
> + *
> + * The <id> handed back is unique for <domid>. If <handle_bufioreq> is zero
> + * the buffered ioreq ring will not be allocated and hence all emulation
> + * requestes to this server will be synchronous.
> + */
> +struct xen_hvm_create_ioreq_server {
> +#define HVM_IOREQSRV_BUFIOREQ_OFF    0
> +#define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
> +/*
> + * Use this when read_pointer gets updated atomically and
> + * the pointer pair gets read atomically:
> + */
> +#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
> +    uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
> +    uint8_t rsvd;            /* IN - must be zero */
> +    ioservid_t id;           /* OUT - server id */
> +};
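
A concrete caller's view, for illustration: through the converted libxc
wrapper shown earlier, creation reduces to a single call. A minimal sketch,
assuming an already-open xc_interface handle and an existing HVM guest:

    #include <xenctrl.h>

    /* Create a server with an atomically updated buffered ioreq ring. */
    static int create_server(xc_interface *xch, domid_t domid,
                             ioservid_t *id)
    {
        return xc_hvm_create_ioreq_server(xch, domid,
                                          HVM_IOREQSRV_BUFIOREQ_ATOMIC, id);
    }
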
> +
> +/*
> + * XEN_HVMCTL_get_ioreq_server_info: Get all the information necessary to
> + *                                   access IOREQ Server <id>.
> + *
> + * The emulator needs to map the synchronous ioreq structures and buffered
> + * ioreq ring (if it exists) that Xen uses to request emulation. These are
> + * hosted in domain <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn>
> + * respectively. In addition, if the IOREQ Server is handling buffered
> + * emulation requests, the emulator needs to bind to event channel
> + * <bufioreq_port> to listen for them. (The event channels used for
> + * synchronous emulation requests are specified in the per-CPU ioreq
> + * structures in <ioreq_pfn>).
> + * If the IOREQ Server is not handling buffered emulation requests then the
> + * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
> + */
> +struct xen_hvm_get_ioreq_server_info {
> +    ioservid_t id;                 /* IN - server id */
> +    uint16_t rsvd;                 /* IN - must be zero */
> +    evtchn_port_t bufioreq_port;   /* OUT - buffered ioreq port */
> +    uint64_aligned_t ioreq_pfn;    /* OUT - sync ioreq pfn */
> +    uint64_aligned_t bufioreq_pfn; /* OUT - buffered ioreq pfn */
> +};
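
Through libxc this is reached via xc_hvm_get_ioreq_server_info(), whose
output pointers may individually be NULL when a value is not wanted. A
minimal sketch, again assuming an already-open handle:

    #include <xenctrl.h>

    /* Fetch the pfns and the buffered-I/O event channel for server <id>. */
    static int query_server(xc_interface *xch, domid_t domid, ioservid_t id,
                            xen_pfn_t *ioreq_pfn)
    {
        xen_pfn_t bufioreq_pfn;
        evtchn_port_t bufioreq_port;

        return xc_hvm_get_ioreq_server_info(xch, domid, id, ioreq_pfn,
                                            &bufioreq_pfn, &bufioreq_port);
    }
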
> +
> +/*
> + * XEN_HVMCTL_map_io_range_to_ioreq_server: Register an I/O range of domain
> + *                                          <domid> for emulation by the
> + *                                          client of IOREQ Server <id>
> + * XEN_HVMCTL_unmap_io_range_from_ioreq_server: Deregister an I/O range of
> + *                                              <domid> for emulation by the
> + *                                              client of IOREQ Server <id>
> + *
> + * There are three types of I/O that can be emulated: port I/O, memory accesses
> + * and PCI config space accesses. The <type> field denotes which type of range
> + * the <start> and <end> (inclusive) fields are specifying.
> + * PCI config space ranges are specified by segment/bus/device/function values
> + * which should be encoded using the XEN_HVMCTL_PCI_SBDF helper macro below.
> + *
> + * NOTE: unless an emulation request falls entirely within a range mapped
> + * by a secondary emulator, it will not be passed to that emulator.
> + */
> +struct xen_hvm_io_range {
> +    ioservid_t id;               /* IN - server id */
> +    uint16_t type;               /* IN - type of range */
> +    uint32_t rsvd;               /* IN - must be zero */
> +#define XEN_HVMCTL_IO_RANGE_PORT   0 /* I/O port range */
> +#define XEN_HVMCTL_IO_RANGE_MEMORY 1 /* MMIO range */
> +#define XEN_HVMCTL_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
> +    uint64_aligned_t start, end; /* IN - inclusive start and end of range */
> +};
> +
> +#define XEN_HVMCTL_PCI_SBDF(s, b, d, f) \
> +	((((s) & 0xffff) << 16) | \
> +	 (((b) & 0xff) << 8) | \
> +	 (((d) & 0x1f) << 3) | \
> +	 ((f) & 0x07))
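
As a worked example of the encoding, with hypothetical values: segment 0,
bus 3, device 0x1f, function 7 yields (3 << 8) | (0x1f << 3) | 7 = 0x3ff.

    #include <assert.h>
    #include <xenctrl.h> /* now pulls in xen/hvm/control.h per this patch */

    static void sbdf_example(void)
    {
        uint32_t sbdf = XEN_HVMCTL_PCI_SBDF(0, 3, 0x1f, 7);

        assert(sbdf == 0x3ff);
    }
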
> +
> +/*
> + * XEN_HVMCTL_destroy_ioreq_server: Destroy the IOREQ Server <id> servicing
> + *                                  domain <domid>.
> + *
> + * Any registered I/O ranges will be automatically deregistered.
> + */
> +struct xen_hvm_destroy_ioreq_server {
> +    ioservid_t id; /* IN - server id */
> +};
> +
> +/*
> + * XEN_HVMCTL_set_ioreq_server_state: Enable or disable the IOREQ Server <id>
> + *                                    servicing domain <domid>.
> + *
> + * The IOREQ Server will not be passed any emulation requests until it is in
> + * the enabled state.
> + * Note that the contents of the ioreq_pfn and bufioreq_fn (see
> + * XEN_HVMCTL_get_ioreq_server_info) are not meaningful until the IOREQ Server
> + * is in the enabled state.
> + */
> +struct xen_hvm_set_ioreq_server_state {
> +    ioservid_t id;   /* IN - server id */
> +    uint8_t enabled; /* IN - enabled? */
> +    uint8_t rsvd;    /* IN - must be zero */
> +};
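
Since no requests are forwarded while a server is disabled, a caller would
normally register its ranges first and enable last. A minimal sketch using
the converted wrappers (the port range 0x3f8-0x3ff is a hypothetical
example):

    #include <xenctrl.h>

    static int arm_server(xc_interface *xch, domid_t domid, ioservid_t id)
    {
        /* Register a port I/O range (is_mmio == 0) while still disabled. */
        int rc = xc_hvm_map_io_range_to_ioreq_server(xch, domid, id, 0,
                                                     0x3f8, 0x3ff);

        if ( rc )
            return rc;

        /* Only now start receiving emulation requests. */
        return xc_hvm_set_ioreq_server_state(xch, domid, id, 1);
    }
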
> +
>  struct xen_hvmctl {
>      uint16_t interface_version;    /* XEN_HVMCTL_INTERFACE_VERSION */
>      domid_t domain;
> @@ -142,6 +268,12 @@ struct xen_hvmctl {
>  #define XEN_HVMCTL_set_mem_type                  6
>  #define XEN_HVMCTL_inject_trap                   7
>  #define XEN_HVMCTL_inject_msi                    8
> +#define XEN_HVMCTL_create_ioreq_server           9
> +#define XEN_HVMCTL_get_ioreq_server_info        10
> +#define XEN_HVMCTL_map_io_range_to_ioreq_server 11
> +#define XEN_HVMCTL_unmap_io_range_from_ioreq_server 12
> +#define XEN_HVMCTL_destroy_ioreq_server         13
> +#define XEN_HVMCTL_set_ioreq_server_state       14
>      uint16_t opaque;               /* Must be zero on initial invocation. */
>      union {
>          struct xen_hvm_set_pci_intx_level set_pci_intx_level;
> @@ -152,6 +284,12 @@ struct xen_hvmctl {
>          struct xen_hvm_set_mem_type set_mem_type;
>          struct xen_hvm_inject_trap inject_trap;
>          struct xen_hvm_inject_msi inject_msi;
> +        struct xen_hvm_create_ioreq_server create_ioreq_server;
> +        struct xen_hvm_get_ioreq_server_info get_ioreq_server_info;
> +        struct xen_hvm_io_range map_io_range_to_ioreq_server;
> +        struct xen_hvm_io_range unmap_io_range_from_ioreq_server;
> +        struct xen_hvm_destroy_ioreq_server destroy_ioreq_server;
> +        struct xen_hvm_set_ioreq_server_state set_ioreq_server_state;
>          uint8_t pad[120];
>      } u;
>  };
> --- a/xen/include/public/hvm/hvm_op.h
> +++ b/xen/include/public/hvm/hvm_op.h
> @@ -25,7 +25,6 @@
> 
>  #include "../xen.h"
>  #include "../trace.h"
> -#include "../event_channel.h"
> 
>  /* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
>  #define HVMOP_set_param           0
> @@ -137,152 +136,6 @@ struct xen_hvm_get_mem_type {
>  typedef struct xen_hvm_get_mem_type xen_hvm_get_mem_type_t;
>  DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);
> 
> -/* Following tools-only interfaces may change in future. */
> -#if defined(__XEN__) || defined(__XEN_TOOLS__)
> -
> -/*
> - * IOREQ Servers
> - *
> - * The interface between an I/O emulator an Xen is called an IOREQ Server.
> - * A domain supports a single 'legacy' IOREQ Server which is instantiated if
> - * parameter...
> - *
> - * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
> - * ioreq structures), or...
> - * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
> - * ioreq ring), or...
> - * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
> - * to request buffered I/O emulation).
> - *
> - * The following hypercalls facilitate the creation of IOREQ Servers for
> - * 'secondary' emulators which are invoked to implement port I/O, memory, or
> - * PCI config space ranges which they explicitly register.
> - */
> -
> -typedef uint16_t ioservid_t;
> -
> -/*
> - * HVMOP_create_ioreq_server: Instantiate a new IOREQ Server for a secondary
> - *                            emulator servicing domain <domid>.
> - *
> - * The <id> handed back is unique for <domid>. If <handle_bufioreq> is zero
> - * the buffered ioreq ring will not be allocated and hence all emulation
> - * requestes to this server will be synchronous.
> - */
> -#define HVMOP_create_ioreq_server 17
> -struct xen_hvm_create_ioreq_server {
> -    domid_t domid;           /* IN - domain to be serviced */
> -#define HVM_IOREQSRV_BUFIOREQ_OFF    0
> -#define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
> -/*
> - * Use this when read_pointer gets updated atomically and
> - * the pointer pair gets read atomically:
> - */
> -#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
> -    uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
> -    ioservid_t id;           /* OUT - server id */
> -};
> -typedef struct xen_hvm_create_ioreq_server xen_hvm_create_ioreq_server_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_hvm_create_ioreq_server_t);
> -
> -/*
> - * HVMOP_get_ioreq_server_info: Get all the information necessary to access
> - *                              IOREQ Server <id>.
> - *
> - * The emulator needs to map the synchronous ioreq structures and buffered
> - * ioreq ring (if it exists) that Xen uses to request emulation. These are
> - * hosted in domain <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn>
> - * respectively. In addition, if the IOREQ Server is handling buffered
> - * emulation requests, the emulator needs to bind to event channel
> - * <bufioreq_port> to listen for them. (The event channels used for
> - * synchronous emulation requests are specified in the per-CPU ioreq
> - * structures in <ioreq_pfn>).
> - * If the IOREQ Server is not handling buffered emulation requests then the
> - * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
> - */
> -#define HVMOP_get_ioreq_server_info 18
> -struct xen_hvm_get_ioreq_server_info {
> -    domid_t domid;                 /* IN - domain to be serviced */
> -    ioservid_t id;                 /* IN - server id */
> -    evtchn_port_t bufioreq_port;   /* OUT - buffered ioreq port */
> -    uint64_aligned_t ioreq_pfn;    /* OUT - sync ioreq pfn */
> -    uint64_aligned_t bufioreq_pfn; /* OUT - buffered ioreq pfn */
> -};
> -typedef struct xen_hvm_get_ioreq_server_info xen_hvm_get_ioreq_server_info_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_ioreq_server_info_t);
> -
> -/*
> - * HVM_map_io_range_to_ioreq_server: Register an I/O range of domain <domid>
> - *                                   for emulation by the client of IOREQ
> - *                                   Server <id>
> - * HVM_unmap_io_range_from_ioreq_server: Deregister an I/O range of <domid>
> - *                                       for emulation by the client of IOREQ
> - *                                       Server <id>
> - *
> - * There are three types of I/O that can be emulated: port I/O, memory accesses
> - * and PCI config space accesses. The <type> field denotes which type of range
> - * the <start> and <end> (inclusive) fields are specifying.
> - * PCI config space ranges are specified by segment/bus/device/function values
> - * which should be encoded using the HVMOP_PCI_SBDF helper macro below.
> - *
> - * NOTE: unless an emulation request falls entirely within a range mapped
> - * by a secondary emulator, it will not be passed to that emulator.
> - */
> -#define HVMOP_map_io_range_to_ioreq_server 19
> -#define HVMOP_unmap_io_range_from_ioreq_server 20
> -struct xen_hvm_io_range {
> -    domid_t domid;               /* IN - domain to be serviced */
> -    ioservid_t id;               /* IN - server id */
> -    uint32_t type;               /* IN - type of range */
> -# define HVMOP_IO_RANGE_PORT   0 /* I/O port range */
> -# define HVMOP_IO_RANGE_MEMORY 1 /* MMIO range */
> -# define HVMOP_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
> -    uint64_aligned_t start, end; /* IN - inclusive start and end of range */
> -};
> -typedef struct xen_hvm_io_range xen_hvm_io_range_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_hvm_io_range_t);
> -
> -#define HVMOP_PCI_SBDF(s,b,d,f)                 \
> -	((((s) & 0xffff) << 16) |                   \
> -	 (((b) & 0xff) << 8) |                      \
> -	 (((d) & 0x1f) << 3) |                      \
> -	 ((f) & 0x07))
> -
> -/*
> - * HVMOP_destroy_ioreq_server: Destroy the IOREQ Server <id> servicing domain
> - *                             <domid>.
> - *
> - * Any registered I/O ranges will be automatically deregistered.
> - */
> -#define HVMOP_destroy_ioreq_server 21
> -struct xen_hvm_destroy_ioreq_server {
> -    domid_t domid; /* IN - domain to be serviced */
> -    ioservid_t id; /* IN - server id */
> -};
> -typedef struct xen_hvm_destroy_ioreq_server xen_hvm_destroy_ioreq_server_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_hvm_destroy_ioreq_server_t);
> -
> -/*
> - * HVMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id> servicing
> - *                               domain <domid>.
> - *
> - * The IOREQ Server will not be passed any emulation requests until it is in the
> - * enabled state.
> - * Note that the contents of the ioreq_pfn and bufioreq_fn (see
> - * HVMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server is in
> - * the enabled state.
> - */
> -#define HVMOP_set_ioreq_server_state 22
> -struct xen_hvm_set_ioreq_server_state {
> -    domid_t domid;   /* IN - domain to be serviced */
> -    ioservid_t id;   /* IN - server id */
> -    uint8_t enabled; /* IN - enabled? */
> -};
> -typedef struct xen_hvm_set_ioreq_server_state xen_hvm_set_ioreq_server_state_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_ioreq_server_state_t);
> -
> -#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
> -
>  #if defined(__i386__) || defined(__x86_64__)
> 
>  /*
> --- a/xen/include/xsm/dummy.h
> +++ b/xen/include/xsm/dummy.h
> @@ -609,12 +609,6 @@ static XSM_INLINE int xsm_shadow_control
>      return xsm_default_action(action, current->domain, d);
>  }
> 
> -static XSM_INLINE int xsm_hvm_ioreq_server(XSM_DEFAULT_ARG struct domain *d, int op)
> -{
> -    XSM_ASSERT_ACTION(XSM_DM_PRIV);
> -    return xsm_default_action(action, current->domain, d);
> -}
> -
>  static XSM_INLINE int xsm_mem_sharing_op(XSM_DEFAULT_ARG struct domain *d, struct domain *cd, int op)
>  {
>      XSM_ASSERT_ACTION(XSM_DM_PRIV);
> --- a/xen/include/xsm/xsm.h
> +++ b/xen/include/xsm/xsm.h
> @@ -174,7 +174,6 @@ struct xsm_operations {
>      int (*do_mca) (void);
>      int (*shadow_control) (struct domain *d, uint32_t op);
>      int (*hvm_set_pci_link_route) (struct domain *d);
> -    int (*hvm_ioreq_server) (struct domain *d, int op);
>      int (*mem_sharing_op) (struct domain *d, struct domain *cd, int op);
>      int (*apic) (struct domain *d, int cmd);
>      int (*memtype) (uint32_t access);
> @@ -648,11 +647,6 @@ static inline int xsm_hvm_set_pci_link_r
>      return xsm_ops->hvm_set_pci_link_route(d);
>  }
> 
> -static inline int xsm_hvm_ioreq_server (xsm_default_t def, struct domain *d, int op)
> -{
> -    return xsm_ops->hvm_ioreq_server(d, op);
> -}
> -
>  static inline int xsm_mem_sharing_op (xsm_default_t def, struct domain *d, struct domain *cd, int op)
>  {
>      return xsm_ops->mem_sharing_op(d, cd, op);
> --- a/xen/xsm/dummy.c
> +++ b/xen/xsm/dummy.c
> @@ -145,7 +145,6 @@ void xsm_fixup_ops (struct xsm_operation
>  #ifdef CONFIG_X86
>      set_to_dummy_if_null(ops, do_mca);
>      set_to_dummy_if_null(ops, shadow_control);
> -    set_to_dummy_if_null(ops, hvm_ioreq_server);
>      set_to_dummy_if_null(ops, mem_sharing_op);
>      set_to_dummy_if_null(ops, apic);
>      set_to_dummy_if_null(ops, machine_memory_map);
> --- a/xen/xsm/flask/hooks.c
> +++ b/xen/xsm/flask/hooks.c
> @@ -1526,11 +1526,6 @@ static int flask_ioport_mapping(struct d
>      return flask_ioport_permission(d, start, end, access);
>  }
> 
> -static int flask_hvm_ioreq_server(struct domain *d, int op)
> -{
> -    return current_has_perm(d, SECCLASS_HVM, HVM__HVMCTL);
> -}
> -
>  static int flask_mem_sharing_op(struct domain *d, struct domain *cd, int op)
>  {
>      int rc = current_has_perm(cd, SECCLASS_HVM, HVM__MEM_SHARING);
> @@ -1799,7 +1794,6 @@ static struct xsm_operations flask_ops =
>  #ifdef CONFIG_X86
>      .do_mca = flask_do_mca,
>      .shadow_control = flask_shadow_control,
> -    .hvm_ioreq_server = flask_hvm_ioreq_server,
>      .mem_sharing_op = flask_mem_sharing_op,
>      .apic = flask_apic,
>      .machine_memory_map = flask_machine_memory_map,
>
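
Taken together, the conversion leaves a secondary emulator's lifecycle
unchanged at the libxc level. A rough sketch (mapping of the ioreq pages
and the emulation loop itself are omitted, and the domid is assumed to
come from the toolstack):

    #include <xenctrl.h>

    static int server_lifecycle(xc_interface *xch, domid_t domid)
    {
        ioservid_t id;
        int rc = xc_hvm_create_ioreq_server(xch, domid,
                                            HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                                            &id);

        if ( rc )
            return rc;

        /* ... map ranges, enable, service requests, disable ... */

        /* Destruction deregisters any remaining ranges automatically. */
        return xc_hvm_destroy_ioreq_server(xch, domid, id);
    }
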

Patch

--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -41,6 +41,7 @@ 
 #include <xen/sched.h>
 #include <xen/memory.h>
 #include <xen/grant_table.h>
+#include <xen/hvm/control.h>
 #include <xen/hvm/params.h>
 #include <xen/xsm/flask_op.h>
 #include <xen/tmem.h>
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1416,23 +1416,14 @@  int xc_hvm_create_ioreq_server(xc_interf
                                int handle_bufioreq,
                                ioservid_t *id)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_create_ioreq_server_t, arg);
+    DECLARE_HVMCTL(create_ioreq_server, domid,
+                   .handle_bufioreq = handle_bufioreq);
     int rc;
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->domid = domid;
-    arg->handle_bufioreq = handle_bufioreq;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_create_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    rc = do_hvmctl(xch, &hvmctl);
 
-    *id = arg->id;
+    *id = hvmctl.u.create_ioreq_server.id;
 
-    xc_hypercall_buffer_free(xch, arg);
     return rc;
 }
 
@@ -1443,84 +1434,52 @@  int xc_hvm_get_ioreq_server_info(xc_inte
                                  xen_pfn_t *bufioreq_pfn,
                                  evtchn_port_t *bufioreq_port)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_get_ioreq_server_info_t, arg);
+    DECLARE_HVMCTL(get_ioreq_server_info, domid,
+                   .id = id);
     int rc;
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->domid = domid;
-    arg->id = id;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_get_ioreq_server_info,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    rc = do_hvmctl(xch, &hvmctl);
     if ( rc != 0 )
-        goto done;
+        return rc;
 
     if ( ioreq_pfn )
-        *ioreq_pfn = arg->ioreq_pfn;
+        *ioreq_pfn = hvmctl.u.get_ioreq_server_info.ioreq_pfn;
 
     if ( bufioreq_pfn )
-        *bufioreq_pfn = arg->bufioreq_pfn;
+        *bufioreq_pfn = hvmctl.u.get_ioreq_server_info.bufioreq_pfn;
 
     if ( bufioreq_port )
-        *bufioreq_port = arg->bufioreq_port;
+        *bufioreq_port = hvmctl.u.get_ioreq_server_info.bufioreq_port;
 
-done:
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return 0;
 }
 
 int xc_hvm_map_io_range_to_ioreq_server(xc_interface *xch, domid_t domid,
                                         ioservid_t id, int is_mmio,
                                         uint64_t start, uint64_t end)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = is_mmio ? HVMOP_IO_RANGE_MEMORY : HVMOP_IO_RANGE_PORT;
-    arg->start = start;
-    arg->end = end;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_map_io_range_to_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    DECLARE_HVMCTL(map_io_range_to_ioreq_server, domid,
+                   .id = id,
+                   .type = is_mmio ? XEN_HVMCTL_IO_RANGE_MEMORY
+                                   : XEN_HVMCTL_IO_RANGE_PORT,
+                   .start = start,
+                   .end = end);
 
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_unmap_io_range_from_ioreq_server(xc_interface *xch, domid_t domid,
                                             ioservid_t id, int is_mmio,
                                             uint64_t start, uint64_t end)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    DECLARE_HVMCTL(unmap_io_range_from_ioreq_server, domid,
+                   .id = id,
+                   .type = is_mmio ? XEN_HVMCTL_IO_RANGE_MEMORY
+                                   : XEN_HVMCTL_IO_RANGE_PORT,
+                   .start = start,
+                   .end = end);
 
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = is_mmio ? HVMOP_IO_RANGE_MEMORY : HVMOP_IO_RANGE_PORT;
-    arg->start = start;
-    arg->end = end;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_unmap_io_range_from_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_map_pcidev_to_ioreq_server(xc_interface *xch, domid_t domid,
@@ -1528,37 +1487,23 @@  int xc_hvm_map_pcidev_to_ioreq_server(xc
                                       uint8_t bus, uint8_t device,
                                       uint8_t function)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
+    /*
+     * The underlying hypercall will deal with ranges of PCI SBDF
+     * but, for simplicity, the API only uses singletons.
+     */
+    uint32_t sbdf = XEN_HVMCTL_PCI_SBDF(segment, bus, device, function);
+    DECLARE_HVMCTL(map_io_range_to_ioreq_server, domid,
+                   .id = id,
+                   .type = XEN_HVMCTL_IO_RANGE_PCI,
+                   .start = sbdf,
+                   .end = sbdf);
 
     if (device > 0x1f || function > 0x7) {
         errno = EINVAL;
         return -1;
     }
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = HVMOP_IO_RANGE_PCI;
-
-    /*
-     * The underlying hypercall will deal with ranges of PCI SBDF
-     * but, for simplicity, the API only uses singletons.
-     */
-    arg->start = arg->end = HVMOP_PCI_SBDF((uint64_t)segment,
-                                           (uint64_t)bus,
-                                           (uint64_t)device,
-                                           (uint64_t)function);
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_map_io_range_to_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_unmap_pcidev_from_ioreq_server(xc_interface *xch, domid_t domid,
@@ -1566,54 +1511,29 @@  int xc_hvm_unmap_pcidev_from_ioreq_serve
                                           uint8_t bus, uint8_t device,
                                           uint8_t function)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
+    uint32_t sbdf = XEN_HVMCTL_PCI_SBDF(segment, bus, device, function);
+    DECLARE_HVMCTL(unmap_io_range_from_ioreq_server, domid,
+                   .id = id,
+                   .type = XEN_HVMCTL_IO_RANGE_PCI,
+                   .start = sbdf,
+                   .end = sbdf);
 
     if (device > 0x1f || function > 0x7) {
         errno = EINVAL;
         return -1;
     }
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = HVMOP_IO_RANGE_PCI;
-    arg->start = arg->end = HVMOP_PCI_SBDF((uint64_t)segment,
-                                           (uint64_t)bus,
-                                           (uint64_t)device,
-                                           (uint64_t)function);
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_unmap_io_range_from_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_destroy_ioreq_server(xc_interface *xch,
                                 domid_t domid,
                                 ioservid_t id)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_destroy_ioreq_server_t, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    DECLARE_HVMCTL(destroy_ioreq_server, domid,
+                   .id = id);
 
-    arg->domid = domid;
-    arg->id = id;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_destroy_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_set_ioreq_server_state(xc_interface *xch,
@@ -1621,23 +1541,11 @@  int xc_hvm_set_ioreq_server_state(xc_int
                                   ioservid_t id,
                                   int enabled)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_set_ioreq_server_state_t, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    DECLARE_HVMCTL(set_ioreq_server_state, domid,
+                   .id = id,
+                   .enabled = !!enabled);
 
-    arg->domid = domid;
-    arg->id = id;
-    arg->enabled = !!enabled;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_set_ioreq_server_state,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_domain_setdebugging(xc_interface *xch,
--- a/tools/libxc/xc_private.h
+++ b/tools/libxc/xc_private.h
@@ -34,8 +34,6 @@ 
 #define XC_INTERNAL_COMPAT_MAP_FOREIGN_API
 #include "xenctrl.h"
 
-#include <xen/hvm/control.h>
-
 #include <xencall.h>
 #include <xenforeignmemory.h>
 
--- a/xen/arch/x86/hvm/control.c
+++ b/xen/arch/x86/hvm/control.c
@@ -20,6 +20,7 @@ 
 #include <xen/sched.h>
 #include <asm/hap.h>
 #include <asm/shadow.h>
+#include <asm/hvm/ioreq.h>
 #include <xsm/xsm.h>
 
 static int set_pci_intx_level(struct domain *d,
@@ -299,6 +300,50 @@  long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xe
         rc = hvm_inject_msi(d, op.u.inject_msi.addr, op.u.inject_msi.data);
         break;
 
+    case XEN_HVMCTL_create_ioreq_server:
+        rc = -EINVAL;
+        if ( op.u.create_ioreq_server.rsvd )
+            break;
+        rc = hvm_create_ioreq_server(d, current->domain->domain_id, 0,
+                                     op.u.create_ioreq_server.handle_bufioreq,
+                                     &op.u.create_ioreq_server.id);
+        if ( rc == 0 && copy_field_to_guest(u_hvmctl, &op,
+                                            u.create_ioreq_server.id) )
+            rc = -EFAULT;
+        break;
+
+    case XEN_HVMCTL_get_ioreq_server_info:
+        rc = -EINVAL;
+        if ( op.u.get_ioreq_server_info.rsvd )
+            break;
+        rc = hvm_get_ioreq_server_info(d, &op.u.get_ioreq_server_info);
+        if ( rc == 0 && copy_field_to_guest(u_hvmctl, &op,
+                                            u.get_ioreq_server_info) )
+            rc = -EFAULT;
+        break;
+
+    case XEN_HVMCTL_map_io_range_to_ioreq_server:
+        rc = hvm_map_io_range_to_ioreq_server(
+                 d, &op.u.map_io_range_to_ioreq_server);
+        break;
+
+    case XEN_HVMCTL_unmap_io_range_from_ioreq_server:
+        rc = hvm_unmap_io_range_from_ioreq_server(
+                 d, &op.u.unmap_io_range_from_ioreq_server);
+        break;
+
+    case XEN_HVMCTL_destroy_ioreq_server:
+        rc = hvm_destroy_ioreq_server(d, op.u.destroy_ioreq_server.id);
+        break;
+
+    case XEN_HVMCTL_set_ioreq_server_state:
+        rc = -EINVAL;
+        if ( op.u.set_ioreq_server_state.rsvd )
+            break;
+        rc = hvm_set_ioreq_server_state(d, op.u.set_ioreq_server_state.id,
+                                        !!op.u.set_ioreq_server_state.enabled);
+        break;
+
     default:
         rc = -EOPNOTSUPP;
         break;
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4487,195 +4487,6 @@  static int hvmop_flush_tlb_all(void)
     return 0;
 }
 
-static int hvmop_create_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_create_ioreq_server_t) uop)
-{
-    struct domain *curr_d = current->domain;
-    xen_hvm_create_ioreq_server_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_create_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_create_ioreq_server(d, curr_d->domain_id, 0,
-                                 op.handle_bufioreq, &op.id);
-    if ( rc != 0 )
-        goto out;
-
-    rc = copy_to_guest(uop, &op, 1) ? -EFAULT : 0;
-    
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_get_ioreq_server_info(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_get_ioreq_server_info_t) uop)
-{
-    xen_hvm_get_ioreq_server_info_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_get_ioreq_server_info);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_get_ioreq_server_info(d, op.id,
-                                   &op.ioreq_pfn,
-                                   &op.bufioreq_pfn, 
-                                   &op.bufioreq_port);
-    if ( rc != 0 )
-        goto out;
-
-    rc = copy_to_guest(uop, &op, 1) ? -EFAULT : 0;
-    
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_map_io_range_to_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_io_range_t) uop)
-{
-    xen_hvm_io_range_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_map_io_range_to_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_map_io_range_to_ioreq_server(d, op.id, op.type,
-                                          op.start, op.end);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_unmap_io_range_from_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_io_range_t) uop)
-{
-    xen_hvm_io_range_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_unmap_io_range_from_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_unmap_io_range_from_ioreq_server(d, op.id, op.type,
-                                              op.start, op.end);
-    
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_set_ioreq_server_state(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_set_ioreq_server_state_t) uop)
-{
-    xen_hvm_set_ioreq_server_state_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_set_ioreq_server_state);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_set_ioreq_server_state(d, op.id, !!op.enabled);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_destroy_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_destroy_ioreq_server_t) uop)
-{
-    xen_hvm_destroy_ioreq_server_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_destroy_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_destroy_ioreq_server(d, op.id);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
 static int hvmop_set_evtchn_upcall_vector(
     XEN_GUEST_HANDLE_PARAM(xen_hvm_evtchn_upcall_vector_t) uop)
 {
@@ -5192,36 +5003,6 @@  long do_hvm_op(unsigned long op, XEN_GUE
 
     switch ( op )
     {
-    case HVMOP_create_ioreq_server:
-        rc = hvmop_create_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_create_ioreq_server_t));
-        break;
-    
-    case HVMOP_get_ioreq_server_info:
-        rc = hvmop_get_ioreq_server_info(
-            guest_handle_cast(arg, xen_hvm_get_ioreq_server_info_t));
-        break;
-    
-    case HVMOP_map_io_range_to_ioreq_server:
-        rc = hvmop_map_io_range_to_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_io_range_t));
-        break;
-    
-    case HVMOP_unmap_io_range_from_ioreq_server:
-        rc = hvmop_unmap_io_range_from_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_io_range_t));
-        break;
-
-    case HVMOP_set_ioreq_server_state:
-        rc = hvmop_set_ioreq_server_state(
-            guest_handle_cast(arg, xen_hvm_set_ioreq_server_state_t));
-        break;
-    
-    case HVMOP_destroy_ioreq_server:
-        rc = hvmop_destroy_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_destroy_ioreq_server_t));
-        break;
-    
     case HVMOP_set_evtchn_upcall_vector:
         rc = hvmop_set_evtchn_upcall_vector(
             guest_handle_cast(arg, xen_hvm_evtchn_upcall_vector_t));
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -513,9 +513,9 @@  static int hvm_ioreq_server_alloc_ranges
         char *name;
 
         rc = asprintf(&name, "ioreq_server %d %s", s->id,
-                      (i == HVMOP_IO_RANGE_PORT) ? "port" :
-                      (i == HVMOP_IO_RANGE_MEMORY) ? "memory" :
-                      (i == HVMOP_IO_RANGE_PCI) ? "pci" :
+                      (i == XEN_HVMCTL_IO_RANGE_PORT) ? "port" :
+                      (i == XEN_HVMCTL_IO_RANGE_MEMORY) ? "memory" :
+                      (i == XEN_HVMCTL_IO_RANGE_PCI) ? "pci" :
                       "");
         if ( rc )
             goto fail;
@@ -686,7 +686,8 @@  int hvm_create_ioreq_server(struct domai
     struct hvm_ioreq_server *s;
     int rc;
 
-    if ( bufioreq_handling > HVM_IOREQSRV_BUFIOREQ_ATOMIC )
+    if ( !is_hvm_domain(d) ||
+         bufioreq_handling > HVM_IOREQSRV_BUFIOREQ_ATOMIC )
         return -EINVAL;
 
     rc = -ENOMEM;
@@ -738,6 +739,9 @@  int hvm_destroy_ioreq_server(struct doma
     struct hvm_ioreq_server *s;
     int rc;
 
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
@@ -772,14 +776,15 @@  int hvm_destroy_ioreq_server(struct doma
     return rc;
 }
 
-int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
-                              unsigned long *ioreq_pfn,
-                              unsigned long *bufioreq_pfn,
-                              evtchn_port_t *bufioreq_port)
+int hvm_get_ioreq_server_info(struct domain *d,
+                              struct xen_hvm_get_ioreq_server_info *info)
 {
     struct hvm_ioreq_server *s;
     int rc;
 
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
@@ -790,15 +795,15 @@  int hvm_get_ioreq_server_info(struct dom
         if ( s == d->arch.hvm_domain.default_ioreq_server )
             continue;
 
-        if ( s->id != id )
+        if ( s->id != info->id )
             continue;
 
-        *ioreq_pfn = s->ioreq.gmfn;
+        info->ioreq_pfn = s->ioreq.gmfn;
 
         if ( s->bufioreq.va != NULL )
         {
-            *bufioreq_pfn = s->bufioreq.gmfn;
-            *bufioreq_port = s->bufioreq_evtchn;
+            info->bufioreq_pfn = s->bufioreq.gmfn;
+            info->bufioreq_port = s->bufioreq_evtchn;
         }
 
         rc = 0;
@@ -810,13 +815,15 @@  int hvm_get_ioreq_server_info(struct dom
     return rc;
 }
 
-int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
-                                     uint32_t type, uint64_t start,
-                                     uint64_t end)
+int hvm_map_io_range_to_ioreq_server(struct domain *d,
+                                     const struct xen_hvm_io_range *ior)
 {
     struct hvm_ioreq_server *s;
     int rc;
 
+    if ( ior->rsvd || !is_hvm_domain(d) )
+        return -EINVAL;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
@@ -827,16 +834,16 @@  int hvm_map_io_range_to_ioreq_server(str
         if ( s == d->arch.hvm_domain.default_ioreq_server )
             continue;
 
-        if ( s->id == id )
+        if ( s->id == ior->id )
         {
             struct rangeset *r;
 
-            switch ( type )
+            switch ( ior->type )
             {
-            case HVMOP_IO_RANGE_PORT:
-            case HVMOP_IO_RANGE_MEMORY:
-            case HVMOP_IO_RANGE_PCI:
-                r = s->range[type];
+            case XEN_HVMCTL_IO_RANGE_PORT:
+            case XEN_HVMCTL_IO_RANGE_MEMORY:
+            case XEN_HVMCTL_IO_RANGE_PCI:
+                r = s->range[ior->type];
                 break;
 
             default:
@@ -849,10 +856,10 @@  int hvm_map_io_range_to_ioreq_server(str
                 break;
 
             rc = -EEXIST;
-            if ( rangeset_overlaps_range(r, start, end) )
+            if ( rangeset_overlaps_range(r, ior->start, ior->end) )
                 break;
 
-            rc = rangeset_add_range(r, start, end);
+            rc = rangeset_add_range(r, ior->start, ior->end);
             break;
         }
     }
@@ -862,13 +869,15 @@  int hvm_map_io_range_to_ioreq_server(str
     return rc;
 }
 
-int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
-                                         uint32_t type, uint64_t start,
-                                         uint64_t end)
+int hvm_unmap_io_range_from_ioreq_server(struct domain *d,
+                                         const struct xen_hvm_io_range *ior)
 {
     struct hvm_ioreq_server *s;
     int rc;
 
+    if ( ior->rsvd || !is_hvm_domain(d) )
+        return -EINVAL;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
@@ -879,16 +888,16 @@  int hvm_unmap_io_range_from_ioreq_server
         if ( s == d->arch.hvm_domain.default_ioreq_server )
             continue;
 
-        if ( s->id == id )
+        if ( s->id == ior->id )
         {
             struct rangeset *r;
 
-            switch ( type )
+            switch ( ior->type )
             {
-            case HVMOP_IO_RANGE_PORT:
-            case HVMOP_IO_RANGE_MEMORY:
-            case HVMOP_IO_RANGE_PCI:
-                r = s->range[type];
+            case XEN_HVMCTL_IO_RANGE_PORT:
+            case XEN_HVMCTL_IO_RANGE_MEMORY:
+            case XEN_HVMCTL_IO_RANGE_PCI:
+                r = s->range[ior->type];
                 break;
 
             default:
@@ -901,10 +910,10 @@  int hvm_unmap_io_range_from_ioreq_server
                 break;
 
             rc = -ENOENT;
-            if ( !rangeset_contains_range(r, start, end) )
+            if ( !rangeset_contains_range(r, ior->start, ior->end) )
                 break;
 
-            rc = rangeset_remove_range(r, start, end);
+            rc = rangeset_remove_range(r, ior->start, ior->end);
             break;
         }
     }
@@ -920,6 +929,9 @@  int hvm_set_ioreq_server_state(struct do
     struct list_head *entry;
     int rc;
 
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
@@ -1128,12 +1140,12 @@  struct hvm_ioreq_server *hvm_select_iore
 
         /* PCI config data cycle */
 
-        sbdf = HVMOP_PCI_SBDF(0,
-                              PCI_BUS(CF8_BDF(cf8)),
-                              PCI_SLOT(CF8_BDF(cf8)),
-                              PCI_FUNC(CF8_BDF(cf8)));
+        sbdf = XEN_HVMCTL_PCI_SBDF(0,
+                                   PCI_BUS(CF8_BDF(cf8)),
+                                   PCI_SLOT(CF8_BDF(cf8)),
+                                   PCI_FUNC(CF8_BDF(cf8)));
 
-        type = HVMOP_IO_RANGE_PCI;
+        type = XEN_HVMCTL_IO_RANGE_PCI;
         addr = ((uint64_t)sbdf << 32) |
                CF8_ADDR_LO(cf8) |
                (p->addr & 3);
@@ -1152,7 +1164,7 @@  struct hvm_ioreq_server *hvm_select_iore
     else
     {
         type = (p->type == IOREQ_TYPE_PIO) ?
-                HVMOP_IO_RANGE_PORT : HVMOP_IO_RANGE_MEMORY;
+                XEN_HVMCTL_IO_RANGE_PORT : XEN_HVMCTL_IO_RANGE_MEMORY;
         addr = p->addr;
     }
 
@@ -1174,19 +1186,19 @@  struct hvm_ioreq_server *hvm_select_iore
         {
             unsigned long end;
 
-        case HVMOP_IO_RANGE_PORT:
+        case XEN_HVMCTL_IO_RANGE_PORT:
             end = addr + p->size - 1;
             if ( rangeset_contains_range(r, addr, end) )
                 return s;
 
             break;
-        case HVMOP_IO_RANGE_MEMORY:
+        case XEN_HVMCTL_IO_RANGE_MEMORY:
             end = addr + (p->size * p->count) - 1;
             if ( rangeset_contains_range(r, addr, end) )
                 return s;
 
             break;
-        case HVMOP_IO_RANGE_PCI:
+        case XEN_HVMCTL_IO_RANGE_PCI:
             if ( rangeset_contains_singleton(r, addr >> 32) )
             {
                 p->type = IOREQ_TYPE_PCI_CONFIG;
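For illustration, a worked sketch of the PCI config-cycle encoding used by
hvm_select_ioreq_server() above (the CF8_* helpers are the pre-existing ones
in this file; the concrete numbers are hypothetical):

    /* Guest wrote 0x80000890 to 0xcf8: bus 0, device 1, function 0, reg 0x90,
     * then issues a 2-byte data access at port 0xcfc + 2 (so p->addr = 0xcfe).
     */
    sbdf = XEN_HVMCTL_PCI_SBDF(0, 0, 1, 0);   /* (1 << 3) == 0x08 */
    addr = ((uint64_t)sbdf << 32) |
           CF8_ADDR_LO(0x80000890) |          /* reg & 0xfc == 0x90 */
           (0xcfe & 3);                       /* low two bits == 0x02 */
    /* addr == 0x0000000800000092; the XEN_HVMCTL_IO_RANGE_PCI rangeset is
     * then probed with the singleton addr >> 32, i.e. the sbdf itself.
     */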
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -30,6 +30,7 @@ 
 #include <asm/hvm/vmx/vmcs.h>
 #include <asm/hvm/svm/vmcb.h>
 #include <public/grant_table.h>
+#include <public/hvm/control.h>
 #include <public/hvm/params.h>
 #include <public/hvm/save.h>
 #include <public/hvm/hvm_op.h>
@@ -47,7 +48,7 @@  struct hvm_ioreq_vcpu {
     bool_t           pending;
 };
 
-#define NR_IO_RANGE_TYPES (HVMOP_IO_RANGE_PCI + 1)
+#define NR_IO_RANGE_TYPES (XEN_HVMCTL_IO_RANGE_PCI + 1)
 #define MAX_NR_IO_RANGES  256
 
 struct hvm_ioreq_server {
--- a/xen/include/asm-x86/hvm/ioreq.h
+++ b/xen/include/asm-x86/hvm/ioreq.h
@@ -19,6 +19,8 @@ 
 #ifndef __ASM_X86_HVM_IOREQ_H__
 #define __ASM_X86_HVM_IOREQ_H__
 
+#include <public/hvm/control.h>
+
 bool_t hvm_io_pending(struct vcpu *v);
 bool_t handle_hvm_io_completion(struct vcpu *v);
 bool_t is_ioreq_server_page(struct domain *d, const struct page_info *page);
@@ -27,16 +29,12 @@  int hvm_create_ioreq_server(struct domai
                             bool_t is_default, int bufioreq_handling,
                             ioservid_t *id);
 int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id);
-int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
-                              unsigned long *ioreq_pfn,
-                              unsigned long *bufioreq_pfn,
-                              evtchn_port_t *bufioreq_port);
-int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
-                                     uint32_t type, uint64_t start,
-                                     uint64_t end);
-int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
-                                         uint32_t type, uint64_t start,
-                                         uint64_t end);
+int hvm_get_ioreq_server_info(struct domain *d,
+                              struct xen_hvm_get_ioreq_server_info *info);
+int hvm_map_io_range_to_ioreq_server(struct domain *d,
+                                     const struct xen_hvm_io_range *r);
+int hvm_unmap_io_range_from_ioreq_server(struct domain *d,
+                                         const struct xen_hvm_io_range *r);
 int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
                                bool_t enabled);
 
--- a/xen/include/public/hvm/control.h
+++ b/xen/include/public/hvm/control.h
@@ -26,6 +26,7 @@ 
 #endif
 
 #include "../xen.h"
+#include "../event_channel.h"
 
 #define XEN_HVMCTL_INTERFACE_VERSION 0x00000001
 
@@ -130,6 +131,131 @@  struct xen_hvm_inject_msi {
     uint64_t  addr;
 };
 
+/*
+ * IOREQ Servers
+ *
+ * The interface between an I/O emulator and Xen is called an IOREQ Server.
+ * A domain supports a single 'legacy' IOREQ Server, which is instantiated
+ * whenever one of the following parameters is read:
+ *
+ * HVM_PARAM_IOREQ_PFN (to get the gmfn containing the synchronous ioreq
+ * structures),
+ * HVM_PARAM_BUFIOREQ_PFN (to get the gmfn containing the buffered ioreq
+ * ring), or
+ * HVM_PARAM_BUFIOREQ_EVTCHN (to get the event channel that Xen uses to
+ * request buffered I/O emulation).
+ *
+ * The following hypercalls facilitate the creation of IOREQ Servers for
+ * 'secondary' emulators which are invoked to implement port I/O, memory, or
+ * PCI config space ranges which they explicitly register.
+ */
+
+typedef uint16_t ioservid_t;
+
+/*
+ * XEN_HVMCTL_create_ioreq_server: Instantiate a new IOREQ Server for a
+ *                                 secondary emulator servicing domain
+ *                                 <domid>.
+ *
+ * The <id> handed back is unique for <domid>. If <handle_bufioreq> is zero
+ * the buffered ioreq ring will not be allocated and hence all emulation
+ * requests to this server will be synchronous.
+ */
+struct xen_hvm_create_ioreq_server {
+#define HVM_IOREQSRV_BUFIOREQ_OFF    0
+#define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
+/*
+ * Use this when read_pointer gets updated atomically and
+ * the pointer pair gets read atomically:
+ */
+#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
+    uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
+    uint8_t rsvd;            /* IN - must be zero */
+    ioservid_t id;           /* OUT - server id */
+};
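
A minimal caller-side sketch of this subop, assuming the do_hvmctl() wrapper
from the libxc side of this series; the name of the command field ('cmd') is
an assumption here, as its declaration falls outside this hunk:

    struct xen_hvmctl ctl = {
        .interface_version = XEN_HVMCTL_INTERFACE_VERSION,
        .domain = domid,                         /* domain to be serviced */
        .cmd = XEN_HVMCTL_create_ioreq_server,   /* assumed field name */
        .u.create_ioreq_server.handle_bufioreq = HVM_IOREQSRV_BUFIOREQ_ATOMIC,
    };
    ioservid_t id;

    if ( do_hvmctl(xch, &ctl) == 0 )
        id = ctl.u.create_ioreq_server.id;       /* OUT - unique per domain */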
+
+/*
+ * XEN_HVMCTL_get_ioreq_server_info: Get all the information necessary to
+ *                                   access IOREQ Server <id>.
+ *
+ * The emulator needs to map the synchronous ioreq structures and buffered
+ * ioreq ring (if it exists) that Xen uses to request emulation. These are
+ * hosted in domain <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn>
+ * respectively. In addition, if the IOREQ Server is handling buffered
+ * emulation requests, the emulator needs to bind to event channel
+ * <bufioreq_port> to listen for them. (The event channels used for
+ * synchronous emulation requests are specified in the per-CPU ioreq
+ * structures in <ioreq_pfn>).
+ * If the IOREQ Server is not handling buffered emulation requests then the
+ * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
+ */
+struct xen_hvm_get_ioreq_server_info {
+    ioservid_t id;                 /* IN - server id */
+    uint16_t rsvd;                 /* IN - must be zero */
+    evtchn_port_t bufioreq_port;   /* OUT - buffered ioreq port */
+    uint64_aligned_t ioreq_pfn;    /* OUT - sync ioreq pfn */
+    uint64_aligned_t bufioreq_pfn; /* OUT - buffered ioreq pfn */
+};
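
Once the OUT fields are retrieved, the emulator can map the two gmfns and
bind the port. A sketch using the long-standing xc_map_foreign_range() libxc
helper (error handling and the event-channel bind for <bufioreq_port> are
omitted; 'info' stands for the filled-in structure above):

    shared_iopage_t *iopage =                   /* from public/hvm/ioreq.h */
        xc_map_foreign_range(xch, domid, XC_PAGE_SIZE,
                             PROT_READ | PROT_WRITE, info.ioreq_pfn);
    buffered_iopage_t *bufpage = NULL;

    if ( info.bufioreq_pfn )    /* both OUT values are 0 without buffering */
        bufpage = xc_map_foreign_range(xch, domid, XC_PAGE_SIZE,
                                       PROT_READ | PROT_WRITE,
                                       info.bufioreq_pfn);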
+
+/*
+ * XEN_HVMCTL_map_io_range_to_ioreq_server: Register an I/O range of domain
+ *                                          <domid> for emulation by the
+ *                                          client of IOREQ Server <id>
+ * XEN_HVMCTL_unmap_io_range_from_ioreq_server: Deregister an I/O range of
+ *                                              <domid> for emulation by the
+ *                                              client of IOREQ Server <id>
+ *
+ * There are three types of I/O that can be emulated: port I/O, memory accesses
+ * and PCI config space accesses. The <type> field denotes which type of range
+ * the <start> and <end> (inclusive) fields are specifying.
+ * PCI config space ranges are specified by segment/bus/device/function values
+ * which should be encoded using the XEN_HVMCTL_PCI_SBDF helper macro below.
+ *
+ * NOTE: unless an emulation request falls entirely within a range mapped
+ * by a secondary emulator, it will not be passed to that emulator.
+ */
+struct xen_hvm_io_range {
+    ioservid_t id;               /* IN - server id */
+    uint16_t type;               /* IN - type of range */
+    uint32_t rsvd;               /* IN - must be zero */
+#define XEN_HVMCTL_IO_RANGE_PORT   0 /* I/O port range */
+#define XEN_HVMCTL_IO_RANGE_MEMORY 1 /* MMIO range */
+#define XEN_HVMCTL_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
+    uint64_aligned_t start, end; /* IN - inclusive start and end of range */
+};
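
As an example (a sketch; 'id' names a previously created server), registering
the primary IDE port block:

    struct xen_hvm_io_range r = {
        .id    = id,
        .type  = XEN_HVMCTL_IO_RANGE_PORT,
        .start = 0x1f0,
        .end   = 0x1f7,                    /* inclusive */
    };

Per the NOTE above, a 4-byte access at 0x1f6 would extend to 0x1f9, fall
outside this range, and hence not be handed to the emulator.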
+
+#define XEN_HVMCTL_PCI_SBDF(s, b, d, f) \
+	((((s) & 0xffff) << 16) | \
+	 (((b) & 0xff) << 8) | \
+	 (((d) & 0x1f) << 3) | \
+	 ((f) & 0x07))
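
A worked instance of the encoding: segment 0, bus 3, device 0x1f, function 7
packs as

    XEN_HVMCTL_PCI_SBDF(0, 3, 0x1f, 7)
        == (0 << 16) | (3 << 8) | (0x1f << 3) | 7
        == 0x300 | 0xf8 | 0x07
        == 0x3ff

so mapping just that device/function uses start = end = 0x3ff with type
XEN_HVMCTL_IO_RANGE_PCI.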
+
+/*
+ * XEN_HVMCTL_destroy_ioreq_server: Destroy the IOREQ Server <id> servicing
+ *                                  domain <domid>.
+ *
+ * Any registered I/O ranges will be automatically deregistered.
+ */
+struct xen_hvm_destroy_ioreq_server {
+    ioservid_t id; /* IN - server id */
+};
+
+/*
+ * XEN_HVMCTL_set_ioreq_server_state: Enable or disable the IOREQ Server <id>
+ *                                    servicing domain <domid>.
+ *
+ * The IOREQ Server will not be passed any emulation requests until it is in
+ * the enabled state.
+ * Note that the contents of the ioreq_pfn and bufioreq_pfn (see
+ * XEN_HVMCTL_get_ioreq_server_info) are not meaningful until the IOREQ Server
+ * is in the enabled state.
+ */
+struct xen_hvm_set_ioreq_server_state {
+    ioservid_t id;   /* IN - server id */
+    uint8_t enabled; /* IN - enabled? */
+    uint8_t rsvd;    /* IN - must be zero */
+};
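
A sketch of flipping a server live once its pages are mapped and its ranges
registered:

    struct xen_hvm_set_ioreq_server_state s = {
        .id      = id,
        .enabled = 1,    /* 0 quiesces the server again */
    };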
+
 struct xen_hvmctl {
     uint16_t interface_version;    /* XEN_HVMCTL_INTERFACE_VERSION */
     domid_t domain;
@@ -142,6 +268,12 @@  struct xen_hvmctl {
 #define XEN_HVMCTL_set_mem_type                  6
 #define XEN_HVMCTL_inject_trap                   7
 #define XEN_HVMCTL_inject_msi                    8
+#define XEN_HVMCTL_create_ioreq_server           9
+#define XEN_HVMCTL_get_ioreq_server_info        10
+#define XEN_HVMCTL_map_io_range_to_ioreq_server 11
+#define XEN_HVMCTL_unmap_io_range_from_ioreq_server 12
+#define XEN_HVMCTL_destroy_ioreq_server         13
+#define XEN_HVMCTL_set_ioreq_server_state       14
     uint16_t opaque;               /* Must be zero on initial invocation. */
     union {
         struct xen_hvm_set_pci_intx_level set_pci_intx_level;
@@ -152,6 +284,12 @@  struct xen_hvmctl {
         struct xen_hvm_set_mem_type set_mem_type;
         struct xen_hvm_inject_trap inject_trap;
         struct xen_hvm_inject_msi inject_msi;
+        struct xen_hvm_create_ioreq_server create_ioreq_server;
+        struct xen_hvm_get_ioreq_server_info get_ioreq_server_info;
+        struct xen_hvm_io_range map_io_range_to_ioreq_server;
+        struct xen_hvm_io_range unmap_io_range_from_ioreq_server;
+        struct xen_hvm_destroy_ioreq_server destroy_ioreq_server;
+        struct xen_hvm_set_ioreq_server_state set_ioreq_server_state;
         uint8_t pad[120];
     } u;
 };
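
Taken together, the expected lifecycle of a secondary emulator against this
interface is (each step one XEN_HVMCTL_* invocation):

    /*
     * 1. create_ioreq_server             -> obtain <id>
     * 2. get_ioreq_server_info <id>      -> map <ioreq_pfn>/<bufioreq_pfn>,
     *                                       bind <bufioreq_port>
     * 3. map_io_range_to_ioreq_server    -> register port/MMIO/PCI ranges
     * 4. set_ioreq_server_state <id>, enabled = 1
     *       ... service emulation requests ...
     * 5. destroy_ioreq_server <id>       -> ranges deregistered automatically
     */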
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -25,7 +25,6 @@ 
 
 #include "../xen.h"
 #include "../trace.h"
-#include "../event_channel.h"
 
 /* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
 #define HVMOP_set_param           0
@@ -137,152 +136,6 @@  struct xen_hvm_get_mem_type {
 typedef struct xen_hvm_get_mem_type xen_hvm_get_mem_type_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);
 
-/* Following tools-only interfaces may change in future. */
-#if defined(__XEN__) || defined(__XEN_TOOLS__)
-
-/*
- * IOREQ Servers
- *
- * The interface between an I/O emulator an Xen is called an IOREQ Server.
- * A domain supports a single 'legacy' IOREQ Server which is instantiated if
- * parameter...
- *
- * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
- * ioreq structures), or...
- * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
- * ioreq ring), or...
- * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
- * to request buffered I/O emulation).
- * 
- * The following hypercalls facilitate the creation of IOREQ Servers for
- * 'secondary' emulators which are invoked to implement port I/O, memory, or
- * PCI config space ranges which they explicitly register.
- */
-
-typedef uint16_t ioservid_t;
-
-/*
- * HVMOP_create_ioreq_server: Instantiate a new IOREQ Server for a secondary
- *                            emulator servicing domain <domid>.
- *
- * The <id> handed back is unique for <domid>. If <handle_bufioreq> is zero
- * the buffered ioreq ring will not be allocated and hence all emulation
- * requestes to this server will be synchronous.
- */
-#define HVMOP_create_ioreq_server 17
-struct xen_hvm_create_ioreq_server {
-    domid_t domid;           /* IN - domain to be serviced */
-#define HVM_IOREQSRV_BUFIOREQ_OFF    0
-#define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
-/*
- * Use this when read_pointer gets updated atomically and
- * the pointer pair gets read atomically:
- */
-#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
-    uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
-    ioservid_t id;           /* OUT - server id */
-};
-typedef struct xen_hvm_create_ioreq_server xen_hvm_create_ioreq_server_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_create_ioreq_server_t);
-
-/*
- * HVMOP_get_ioreq_server_info: Get all the information necessary to access
- *                              IOREQ Server <id>. 
- *
- * The emulator needs to map the synchronous ioreq structures and buffered
- * ioreq ring (if it exists) that Xen uses to request emulation. These are
- * hosted in domain <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn>
- * respectively. In addition, if the IOREQ Server is handling buffered
- * emulation requests, the emulator needs to bind to event channel
- * <bufioreq_port> to listen for them. (The event channels used for
- * synchronous emulation requests are specified in the per-CPU ioreq
- * structures in <ioreq_pfn>).
- * If the IOREQ Server is not handling buffered emulation requests then the
- * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
- */
-#define HVMOP_get_ioreq_server_info 18
-struct xen_hvm_get_ioreq_server_info {
-    domid_t domid;                 /* IN - domain to be serviced */
-    ioservid_t id;                 /* IN - server id */
-    evtchn_port_t bufioreq_port;   /* OUT - buffered ioreq port */
-    uint64_aligned_t ioreq_pfn;    /* OUT - sync ioreq pfn */
-    uint64_aligned_t bufioreq_pfn; /* OUT - buffered ioreq pfn */
-};
-typedef struct xen_hvm_get_ioreq_server_info xen_hvm_get_ioreq_server_info_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_ioreq_server_info_t);
-
-/*
- * HVM_map_io_range_to_ioreq_server: Register an I/O range of domain <domid>
- *                                   for emulation by the client of IOREQ
- *                                   Server <id>
- * HVM_unmap_io_range_from_ioreq_server: Deregister an I/O range of <domid>
- *                                       for emulation by the client of IOREQ
- *                                       Server <id>
- *
- * There are three types of I/O that can be emulated: port I/O, memory accesses
- * and PCI config space accesses. The <type> field denotes which type of range
- * the <start> and <end> (inclusive) fields are specifying.
- * PCI config space ranges are specified by segment/bus/device/function values
- * which should be encoded using the HVMOP_PCI_SBDF helper macro below.
- *
- * NOTE: unless an emulation request falls entirely within a range mapped
- * by a secondary emulator, it will not be passed to that emulator.
- */
-#define HVMOP_map_io_range_to_ioreq_server 19
-#define HVMOP_unmap_io_range_from_ioreq_server 20
-struct xen_hvm_io_range {
-    domid_t domid;               /* IN - domain to be serviced */
-    ioservid_t id;               /* IN - server id */
-    uint32_t type;               /* IN - type of range */
-# define HVMOP_IO_RANGE_PORT   0 /* I/O port range */
-# define HVMOP_IO_RANGE_MEMORY 1 /* MMIO range */
-# define HVMOP_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
-    uint64_aligned_t start, end; /* IN - inclusive start and end of range */
-};
-typedef struct xen_hvm_io_range xen_hvm_io_range_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_io_range_t);
-
-#define HVMOP_PCI_SBDF(s,b,d,f)                 \
-	((((s) & 0xffff) << 16) |                   \
-	 (((b) & 0xff) << 8) |                      \
-	 (((d) & 0x1f) << 3) |                      \
-	 ((f) & 0x07))
-
-/*
- * HVMOP_destroy_ioreq_server: Destroy the IOREQ Server <id> servicing domain
- *                             <domid>.
- *
- * Any registered I/O ranges will be automatically deregistered.
- */
-#define HVMOP_destroy_ioreq_server 21
-struct xen_hvm_destroy_ioreq_server {
-    domid_t domid; /* IN - domain to be serviced */
-    ioservid_t id; /* IN - server id */
-};
-typedef struct xen_hvm_destroy_ioreq_server xen_hvm_destroy_ioreq_server_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_destroy_ioreq_server_t);
-
-/*
- * HVMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id> servicing
- *                               domain <domid>.
- *
- * The IOREQ Server will not be passed any emulation requests until it is in the
- * enabled state.
- * Note that the contents of the ioreq_pfn and bufioreq_fn (see
- * HVMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server is in
- * the enabled state.
- */
-#define HVMOP_set_ioreq_server_state 22
-struct xen_hvm_set_ioreq_server_state {
-    domid_t domid;   /* IN - domain to be serviced */
-    ioservid_t id;   /* IN - server id */
-    uint8_t enabled; /* IN - enabled? */    
-};
-typedef struct xen_hvm_set_ioreq_server_state xen_hvm_set_ioreq_server_state_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_ioreq_server_state_t);
-
-#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
-
 #if defined(__i386__) || defined(__x86_64__)
 
 /*
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -609,12 +609,6 @@  static XSM_INLINE int xsm_shadow_control
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_hvm_ioreq_server(XSM_DEFAULT_ARG struct domain *d, int op)
-{
-    XSM_ASSERT_ACTION(XSM_DM_PRIV);
-    return xsm_default_action(action, current->domain, d);
-}
-
 static XSM_INLINE int xsm_mem_sharing_op(XSM_DEFAULT_ARG struct domain *d, struct domain *cd, int op)
 {
     XSM_ASSERT_ACTION(XSM_DM_PRIV);
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -174,7 +174,6 @@  struct xsm_operations {
     int (*do_mca) (void);
     int (*shadow_control) (struct domain *d, uint32_t op);
     int (*hvm_set_pci_link_route) (struct domain *d);
-    int (*hvm_ioreq_server) (struct domain *d, int op);
     int (*mem_sharing_op) (struct domain *d, struct domain *cd, int op);
     int (*apic) (struct domain *d, int cmd);
     int (*memtype) (uint32_t access);
@@ -648,11 +647,6 @@  static inline int xsm_hvm_set_pci_link_r
     return xsm_ops->hvm_set_pci_link_route(d);
 }
 
-static inline int xsm_hvm_ioreq_server (xsm_default_t def, struct domain *d, int op)
-{
-    return xsm_ops->hvm_ioreq_server(d, op);
-}
-
 static inline int xsm_mem_sharing_op (xsm_default_t def, struct domain *d, struct domain *cd, int op)
 {
     return xsm_ops->mem_sharing_op(d, cd, op);
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -145,7 +145,6 @@  void xsm_fixup_ops (struct xsm_operation
 #ifdef CONFIG_X86
     set_to_dummy_if_null(ops, do_mca);
     set_to_dummy_if_null(ops, shadow_control);
-    set_to_dummy_if_null(ops, hvm_ioreq_server);
     set_to_dummy_if_null(ops, mem_sharing_op);
     set_to_dummy_if_null(ops, apic);
     set_to_dummy_if_null(ops, machine_memory_map);
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1526,11 +1526,6 @@  static int flask_ioport_mapping(struct d
     return flask_ioport_permission(d, start, end, access);
 }
 
-static int flask_hvm_ioreq_server(struct domain *d, int op)
-{
-    return current_has_perm(d, SECCLASS_HVM, HVM__HVMCTL);
-}
-
 static int flask_mem_sharing_op(struct domain *d, struct domain *cd, int op)
 {
     int rc = current_has_perm(cd, SECCLASS_HVM, HVM__MEM_SHARING);
@@ -1799,7 +1794,6 @@  static struct xsm_operations flask_ops =
 #ifdef CONFIG_X86
     .do_mca = flask_do_mca,
     .shadow_control = flask_shadow_control,
-    .hvm_ioreq_server = flask_hvm_ioreq_server,
     .mem_sharing_op = flask_mem_sharing_op,
     .apic = flask_apic,
     .machine_memory_map = flask_machine_memory_map,