@@ -151,7 +151,7 @@ define(`device_model', `
allow $1 $2_target:domain { getdomaininfo shutdown };
allow $1 $2_target:mmu { map_read map_write adjust physmap target_hack };
- allow $1 $2_target:hvm { getparam setparam hvmctl cacheattr send_irq dm };
+ allow $1 $2_target:hvm { getparam setparam hvmctl cacheattr dm };
')
# make_device_model(priv, dm_dom, hvm_dom)
@@ -1641,8 +1641,8 @@ int xc_hvm_set_mem_type(
* resumes.
*/
int xc_hvm_inject_trap(
- xc_interface *xch, domid_t dom, int vcpu, uint32_t vector,
- uint32_t type, uint32_t error_code, uint32_t insn_len,
+ xc_interface *xch, domid_t dom, int vcpu, uint8_t vector,
+ uint8_t type, uint32_t error_code, uint8_t insn_len,
uint64_t cr2);
/*
@@ -527,29 +527,20 @@ int xc_hvm_set_pci_link_route(
}
int xc_hvm_inject_msi(
- xc_interface *xch, domid_t dom, uint64_t addr, uint32_t data)
+ xc_interface *xch, domid_t dom, uint64_t msi_addr, uint32_t msi_data)
{
- DECLARE_HYPERCALL_BUFFER(struct xen_hvm_inject_msi, arg);
- int rc;
-
- arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
- if ( arg == NULL )
- {
- PERROR("Could not allocate memory for xc_hvm_inject_msi hypercall");
- return -1;
- }
+ struct xen_dm_op op;
+ struct xen_dm_op_inject_msi *data;
- arg->domid = dom;
- arg->addr = addr;
- arg->data = data;
+ memset(&op, 0, sizeof(op));
- rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
- HVMOP_inject_msi,
- HYPERCALL_BUFFER_AS_ARG(arg));
+ op.op = XEN_DMOP_inject_msi;
+ data = &op.u.inject_msi;
- xc_hypercall_buffer_free(xch, arg);
+ data->addr = msi_addr;
+ data->data = msi_data;
- return rc;
+ return do_dm_op(xch, dom, 1, &op, sizeof(op));
}
int xc_hvm_track_dirty_vram(
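The rewritten wrapper keeps the existing libxc prototype, so device models pick up the DMOP plumbing without source changes. A minimal caller sketch, assuming a hypothetical domain ID and MSI address/data values chosen purely for illustration:

#include <stdint.h>
#include <stdio.h>
#include <xenctrl.h>

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    if ( !xch )
        return 1;

    /* Hypothetical target: domain 1, vector 0x41, fixed delivery. */
    uint64_t msi_addr = 0xfee00000;
    uint32_t msi_data = 0x0041;

    if ( xc_hvm_inject_msi(xch, 1, msi_addr, msi_data) )
        fprintf(stderr, "xc_hvm_inject_msi failed\n");

    xc_interface_close(xch);
    return 0;
}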
@@ -608,35 +599,26 @@ int xc_hvm_set_mem_type(
}
int xc_hvm_inject_trap(
- xc_interface *xch, domid_t dom, int vcpu, uint32_t vector,
- uint32_t type, uint32_t error_code, uint32_t insn_len,
+ xc_interface *xch, domid_t dom, int vcpu, uint8_t vector,
+ uint8_t type, uint32_t error_code, uint8_t insn_len,
uint64_t cr2)
{
- DECLARE_HYPERCALL_BUFFER(struct xen_hvm_inject_trap, arg);
- int rc;
-
- arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
- if ( arg == NULL )
- {
- PERROR("Could not allocate memory for xc_hvm_inject_trap hypercall");
- return -1;
- }
+ struct xen_dm_op op;
+ struct xen_dm_op_inject_trap *data;
- arg->domid = dom;
- arg->vcpuid = vcpu;
- arg->vector = vector;
- arg->type = type;
- arg->error_code = error_code;
- arg->insn_len = insn_len;
- arg->cr2 = cr2;
+ memset(&op, 0, sizeof(op));
- rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
- HVMOP_inject_trap,
- HYPERCALL_BUFFER_AS_ARG(arg));
+ op.op = XEN_DMOP_inject_trap;
+ data = &op.u.inject_trap;
- xc_hypercall_buffer_free(xch, arg);
+ data->vcpuid = vcpu;
+ data->vector = vector;
+ data->type = type;
+ data->error_code = error_code;
+ data->insn_len = insn_len;
+ data->cr2 = cr2;
- return rc;
+ return do_dm_op(xch, dom, 1, &op, sizeof(op));
}
int xc_livepatch_upload(xc_interface *xch,
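As with the MSI path, only the parameter widths of the public prototype change here; the call pattern is unchanged. A hedged sketch of injecting a page fault into vCPU 0 of a hypothetical domain, where the type value mirrors XEN_DMOP_TRAP_hw_exc (3) as defined later in this patch:

#include <stdint.h>
#include <xenctrl.h>

#define TRAP_TYPE_HW_EXC  3   /* mirrors XEN_DMOP_TRAP_hw_exc */
#define X86_PF_VECTOR     14  /* #PF */

/* Illustrative helper; fault_addr becomes the guest's CR2 value. */
static int inject_page_fault(xc_interface *xch, domid_t dom,
                             uint64_t fault_addr, uint32_t error_code)
{
    /* insn_len is only relevant for software interrupts/exceptions. */
    return xc_hvm_inject_trap(xch, dom, /* vcpu */ 0, X86_PF_VECTOR,
                              TRAP_TYPE_HW_EXC, error_code,
                              /* insn_len */ 0, fault_addr);
}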
@@ -276,6 +276,28 @@ static int set_mem_type(struct domain *d, hvmmem_type_t mem_type,
return rc;
}
+static int inject_trap(struct domain *d, unsigned int vcpuid,
+ uint8_t vector, uint8_t type,
+ uint8_t insn_len, uint32_t error_code,
+ unsigned long cr2)
+{
+ struct vcpu *v;
+
+ if ( vcpuid >= d->max_vcpus || !(v = d->vcpu[vcpuid]) )
+ return -EINVAL;
+
+ if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
+ return -EBUSY;
+
+ v->arch.hvm_vcpu.inject_trap.type = type;
+ v->arch.hvm_vcpu.inject_trap.insn_len = insn_len;
+ v->arch.hvm_vcpu.inject_trap.error_code = error_code;
+ v->arch.hvm_vcpu.inject_trap.cr2 = cr2;
+ v->arch.hvm_vcpu.inject_trap.vector = vector;
+
+ return 0;
+}
+
long do_dm_op(domid_t domid,
unsigned int nr_bufs,
XEN_GUEST_HANDLE_PARAM(xen_dm_op_buf_t) bufs)
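inject_trap() only latches the request in the per-vCPU inject_trap slot; delivery happens when the target vCPU next returns to guest context. A rough paraphrase of the consumer side (hvm_do_resume()-style logic already in the tree, not part of this patch), showing why the -EBUSY check above amounts to a single-entry queue per vCPU:

/* Sketch only: field and helper names follow the existing hvm_vcpu state. */
static void consume_pending_trap(struct vcpu *v)
{
    if ( v->arch.hvm_vcpu.inject_trap.vector == -1 )
        return;

    hvm_inject_trap(&v->arch.hvm_vcpu.inject_trap);
    v->arch.hvm_vcpu.inject_trap.vector = -1;   /* slot is free again */
}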
@@ -461,6 +483,33 @@ long do_dm_op(domid_t domid,
break;
}
+ case XEN_DMOP_inject_trap:
+ {
+ const struct xen_dm_op_inject_trap *data =
+ &op.u.inject_trap;
+
+ rc = -EINVAL;
+ if ( data->pad0 || data->pad1 )
+ break;
+
+ rc = inject_trap(d, data->vcpuid, data->vector,
+ data->type, data->insn_len,
+ data->error_code, data->cr2);
+ break;
+ }
+ case XEN_DMOP_inject_msi:
+ {
+ const struct xen_dm_op_inject_msi *data =
+ &op.u.inject_msi;
+
+ rc = -EINVAL;
+ if ( data->pad )
+ break;
+
+ rc = hvm_inject_msi(d, data->addr, data->data);
+ break;
+ }
+
default:
rc = -EOPNOTSUPP;
break;
@@ -4570,35 +4570,6 @@ static void hvm_s3_resume(struct domain *d)
}
}
-static int hvmop_inject_msi(
- XEN_GUEST_HANDLE_PARAM(xen_hvm_inject_msi_t) uop)
-{
- struct xen_hvm_inject_msi op;
- struct domain *d;
- int rc;
-
- if ( copy_from_guest(&op, uop, 1) )
- return -EFAULT;
-
- rc = rcu_lock_remote_domain_by_id(op.domid, &d);
- if ( rc != 0 )
- return rc;
-
- rc = -EINVAL;
- if ( !is_hvm_domain(d) )
- goto out;
-
- rc = xsm_hvm_inject_msi(XSM_DM_PRIV, d);
- if ( rc )
- goto out;
-
- rc = hvm_inject_msi(d, op.addr, op.data);
-
- out:
- rcu_unlock_domain(d);
- return rc;
-}
-
static int hvmop_flush_tlb_all(void)
{
struct domain *d = current->domain;
@@ -5249,11 +5220,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
guest_handle_cast(arg, xen_hvm_param_t));
break;
- case HVMOP_inject_msi:
- rc = hvmop_inject_msi(
- guest_handle_cast(arg, xen_hvm_inject_msi_t));
- break;
-
case HVMOP_flush_tlbs:
rc = guest_handle_is_null(arg) ? hvmop_flush_tlb_all() : -EINVAL;
break;
@@ -5310,48 +5276,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
break;
}
- case HVMOP_inject_trap:
- {
- xen_hvm_inject_trap_t tr;
- struct domain *d;
- struct vcpu *v;
-
- if ( copy_from_guest(&tr, arg, 1 ) )
- return -EFAULT;
-
- rc = rcu_lock_remote_domain_by_id(tr.domid, &d);
- if ( rc != 0 )
- return rc;
-
- rc = -EINVAL;
- if ( !is_hvm_domain(d) )
- goto injtrap_fail;
-
- rc = xsm_hvm_control(XSM_DM_PRIV, d, op);
- if ( rc )
- goto injtrap_fail;
-
- rc = -ENOENT;
- if ( tr.vcpuid >= d->max_vcpus || (v = d->vcpu[tr.vcpuid]) == NULL )
- goto injtrap_fail;
-
- if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
- rc = -EBUSY;
- else
- {
- v->arch.hvm_vcpu.inject_trap.vector = tr.vector;
- v->arch.hvm_vcpu.inject_trap.type = tr.type;
- v->arch.hvm_vcpu.inject_trap.error_code = tr.error_code;
- v->arch.hvm_vcpu.inject_trap.insn_len = tr.insn_len;
- v->arch.hvm_vcpu.inject_trap.cr2 = tr.cr2;
- rc = 0;
- }
-
- injtrap_fail:
- rcu_unlock_domain(d);
- break;
- }
-
case HVMOP_guest_request_vm_event:
if ( guest_handle_is_null(arg) )
monitor_guest_request();
@@ -269,6 +269,52 @@ struct xen_dm_op_set_mem_type {
uint64_aligned_t first_pfn;
};
+/*
+ * XEN_DMOP_inject_trap: Inject a trap into a VCPU, which will get taken up
+ * when it is next scheduled.
+ *
+ * Note that the caller should know enough of the state of the CPU before
+ * injecting, to know what the effect of injecting the trap will be.
+ */
+#define XEN_DMOP_inject_trap 13
+
+struct xen_dm_op_inject_trap {
+ /* IN - index of vCPU */
+ uint32_t vcpuid;
+ /* IN - interrupt vector */
+ uint8_t vector;
+ /* IN - trap type (XEN_DMOP_TRAP_*) */
+ uint8_t type;
+/* NB. This enumeration precisely matches hvm.h:X86_EVENTTYPE_* */
+# define XEN_DMOP_TRAP_ext_int 0 /* external interrupt */
+# define XEN_DMOP_TRAP_nmi 2 /* nmi */
+# define XEN_DMOP_TRAP_hw_exc 3 /* hardware exception */
+# define XEN_DMOP_TRAP_sw_int 4 /* software interrupt (CD nn) */
+# define XEN_DMOP_TRAP_pri_sw_exc 5 /* ICEBP (F1) */
+# define XEN_DMOP_TRAP_sw_exc 6 /* INT3 (CC), INTO (CE) */
+ /* IN - instruction length */
+ uint8_t insn_len;
+ uint8_t pad0;
+ /* IN - error code (or ~0 to skip) */
+ uint32_t error_code;
+ uint32_t pad1;
+ /* IN - CR2 for page faults */
+ uint64_aligned_t cr2;
+};
+
+/*
+ * XEN_DMOP_inject_msi: Inject an MSI for an emulated device.
+ */
+#define XEN_DMOP_inject_msi 14
+
+struct xen_dm_op_inject_msi {
+ /* IN - MSI data (lower 32 bits) */
+ uint32_t data;
+ uint32_t pad;
+ /* IN - MSI address (0xfeexxxxx) */
+ uint64_aligned_t addr;
+};
+
struct xen_dm_op {
uint32_t op;
uint32_t pad;
@@ -285,6 +331,8 @@ struct xen_dm_op {
struct xen_dm_op_set_pci_link_route set_pci_link_route;
struct xen_dm_op_modified_memory modified_memory;
struct xen_dm_op_set_mem_type set_mem_type;
+ struct xen_dm_op_inject_trap inject_trap;
+ struct xen_dm_op_inject_msi inject_msi;
} u;
};
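The explicit pad fields are part of the stable DMOP layout, which is why the do_dm_op dispatch hunk earlier in this patch rejects non-zero padding. A quick host-side sketch of the layout these definitions imply (plain uint64_t standing in for uint64_aligned_t; the sizes are derived from the fields above, not copied from a header):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Mirror structs for illustration only. */
struct inject_trap_mirror {
    uint32_t vcpuid;
    uint8_t  vector, type, insn_len, pad0;
    uint32_t error_code, pad1;
    uint64_t cr2;
};

struct inject_msi_mirror {
    uint32_t data, pad;
    uint64_t addr;
};

int main(void)
{
    /* 4 + 4x1 + 2x4 = 16 bytes before cr2, 24 in total. */
    assert(offsetof(struct inject_trap_mirror, cr2) == 16);
    assert(sizeof(struct inject_trap_mirror) == 24);

    /* The pad keeps addr at offset 8, so 32- and 64-bit layouts match. */
    assert(offsetof(struct inject_msi_mirror, addr) == 8);
    assert(sizeof(struct inject_msi_mirror) == 16);
    return 0;
}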
@@ -133,38 +133,6 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_xentrace_t);
/* Deprecated by XENMEM_access_op_get_access */
#define HVMOP_get_mem_access 13
-#define HVMOP_inject_trap 14
-/* Inject a trap into a VCPU, which will get taken up on the next
- * scheduling of it. Note that the caller should know enough of the
- * state of the CPU before injecting, to know what the effect of
- * injecting the trap will be.
- */
-struct xen_hvm_inject_trap {
- /* Domain to be queried. */
- domid_t domid;
- /* VCPU */
- uint32_t vcpuid;
- /* Vector number */
- uint32_t vector;
- /* Trap type (HVMOP_TRAP_*) */
- uint32_t type;
-/* NB. This enumeration precisely matches hvm.h:X86_EVENTTYPE_* */
-# define HVMOP_TRAP_ext_int 0 /* external interrupt */
-# define HVMOP_TRAP_nmi 2 /* nmi */
-# define HVMOP_TRAP_hw_exc 3 /* hardware exception */
-# define HVMOP_TRAP_sw_int 4 /* software interrupt (CD nn) */
-# define HVMOP_TRAP_pri_sw_exc 5 /* ICEBP (F1) */
-# define HVMOP_TRAP_sw_exc 6 /* INT3 (CC), INTO (CE) */
- /* Error code, or ~0u to skip */
- uint32_t error_code;
- /* Intruction length */
- uint32_t insn_len;
- /* CR2 for page faults */
- uint64_aligned_t cr2;
-};
-typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t);
-
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
#define HVMOP_get_mem_type 15
@@ -184,19 +152,6 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);
/* Following tools-only interfaces may change in future. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)
-/* MSI injection for emulated devices */
-#define HVMOP_inject_msi 16
-struct xen_hvm_inject_msi {
- /* Domain to be injected */
- domid_t domid;
- /* Data -- lower 32 bits */
- uint32_t data;
- /* Address (0xfeexxxxx) */
- uint64_t addr;
-};
-typedef struct xen_hvm_inject_msi xen_hvm_inject_msi_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_msi_t);
-
/*
* Definitions relating to DMOP_create_ioreq_server. (Defined here for
* backwards compatibility).
@@ -610,12 +610,6 @@ static XSM_INLINE int xsm_shadow_control(XSM_DEFAULT_ARG struct domain *d, uint3
return xsm_default_action(action, current->domain, d);
}
-static XSM_INLINE int xsm_hvm_inject_msi(XSM_DEFAULT_ARG struct domain *d)
-{
- XSM_ASSERT_ACTION(XSM_DM_PRIV);
- return xsm_default_action(action, current->domain, d);
-}
-
static XSM_INLINE int xsm_mem_sharing_op(XSM_DEFAULT_ARG struct domain *d, struct domain *cd, int op)
{
XSM_ASSERT_ACTION(XSM_DM_PRIV);
@@ -162,7 +162,6 @@ struct xsm_operations {
#ifdef CONFIG_X86
int (*do_mca) (void);
int (*shadow_control) (struct domain *d, uint32_t op);
- int (*hvm_inject_msi) (struct domain *d);
int (*mem_sharing_op) (struct domain *d, struct domain *cd, int op);
int (*apic) (struct domain *d, int cmd);
int (*memtype) (uint32_t access);
@@ -632,11 +631,6 @@ static inline int xsm_shadow_control (xsm_default_t def, struct domain *d, uint3
return xsm_ops->shadow_control(d, op);
}
-static inline int xsm_hvm_inject_msi (xsm_default_t def, struct domain *d)
-{
- return xsm_ops->hvm_inject_msi(d);
-}
-
static inline int xsm_mem_sharing_op (xsm_default_t def, struct domain *d, struct domain *cd, int op)
{
return xsm_ops->mem_sharing_op(d, cd, op);
@@ -145,7 +145,6 @@ void __init xsm_fixup_ops (struct xsm_operations *ops)
#ifdef CONFIG_X86
set_to_dummy_if_null(ops, do_mca);
set_to_dummy_if_null(ops, shadow_control);
- set_to_dummy_if_null(ops, hvm_inject_msi);
set_to_dummy_if_null(ops, mem_sharing_op);
set_to_dummy_if_null(ops, apic);
set_to_dummy_if_null(ops, machine_memory_map);
@@ -1502,11 +1502,6 @@ static int flask_ioport_mapping(struct domain *d, uint32_t start, uint32_t end,
return flask_ioport_permission(d, start, end, access);
}
-static int flask_hvm_inject_msi(struct domain *d)
-{
- return current_has_perm(d, SECCLASS_HVM, HVM__SEND_IRQ);
-}
-
static int flask_mem_sharing_op(struct domain *d, struct domain *cd, int op)
{
int rc = current_has_perm(cd, SECCLASS_HVM, HVM__MEM_SHARING);
@@ -1784,7 +1779,6 @@ static struct xsm_operations flask_ops = {
.hvm_set_pci_intx_level = flask_hvm_set_pci_intx_level,
.hvm_set_isa_irq_level = flask_hvm_set_isa_irq_level,
.hvm_set_pci_link_route = flask_hvm_set_pci_link_route,
- .hvm_inject_msi = flask_hvm_inject_msi,
.mem_sharing_op = flask_mem_sharing_op,
.apic = flask_apic,
.machine_memory_map = flask_machine_memory_map,
@@ -263,8 +263,7 @@ class hvm
# XEN_DOMCTL_pin_mem_cacheattr
cacheattr
# HVMOP_get_mem_type,
-# HVMOP_set_mem_access, HVMOP_get_mem_access, HVMOP_pagetable_dying,
-# HVMOP_inject_trap
+# HVMOP_set_mem_access, HVMOP_get_mem_access, HVMOP_pagetable_dying
hvmctl
# XEN_DOMCTL_mem_sharing_op and XENMEM_sharing_op_{share,add_physmap} with:
# source = the domain making the hypercall
@@ -272,8 +271,6 @@ class hvm
mem_sharing
# XEN_DOMCTL_audit_p2m
audit_p2m
-# HVMOP_inject_msi
- send_irq
# checked in XENMEM_sharing_op_{share,add_physmap} with:
# source = domain whose memory is being shared
# target = client domain