@@ -508,27 +508,11 @@ int xc_hvm_set_pci_link_route(
int xc_hvm_inject_msi(
xc_interface *xch, domid_t dom, uint64_t addr, uint32_t data)
{
- DECLARE_HYPERCALL_BUFFER(struct xen_hvm_inject_msi, arg);
- int rc;
+ DECLARE_HVMCTL(inject_msi, dom,
+ .data = data,
+ .addr = addr);
- arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
- if ( arg == NULL )
- {
- PERROR("Could not allocate memory for xc_hvm_inject_msi hypercall");
- return -1;
- }
-
- arg->domid = dom;
- arg->addr = addr;
- arg->data = data;
-
- rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
- HVMOP_inject_msi,
- HYPERCALL_BUFFER_AS_ARG(arg));
-
- xc_hypercall_buffer_free(xch, arg);
-
- return rc;
+ return do_hvmctl(xch, &hvmctl);
}
int xc_hvm_track_dirty_vram(
@@ -295,6 +295,10 @@ long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xe
rc = inject_trap(d, &op.u.inject_trap);
break;
+    case XEN_HVMCTL_inject_msi:
+        /* Reserved field must be zero so it can gain meaning later. */
+        rc = op.u.inject_msi.rsvd ? -EINVAL :
+             hvm_inject_msi(d, op.u.inject_msi.addr, op.u.inject_msi.data);
+        break;
default:
rc = -EOPNOTSUPP;
break;
@@ -4446,35 +4446,6 @@ static void hvm_s3_resume(struct domain
}
}
-static int hvmop_inject_msi(
- XEN_GUEST_HANDLE_PARAM(xen_hvm_inject_msi_t) uop)
-{
- struct xen_hvm_inject_msi op;
- struct domain *d;
- int rc;
-
- if ( copy_from_guest(&op, uop, 1) )
- return -EFAULT;
-
- rc = rcu_lock_remote_domain_by_id(op.domid, &d);
- if ( rc != 0 )
- return rc;
-
- rc = -EINVAL;
- if ( !is_hvm_domain(d) )
- goto out;
-
- rc = xsm_hvm_inject_msi(XSM_DM_PRIV, d);
- if ( rc )
- goto out;
-
- rc = hvm_inject_msi(d, op.addr, op.data);
-
- out:
- rcu_unlock_domain(d);
- return rc;
-}
-
static int hvmop_flush_tlb_all(void)
{
struct domain *d = current->domain;
@@ -5266,11 +5237,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
guest_handle_cast(arg, xen_hvm_param_t));
break;
- case HVMOP_inject_msi:
- rc = hvmop_inject_msi(
- guest_handle_cast(arg, xen_hvm_inject_msi_t));
- break;
-
case HVMOP_flush_tlbs:
rc = guest_handle_is_null(arg) ? hvmop_flush_tlb_all() : -ENOSYS;
break;
@@ -292,6 +292,9 @@ int hvm_inject_msi(struct domain *d, uin
>> MSI_DATA_TRIGGER_SHIFT;
uint8_t vector = data & MSI_DATA_VECTOR_MASK;
+ if ( !is_hvm_domain(d) )
+ return -EINVAL;
+
if ( !vector )
{
int pirq = ((addr >> 32) & 0xffffff00) | dest;
@@ -120,6 +120,16 @@ struct xen_hvm_inject_trap {
uint64_aligned_t cr2;
};
+/* XEN_HVMCTL_inject_msi */
+/* MSI injection for emulated devices. */
+struct xen_hvm_inject_msi {
+    /* Message data. */
+    uint32_t data;
+    uint32_t rsvd;       /* Must be zero. */
+    /* Message address (x86: 0xFEExxxxx). */
+    uint64_t addr;
+};
+
struct xen_hvmctl {
uint16_t interface_version; /* XEN_HVMCTL_INTERFACE_VERSION */
domid_t domain;
@@ -131,6 +141,7 @@ struct xen_hvmctl {
#define XEN_HVMCTL_modified_memory 5
#define XEN_HVMCTL_set_mem_type 6
#define XEN_HVMCTL_inject_trap 7
+#define XEN_HVMCTL_inject_msi 8
uint16_t opaque; /* Must be zero on initial invocation. */
union {
struct xen_hvm_set_pci_intx_level set_pci_intx_level;
@@ -140,6 +151,7 @@ struct xen_hvmctl {
struct xen_hvm_modified_memory modified_memory;
struct xen_hvm_set_mem_type set_mem_type;
struct xen_hvm_inject_trap inject_trap;
+ struct xen_hvm_inject_msi inject_msi;
uint8_t pad[120];
} u;
};
@@ -140,19 +140,6 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_
/* Following tools-only interfaces may change in future. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)
-/* MSI injection for emulated devices */
-#define HVMOP_inject_msi 16
-struct xen_hvm_inject_msi {
- /* Domain to be injected */
- domid_t domid;
- /* Data -- lower 32 bits */
- uint32_t data;
- /* Address (0xfeexxxxx) */
- uint64_t addr;
-};
-typedef struct xen_hvm_inject_msi xen_hvm_inject_msi_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_msi_t);
-
/*
* IOREQ Servers
*
@@ -609,12 +609,6 @@ static XSM_INLINE int xsm_shadow_control
return xsm_default_action(action, current->domain, d);
}
-static XSM_INLINE int xsm_hvm_inject_msi(XSM_DEFAULT_ARG struct domain *d)
-{
- XSM_ASSERT_ACTION(XSM_DM_PRIV);
- return xsm_default_action(action, current->domain, d);
-}
-
static XSM_INLINE int xsm_hvm_ioreq_server(XSM_DEFAULT_ARG struct domain *d, int op)
{
XSM_ASSERT_ACTION(XSM_DM_PRIV);
@@ -174,7 +174,6 @@ struct xsm_operations {
int (*do_mca) (void);
int (*shadow_control) (struct domain *d, uint32_t op);
int (*hvm_set_pci_link_route) (struct domain *d);
- int (*hvm_inject_msi) (struct domain *d);
int (*hvm_ioreq_server) (struct domain *d, int op);
int (*mem_sharing_op) (struct domain *d, struct domain *cd, int op);
int (*apic) (struct domain *d, int cmd);
@@ -649,11 +648,6 @@ static inline int xsm_hvm_set_pci_link_r
return xsm_ops->hvm_set_pci_link_route(d);
}
-static inline int xsm_hvm_inject_msi (xsm_default_t def, struct domain *d)
-{
- return xsm_ops->hvm_inject_msi(d);
-}
-
static inline int xsm_hvm_ioreq_server (xsm_default_t def, struct domain *d, int op)
{
return xsm_ops->hvm_ioreq_server(d, op);
@@ -145,7 +145,6 @@ void xsm_fixup_ops (struct xsm_operation
#ifdef CONFIG_X86
set_to_dummy_if_null(ops, do_mca);
set_to_dummy_if_null(ops, shadow_control);
- set_to_dummy_if_null(ops, hvm_inject_msi);
set_to_dummy_if_null(ops, hvm_ioreq_server);
set_to_dummy_if_null(ops, mem_sharing_op);
set_to_dummy_if_null(ops, apic);
@@ -1199,6 +1199,9 @@ static int flask_hvm_control(struct doma
case XEN_HVMCTL_track_dirty_vram:
perm = HVM__TRACKDIRTYVRAM;
break;
+ case XEN_HVMCTL_inject_msi:
+ perm = HVM__SEND_IRQ;
+ break;
default:
perm = HVM__HVMCTL;
break;
@@ -1523,11 +1526,6 @@ static int flask_ioport_mapping(struct d
return flask_ioport_permission(d, start, end, access);
}
-static int flask_hvm_inject_msi(struct domain *d)
-{
- return current_has_perm(d, SECCLASS_HVM, HVM__SEND_IRQ);
-}
-
static int flask_hvm_ioreq_server(struct domain *d, int op)
{
return current_has_perm(d, SECCLASS_HVM, HVM__HVMCTL);
@@ -1801,7 +1799,6 @@ static struct xsm_operations flask_ops =
#ifdef CONFIG_X86
.do_mca = flask_do_mca,
.shadow_control = flask_shadow_control,
- .hvm_inject_msi = flask_hvm_inject_msi,
.hvm_ioreq_server = flask_hvm_ioreq_server,
.mem_sharing_op = flask_mem_sharing_op,
.apic = flask_apic,
@@ -281,7 +281,7 @@ class hvm
mem_sharing
# XEN_DOMCTL_audit_p2m
audit_p2m
-# HVMOP_inject_msi
+# XEN_HVMCTL_inject_msi
send_irq
# checked in XENMEM_sharing_op_{share,add_physmap} with:
# source = domain whose memory is being shared