@@ -57,8 +57,8 @@ define(`create_domain_common', `
allow $1 $2:shadow enable;
allow $1 $2:mmu { map_read map_write adjust memorymap physmap pinpage mmuext_op updatemp };
allow $1 $2:grant setup;
- allow $1 $2:hvm { cacheattr getparam hvmctl irqlevel pciroute sethvmc
- setparam pcilevel nested altp2mhvm altp2mhvm_op };
+ allow $1 $2:hvm { cacheattr getparam hvmctl sethvmc
+ setparam nested altp2mhvm altp2mhvm_op };
')
# create_domain(priv, target)
@@ -93,7 +93,7 @@ define(`manage_domain', `
# (inbound migration is the same as domain creation)
define(`migrate_domain_out', `
allow $1 domxen_t:mmu map_read;
- allow $1 $2:hvm { gethvmc getparam irqlevel };
+ allow $1 $2:hvm { gethvmc getparam };
allow $1 $2:mmu { stat pageinfo map_read };
allow $1 $2:domain { getaddrsize getvcpucontext pause destroy };
allow $1 $2:domain2 gettsc;
@@ -151,7 +151,7 @@ define(`device_model', `
allow $1 $2_target:domain { getdomaininfo shutdown };
allow $1 $2_target:mmu { map_read map_write adjust physmap target_hack };
- allow $1 $2_target:hvm { getparam setparam hvmctl irqlevel pciroute pcilevel cacheattr send_irq dm };
+ allow $1 $2_target:hvm { getparam setparam hvmctl cacheattr send_irq dm };
')
# make_device_model(priv, dm_dom, hvm_dom)
@@ -1594,7 +1594,7 @@ int xc_physdev_unmap_pirq(xc_interface *xch,
int xc_hvm_set_pci_intx_level(
xc_interface *xch, domid_t dom,
- uint8_t domain, uint8_t bus, uint8_t device, uint8_t intx,
+ uint16_t domain, uint8_t bus, uint8_t device, uint8_t intx,
unsigned int level);
int xc_hvm_set_isa_irq_level(
xc_interface *xch, domid_t dom,
@@ -470,33 +470,24 @@ int xc_getcpuinfo(xc_interface *xch, int max_cpus,
int xc_hvm_set_pci_intx_level(
xc_interface *xch, domid_t dom,
- uint8_t domain, uint8_t bus, uint8_t device, uint8_t intx,
+ uint16_t domain, uint8_t bus, uint8_t device, uint8_t intx,
unsigned int level)
{
- DECLARE_HYPERCALL_BUFFER(struct xen_hvm_set_pci_intx_level, arg);
- int rc;
-
- arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
- if ( arg == NULL )
- {
- PERROR("Could not allocate memory for xc_hvm_set_pci_intx_level hypercall");
- return -1;
- }
+ struct xen_dm_op op;
+ struct xen_dm_op_set_pci_intx_level *data;
- arg->domid = dom;
- arg->domain = domain;
- arg->bus = bus;
- arg->device = device;
- arg->intx = intx;
- arg->level = level;
+ memset(&op, 0, sizeof(op));
- rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
- HVMOP_set_pci_intx_level,
- HYPERCALL_BUFFER_AS_ARG(arg));
+ op.op = XEN_DMOP_set_pci_intx_level;
+ data = &op.u.set_pci_intx_level;
- xc_hypercall_buffer_free(xch, arg);
+ data->domain = domain;
+ data->bus = bus;
+ data->device = device;
+ data->intx = intx;
+ data->level = level;
- return rc;
+ return do_dm_op(xch, dom, 1, &op, sizeof(op));
}
int xc_hvm_set_isa_irq_level(
@@ -504,53 +495,35 @@ int xc_hvm_set_isa_irq_level(
uint8_t isa_irq,
unsigned int level)
{
- DECLARE_HYPERCALL_BUFFER(struct xen_hvm_set_isa_irq_level, arg);
- int rc;
-
- arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
- if ( arg == NULL )
- {
- PERROR("Could not allocate memory for xc_hvm_set_isa_irq_level hypercall");
- return -1;
- }
+ struct xen_dm_op op;
+ struct xen_dm_op_set_isa_irq_level *data;
- arg->domid = dom;
- arg->isa_irq = isa_irq;
- arg->level = level;
+ memset(&op, 0, sizeof(op));
- rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
- HVMOP_set_isa_irq_level,
- HYPERCALL_BUFFER_AS_ARG(arg));
+ op.op = XEN_DMOP_set_isa_irq_level;
+ data = &op.u.set_isa_irq_level;
- xc_hypercall_buffer_free(xch, arg);
+ data->isa_irq = isa_irq;
+ data->level = level;
- return rc;
+ return do_dm_op(xch, dom, 1, &op, sizeof(op));
}
int xc_hvm_set_pci_link_route(
xc_interface *xch, domid_t dom, uint8_t link, uint8_t isa_irq)
{
- DECLARE_HYPERCALL_BUFFER(struct xen_hvm_set_pci_link_route, arg);
- int rc;
-
- arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
- if ( arg == NULL )
- {
- PERROR("Could not allocate memory for xc_hvm_set_pci_link_route hypercall");
- return -1;
- }
+ struct xen_dm_op op;
+ struct xen_dm_op_set_pci_link_route *data;
- arg->domid = dom;
- arg->link = link;
- arg->isa_irq = isa_irq;
+ memset(&op, 0, sizeof(op));
- rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
- HVMOP_set_pci_link_route,
- HYPERCALL_BUFFER_AS_ARG(arg));
+ op.op = XEN_DMOP_set_pci_link_route;
+ data = &op.u.set_pci_link_route;
- xc_hypercall_buffer_free(xch, arg);
+ data->link = link;
+ data->isa_irq = isa_irq;
- return rc;
+ return do_dm_op(xch, dom, 1, &op, sizeof(op));
}
int xc_hvm_inject_msi(
@@ -99,6 +99,49 @@ static int track_dirty_vram(struct domain *d,
hap_track_dirty_vram(d, first_pfn, nr, buf.h);
}
+static int set_pci_intx_level(struct domain *d, uint16_t domain,
+ uint8_t bus, uint8_t device,
+ uint8_t intx, uint8_t level)
+{
+ if ( domain != 0 || bus != 0 || device > 0x1f || intx > 3 )
+ return -EINVAL;
+
+ switch ( level )
+ {
+ case 0:
+ hvm_pci_intx_deassert(d, device, intx);
+ break;
+ case 1:
+ hvm_pci_intx_assert(d, device, intx);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int set_isa_irq_level(struct domain *d, uint8_t isa_irq,
+ uint8_t level)
+{
+ if ( isa_irq > 15 )
+ return -EINVAL;
+
+ switch ( level )
+ {
+ case 0:
+ hvm_isa_irq_deassert(d, isa_irq);
+ break;
+ case 1:
+ hvm_isa_irq_assert(d, isa_irq);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
long do_dm_op(domid_t domid,
unsigned int nr_bufs,
XEN_GUEST_HANDLE_PARAM(xen_dm_op_buf_t) bufs)
@@ -227,6 +270,47 @@ long do_dm_op(domid_t domid,
break;
}
+ case XEN_DMOP_set_pci_intx_level:
+ {
+ const struct xen_dm_op_set_pci_intx_level *data =
+ &op.u.set_pci_intx_level;
+
+ rc = -EINVAL;
+ if ( data->pad )
+ break;
+
+ rc = set_pci_intx_level(d, data->domain, data->bus,
+ data->device, data->intx,
+ data->level);
+ break;
+ }
+
+ case XEN_DMOP_set_isa_irq_level:
+ {
+ const struct xen_dm_op_set_isa_irq_level *data =
+ &op.u.set_isa_irq_level;
+
+ rc = -EINVAL;
+ if ( data->pad )
+ break;
+
+ rc = set_isa_irq_level(d, data->isa_irq, data->level);
+ break;
+ }
+
+ case XEN_DMOP_set_pci_link_route:
+ {
+ const struct xen_dm_op_set_pci_link_route *data =
+ &op.u.set_pci_link_route;
+
+ rc = -EINVAL;
+ if ( data->pad )
+ break;
+
+ rc = hvm_set_pci_link_route(d, data->link, data->isa_irq);
+ break;
+ }
+
default:
rc = -EOPNOTSUPP;
break;
@@ -4472,50 +4472,6 @@ void hvm_hypercall_page_initialise(struct domain *d,
hvm_funcs.init_hypercall_page(d, hypercall_page);
}
-static int hvmop_set_pci_intx_level(
- XEN_GUEST_HANDLE_PARAM(xen_hvm_set_pci_intx_level_t) uop)
-{
- struct xen_hvm_set_pci_intx_level op;
- struct domain *d;
- int rc;
-
- if ( copy_from_guest(&op, uop, 1) )
- return -EFAULT;
-
- if ( (op.domain > 0) || (op.bus > 0) || (op.device > 31) || (op.intx > 3) )
- return -EINVAL;
-
- rc = rcu_lock_remote_domain_by_id(op.domid, &d);
- if ( rc != 0 )
- return rc;
-
- rc = -EINVAL;
- if ( !is_hvm_domain(d) )
- goto out;
-
- rc = xsm_hvm_set_pci_intx_level(XSM_DM_PRIV, d);
- if ( rc )
- goto out;
-
- rc = 0;
- switch ( op.level )
- {
- case 0:
- hvm_pci_intx_deassert(d, op.device, op.intx);
- break;
- case 1:
- hvm_pci_intx_assert(d, op.device, op.intx);
- break;
- default:
- rc = -EINVAL;
- break;
- }
-
- out:
- rcu_unlock_domain(d);
- return rc;
-}
-
void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
{
struct domain *d = v->domain;
@@ -4659,83 +4615,6 @@ static void hvm_s3_resume(struct domain *d)
}
}
-static int hvmop_set_isa_irq_level(
- XEN_GUEST_HANDLE_PARAM(xen_hvm_set_isa_irq_level_t) uop)
-{
- struct xen_hvm_set_isa_irq_level op;
- struct domain *d;
- int rc;
-
- if ( copy_from_guest(&op, uop, 1) )
- return -EFAULT;
-
- if ( op.isa_irq > 15 )
- return -EINVAL;
-
- rc = rcu_lock_remote_domain_by_id(op.domid, &d);
- if ( rc != 0 )
- return rc;
-
- rc = -EINVAL;
- if ( !is_hvm_domain(d) )
- goto out;
-
- rc = xsm_hvm_set_isa_irq_level(XSM_DM_PRIV, d);
- if ( rc )
- goto out;
-
- rc = 0;
- switch ( op.level )
- {
- case 0:
- hvm_isa_irq_deassert(d, op.isa_irq);
- break;
- case 1:
- hvm_isa_irq_assert(d, op.isa_irq);
- break;
- default:
- rc = -EINVAL;
- break;
- }
-
- out:
- rcu_unlock_domain(d);
- return rc;
-}
-
-static int hvmop_set_pci_link_route(
- XEN_GUEST_HANDLE_PARAM(xen_hvm_set_pci_link_route_t) uop)
-{
- struct xen_hvm_set_pci_link_route op;
- struct domain *d;
- int rc;
-
- if ( copy_from_guest(&op, uop, 1) )
- return -EFAULT;
-
- if ( (op.link > 3) || (op.isa_irq > 15) )
- return -EINVAL;
-
- rc = rcu_lock_remote_domain_by_id(op.domid, &d);
- if ( rc != 0 )
- return rc;
-
- rc = -EINVAL;
- if ( !is_hvm_domain(d) )
- goto out;
-
- rc = xsm_hvm_set_pci_link_route(XSM_DM_PRIV, d);
- if ( rc )
- goto out;
-
- rc = 0;
- hvm_set_pci_link_route(d, op.link, op.isa_irq);
-
- out:
- rcu_unlock_domain(d);
- return rc;
-}
-
static int hvmop_inject_msi(
XEN_GUEST_HANDLE_PARAM(xen_hvm_inject_msi_t) uop)
{
@@ -5530,26 +5409,11 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
guest_handle_cast(arg, xen_hvm_param_t));
break;
- case HVMOP_set_pci_intx_level:
- rc = hvmop_set_pci_intx_level(
- guest_handle_cast(arg, xen_hvm_set_pci_intx_level_t));
- break;
-
- case HVMOP_set_isa_irq_level:
- rc = hvmop_set_isa_irq_level(
- guest_handle_cast(arg, xen_hvm_set_isa_irq_level_t));
- break;
-
case HVMOP_inject_msi:
rc = hvmop_inject_msi(
guest_handle_cast(arg, xen_hvm_inject_msi_t));
break;
- case HVMOP_set_pci_link_route:
- rc = hvmop_set_pci_link_route(
- guest_handle_cast(arg, xen_hvm_set_pci_link_route_t));
- break;
-
case HVMOP_flush_tlbs:
rc = guest_handle_is_null(arg) ? hvmop_flush_tlb_all() : -EINVAL;
break;
@@ -229,13 +229,14 @@ void hvm_assert_evtchn_irq(struct vcpu *v)
hvm_set_callback_irq_level(v);
}
-void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
+int hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
{
struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
u8 old_isa_irq;
int i;
- ASSERT((link <= 3) && (isa_irq <= 15));
+ if ( (link > 3) || (isa_irq > 15) )
+ return -EINVAL;
spin_lock(&d->arch.hvm_domain.irq_lock);
@@ -273,6 +274,8 @@ void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
dprintk(XENLOG_G_INFO, "Dom%u PCI link %u changed %u -> %u\n",
d->domain_id, link, old_isa_irq, isa_irq);
+
+ return 0;
}
int hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data)
@@ -191,6 +191,49 @@ struct xen_dm_op_track_dirty_vram {
uint64_aligned_t first_pfn;
};
+/*
+ * XEN_DMOP_set_pci_intx_level: Set the logical level of one of a domain's
+ * PCI INTx pins.
+ */
+#define XEN_DMOP_set_pci_intx_level 8
+
+struct xen_dm_op_set_pci_intx_level {
+ /* IN - PCI INTx identification (domain:bus:device:intx) */
+ uint16_t domain;
+ uint8_t bus, device, intx;
+ /* IN - Level: 0 -> deasserted, 1 -> asserted */
+ uint8_t level;
+ uint16_t pad;
+};
+
+/*
+ * XEN_DMOP_set_isa_irq_level: Set the logical level of one of a domain's
+ * ISA IRQ lines.
+ */
+#define XEN_DMOP_set_isa_irq_level 9
+
+struct xen_dm_op_set_isa_irq_level {
+ /* IN - ISA IRQ (0-15) */
+ uint8_t isa_irq;
+ /* IN - Level: 0 -> deasserted, 1 -> asserted */
+ uint8_t level;
+ uint16_t pad;
+};
+
+/*
+ * XEN_DMOP_set_pci_link_route: Map a PCI INTx line to an IRQ line.
+ */
+#define XEN_DMOP_set_pci_link_route 10
+
+struct xen_dm_op_set_pci_link_route {
+ /* PCI INTx line (0-3) */
+ uint8_t link;
+ /* ISA IRQ (1-15) or 0 -> disable link */
+ uint8_t isa_irq;
+ uint16_t pad;
+};
+
+
struct xen_dm_op {
uint32_t op;
uint32_t pad;
@@ -202,6 +245,9 @@ struct xen_dm_op {
struct xen_dm_op_set_ioreq_server_state set_ioreq_server_state;
struct xen_dm_op_destroy_ioreq_server destroy_ioreq_server;
struct xen_dm_op_track_dirty_vram track_dirty_vram;
+ struct xen_dm_op_set_pci_intx_level set_pci_intx_level;
+ struct xen_dm_op_set_isa_irq_level set_isa_irq_level;
+ struct xen_dm_op_set_pci_link_route set_pci_link_route;
} u;
};
@@ -38,6 +38,8 @@ struct xen_hvm_param {
typedef struct xen_hvm_param xen_hvm_param_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t);
+#if __XEN_INTERFACE_VERSION__ < 0x00040900
+
/* Set the logical level of one of a domain's PCI INTx wires. */
#define HVMOP_set_pci_intx_level 2
struct xen_hvm_set_pci_intx_level {
@@ -76,6 +78,8 @@ struct xen_hvm_set_pci_link_route {
typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
+#endif /* __XEN_INTERFACE_VERSION__ < 0x00040900 */
+
/* Flushes all VCPU TLBs: @arg must be NULL. */
#define HVMOP_flush_tlbs 5
@@ -27,7 +27,7 @@
#ifndef __XEN_PUBLIC_XEN_COMPAT_H__
#define __XEN_PUBLIC_XEN_COMPAT_H__
-#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040800
+#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040900
#if defined(__XEN__) || defined(__XEN_TOOLS__)
/* Xen is built with matching headers and implements the latest interface. */
@@ -122,7 +122,7 @@ void hvm_isa_irq_assert(
void hvm_isa_irq_deassert(
struct domain *d, unsigned int isa_irq);
-void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq);
+int hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq);
int hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data);
@@ -610,24 +610,6 @@ static XSM_INLINE int xsm_shadow_control(XSM_DEFAULT_ARG struct domain *d, uint3
return xsm_default_action(action, current->domain, d);
}
-static XSM_INLINE int xsm_hvm_set_pci_intx_level(XSM_DEFAULT_ARG struct domain *d)
-{
- XSM_ASSERT_ACTION(XSM_DM_PRIV);
- return xsm_default_action(action, current->domain, d);
-}
-
-static XSM_INLINE int xsm_hvm_set_isa_irq_level(XSM_DEFAULT_ARG struct domain *d)
-{
- XSM_ASSERT_ACTION(XSM_DM_PRIV);
- return xsm_default_action(action, current->domain, d);
-}
-
-static XSM_INLINE int xsm_hvm_set_pci_link_route(XSM_DEFAULT_ARG struct domain *d)
-{
- XSM_ASSERT_ACTION(XSM_DM_PRIV);
- return xsm_default_action(action, current->domain, d);
-}
-
static XSM_INLINE int xsm_hvm_inject_msi(XSM_DEFAULT_ARG struct domain *d)
{
XSM_ASSERT_ACTION(XSM_DM_PRIV);
@@ -162,9 +162,6 @@ struct xsm_operations {
#ifdef CONFIG_X86
int (*do_mca) (void);
int (*shadow_control) (struct domain *d, uint32_t op);
- int (*hvm_set_pci_intx_level) (struct domain *d);
- int (*hvm_set_isa_irq_level) (struct domain *d);
- int (*hvm_set_pci_link_route) (struct domain *d);
int (*hvm_inject_msi) (struct domain *d);
int (*mem_sharing_op) (struct domain *d, struct domain *cd, int op);
int (*apic) (struct domain *d, int cmd);
@@ -635,21 +632,6 @@ static inline int xsm_shadow_control (xsm_default_t def, struct domain *d, uint3
return xsm_ops->shadow_control(d, op);
}
-static inline int xsm_hvm_set_pci_intx_level (xsm_default_t def, struct domain *d)
-{
- return xsm_ops->hvm_set_pci_intx_level(d);
-}
-
-static inline int xsm_hvm_set_isa_irq_level (xsm_default_t def, struct domain *d)
-{
- return xsm_ops->hvm_set_isa_irq_level(d);
-}
-
-static inline int xsm_hvm_set_pci_link_route (xsm_default_t def, struct domain *d)
-{
- return xsm_ops->hvm_set_pci_link_route(d);
-}
-
static inline int xsm_hvm_inject_msi (xsm_default_t def, struct domain *d)
{
return xsm_ops->hvm_inject_msi(d);
@@ -145,9 +145,6 @@ void __init xsm_fixup_ops (struct xsm_operations *ops)
#ifdef CONFIG_X86
set_to_dummy_if_null(ops, do_mca);
set_to_dummy_if_null(ops, shadow_control);
- set_to_dummy_if_null(ops, hvm_set_pci_intx_level);
- set_to_dummy_if_null(ops, hvm_set_isa_irq_level);
- set_to_dummy_if_null(ops, hvm_set_pci_link_route);
set_to_dummy_if_null(ops, hvm_inject_msi);
set_to_dummy_if_null(ops, mem_sharing_op);
set_to_dummy_if_null(ops, apic);
@@ -1499,21 +1499,6 @@ static int flask_ioport_mapping(struct domain *d, uint32_t start, uint32_t end,
return flask_ioport_permission(d, start, end, access);
}
-static int flask_hvm_set_pci_intx_level(struct domain *d)
-{
- return current_has_perm(d, SECCLASS_HVM, HVM__PCILEVEL);
-}
-
-static int flask_hvm_set_isa_irq_level(struct domain *d)
-{
- return current_has_perm(d, SECCLASS_HVM, HVM__IRQLEVEL);
-}
-
-static int flask_hvm_set_pci_link_route(struct domain *d)
-{
- return current_has_perm(d, SECCLASS_HVM, HVM__PCIROUTE);
-}
-
static int flask_hvm_inject_msi(struct domain *d)
{
return current_has_perm(d, SECCLASS_HVM, HVM__SEND_IRQ);
@@ -257,12 +257,6 @@ class hvm
setparam
# HVMOP_get_param
getparam
-# HVMOP_set_pci_intx_level (also needs hvmctl)
- pcilevel
-# HVMOP_set_isa_irq_level
- irqlevel
-# HVMOP_set_pci_link_route
- pciroute
bind_irq
# XEN_DOMCTL_pin_mem_cacheattr
cacheattr
... HVMOP_set_pci_link_route

These HVMOPs were exposed to guests so their definitions need to be
preserved for compatibility. This patch therefore updates
__XEN_LATEST_INTERFACE_VERSION__ to 0x00040900 and makes the HVMOP
definitions conditional on __XEN_INTERFACE_VERSION__ being less than that
value.

NOTE: This patch also widens the 'domain' parameter of
      xc_hvm_set_pci_intx_level() from a uint8_t to a uint16_t.

Suggested-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Daniel De Graaf <dgdegra@tycho.nsa.gov>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>

v2:
- Interface version modification moved to this patch, where it is needed.
- Addressed several comments from Jan.
---
 tools/flask/policy/modules/xen.if   |   8 +--
 tools/libxc/include/xenctrl.h       |   2 +-
 tools/libxc/xc_misc.c               |  83 ++++++++--------------
 xen/arch/x86/hvm/dm.c               |  84 ++++++++++++++++++++++
 xen/arch/x86/hvm/hvm.c              | 136 ------------------------------------
 xen/arch/x86/hvm/irq.c              |   7 +-
 xen/include/public/hvm/dm_op.h      |  46 ++++++++++++
 xen/include/public/hvm/hvm_op.h     |   4 ++
 xen/include/public/xen-compat.h     |   2 +-
 xen/include/xen/hvm/irq.h           |   2 +-
 xen/include/xsm/dummy.h             |  18 -----
 xen/include/xsm/xsm.h               |  18 -----
 xen/xsm/dummy.c                     |   3 -
 xen/xsm/flask/hooks.c               |  15 ----
 xen/xsm/flask/policy/access_vectors |   6 --
 15 files changed, 174 insertions(+), 260 deletions(-)
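
For reference, a minimal sketch (not part of this patch) of how a device model
or test tool would exercise the re-routed operations through the unchanged
libxc entry points after this series; the domid, PCI device, link and IRQ
numbers below are illustrative assumptions only.

```c
/*
 * Sketch only: drives the three former HVMOPs via libxc, which now issues
 * XEN_DMOP_* under the hood.  Domid/device/IRQ values are made up.
 */
#include <stdio.h>
#include <xenctrl.h>

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    domid_t dom = 5;                      /* example HVM guest domid */
    int rc = 0;

    if ( !xch )
        return 1;

    /* Route PCI link 0 to ISA IRQ 10. */
    rc |= xc_hvm_set_pci_link_route(xch, dom, 0, 10);

    /* Assert INTA# of device 3 on segment 0, bus 0 (note uint16_t segment). */
    rc |= xc_hvm_set_pci_intx_level(xch, dom, 0, 0, 3, 0, 1);

    /* De-assert ISA IRQ 8. */
    rc |= xc_hvm_set_isa_irq_level(xch, dom, 8, 0);

    if ( rc )
        fprintf(stderr, "dm_op call failed\n");

    xc_interface_close(xch);
    return rc ? 1 : 0;
}
```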