@@ -1683,6 +1683,15 @@ int xc_domain_ioport_mapping(xc_interface *xch,
uint32_t nr_ports,
uint32_t add_mapping);
+int xc_domain_update_msi_irq_remapping(
+ xc_interface *xch,
+ uint32_t domid,
+ uint32_t pirq,
+ uint32_t source_id,
+ uint32_t data,
+ uint64_t addr,
+ uint64_t gtable);
+
int xc_domain_update_msi_irq(
xc_interface *xch,
uint32_t domid,
@@ -1697,6 +1706,14 @@ int xc_domain_unbind_msi_irq(xc_interface *xch,
uint32_t pirq,
uint32_t gflags);
+int xc_domain_unbind_msi_irq_remapping(
+ xc_interface *xch,
+ uint32_t domid,
+ uint32_t pirq,
+ uint32_t source_id,
+ uint32_t data,
+ uint64_t addr);
+
int xc_domain_bind_pt_irq(xc_interface *xch,
uint32_t domid,
uint8_t machine_irq,
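For orientation, a minimal sketch of how a toolstack might drive the two new
calls (the domid, pirq and source_id values are made up; source_id is assumed
to be the requester's BDF packed as bus << 8 | dev << 3 | fn):

#include <stdint.h>
#include <stdio.h>
#include <xenctrl.h>

int bind_remapped_msi_sketch(xc_interface *xch)
{
    uint32_t domid = 1, pirq = 40;                /* hypothetical values */
    uint32_t source_id = (3 << 8) | (0 << 3) | 0; /* assumed BDF 03:00.0 */
    uint32_t data = 0x0001;
    uint64_t addr = 0xfee00010;   /* bit 4 set: remappable-format address */
    int rc;

    /* gtable == 0: this MSI does not originate from an MSI-X table. */
    rc = xc_domain_update_msi_irq_remapping(xch, domid, pirq, source_id,
                                            data, addr, 0);
    if ( rc )
    {
        fprintf(stderr, "bind failed: %d\n", rc);
        return rc;
    }

    return xc_domain_unbind_msi_irq_remapping(xch, domid, pirq, source_id,
                                              data, addr);
}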
@@ -1592,8 +1592,35 @@ int xc_deassign_dt_device(
return rc;
}
+int xc_domain_update_msi_irq_remapping(
+ xc_interface *xch,
+ uint32_t domid,
+ uint32_t pirq,
+ uint32_t source_id,
+ uint32_t data,
+ uint64_t addr,
+ uint64_t gtable)
+{
+ int rc;
+ xen_domctl_bind_pt_irq_t *bind;
+ DECLARE_DOMCTL;
+ domctl.cmd = XEN_DOMCTL_bind_pt_irq;
+ domctl.domain = (domid_t)domid;
+
+ bind = &(domctl.u.bind_pt_irq);
+ bind->hvm_domid = domid;
+ bind->irq_type = PT_IRQ_TYPE_MSI_IR;
+ bind->machine_irq = pirq;
+ bind->u.msi_ir.source_id = source_id;
+ bind->u.msi_ir.data = data;
+ bind->u.msi_ir.addr = addr;
+ bind->u.msi_ir.gtable = gtable;
+
+ rc = do_domctl(xch, &domctl);
+ return rc;
+}
int xc_domain_update_msi_irq(
xc_interface *xch,
@@ -1623,6 +1650,34 @@ int xc_domain_update_msi_irq(
return rc;
}
+int xc_domain_unbind_msi_irq_remapping(
+ xc_interface *xch,
+ uint32_t domid,
+ uint32_t pirq,
+ uint32_t source_id,
+ uint32_t data,
+ uint64_t addr)
+{
+ int rc;
+ xen_domctl_bind_pt_irq_t *bind;
+
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_unbind_pt_irq;
+ domctl.domain = (domid_t)domid;
+
+ bind = &(domctl.u.bind_pt_irq);
+ bind->hvm_domid = domid;
+ bind->irq_type = PT_IRQ_TYPE_MSI_IR;
+ bind->machine_irq = pirq;
+ bind->u.msi_ir.source_id = source_id;
+ bind->u.msi_ir.data = data;
+ bind->u.msi_ir.addr = addr;
+
+ rc = do_domctl(xch, &domctl);
+ return rc;
+}
+
int xc_domain_unbind_msi_irq(
xc_interface *xch,
uint32_t domid,
@@ -28,3 +28,13 @@ void irq_request_ioapic_fill(struct irq_remapping_request *req,
req->source_id = ioapic_id;
req->msg.rte = rte;
}
+
+void irq_request_msi_fill(struct irq_remapping_request *req,
+ uint32_t source_id, uint64_t addr, uint32_t data)
+{
+ ASSERT(req);
+ req->type = VIOMMU_REQUEST_IRQ_MSI;
+ req->source_id = source_id;
+ req->msg.msi.addr = addr;
+ req->msg.msi.data = data;
+}
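For contrast with irq_request_ioapic_fill() above, a hypothetical caller would
look like this (the requester ID and MSI address/data are invented); the
filled request is what viommu_get_irq_info() consumes in pirq_dpci_2_msi_attr()
further down:

    struct irq_remapping_request req;

    /* Remappable-format MSI from a made-up requester 02:00.0 (BDF 0x0200). */
    irq_request_msi_fill(&req, 0x0200, 0xfee00018UL, 0x0001);
    /* req.type is now VIOMMU_REQUEST_IRQ_MSI; pass &req to viommu_get_irq_info(). */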
@@ -305,6 +305,89 @@ static struct vcpu *pi_find_dest_vcpu(const struct domain *d, uint32_t dest_id,
return NULL;
}
+static inline void set_hvm_gmsi_info(struct hvm_gmsi_info *msi,
+ xen_domctl_bind_pt_irq_t *pt_irq_bind,
+ int irq_type)
+{
+ if ( irq_type == PT_IRQ_TYPE_MSI )
+ {
+ msi->legacy.gvec = pt_irq_bind->u.msi.gvec;
+ msi->legacy.gflags = pt_irq_bind->u.msi.gflags;
+ }
+ else if ( irq_type == PT_IRQ_TYPE_MSI_IR )
+ {
+ msi->intremap.source_id = pt_irq_bind->u.msi_ir.source_id;
+ msi->intremap.data = pt_irq_bind->u.msi_ir.data;
+ msi->intremap.addr = pt_irq_bind->u.msi_ir.addr;
+ }
+ else
+ BUG();
+}
+
+static inline void clear_hvm_gmsi_info(struct hvm_gmsi_info *msi, int irq_type)
+{
+ if ( irq_type == PT_IRQ_TYPE_MSI )
+ {
+ msi->legacy.gvec = 0;
+ msi->legacy.gflags = 0;
+ }
+ else if ( irq_type == PT_IRQ_TYPE_MSI_IR )
+ {
+ msi->intremap.source_id = 0;
+ msi->intremap.data = 0;
+ msi->intremap.addr = 0;
+ }
+ else
+ BUG();
+}
+
+static inline bool hvm_gmsi_info_need_update(struct hvm_gmsi_info *msi,
+ xen_domctl_bind_pt_irq_t *pt_irq_bind,
+ int irq_type)
+{
+ if ( irq_type == PT_IRQ_TYPE_MSI )
+ return ((msi->legacy.gvec != pt_irq_bind->u.msi.gvec) ||
+ (msi->legacy.gflags != pt_irq_bind->u.msi.gflags));
+ else if ( irq_type == PT_IRQ_TYPE_MSI_IR )
+ return ((msi->intremap.source_id != pt_irq_bind->u.msi_ir.source_id) ||
+ (msi->intremap.data != pt_irq_bind->u.msi_ir.data) ||
+ (msi->intremap.addr != pt_irq_bind->u.msi_ir.addr));
+ BUG();
+ return false;
+}
+
+static int pirq_dpci_2_msi_attr(struct domain *d,
+ struct hvm_pirq_dpci *pirq_dpci, uint8_t *gvec,
+ uint8_t *dest, uint8_t *dm, uint8_t *dlm)
+{
+ int rc = 0;
+ if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI )
+ {
+ *gvec = pirq_dpci->gmsi.legacy.gvec;
+ *dest = pirq_dpci->gmsi.legacy.gflags & VMSI_DEST_ID_MASK;
+ *dm = !!(pirq_dpci->gmsi.legacy.gflags & VMSI_DM_MASK);
+ *dlm = (pirq_dpci->gmsi.legacy.gflags & VMSI_DELIV_MASK) >>
+ GFLAGS_SHIFT_DELIV_MODE;
+ }
+ else if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI_IR )
+ {
+ struct irq_remapping_request request;
+ struct irq_remapping_info irq_info;
+ irq_request_msi_fill(&request, pirq_dpci->gmsi.intremap.source_id,
+ pirq_dpci->gmsi.intremap.addr,
+ pirq_dpci->gmsi.intremap.data);
+ rc = viommu_get_irq_info(d, &request, &irq_info);
+ if ( !rc )
+ {
+ *gvec = irq_info.vector;
+ *dest = irq_info.dest;
+ *dm = irq_info.dest_mode;
+ *dlm = irq_info.delivery_mode;
+ }
+ }
+ return rc;
+}
+
int pt_irq_create_bind(
struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
{
@@ -362,16 +444,21 @@ int pt_irq_create_bind(
switch ( pt_irq_bind->irq_type )
{
case PT_IRQ_TYPE_MSI:
+ case PT_IRQ_TYPE_MSI_IR:
{
- uint8_t dest, dest_mode, delivery_mode;
+ uint8_t dest = 0, dest_mode = 0, delivery_mode = 0, gvec;
int dest_vcpu_id;
+ int irq_type = pt_irq_bind->irq_type;
+ bool ir = (pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI_IR);
+ uint64_t gtable = ir ? pt_irq_bind->u.msi_ir.gtable :
+ pt_irq_bind->u.msi.gtable;
if ( !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) )
{
pirq_dpci->flags = HVM_IRQ_DPCI_MAPPED | HVM_IRQ_DPCI_MACH_MSI |
- HVM_IRQ_DPCI_GUEST_MSI;
- pirq_dpci->gmsi.legacy.gvec = pt_irq_bind->u.msi.gvec;
- pirq_dpci->gmsi.legacy.gflags = pt_irq_bind->u.msi.gflags;
+ (ir ? HVM_IRQ_DPCI_GUEST_MSI_IR :
+ HVM_IRQ_DPCI_GUEST_MSI);
+ set_hvm_gmsi_info(&pirq_dpci->gmsi, pt_irq_bind, irq_type);
/*
* 'pt_irq_create_bind' can be called after 'pt_irq_destroy_bind'.
* The 'pirq_cleanup_check' which would free the structure is only
@@ -386,9 +473,9 @@ int pt_irq_create_bind(
pirq_dpci->dom = d;
/* bind after hvm_irq_dpci is setup to avoid race with irq handler*/
rc = pirq_guest_bind(d->vcpu[0], info, 0);
- if ( rc == 0 && pt_irq_bind->u.msi.gtable )
+ if ( rc == 0 && gtable )
{
- rc = msixtbl_pt_register(d, info, pt_irq_bind->u.msi.gtable);
+ rc = msixtbl_pt_register(d, info, gtable);
if ( unlikely(rc) )
{
pirq_guest_unbind(d, info);
@@ -403,8 +490,7 @@ int pt_irq_create_bind(
}
if ( unlikely(rc) )
{
- pirq_dpci->gmsi.legacy.gflags = 0;
- pirq_dpci->gmsi.legacy.gvec = 0;
+ clear_hvm_gmsi_info(&pirq_dpci->gmsi, irq_type);
pirq_dpci->dom = NULL;
pirq_dpci->flags = 0;
pirq_cleanup_check(info, d);
@@ -414,7 +500,8 @@ int pt_irq_create_bind(
}
else
{
- uint32_t mask = HVM_IRQ_DPCI_MACH_MSI | HVM_IRQ_DPCI_GUEST_MSI;
+ uint32_t mask = HVM_IRQ_DPCI_MACH_MSI |
+ (ir ? HVM_IRQ_DPCI_GUEST_MSI_IR : HVM_IRQ_DPCI_GUEST_MSI);
if ( (pirq_dpci->flags & mask) != mask )
{
@@ -423,30 +510,29 @@ int pt_irq_create_bind(
}
/* If pirq is already mapped as vmsi, update guest data/addr. */
- if ( pirq_dpci->gmsi.legacy.gvec != pt_irq_bind->u.msi.gvec ||
- pirq_dpci->gmsi.legacy.gflags != pt_irq_bind->u.msi.gflags )
+ if ( hvm_gmsi_info_need_update(&pirq_dpci->gmsi, pt_irq_bind,
+ irq_type) )
{
/* Directly clear pending EOIs before enabling new MSI info. */
pirq_guest_eoi(info);
- pirq_dpci->gmsi.legacy.gvec = pt_irq_bind->u.msi.gvec;
- pirq_dpci->gmsi.legacy.gflags = pt_irq_bind->u.msi.gflags;
+ set_hvm_gmsi_info(&pirq_dpci->gmsi, pt_irq_bind, irq_type);
}
}
/* Calculate dest_vcpu_id for MSI-type pirq migration. */
- dest = pirq_dpci->gmsi.legacy.gflags & VMSI_DEST_ID_MASK;
- dest_mode = !!(pirq_dpci->gmsi.legacy.gflags & VMSI_DM_MASK);
- delivery_mode = (pirq_dpci->gmsi.legacy.gflags & VMSI_DELIV_MASK) >>
- GFLAGS_SHIFT_DELIV_MODE;
-
- dest_vcpu_id = hvm_girq_dest_2_vcpu_id(d, dest, dest_mode);
+ rc = pirq_dpci_2_msi_attr(d, pirq_dpci, &gvec, &dest, &dest_mode,
+ &delivery_mode);
+ if ( rc )
+ dest_vcpu_id = -2; /* -2: Internal Error */
+ else
+ dest_vcpu_id = hvm_girq_dest_2_vcpu_id(d, dest, dest_mode);
pirq_dpci->gmsi.dest_vcpu_id = dest_vcpu_id;
spin_unlock(&d->event_lock);
if ( dest_vcpu_id >= 0 )
hvm_migrate_pirqs(d->vcpu[dest_vcpu_id]);
/* Use interrupt posting if it is supported. */
- if ( iommu_intpost )
+ if ( iommu_intpost && !ir )
{
const struct vcpu *vcpu = pi_find_dest_vcpu(d, dest, dest_mode,
delivery_mode, pirq_dpci->gmsi.legacy.gvec);
@@ -588,6 +674,7 @@ int pt_irq_destroy_bind(
}
break;
case PT_IRQ_TYPE_MSI:
+ case PT_IRQ_TYPE_MSI_IR:
break;
default:
return -EOPNOTSUPP;
@@ -51,6 +51,8 @@ struct irq_remapping_request
void irq_request_ioapic_fill(struct irq_remapping_request *req,
uint32_t ioapic_id, uint64_t rte);
+void irq_request_msi_fill(struct irq_remapping_request *req,
+ uint32_t source_id, uint64_t addr, uint32_t data);
static inline const struct viommu_ops *viommu_get_ops(void)
{
@@ -560,6 +560,7 @@ typedef enum pt_irq_type_e {
PT_IRQ_TYPE_MSI,
PT_IRQ_TYPE_MSI_TRANSLATE,
PT_IRQ_TYPE_SPI, /* ARM: valid range 32-1019 */
+ PT_IRQ_TYPE_MSI_IR,
} pt_irq_type_t;
struct xen_domctl_bind_pt_irq {
uint32_t machine_irq;
@@ -581,6 +582,12 @@ struct xen_domctl_bind_pt_irq {
uint64_aligned_t gtable;
} msi;
struct {
+ uint32_t source_id;
+ uint32_t data;
+ uint64_t addr;
+ uint64_aligned_t gtable;
+ } msi_ir;
+ struct {
uint16_t spi;
} spi;
} u;
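Since xen_domctl_bind_pt_irq is a tools-visible ABI, the new msi_ir member is
worth a quick layout check. A stand-alone sketch (the mock struct merely
mirrors the declaration above; uint64_aligned_t is assumed to resolve to a
plain 64-bit type in tools builds):

#include <stddef.h>
#include <stdint.h>

/* Mock of the msi_ir member above, for layout illustration only. */
struct msi_ir_layout {
    uint32_t source_id;
    uint32_t data;
    uint64_t addr;
    uint64_t gtable;
};

_Static_assert(offsetof(struct msi_ir_layout, addr) == 8,
               "source_id and data pack into the first 8 bytes");
_Static_assert(sizeof(struct msi_ir_layout) == 24,
               "no hidden padding expected");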
@@ -40,6 +40,7 @@ struct dev_intx_gsi_link {
#define _HVM_IRQ_DPCI_EOI_LATCH_SHIFT 3
#define _HVM_IRQ_DPCI_GUEST_PCI_SHIFT 4
#define _HVM_IRQ_DPCI_GUEST_MSI_SHIFT 5
+#define _HVM_IRQ_DPCI_GUEST_MSI_IR_SHIFT 6
#define _HVM_IRQ_DPCI_TRANSLATE_SHIFT 15
#define HVM_IRQ_DPCI_MACH_PCI (1 << _HVM_IRQ_DPCI_MACH_PCI_SHIFT)
#define HVM_IRQ_DPCI_MACH_MSI (1 << _HVM_IRQ_DPCI_MACH_MSI_SHIFT)
@@ -47,6 +48,7 @@ struct dev_intx_gsi_link {
#define HVM_IRQ_DPCI_EOI_LATCH (1 << _HVM_IRQ_DPCI_EOI_LATCH_SHIFT)
#define HVM_IRQ_DPCI_GUEST_PCI (1 << _HVM_IRQ_DPCI_GUEST_PCI_SHIFT)
#define HVM_IRQ_DPCI_GUEST_MSI (1 << _HVM_IRQ_DPCI_GUEST_MSI_SHIFT)
+#define HVM_IRQ_DPCI_GUEST_MSI_IR (1 << _HVM_IRQ_DPCI_GUEST_MSI_IR_SHIFT)
#define HVM_IRQ_DPCI_TRANSLATE (1 << _HVM_IRQ_DPCI_TRANSLATE_SHIFT)
#define VMSI_DEST_ID_MASK 0xff
@@ -65,8 +67,14 @@ struct hvm_gmsi_info {
uint32_t gvec;
uint32_t gflags;
} legacy;
+ struct {
+ uint32_t source_id;
+ uint32_t data;
+ uint64_t addr;
+ } intremap;
};
- int dest_vcpu_id; /* -1 :multi-dest, non-negative: dest_vcpu_id */
+ /* -2 :internal error, -1 :multi-dest, non-negative: dest_vcpu_id */
+ int dest_vcpu_id;
};
#define IR_MSI_INDEX(data, addr) (((((addr) & 0x4) << 13) + \
                                   (((addr) & 0xfffff) >> 5)) + \
                                  (!!((addr) & 0x8)) * ((data) & 0xffff))
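IR_MSI_INDEX recovers an IRTE index from a remappable-format MSI address/data
pair: address bits 19:5 carry index[14:0], address bit 2 carries index[15],
and when SHV (address bit 3) is set, data[15:0] is added as a subhandle,
matching the VT-d remappable MSI encoding. A small worked check with made-up
values:

#include <assert.h>

/* Assumes the IR_MSI_INDEX definition above is in scope. */
int main(void)
{
    /* Bit 4 (remappable format) set, all index bits clear -> index 0. */
    assert(IR_MSI_INDEX(0x0, 0xfee00010UL) == 0);

    /* Index 0x21 carried in address bits 19:5 (0x21 << 5 == 0x420). */
    assert(IR_MSI_INDEX(0x0, 0xfee00430UL) == 0x21);

    /* SHV (bit 3) set: data[15:0] == 2 is added as a subhandle. */
    assert(IR_MSI_INDEX(0x2, 0xfee00438UL) == 0x21 + 2);

    return 0;
}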