| Message ID | 1490764315-7162-6-git-send-email-chao.gao@intel.com (mailing list archive) |
|---|---|
| State | New, archived |
> From: Gao, Chao
> Sent: Wednesday, March 29, 2017 1:12 PM
>
> We used structure assignment to update irte which was non-atomic when the
> whole IRTE was to be updated. It is unsafe when a interrupt happened during
> update. Furthermore, no bug or warning would be reported when this happened.
>
> This patch introduces two variants, atomic and non-atomic, to update
> irte. Both variants will update IRTE if possible. If the caller requests a
> atomic update but we can't meet it, we raise a bug.
>
> Signed-off-by: Chao Gao <chao.gao@intel.com>
> ---
> v11:
> - Add two variant function to update IRTE. Call the non-atomic one for init
>   and clear operations. Call the atomic one for other cases.
> - Add a new field to indicate the remap_entry associated with msi_desc is
>   initialized or not.
>
> v10:
> - rename copy_irte_to_irt to update_irte
> - remove copy_from_to_irt
> - change commmit message and add some comments to illustrate on which
>   condition update_irte() is safe.
>
>  xen/arch/x86/msi.c                     |  1 +
>  xen/drivers/passthrough/vtd/intremap.c | 78 ++++++++++++++++++++++++++++++++--
>  xen/include/asm-x86/msi.h              |  1 +
>  3 files changed, 76 insertions(+), 4 deletions(-)
>
> diff --git a/xen/arch/x86/msi.c b/xen/arch/x86/msi.c
> index 3374cd4..7ed1243 100644
> --- a/xen/arch/x86/msi.c
> +++ b/xen/arch/x86/msi.c
> @@ -578,6 +578,7 @@ static struct msi_desc *alloc_msi_entry(unsigned int nr)
>          entry[nr].dev = NULL;
>          entry[nr].irq = -1;
>          entry[nr].remap_index = -1;
> +        entry[nr].remap_entry_initialized = false;
>          entry[nr].pi_desc = NULL;
>      }
>
> diff --git a/xen/drivers/passthrough/vtd/intremap.c b/xen/drivers/passthrough/vtd/intremap.c
> index b992f23..b7f3cf1 100644
> --- a/xen/drivers/passthrough/vtd/intremap.c
> +++ b/xen/drivers/passthrough/vtd/intremap.c
> @@ -169,10 +169,64 @@ bool_t __init iommu_supports_eim(void)
>      return 1;
>  }
>
> +static void update_irte(struct iremap_entry *entry,
> +                        const struct iremap_entry *new_ire,
> +                        bool atomic)
> +{
> +    if ( cpu_has_cx16 )
> +    {
> +        __uint128_t ret;
> +        struct iremap_entry old_ire;
> +
> +        old_ire = *entry;
> +        ret = cmpxchg16b(entry, &old_ire, new_ire);
> +
> +        /*
> +         * In the above, we use cmpxchg16 to atomically update the 128-bit
> +         * IRTE, and the hardware cannot update the IRTE behind us, so
> +         * the return value of cmpxchg16 should be the same as old_ire.
> +         * This ASSERT validate it.
> +         */
> +        ASSERT(ret == old_ire.val);
> +    }
> +    else
> +    {
> +        /*
> +         * The following code will update irte atomically if possible.
> +         * If the caller requests a atomic update but we can't meet it,

a -> an

> +         * a bug will be raised.
> +         */
> +        if ( entry->lo == new_ire->lo )
> +            entry->hi = new_ire->hi;
> +        else if ( entry->hi == new_ire->hi )
> +            entry->lo = new_ire->lo;
> +        else if ( !atomic )
> +        {
> +            entry->lo = new_ire->lo;
> +            entry->hi = new_ire->hi;
> +        }
> +        else
> +            BUG();

suppose you need same ASSERT as for cmxchg16 here in atomic case.

> +    }
> +}
> +
> +static inline void update_irte_non_atomic(struct iremap_entry *entry,
> +                                          const struct iremap_entry *new_ire)
> +{
> +    update_irte(entry, new_ire, false);
> +}
> +
> +static inline void update_irte_atomic(struct iremap_entry *entry,
> +                                      const struct iremap_entry *new_ire)
> +{
> +    update_irte(entry, new_ire, true);
> +}
> +
> +
>  /* Mark specified intr remap entry as free */
>  static void free_remap_entry(struct iommu *iommu, int index)
>  {
> -    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
> +    struct iremap_entry *iremap_entry = NULL, *iremap_entries, new_ire = { };
>      struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
>
>      if ( index < 0 || index > IREMAP_ENTRY_NR - 1 )
> @@ -183,7 +237,7 @@ static void free_remap_entry(struct iommu *iommu, int index)
>      GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index,
>                       iremap_entries, iremap_entry);
>
> -    memset(iremap_entry, 0, sizeof(*iremap_entry));
> +    update_irte_non_atomic(iremap_entry, &new_ire);
>      iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
>      iommu_flush_iec_index(iommu, 0, index);
>
> @@ -286,6 +340,7 @@ static int ioapic_rte_to_remap_entry(struct iommu *iommu,
>      int index;
>      unsigned long flags;
>      struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
> +    bool init = false;
>
>      remap_rte = (struct IO_APIC_route_remap_entry *) old_rte;
>      spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);
> @@ -296,6 +351,7 @@ static int ioapic_rte_to_remap_entry(struct iommu *iommu,
>          index = alloc_remap_entry(iommu, 1);
>          if ( index < IREMAP_ENTRY_NR )
>              apic_pin_2_ir_idx[apic][ioapic_pin] = index;
> +        init = true;
>      }
>
>      if ( index > IREMAP_ENTRY_NR - 1 )
> @@ -353,7 +409,11 @@ static int ioapic_rte_to_remap_entry(struct iommu *iommu,
>          remap_rte->format = 1;    /* indicate remap format */
>      }
>
> -    *iremap_entry = new_ire;
> +    if ( init )
> +        update_irte_non_atomic(iremap_entry, &new_ire);
> +    else
> +        update_irte_atomic(iremap_entry, &new_ire);
> +
>      iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
>      iommu_flush_iec_index(iommu, 0, index);
>
> @@ -567,7 +627,10 @@ static int msi_msg_to_remap_entry(
>      {
>          /* Free specified unused IRTEs */
>          for ( i = 0; i < nr; ++i )
> +        {
>              free_remap_entry(iommu, msi_desc->remap_index + i);
> +            msi_desc[i].remap_entry_initialized = false;
> +        }
>          spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
>          return 0;
>      }
> @@ -639,7 +702,14 @@ static int msi_msg_to_remap_entry(
>      remap_rte->address_hi = 0;
>      remap_rte->data = index - i;
>
> -    *iremap_entry = new_ire;
> +    if ( msi_desc->remap_entry_initialized )
> +        update_irte_atomic(iremap_entry, &new_ire);
> +    else
> +    {
> +        update_irte_non_atomic(iremap_entry, &new_ire);
> +        msi_desc->remap_entry_initialized = true;
> +    }
> +
>      iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
>      iommu_flush_iec_index(iommu, 0, index);
>
> diff --git a/xen/include/asm-x86/msi.h b/xen/include/asm-x86/msi.h
> index fc9ab04..a0bd3af 100644
> --- a/xen/include/asm-x86/msi.h
> +++ b/xen/include/asm-x86/msi.h
> @@ -118,6 +118,7 @@ struct msi_desc {
>      struct msi_msg msg;        /* Last set MSI message */
>
>      int remap_index;           /* index in interrupt remapping table */
> +    bool remap_entry_initialized;
>      const struct pi_desc *pi_desc;    /* pointer to posted descriptor */
>      uint8_t gvec;              /* guest vector. valid when pi_desc isn't NULL */
>  };
> --
> 1.8.3.1
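One possible reading of the "same ASSERT" remark above is to re-check, after each single-half store, that the untouched half still matches, mirroring the cmpxchg16b ASSERT. A hypothetical sketch, not part of the posted patch (callers hold the iremap_lock, so nothing else should modify the entry):

    else
    {
        if ( entry->lo == new_ire->lo )
        {
            entry->hi = new_ire->hi;
            /* Sketch: the half we did not rewrite must be unchanged. */
            ASSERT(entry->lo == new_ire->lo);
        }
        else if ( entry->hi == new_ire->hi )
        {
            entry->lo = new_ire->lo;
            ASSERT(entry->hi == new_ire->hi);
        }
        else if ( !atomic )
        {
            entry->lo = new_ire->lo;
            entry->hi = new_ire->hi;
        }
        else
            BUG();
    }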
>>> On 29.03.17 at 07:11, <chao.gao@intel.com> wrote:
> We used structure assignment to update irte which was non-atomic when the
> whole IRTE was to be updated. It is unsafe when a interrupt happened during
> update. Furthermore, no bug or warning would be reported when this happened.
>
> This patch introduces two variants, atomic and non-atomic, to update
> irte. Both variants will update IRTE if possible. If the caller requests a
> atomic update but we can't meet it, we raise a bug.
>
> Signed-off-by: Chao Gao <chao.gao@intel.com>
> ---
> v11:
> - Add two variant function to update IRTE. Call the non-atomic one for init
>   and clear operations. Call the atomic one for other cases.
> - Add a new field to indicate the remap_entry associated with msi_desc is
>   initialized or not.
>
> v10:
> - rename copy_irte_to_irt to update_irte
> - remove copy_from_to_irt
> - change commmit message and add some comments to illustrate on which
>   condition update_irte() is safe.
>
>  xen/arch/x86/msi.c                     |  1 +
>  xen/drivers/passthrough/vtd/intremap.c | 78 ++++++++++++++++++++++++++++++++--
>  xen/include/asm-x86/msi.h              |  1 +
>  3 files changed, 76 insertions(+), 4 deletions(-)
>
> diff --git a/xen/arch/x86/msi.c b/xen/arch/x86/msi.c
> index 3374cd4..7ed1243 100644
> --- a/xen/arch/x86/msi.c
> +++ b/xen/arch/x86/msi.c
> @@ -578,6 +578,7 @@ static struct msi_desc *alloc_msi_entry(unsigned int nr)
>          entry[nr].dev = NULL;
>          entry[nr].irq = -1;
>          entry[nr].remap_index = -1;
> +        entry[nr].remap_entry_initialized = false;
>          entry[nr].pi_desc = NULL;
>      }
>
> diff --git a/xen/drivers/passthrough/vtd/intremap.c b/xen/drivers/passthrough/vtd/intremap.c
> index b992f23..b7f3cf1 100644
> --- a/xen/drivers/passthrough/vtd/intremap.c
> +++ b/xen/drivers/passthrough/vtd/intremap.c
> @@ -169,10 +169,64 @@ bool_t __init iommu_supports_eim(void)
>      return 1;
>  }
>
> +static void update_irte(struct iremap_entry *entry,
> +                        const struct iremap_entry *new_ire,
> +                        bool atomic)
> +{
> +    if ( cpu_has_cx16 )
> +    {
> +        __uint128_t ret;
> +        struct iremap_entry old_ire;
> +
> +        old_ire = *entry;
> +        ret = cmpxchg16b(entry, &old_ire, new_ire);
> +
> +        /*
> +         * In the above, we use cmpxchg16 to atomically update the 128-bit
> +         * IRTE, and the hardware cannot update the IRTE behind us, so
> +         * the return value of cmpxchg16 should be the same as old_ire.
> +         * This ASSERT validate it.
> +         */
> +        ASSERT(ret == old_ire.val);
> +    }
> +    else
> +    {
> +        /*
> +         * The following code will update irte atomically if possible.

There's nothing atomic below - between the compares and stores
the value in the table could change. Please don't make false
promises in comments.

> +         * If the caller requests a atomic update but we can't meet it,
> +         * a bug will be raised.
> +         */
> +        if ( entry->lo == new_ire->lo )
> +            entry->hi = new_ire->hi;
> +        else if ( entry->hi == new_ire->hi )
> +            entry->lo = new_ire->lo;

Best effort would still call for use of write_atomic() instead of both
of the assignments above.

> @@ -353,7 +409,11 @@ static int ioapic_rte_to_remap_entry(struct iommu *iommu,
>          remap_rte->format = 1;    /* indicate remap format */
>      }
>
> -    *iremap_entry = new_ire;
> +    if ( init )
> +        update_irte_non_atomic(iremap_entry, &new_ire);
> +    else
> +        update_irte_atomic(iremap_entry, &new_ire);

Seems like you'd better call update_irte() here directly, instead of
having this if/else. Which puts under question the usefulness of the
two wrappers.

> @@ -639,7 +702,14 @@ static int msi_msg_to_remap_entry(
>      remap_rte->address_hi = 0;
>      remap_rte->data = index - i;
>
> -    *iremap_entry = new_ire;
> +    if ( msi_desc->remap_entry_initialized )
> +        update_irte_atomic(iremap_entry, &new_ire);
> +    else
> +    {
> +        update_irte_non_atomic(iremap_entry, &new_ire);
> +        msi_desc->remap_entry_initialized = true;
> +    }

Same here.

I also wonder whether you really need the new flag: The function
knows whether it's initializing the IRTE for the first time, as it
allocates a table index just like ioapic_rte_to_remap_entry() does
(where you get away without a new flag).

Jan
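For reference, a minimal sketch of the non-cx16 branch reworked along the lines of the two comments above, using Xen's write_atomic() helper that Jan refers to (which forces a single full-width store the compiler may not split); an illustration of the suggestion, not necessarily the code that finally went in:

    /* Each 64-bit half is written with exactly one store, so the IOMMU
     * can never observe a torn half even without cmpxchg16b. */
    if ( entry->lo == new_ire->lo )
        write_atomic(&entry->hi, new_ire->hi);
    else if ( entry->hi == new_ire->hi )
        write_atomic(&entry->lo, new_ire->lo);
    else if ( !atomic )
    {
        write_atomic(&entry->lo, new_ire->lo);
        write_atomic(&entry->hi, new_ire->hi);
    }
    else
        BUG();

With the two wrappers dropped, the IO-APIC call site would then collapse to a single direct call such as update_irte(iremap_entry, &new_ire, !init).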
On Fri, Mar 31, 2017 at 04:01:31AM -0600, Jan Beulich wrote:
>>>> On 29.03.17 at 07:11, <chao.gao@intel.com> wrote:
>> +static void update_irte(struct iremap_entry *entry,
>> +                        const struct iremap_entry *new_ire,
>> +                        bool atomic)
>> +{
>> +    if ( cpu_has_cx16 )
>> +    {
>> +        __uint128_t ret;
>> +        struct iremap_entry old_ire;
>> +
>> +        old_ire = *entry;
>> +        ret = cmpxchg16b(entry, &old_ire, new_ire);
>> +
>> +        /*
>> +         * In the above, we use cmpxchg16 to atomically update the 128-bit
>> +         * IRTE, and the hardware cannot update the IRTE behind us, so
>> +         * the return value of cmpxchg16 should be the same as old_ire.
>> +         * This ASSERT validate it.
>> +         */
>> +        ASSERT(ret == old_ire.val);
>> +    }
>> +    else
>> +    {
>> +        /*
>> +         * The following code will update irte atomically if possible.
>
>There's nothing atomic below - between the compares and stores
>the value in the table could change. Please don't make false
>promises in comments.

Ok. I agree. Then do you think the parameter 'atomic' of this function is
proper? I think this atomic means the caller wants this update to be
presented to VT-d hardware as a atomic update. That's to say, no
intermediate, invalid IRTE can be seen by hardware.

>
>> +         * If the caller requests a atomic update but we can't meet it,
>> +         * a bug will be raised.
>> +         */
>> +        if ( entry->lo == new_ire->lo )
>> +            entry->hi = new_ire->hi;
>> +        else if ( entry->hi == new_ire->hi )
>> +            entry->lo = new_ire->lo;
>
>Best effort would still call for use of write_atomic() instead of both
>of the assignments above.

Will fix. Your concern is compiler would wrongly optimize the assignments?

>
>> @@ -353,7 +409,11 @@ static int ioapic_rte_to_remap_entry(struct iommu *iommu,
>>          remap_rte->format = 1;    /* indicate remap format */
>>      }
>>
>> -    *iremap_entry = new_ire;
>> +    if ( init )
>> +        update_irte_non_atomic(iremap_entry, &new_ire);
>> +    else
>> +        update_irte_atomic(iremap_entry, &new_ire);
>
>Seems like you'd better call update_irte() here directly, instead of
>having this if/else. Which puts under question the usefulness of the
>two wrappers.

agree. Will remove the two wrappers.

>
>> @@ -639,7 +702,14 @@ static int msi_msg_to_remap_entry(
>>      remap_rte->address_hi = 0;
>>      remap_rte->data = index - i;
>>
>> -    *iremap_entry = new_ire;
>> +    if ( msi_desc->remap_entry_initialized )
>> +        update_irte_atomic(iremap_entry, &new_ire);
>> +    else
>> +    {
>> +        update_irte_non_atomic(iremap_entry, &new_ire);
>> +        msi_desc->remap_entry_initialized = true;
>> +    }
>
>Same here.
>
>I also wonder whether you really need the new flag: The function
>knows whether it's initializing the IRTE for the first time, as it
>allocates a table index just like ioapic_rte_to_remap_entry() does
>(where you get away without a new flag).

For multi-vector msi case, I don't have a clean solution to get away this
flag. The problem here is that it allocates several table indexes for
multi-vector msi and only initialize the first one. For others, it isn't
aware whether the IRTE is initialized or not.

Thank
Chao

>
>Jan
>>> On 04.04.17 at 21:12, <chao.gao@intel.com> wrote:
> On Fri, Mar 31, 2017 at 04:01:31AM -0600, Jan Beulich wrote:
>>>>> On 29.03.17 at 07:11, <chao.gao@intel.com> wrote:
>>> +static void update_irte(struct iremap_entry *entry,
>>> +                        const struct iremap_entry *new_ire,
>>> +                        bool atomic)
>>> +{
>>> +    if ( cpu_has_cx16 )
>>> +    {
>>> +        __uint128_t ret;
>>> +        struct iremap_entry old_ire;
>>> +
>>> +        old_ire = *entry;
>>> +        ret = cmpxchg16b(entry, &old_ire, new_ire);
>>> +
>>> +        /*
>>> +         * In the above, we use cmpxchg16 to atomically update the 128-bit
>>> +         * IRTE, and the hardware cannot update the IRTE behind us, so
>>> +         * the return value of cmpxchg16 should be the same as old_ire.
>>> +         * This ASSERT validate it.
>>> +         */
>>> +        ASSERT(ret == old_ire.val);
>>> +    }
>>> +    else
>>> +    {
>>> +        /*
>>> +         * The following code will update irte atomically if possible.
>>
>>There's nothing atomic below - between the compares and stores
>>the value in the table could change. Please don't make false
>>promises in comments.
>
> Ok. I agree. Then do you think the parameter 'atomic' of this function is
> proper?

That depends: As long as no changes behind our backs are possible,
I think it could be left with this name.

> I think this atomic means the caller wants this update to be presented to
> VT-d hardware
> as a atomic update. That's to say, no intermediate, invalid IRTE can be seen
> by hardware.

Right, that's what the comment should say imo.

>>> +         * If the caller requests a atomic update but we can't meet it,
>>> +         * a bug will be raised.
>>> +         */
>>> +        if ( entry->lo == new_ire->lo )
>>> +            entry->hi = new_ire->hi;
>>> +        else if ( entry->hi == new_ire->hi )
>>> +            entry->lo = new_ire->lo;
>>
>>Best effort would still call for use of write_atomic() instead of both
>>of the assignments above.
>
> Will fix. Your concern is compiler would wrongly optimize the assignments?

With s/optimize/translate/, yes. If you care about atomicity, you
should guarantee as much of it as is possible. As per what you've
said above, if you weren't using write_atomic() here, you'd have
to prove that byte-wise writing in any order could not lead to a
transiently inconsistent IRTE.

>>> @@ -639,7 +702,14 @@ static int msi_msg_to_remap_entry(
>>>      remap_rte->address_hi = 0;
>>>      remap_rte->data = index - i;
>>>
>>> -    *iremap_entry = new_ire;
>>> +    if ( msi_desc->remap_entry_initialized )
>>> +        update_irte_atomic(iremap_entry, &new_ire);
>>> +    else
>>> +    {
>>> +        update_irte_non_atomic(iremap_entry, &new_ire);
>>> +        msi_desc->remap_entry_initialized = true;
>>> +    }
>>
>>Same here.
>>
>>I also wonder whether you really need the new flag: The function
>>knows whether it's initializing the IRTE for the first time, as it
>>allocates a table index just like ioapic_rte_to_remap_entry() does
>>(where you get away without a new flag).
>
> For multi-vector msi case, I don't have a clean solution to get away this flag.
> The problem here is that it allocates several table indexes for multi-vector msi
> and only initialize the first one. For others, it isn't aware whether the IRTE
> is initialized or not.

Oh, indeed. I did overlook this aspect. May I then suggest to
shorten the field name to e.g. irte_initialized?

Jan
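Putting the thread's conclusions together, the MSI call site in a follow-up version might take roughly this shape (a sketch using direct update_irte() calls and the shortened irte_initialized name Jan suggests, not necessarily the committed code):

    if ( msi_desc->irte_initialized )
        /* Entry may be live: the update must appear atomic to the IOMMU. */
        update_irte(iremap_entry, &new_ire, true);
    else
    {
        /*
         * First write of this IRTE: no interrupt can be requested through
         * an uninitialized entry, so a non-atomic update is acceptable.
         */
        update_irte(iremap_entry, &new_ire, false);
        msi_desc->irte_initialized = true;
    }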
diff --git a/xen/arch/x86/msi.c b/xen/arch/x86/msi.c
index 3374cd4..7ed1243 100644
--- a/xen/arch/x86/msi.c
+++ b/xen/arch/x86/msi.c
@@ -578,6 +578,7 @@ static struct msi_desc *alloc_msi_entry(unsigned int nr)
         entry[nr].dev = NULL;
         entry[nr].irq = -1;
         entry[nr].remap_index = -1;
+        entry[nr].remap_entry_initialized = false;
         entry[nr].pi_desc = NULL;
     }
 
diff --git a/xen/drivers/passthrough/vtd/intremap.c b/xen/drivers/passthrough/vtd/intremap.c
index b992f23..b7f3cf1 100644
--- a/xen/drivers/passthrough/vtd/intremap.c
+++ b/xen/drivers/passthrough/vtd/intremap.c
@@ -169,10 +169,64 @@ bool_t __init iommu_supports_eim(void)
     return 1;
 }
 
+static void update_irte(struct iremap_entry *entry,
+                        const struct iremap_entry *new_ire,
+                        bool atomic)
+{
+    if ( cpu_has_cx16 )
+    {
+        __uint128_t ret;
+        struct iremap_entry old_ire;
+
+        old_ire = *entry;
+        ret = cmpxchg16b(entry, &old_ire, new_ire);
+
+        /*
+         * In the above, we use cmpxchg16 to atomically update the 128-bit
+         * IRTE, and the hardware cannot update the IRTE behind us, so
+         * the return value of cmpxchg16 should be the same as old_ire.
+         * This ASSERT validate it.
+         */
+        ASSERT(ret == old_ire.val);
+    }
+    else
+    {
+        /*
+         * The following code will update irte atomically if possible.
+         * If the caller requests a atomic update but we can't meet it,
+         * a bug will be raised.
+         */
+        if ( entry->lo == new_ire->lo )
+            entry->hi = new_ire->hi;
+        else if ( entry->hi == new_ire->hi )
+            entry->lo = new_ire->lo;
+        else if ( !atomic )
+        {
+            entry->lo = new_ire->lo;
+            entry->hi = new_ire->hi;
+        }
+        else
+            BUG();
+    }
+}
+
+static inline void update_irte_non_atomic(struct iremap_entry *entry,
+                                          const struct iremap_entry *new_ire)
+{
+    update_irte(entry, new_ire, false);
+}
+
+static inline void update_irte_atomic(struct iremap_entry *entry,
+                                      const struct iremap_entry *new_ire)
+{
+    update_irte(entry, new_ire, true);
+}
+
+
 /* Mark specified intr remap entry as free */
 static void free_remap_entry(struct iommu *iommu, int index)
 {
-    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
+    struct iremap_entry *iremap_entry = NULL, *iremap_entries, new_ire = { };
     struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
 
     if ( index < 0 || index > IREMAP_ENTRY_NR - 1 )
@@ -183,7 +237,7 @@ static void free_remap_entry(struct iommu *iommu, int index)
     GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index,
                      iremap_entries, iremap_entry);
 
-    memset(iremap_entry, 0, sizeof(*iremap_entry));
+    update_irte_non_atomic(iremap_entry, &new_ire);
     iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
     iommu_flush_iec_index(iommu, 0, index);
 
@@ -286,6 +340,7 @@ static int ioapic_rte_to_remap_entry(struct iommu *iommu,
     int index;
     unsigned long flags;
     struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
+    bool init = false;
 
     remap_rte = (struct IO_APIC_route_remap_entry *) old_rte;
     spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);
@@ -296,6 +351,7 @@ static int ioapic_rte_to_remap_entry(struct iommu *iommu,
         index = alloc_remap_entry(iommu, 1);
         if ( index < IREMAP_ENTRY_NR )
             apic_pin_2_ir_idx[apic][ioapic_pin] = index;
+        init = true;
     }
 
     if ( index > IREMAP_ENTRY_NR - 1 )
@@ -353,7 +409,11 @@ static int ioapic_rte_to_remap_entry(struct iommu *iommu,
         remap_rte->format = 1;    /* indicate remap format */
     }
 
-    *iremap_entry = new_ire;
+    if ( init )
+        update_irte_non_atomic(iremap_entry, &new_ire);
+    else
+        update_irte_atomic(iremap_entry, &new_ire);
+
     iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
     iommu_flush_iec_index(iommu, 0, index);
 
@@ -567,7 +627,10 @@ static int msi_msg_to_remap_entry(
     {
         /* Free specified unused IRTEs */
         for ( i = 0; i < nr; ++i )
+        {
             free_remap_entry(iommu, msi_desc->remap_index + i);
+            msi_desc[i].remap_entry_initialized = false;
+        }
         spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
         return 0;
     }
@@ -639,7 +702,14 @@ static int msi_msg_to_remap_entry(
     remap_rte->address_hi = 0;
     remap_rte->data = index - i;
 
-    *iremap_entry = new_ire;
+    if ( msi_desc->remap_entry_initialized )
+        update_irte_atomic(iremap_entry, &new_ire);
+    else
+    {
+        update_irte_non_atomic(iremap_entry, &new_ire);
+        msi_desc->remap_entry_initialized = true;
+    }
+
     iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
     iommu_flush_iec_index(iommu, 0, index);
 
diff --git a/xen/include/asm-x86/msi.h b/xen/include/asm-x86/msi.h
index fc9ab04..a0bd3af 100644
--- a/xen/include/asm-x86/msi.h
+++ b/xen/include/asm-x86/msi.h
@@ -118,6 +118,7 @@ struct msi_desc {
     struct msi_msg msg;        /* Last set MSI message */
 
     int remap_index;           /* index in interrupt remapping table */
+    bool remap_entry_initialized;
     const struct pi_desc *pi_desc;    /* pointer to posted descriptor */
     uint8_t gvec;              /* guest vector. valid when pi_desc isn't NULL */
 };
We used structure assignment to update the IRTE, which is non-atomic when
the whole IRTE is to be updated. That is unsafe when an interrupt arrives
during the update; furthermore, no bug or warning would be reported when
this happened.

This patch introduces two variants, atomic and non-atomic, to update the
IRTE. Both variants will update the IRTE if possible. If the caller
requests an atomic update but we can't meet it, we raise a bug.

Signed-off-by: Chao Gao <chao.gao@intel.com>
---
v11:
- Add two variant functions to update the IRTE. Call the non-atomic one
  for init and clear operations. Call the atomic one for other cases.
- Add a new field to indicate whether the remap_entry associated with a
  msi_desc is initialized or not.

v10:
- rename copy_irte_to_irt to update_irte
- remove copy_from_to_irt
- change commit message and add some comments to illustrate under which
  conditions update_irte() is safe.

 xen/arch/x86/msi.c                     |  1 +
 xen/drivers/passthrough/vtd/intremap.c | 78 ++++++++++++++++++++++++++++++++--
 xen/include/asm-x86/msi.h              |  1 +
 3 files changed, 76 insertions(+), 4 deletions(-)