Message ID | 20190930133238.49868-8-roger.pau@citrix.com
---|---
State | New, archived
Series | ioreq: add support for internal servers
> -----Original Message-----
> From: Xen-devel <xen-devel-bounces@lists.xenproject.org> On Behalf Of Roger Pau Monne
> Sent: 30 September 2019 14:33
> To: xen-devel@lists.xenproject.org
> Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>; Paul Durrant <paul@xen.org>; Wei Liu <wl@xen.org>; Jan
> Beulich <jbeulich@suse.com>; Roger Pau Monne <roger.pau@citrix.com>
> Subject: [Xen-devel] [PATCH v3 07/10] ioreq: allow decoding accesses to MMCFG regions
>
> Pick up on the infrastructure already added for vPCI and allow ioreq
> to decode accesses to MMCFG regions registered for a domain. This
> infrastructure is still only accessible from internal callers, so
> MMCFG regions can only be registered from the internal domain builder
> used by PVH dom0.
>
> Note that the vPCI infrastructure to decode and handle accesses to
> MMCFG regions will be removed in following patches when vPCI is
> switched to become an internal ioreq server.
>
> Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>

Reviewed-by: Paul Durrant <paul@xen.org>

...with one nit below...

> ---
> Changes since v2:
>  - Don't prevent mapping MCFG ranges by ioreq servers.
>
> Changes since v1:
>  - Remove prototype for destroy_vpci_mmcfg.
>  - Keep the code in io.c so PCI accesses to MMCFG regions can be
>    decoded before ioreq processing.
> ---
>  xen/arch/x86/hvm/dom0_build.c       |  8 +--
>  xen/arch/x86/hvm/hvm.c              |  2 +-
>  xen/arch/x86/hvm/io.c               | 79 ++++++++++++-----------------
>  xen/arch/x86/hvm/ioreq.c            | 18 +++++--
>  xen/arch/x86/physdev.c              |  5 +-
>  xen/drivers/passthrough/x86/iommu.c |  2 +-
>  xen/include/asm-x86/hvm/io.h        | 29 ++++++++---
>  7 files changed, 75 insertions(+), 68 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
> index 831325150b..b30042d8f3 100644
> --- a/xen/arch/x86/hvm/dom0_build.c
> +++ b/xen/arch/x86/hvm/dom0_build.c
> @@ -1108,10 +1108,10 @@ static void __hwdom_init pvh_setup_mmcfg(struct domain *d)
>
>      for ( i = 0; i < pci_mmcfg_config_num; i++ )
>      {
> -        rc = register_vpci_mmcfg_handler(d, pci_mmcfg_config[i].address,
> -                                         pci_mmcfg_config[i].start_bus_number,
> -                                         pci_mmcfg_config[i].end_bus_number,
> -                                         pci_mmcfg_config[i].pci_segment);
> +        rc = hvm_register_mmcfg(d, pci_mmcfg_config[i].address,
> +                                pci_mmcfg_config[i].start_bus_number,
> +                                pci_mmcfg_config[i].end_bus_number,
> +                                pci_mmcfg_config[i].pci_segment);
>          if ( rc )
>              printk("Unable to setup MMCFG handler at %#lx for segment %u\n",
>                     pci_mmcfg_config[i].address,
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index c22cb39cf3..5348186c0c 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -753,7 +753,7 @@ void hvm_domain_destroy(struct domain *d)
>          xfree(ioport);
>      }
>
> -    destroy_vpci_mmcfg(d);
> +    hvm_free_mmcfg(d);
>  }
>
>  static int hvm_save_tsc_adjust(struct vcpu *v, hvm_domain_context_t *h)
> diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
> index a5b0a23f06..3334888136 100644
> --- a/xen/arch/x86/hvm/io.c
> +++ b/xen/arch/x86/hvm/io.c
> @@ -279,6 +279,18 @@ unsigned int hvm_pci_decode_addr(unsigned int cf8, unsigned int addr,
>      return CF8_ADDR_LO(cf8) | (addr & 3);
>  }
>
> +unsigned int hvm_mmcfg_decode_addr(const struct hvm_mmcfg *mmcfg,
> +                                   paddr_t addr, pci_sbdf_t *sbdf)
> +{
> +    addr -= mmcfg->addr;
> +    sbdf->bdf = MMCFG_BDF(addr);
> +    sbdf->bus += mmcfg->start_bus;
> +    sbdf->seg = mmcfg->segment;
> +
> +    return addr & (PCI_CFG_SPACE_EXP_SIZE - 1);
> +}
> +
> +

Extraneous blank line here.

  Paul
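(The decode helper quoted just above follows the standard PCIe ECAM layout: the
offset into an MMCFG window carries the bus in bits 27:20, the device in bits
19:15, the function in bits 14:12 and the extended config register in bits
11:0. The standalone C sketch below only illustrates that layout — the helper
name and the example values are made up for illustration and are not part of
the patch or of the Xen tree.)

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative ECAM decode: off = (bus << 20) | (dev << 15) | (fn << 12) | reg */
    static void ecam_decode(uint64_t mmcfg_base, unsigned int start_bus, uint64_t addr)
    {
        uint64_t off = addr - mmcfg_base;       /* offset into the MMCFG window */
        unsigned int bus = ((off >> 20) & 0xff) + start_bus;
        unsigned int dev = (off >> 15) & 0x1f;
        unsigned int fn  = (off >> 12) & 0x7;
        unsigned int reg = off & 0xfff;         /* 4KiB config space per function */

        printf("%02x:%02x.%u reg %#x\n", bus, dev, fn, reg);
    }

    int main(void)
    {
        /* An access at base + 0x345008 hits 03:08.5, register 8. */
        ecam_decode(0xe0000000, 0, 0xe0000000 + 0x345008);
        return 0;
    }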
On 30.09.2019 15:32, Roger Pau Monne wrote:
> --- a/xen/arch/x86/hvm/io.c
> +++ b/xen/arch/x86/hvm/io.c
> @@ -279,6 +279,18 @@ unsigned int hvm_pci_decode_addr(unsigned int cf8, unsigned int addr,
>      return CF8_ADDR_LO(cf8) | (addr & 3);
>  }
>
> +unsigned int hvm_mmcfg_decode_addr(const struct hvm_mmcfg *mmcfg,
> +                                   paddr_t addr, pci_sbdf_t *sbdf)
> +{
> +    addr -= mmcfg->addr;

As the function isn't even static, wouldn't it be helpful to at least
assert that addr >= mmcfg->addr ahead of this, and addr < mmcfg->size
afterwards?

> @@ -574,6 +550,17 @@ void destroy_vpci_mmcfg(struct domain *d)
>      write_unlock(&d->arch.hvm.mmcfg_lock);
>  }
>
> +const struct hvm_mmcfg *hvm_mmcfg_find(const struct domain *d, paddr_t addr)
> +{
> +    const struct hvm_mmcfg *mmcfg;
> +
> +    list_for_each_entry ( mmcfg, &d->arch.hvm.mmcfg_regions, next )
> +        if ( addr >= mmcfg->addr && addr < mmcfg->addr + mmcfg->size )

As a minor remark, this could be coded without even a theoretical risk
of overflow as

        if ( addr >= mmcfg->addr && addr - mmcfg->addr < mmcfg->size )

> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -1326,27 +1326,34 @@ ioservid_t hvm_select_ioreq_server(struct domain *d, ioreq_t *p)
>      uint8_t type;
>      uint64_t addr;
>      unsigned int id;
> +    const struct hvm_mmcfg *mmcfg;
>
>      if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
>          return XEN_INVALID_IOSERVID;
>
>      cf8 = d->arch.hvm.pci_cf8;
>
> -    if ( p->type == IOREQ_TYPE_PIO &&
> -         (p->addr & ~3) == 0xcfc &&
> -         CF8_ENABLED(cf8) )
> +    read_lock(&d->arch.hvm.mmcfg_lock);
> +    if ( (p->type == IOREQ_TYPE_PIO &&
> +          (p->addr & ~3) == 0xcfc &&
> +          CF8_ENABLED(cf8)) ||
> +         (p->type == IOREQ_TYPE_COPY &&
> +          (mmcfg = hvm_mmcfg_find(d, p->addr)) != NULL) )
>      {
>          uint32_t x86_fam;
>          pci_sbdf_t sbdf;
>          unsigned int reg;
>
> -        reg = hvm_pci_decode_addr(cf8, p->addr, &sbdf);
> +        reg = p->type == IOREQ_TYPE_PIO ? hvm_pci_decode_addr(cf8, p->addr,
> +                                                              &sbdf)
> +                                        : hvm_mmcfg_decode_addr(mmcfg, p->addr,
> +                                                                &sbdf);

I'm afraid old gcc will consider this use of mmcfg "potentially
uninitialized". I.e. I think the variable wants to gain a NULL
initializer, at which point you can slightly shorten the above to

        reg = mmcfg ? hvm_mmcfg_decode_addr(mmcfg, p->addr, &sbdf)
                    : hvm_pci_decode_addr(cf8, p->addr, &sbdf);

also eliminating a pointer deref (i.e. likely producing minimally more
efficient code).

>          /* PCI config data cycle */
>          type = XEN_DMOP_IO_RANGE_PCI;
>          addr = ((uint64_t)sbdf.sbdf << 32) | reg;
>          /* AMD extended configuration space access? */
> -        if ( CF8_ADDR_HI(cf8) &&
> +        if ( p->type == IOREQ_TYPE_PIO && CF8_ADDR_HI(cf8) &&

Would be "!mmcfg && ..." then here.

> --- a/xen/include/asm-x86/hvm/io.h
> +++ b/xen/include/asm-x86/hvm/io.h
> @@ -165,9 +165,19 @@ void stdvga_deinit(struct domain *d);
>
>  extern void hvm_dpci_msi_eoi(struct domain *d, int vector);
>
> -/* Decode a PCI port IO access into a bus/slot/func/reg. */
> +struct hvm_mmcfg {
> +    struct list_head next;
> +    paddr_t addr;
> +    unsigned int size;
> +    uint16_t segment;
> +    uint8_t start_bus;
> +};
> +
> +/* Decode a PCI port IO or MMCFG access into a bus/slot/func/reg. */
>  unsigned int hvm_pci_decode_addr(unsigned int cf8, unsigned int addr,
>                                   pci_sbdf_t *sbdf);
> +unsigned int hvm_mmcfg_decode_addr(const struct hvm_mmcfg *mmcfg,
> +                                   paddr_t addr, pci_sbdf_t *sbdf);

At the latest now the comment also wants to include "seg/", I think.

> @@ -178,15 +188,18 @@ void register_g2m_portio_handler(struct domain *d);
>  /* HVM port IO handler for vPCI accesses. */
>  void register_vpci_portio_handler(struct domain *d);
>
> -/* HVM MMIO handler for PCI MMCFG accesses. */
> -int register_vpci_mmcfg_handler(struct domain *d, paddr_t addr,
> -                                unsigned int start_bus, unsigned int end_bus,
> -                                unsigned int seg);
> -/* Destroy tracked MMCFG areas. */
> -void destroy_vpci_mmcfg(struct domain *d);
> +/* HVM PCI MMCFG regions registration. */
> +int hvm_register_mmcfg(struct domain *d, paddr_t addr,
> +                       unsigned int start_bus, unsigned int end_bus,
> +                       unsigned int seg);

As - from a hierarchy perspective - the segment is more significant,
can you put it ahead of the start/end bus numbers please?

> +void hvm_free_mmcfg(struct domain *d);
> +const struct hvm_mmcfg *hvm_mmcfg_find(const struct domain *d, paddr_t addr);
>
>  /* Check if an address is between a MMCFG region for a domain. */
> -bool vpci_is_mmcfg_address(const struct domain *d, paddr_t addr);
> +static inline bool hvm_is_mmcfg_address(const struct domain *d, paddr_t addr)
> +{
> +    return hvm_mmcfg_find(d, addr);
> +}

Hmm, yet another instance where the size of an access doesn't matter in
such a predicate. Is it a good idea (or is there a fair reason) to
extend this bad practice? But yes, I do see that vpci_mmcfg_accept()
has no way to pass a size here, and that you effectively only move the
code here.

Jan
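For concreteness, here is a sketch of what the decode helper and the
selection logic might look like with the review comments above folded in.
The ASSERT() placement, the NULL initializer, the shortened ternary and the
"!mmcfg" test come from Jan's remarks; everything else is copied from the
quoted hunks, so this is an illustration rather than a posted revision:

    unsigned int hvm_mmcfg_decode_addr(const struct hvm_mmcfg *mmcfg,
                                       paddr_t addr, pci_sbdf_t *sbdf)
    {
        ASSERT(addr >= mmcfg->addr);
        addr -= mmcfg->addr;
        ASSERT(addr < mmcfg->size);
        sbdf->bdf = MMCFG_BDF(addr);
        sbdf->bus += mmcfg->start_bus;
        sbdf->seg = mmcfg->segment;

        return addr & (PCI_CFG_SPACE_EXP_SIZE - 1);
    }

and, in hvm_select_ioreq_server():

    const struct hvm_mmcfg *mmcfg = NULL;
    ...
        reg = mmcfg ? hvm_mmcfg_decode_addr(mmcfg, p->addr, &sbdf)
                    : hvm_pci_decode_addr(cf8, p->addr, &sbdf);
    ...
        /* AMD extended configuration space access? */
        if ( !mmcfg && CF8_ADDR_HI(cf8) &&

These fragments only build inside the Xen tree (ASSERT, MMCFG_BDF and
pci_sbdf_t are hypervisor-internal), so treat them as a sketch of the
requested changes, not as the next version of the patch.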
diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index 831325150b..b30042d8f3 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -1108,10 +1108,10 @@ static void __hwdom_init pvh_setup_mmcfg(struct domain *d)
 
     for ( i = 0; i < pci_mmcfg_config_num; i++ )
     {
-        rc = register_vpci_mmcfg_handler(d, pci_mmcfg_config[i].address,
-                                         pci_mmcfg_config[i].start_bus_number,
-                                         pci_mmcfg_config[i].end_bus_number,
-                                         pci_mmcfg_config[i].pci_segment);
+        rc = hvm_register_mmcfg(d, pci_mmcfg_config[i].address,
+                                pci_mmcfg_config[i].start_bus_number,
+                                pci_mmcfg_config[i].end_bus_number,
+                                pci_mmcfg_config[i].pci_segment);
         if ( rc )
             printk("Unable to setup MMCFG handler at %#lx for segment %u\n",
                    pci_mmcfg_config[i].address,
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c22cb39cf3..5348186c0c 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -753,7 +753,7 @@ void hvm_domain_destroy(struct domain *d)
         xfree(ioport);
     }
 
-    destroy_vpci_mmcfg(d);
+    hvm_free_mmcfg(d);
 }
 
 static int hvm_save_tsc_adjust(struct vcpu *v, hvm_domain_context_t *h)
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index a5b0a23f06..3334888136 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -279,6 +279,18 @@ unsigned int hvm_pci_decode_addr(unsigned int cf8, unsigned int addr,
     return CF8_ADDR_LO(cf8) | (addr & 3);
 }
 
+unsigned int hvm_mmcfg_decode_addr(const struct hvm_mmcfg *mmcfg,
+                                   paddr_t addr, pci_sbdf_t *sbdf)
+{
+    addr -= mmcfg->addr;
+    sbdf->bdf = MMCFG_BDF(addr);
+    sbdf->bus += mmcfg->start_bus;
+    sbdf->seg = mmcfg->segment;
+
+    return addr & (PCI_CFG_SPACE_EXP_SIZE - 1);
+}
+
+
 /* Do some sanity checks. */
 static bool vpci_access_allowed(unsigned int reg, unsigned int len)
 {
@@ -383,50 +395,14 @@ void register_vpci_portio_handler(struct domain *d)
     handler->ops = &vpci_portio_ops;
 }
 
-struct hvm_mmcfg {
-    struct list_head next;
-    paddr_t addr;
-    unsigned int size;
-    uint16_t segment;
-    uint8_t start_bus;
-};
-
 /* Handlers to trap PCI MMCFG config accesses. */
-static const struct hvm_mmcfg *vpci_mmcfg_find(const struct domain *d,
-                                               paddr_t addr)
-{
-    const struct hvm_mmcfg *mmcfg;
-
-    list_for_each_entry ( mmcfg, &d->arch.hvm.mmcfg_regions, next )
-        if ( addr >= mmcfg->addr && addr < mmcfg->addr + mmcfg->size )
-            return mmcfg;
-
-    return NULL;
-}
-
-bool vpci_is_mmcfg_address(const struct domain *d, paddr_t addr)
-{
-    return vpci_mmcfg_find(d, addr);
-}
-
-static unsigned int vpci_mmcfg_decode_addr(const struct hvm_mmcfg *mmcfg,
-                                           paddr_t addr, pci_sbdf_t *sbdf)
-{
-    addr -= mmcfg->addr;
-    sbdf->bdf = MMCFG_BDF(addr);
-    sbdf->bus += mmcfg->start_bus;
-    sbdf->seg = mmcfg->segment;
-
-    return addr & (PCI_CFG_SPACE_EXP_SIZE - 1);
-}
-
 static int vpci_mmcfg_accept(struct vcpu *v, unsigned long addr)
 {
     struct domain *d = v->domain;
     bool found;
 
     read_lock(&d->arch.hvm.mmcfg_lock);
-    found = vpci_mmcfg_find(d, addr);
+    found = hvm_is_mmcfg_address(d, addr);
     read_unlock(&d->arch.hvm.mmcfg_lock);
 
     return found;
@@ -443,14 +419,14 @@ static int vpci_mmcfg_read(struct vcpu *v, unsigned long addr,
     *data = ~0ul;
 
     read_lock(&d->arch.hvm.mmcfg_lock);
-    mmcfg = vpci_mmcfg_find(d, addr);
+    mmcfg = hvm_mmcfg_find(d, addr);
     if ( !mmcfg )
     {
         read_unlock(&d->arch.hvm.mmcfg_lock);
         return X86EMUL_RETRY;
     }
 
-    reg = vpci_mmcfg_decode_addr(mmcfg, addr, &sbdf);
+    reg = hvm_mmcfg_decode_addr(mmcfg, addr, &sbdf);
     read_unlock(&d->arch.hvm.mmcfg_lock);
 
     if ( !vpci_access_allowed(reg, len) ||
@@ -485,14 +461,14 @@ static int vpci_mmcfg_write(struct vcpu *v, unsigned long addr,
     pci_sbdf_t sbdf;
 
     read_lock(&d->arch.hvm.mmcfg_lock);
-    mmcfg = vpci_mmcfg_find(d, addr);
+    mmcfg = hvm_mmcfg_find(d, addr);
     if ( !mmcfg )
     {
         read_unlock(&d->arch.hvm.mmcfg_lock);
         return X86EMUL_RETRY;
     }
 
-    reg = vpci_mmcfg_decode_addr(mmcfg, addr, &sbdf);
+    reg = hvm_mmcfg_decode_addr(mmcfg, addr, &sbdf);
     read_unlock(&d->arch.hvm.mmcfg_lock);
 
     if ( !vpci_access_allowed(reg, len) ||
@@ -512,9 +488,9 @@ static const struct hvm_mmio_ops vpci_mmcfg_ops = {
     .write = vpci_mmcfg_write,
 };
 
-int register_vpci_mmcfg_handler(struct domain *d, paddr_t addr,
-                                unsigned int start_bus, unsigned int end_bus,
-                                unsigned int seg)
+int hvm_register_mmcfg(struct domain *d, paddr_t addr,
+                       unsigned int start_bus, unsigned int end_bus,
+                       unsigned int seg)
 {
     struct hvm_mmcfg *mmcfg, *new;
 
@@ -549,7 +525,7 @@ int register_vpci_mmcfg_handler(struct domain *d, paddr_t addr,
         return ret;
     }
 
-    if ( list_empty(&d->arch.hvm.mmcfg_regions) )
+    if ( list_empty(&d->arch.hvm.mmcfg_regions) && has_vpci(d) )
         register_mmio_handler(d, &vpci_mmcfg_ops);
 
     list_add(&new->next, &d->arch.hvm.mmcfg_regions);
@@ -558,7 +534,7 @@ int register_vpci_mmcfg_handler(struct domain *d, paddr_t addr,
     return 0;
 }
 
-void destroy_vpci_mmcfg(struct domain *d)
+void hvm_free_mmcfg(struct domain *d)
 {
     struct list_head *mmcfg_regions = &d->arch.hvm.mmcfg_regions;
 
@@ -574,6 +550,17 @@ void destroy_vpci_mmcfg(struct domain *d)
     write_unlock(&d->arch.hvm.mmcfg_lock);
 }
 
+const struct hvm_mmcfg *hvm_mmcfg_find(const struct domain *d, paddr_t addr)
+{
+    const struct hvm_mmcfg *mmcfg;
+
+    list_for_each_entry ( mmcfg, &d->arch.hvm.mmcfg_regions, next )
+        if ( addr >= mmcfg->addr && addr < mmcfg->addr + mmcfg->size )
+            return mmcfg;
+
+    return NULL;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 57719c607c..6b87a55db5 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -1326,27 +1326,34 @@ ioservid_t hvm_select_ioreq_server(struct domain *d, ioreq_t *p)
     uint8_t type;
     uint64_t addr;
     unsigned int id;
+    const struct hvm_mmcfg *mmcfg;
 
     if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
         return XEN_INVALID_IOSERVID;
 
     cf8 = d->arch.hvm.pci_cf8;
 
-    if ( p->type == IOREQ_TYPE_PIO &&
-         (p->addr & ~3) == 0xcfc &&
-         CF8_ENABLED(cf8) )
+    read_lock(&d->arch.hvm.mmcfg_lock);
+    if ( (p->type == IOREQ_TYPE_PIO &&
+          (p->addr & ~3) == 0xcfc &&
+          CF8_ENABLED(cf8)) ||
+         (p->type == IOREQ_TYPE_COPY &&
+          (mmcfg = hvm_mmcfg_find(d, p->addr)) != NULL) )
     {
         uint32_t x86_fam;
         pci_sbdf_t sbdf;
         unsigned int reg;
 
-        reg = hvm_pci_decode_addr(cf8, p->addr, &sbdf);
+        reg = p->type == IOREQ_TYPE_PIO ? hvm_pci_decode_addr(cf8, p->addr,
+                                                              &sbdf)
+                                        : hvm_mmcfg_decode_addr(mmcfg, p->addr,
+                                                                &sbdf);
 
         /* PCI config data cycle */
         type = XEN_DMOP_IO_RANGE_PCI;
         addr = ((uint64_t)sbdf.sbdf << 32) | reg;
         /* AMD extended configuration space access? */
-        if ( CF8_ADDR_HI(cf8) &&
+        if ( p->type == IOREQ_TYPE_PIO && CF8_ADDR_HI(cf8) &&
             d->arch.cpuid->x86_vendor == X86_VENDOR_AMD &&
             (x86_fam = get_cpu_family(
                 d->arch.cpuid->basic.raw_fms, NULL, NULL)) > 0x10 &&
@@ -1365,6 +1372,7 @@ ioservid_t hvm_select_ioreq_server(struct domain *d, ioreq_t *p)
                XEN_DMOP_IO_RANGE_PORT : XEN_DMOP_IO_RANGE_MEMORY;
         addr = p->addr;
     }
+    read_unlock(&d->arch.hvm.mmcfg_lock);
 
     FOR_EACH_IOREQ_SERVER(d, id, s)
     {
diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
index 3a3c15890b..f61f66df5f 100644
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -562,9 +562,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
              * For HVM (PVH) domains try to add the newly found MMCFG to the
              * domain.
              */
-            ret = register_vpci_mmcfg_handler(currd, info.address,
-                                              info.start_bus, info.end_bus,
-                                              info.segment);
+            ret = hvm_register_mmcfg(currd, info.address, info.start_bus,
+                                     info.end_bus, info.segment);
         }
 
         break;
diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
index 59905629e1..53cdbb45f0 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -152,7 +152,7 @@ static bool __hwdom_init hwdom_iommu_map(const struct domain *d,
      * TODO: runtime added MMCFG regions are not checked to make sure they
      * don't overlap with already mapped regions, thus preventing trapping.
      */
-    if ( has_vpci(d) && vpci_is_mmcfg_address(d, pfn_to_paddr(pfn)) )
+    if ( has_vpci(d) && hvm_is_mmcfg_address(d, pfn_to_paddr(pfn)) )
         return false;
 
     return true;
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index 7ceb119b64..86ebbd1e7e 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -165,9 +165,19 @@ void stdvga_deinit(struct domain *d);
 
 extern void hvm_dpci_msi_eoi(struct domain *d, int vector);
 
-/* Decode a PCI port IO access into a bus/slot/func/reg. */
+struct hvm_mmcfg {
+    struct list_head next;
+    paddr_t addr;
+    unsigned int size;
+    uint16_t segment;
+    uint8_t start_bus;
+};
+
+/* Decode a PCI port IO or MMCFG access into a bus/slot/func/reg. */
 unsigned int hvm_pci_decode_addr(unsigned int cf8, unsigned int addr,
                                  pci_sbdf_t *sbdf);
+unsigned int hvm_mmcfg_decode_addr(const struct hvm_mmcfg *mmcfg,
+                                   paddr_t addr, pci_sbdf_t *sbdf);
 
 /*
  * HVM port IO handler that performs forwarding of guest IO ports into machine
@@ -178,15 +188,18 @@ void register_g2m_portio_handler(struct domain *d);
 /* HVM port IO handler for vPCI accesses. */
 void register_vpci_portio_handler(struct domain *d);
 
-/* HVM MMIO handler for PCI MMCFG accesses. */
-int register_vpci_mmcfg_handler(struct domain *d, paddr_t addr,
-                                unsigned int start_bus, unsigned int end_bus,
-                                unsigned int seg);
-/* Destroy tracked MMCFG areas. */
-void destroy_vpci_mmcfg(struct domain *d);
+/* HVM PCI MMCFG regions registration. */
+int hvm_register_mmcfg(struct domain *d, paddr_t addr,
+                       unsigned int start_bus, unsigned int end_bus,
+                       unsigned int seg);
+void hvm_free_mmcfg(struct domain *d);
+const struct hvm_mmcfg *hvm_mmcfg_find(const struct domain *d, paddr_t addr);
 
 /* Check if an address is between a MMCFG region for a domain. */
-bool vpci_is_mmcfg_address(const struct domain *d, paddr_t addr);
+static inline bool hvm_is_mmcfg_address(const struct domain *d, paddr_t addr)
+{
+    return hvm_mmcfg_find(d, addr);
+}
 
 #endif /* __ASM_X86_HVM_IO_H__ */
Pick up on the infrastructure already added for vPCI and allow ioreq
to decode accesses to MMCFG regions registered for a domain. This
infrastructure is still only accessible from internal callers, so
MMCFG regions can only be registered from the internal domain builder
used by PVH dom0.

Note that the vPCI infrastructure to decode and handle accesses to
MMCFG regions will be removed in following patches when vPCI is
switched to become an internal ioreq server.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Changes since v2:
 - Don't prevent mapping MCFG ranges by ioreq servers.

Changes since v1:
 - Remove prototype for destroy_vpci_mmcfg.
 - Keep the code in io.c so PCI accesses to MMCFG regions can be
   decoded before ioreq processing.
---
 xen/arch/x86/hvm/dom0_build.c       |  8 +--
 xen/arch/x86/hvm/hvm.c              |  2 +-
 xen/arch/x86/hvm/io.c               | 79 ++++++++++++-----------------
 xen/arch/x86/hvm/ioreq.c            | 18 +++++--
 xen/arch/x86/physdev.c              |  5 +-
 xen/drivers/passthrough/x86/iommu.c |  2 +-
 xen/include/asm-x86/hvm/io.h        | 29 ++++++++---
 7 files changed, 75 insertions(+), 68 deletions(-)
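As a side note on the ioreq.c change: once an MMCFG access has been decoded,
it is classified exactly like a CF8/CFC port access, i.e. as
XEN_DMOP_IO_RANGE_PCI with a 64-bit key of ((uint64_t)sbdf.sbdf << 32) | reg,
so a single registered PCI range catches both access methods. Below is a
standalone illustration of that key computation; the sbdf packing is assumed
to mirror pci_sbdf_t (seg in the high 16 bits, then bus and devfn) and the
helper name is made up for the example:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative packing: sbdf = (seg << 16) | (bus << 8) | (dev << 3) | fn. */
    static uint64_t pci_range_key(unsigned int seg, unsigned int bus,
                                  unsigned int dev, unsigned int fn,
                                  unsigned int reg)
    {
        uint32_t sbdf = (seg << 16) | (bus << 8) | (dev << 3) | fn;

        /* Same formula as the ioreq.c hunk: high word selects the device. */
        return ((uint64_t)sbdf << 32) | reg;
    }

    int main(void)
    {
        /* 0000:03:08.5 register 8, however the access was generated. */
        printf("%#llx\n", (unsigned long long)pci_range_key(0, 3, 8, 5, 8));
        return 0;
    }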