
[for-next,4/6] x86/ecam: add handlers for the PVH Dom0 MMCFG areas

Message ID 20170411100402.56246-5-roger.pau@citrix.com (mailing list archive)
State New, archived

Commit Message

Roger Pau Monné April 11, 2017, 10:04 a.m. UTC
Introduce a set of handlers for accesses to the ECAM areas. Those areas are
set up based on the contents of the hardware MMCFG tables, and the list of
handled ECAM areas is stored inside the hvm_domain struct.

Reads and writes are forwarded to the generic vpci handlers once the address
has been decoded in order to obtain the device and register the guest is
trying to access.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Paul Durrant <paul.durrant@citrix.com>
---
 xen/arch/x86/hvm/dom0_build.c    |  27 +++++++++++
 xen/arch/x86/hvm/hvm.c           |   1 +
 xen/arch/x86/hvm/io.c            | 101 +++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/hvm/domain.h |  10 ++++
 xen/include/asm-x86/hvm/io.h     |   4 ++
 5 files changed, 143 insertions(+)
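
For context, the decode performed by vpci_ecam_decode_addr() in the patch
follows the standard ECAM layout: each function gets a 4 KiB configuration
window, with 8 functions per device and 32 devices per bus, i.e. 1 MiB per
bus. A minimal standalone sketch of that decode (illustrative names only,
not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Decode an offset into an MMCFG/ECAM region (the MMIO address minus
     * the base of the region) into bus, devfn and register, mirroring the
     * layout the patch relies on.
     */
    static void ecam_decode(uint64_t offset, unsigned int *bus,
                            unsigned int *devfn, unsigned int *reg)
    {
        *bus   = (offset >> 20) & 0xff; /* bits 27:20: bus number            */
        *devfn = (offset >> 12) & 0xff; /* bits 19:12: device (5) + func (3) */
        *reg   = offset & 0xfff;        /* bits 11:0:  config space register */
    }

    int main(void)
    {
        unsigned int bus, devfn, reg;

        /* 00:14.0, register 0x10 (BAR0) lives at offset 0xa0010. */
        ecam_decode(0xa0010, &bus, &devfn, &reg);
        printf("%02x:%02x.%u reg %#x\n", bus, devfn >> 3, devfn & 7, reg);
        return 0;
    }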

Patch

diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index 07bfa0be46..47600345ab 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -38,6 +38,8 @@ 
 #include <public/hvm/hvm_info_table.h>
 #include <public/hvm/hvm_vcpu.h>
 
+#include "../x86_64/mmconfig.h"
+
 /*
  * Have the TSS cover the ISA port range, which makes it
  * - 104 bytes base structure
@@ -1022,6 +1024,24 @@  static int __init pvh_setup_acpi(struct domain *d, paddr_t start_info)
     return 0;
 }
 
+int __init pvh_setup_ecam(struct domain *d)
+{
+    unsigned int i;
+    int rc;
+
+    for ( i = 0; i < pci_mmcfg_config_num; i++ )
+    {
+        size_t size = (pci_mmcfg_config[i].end_bus_number + 1) << 20;
+
+        rc = register_vpci_ecam_handler(d, pci_mmcfg_config[i].address, size,
+                                        pci_mmcfg_config[i].pci_segment);
+        if ( rc )
+            return rc;
+    }
+
+    return 0;
+}
+
 int __init dom0_construct_pvh(struct domain *d, const module_t *image,
                               unsigned long image_headroom,
                               module_t *initrd,
@@ -1064,6 +1084,13 @@  int __init dom0_construct_pvh(struct domain *d, const module_t *image,
         return rc;
     }
 
+    rc = pvh_setup_ecam(d);
+    if ( rc )
+    {
+        printk("Failed to setup Dom0 PCI ECAM areas: %d\n", rc);
+        return rc;
+    }
+
     panic("Building a PVHv2 Dom0 is not yet supported.");
     return 0;
 }
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c2a9b76a81..d8f3dcdc7c 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -613,6 +613,7 @@  int hvm_domain_initialise(struct domain *d)
     spin_lock_init(&d->arch.hvm_domain.write_map.lock);
     INIT_LIST_HEAD(&d->arch.hvm_domain.write_map.list);
     INIT_LIST_HEAD(&d->arch.hvm_domain.g2m_ioport_list);
+    INIT_LIST_HEAD(&d->arch.hvm_domain.ecam_regions);
 
     hvm_init_cacheattr_region_list(d);
 
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 11d561e861..b78d668fc1 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -375,6 +375,107 @@  void register_vpci_portio_handler(struct domain *d)
     handler->ops = &vpci_portio_ops;
 }
 
+/* Handlers to trap PCI ECAM config accesses. */
+static struct hvm_ecam *vpci_ecam_find(struct domain *d, unsigned long addr)
+{
+    struct hvm_ecam *ecam = NULL;
+
+    list_for_each_entry ( ecam, &d->arch.hvm_domain.ecam_regions, next )
+        if ( addr >= ecam->addr && addr < ecam->addr + ecam->size )
+            return ecam;
+
+    return NULL;
+}
+
+static void vpci_ecam_decode_addr(unsigned long addr, unsigned int *bus,
+                                  unsigned int *devfn, unsigned int *reg)
+{
+    *bus = (addr >> 20) & 0xff;
+    *devfn = (addr >> 12) & 0xff;
+    *reg = addr & 0xfff;
+}
+
+static int vpci_ecam_accept(struct vcpu *v, unsigned long addr)
+{
+
+    return !!vpci_ecam_find(v->domain, addr);
+}
+
+static int vpci_ecam_read(struct vcpu *v, unsigned long addr,
+                          unsigned int len, unsigned long *data)
+{
+    struct domain *d = v->domain;
+    struct hvm_ecam *ecam = vpci_ecam_find(d, addr);
+    unsigned int bus, devfn, reg;
+    uint32_t data32;
+    int rc;
+
+    ASSERT(ecam);
+
+    vpci_ecam_decode_addr(addr - ecam->addr, &bus, &devfn, &reg);
+
+    if ( vpci_access_check(reg, len) || reg >= 0xfff )
+        return X86EMUL_UNHANDLEABLE;
+
+    rc = xen_vpci_read(ecam->segment, bus, devfn, reg, len, &data32);
+    if ( !rc )
+        *data = data32;
+
+    return rc ? X86EMUL_UNHANDLEABLE : X86EMUL_OKAY;
+}
+
+static int vpci_ecam_write(struct vcpu *v, unsigned long addr,
+                           unsigned int len, unsigned long data)
+{
+    struct domain *d = v->domain;
+    struct hvm_ecam *ecam = vpci_ecam_find(d, addr);
+    unsigned int bus, devfn, reg;
+    int rc;
+
+    ASSERT(ecam);
+
+    vpci_ecam_decode_addr(addr - ecam->addr, &bus, &devfn, &reg);
+
+    if ( vpci_access_check(reg, len) || reg >= 0xfff )
+        return X86EMUL_UNHANDLEABLE;
+
+    rc = xen_vpci_write(ecam->segment, bus, devfn, reg, len, data);
+
+    return rc ? X86EMUL_UNHANDLEABLE : X86EMUL_OKAY;
+}
+
+static const struct hvm_mmio_ops vpci_ecam_ops = {
+    .check = vpci_ecam_accept,
+    .read = vpci_ecam_read,
+    .write = vpci_ecam_write,
+};
+
+int register_vpci_ecam_handler(struct domain *d, paddr_t addr, size_t size,
+                               unsigned int seg)
+{
+    struct hvm_ecam *ecam;
+
+    ASSERT(is_hardware_domain(d));
+    ASSERT(atomic_read(&d->pause_count));
+
+    if ( vpci_ecam_find(d, addr) )
+        return -EEXIST;
+
+    ecam = xzalloc(struct hvm_ecam);
+    if ( !ecam )
+        return -ENOMEM;
+
+    if ( list_empty(&d->arch.hvm_domain.ecam_regions) )
+        register_mmio_handler(d, &vpci_ecam_ops);
+
+    ecam->addr = addr;
+    ecam->segment = seg;
+    ecam->size = size;
+    list_add(&ecam->next, &d->arch.hvm_domain.ecam_regions);
+
+    return 0;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index d2899c9bb2..9b5425f0a4 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -100,6 +100,13 @@  struct hvm_pi_ops {
     void (*do_resume)(struct vcpu *v);
 };
 
+struct hvm_ecam {
+    paddr_t addr;
+    size_t size;
+    unsigned int segment;
+    struct list_head next;
+};
+
 struct hvm_domain {
     /* Guest page range used for non-default ioreq servers */
     struct {
@@ -184,6 +191,9 @@  struct hvm_domain {
     /* List of guest to machine IO ports mapping. */
     struct list_head g2m_ioport_list;
 
+    /* List of ECAM regions. */
+    struct list_head ecam_regions;
+
     /* List of permanently write-mapped pages. */
     struct {
         spinlock_t lock;
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index 2dbf92f13e..0434aca706 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -158,6 +158,10 @@  void register_g2m_portio_handler(struct domain *d);
 /* HVM port IO handler for PCI accesses. */
 void register_vpci_portio_handler(struct domain *d);
 
+/* HVM MMIO handler for PCI ECAM accesses. */
+int register_vpci_ecam_handler(struct domain *d, paddr_t addr, size_t size,
+                               unsigned int seg);
+
 #endif /* __ASM_X86_HVM_IO_H__ */
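
As a closing illustration of the region size computed in pvh_setup_ecam()
above: each bus occupies 32 devices x 8 functions x 4 KiB = 1 MiB of ECAM
space, so a segment whose MMCFG table covers buses 0 through end_bus_number
spans (end_bus_number + 1) << 20 bytes. A small worked example with made-up
values:

    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int end_bus_number = 0xff; /* hypothetical MCFG entry: buses 0-255 */
        size_t size = (size_t)(end_bus_number + 1) << 20;

        /* Prints: region covers 256 buses, 256 MiB */
        printf("region covers %u buses, %zu MiB\n",
               end_bus_number + 1, size >> 20);
        return 0;
    }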