diff mbox series

[1/6] x86/iommu: re-arrange arch_iommu to separate common fields...

Message ID 20200724164619.1245-2-paul@xen.org (mailing list archive)
State Superseded
Headers show
Series IOMMU cleanup | expand

Commit Message

Paul Durrant July 24, 2020, 4:46 p.m. UTC
From: Paul Durrant <pdurrant@amazon.com>

... from those specific to VT-d or AMD IOMMU, and put the latter in a union.

There is no functional change in this patch, although the initialization of
the 'mapped_rmrrs' list occurs slightly later in iommu_domain_init() since
it is now done (correctly) in VT-d specific code rather than in general x86
code.

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
---
Cc: Lukasz Hawrylko <lukasz.hawrylko@linux.intel.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Wei Liu <wl@xen.org>
Cc: "Roger Pau Monné" <roger.pau@citrix.com>
Cc: Kevin Tian <kevin.tian@intel.com>
---
 xen/arch/x86/tboot.c                        |  4 +-
 xen/drivers/passthrough/amd/iommu_guest.c   |  8 ++--
 xen/drivers/passthrough/amd/iommu_map.c     | 14 +++---
 xen/drivers/passthrough/amd/pci_amd_iommu.c | 35 +++++++-------
 xen/drivers/passthrough/vtd/iommu.c         | 53 +++++++++++----------
 xen/drivers/passthrough/x86/iommu.c         |  1 -
 xen/include/asm-x86/iommu.h                 | 27 +++++++----
 7 files changed, 78 insertions(+), 64 deletions(-)

Comments

Andrew Cooper July 24, 2020, 5:29 p.m. UTC | #1
On 24/07/2020 17:46, Paul Durrant wrote:
> diff --git a/xen/include/asm-x86/iommu.h b/xen/include/asm-x86/iommu.h
> index 6c9d5e5632..a7add5208c 100644
> --- a/xen/include/asm-x86/iommu.h
> +++ b/xen/include/asm-x86/iommu.h
> @@ -45,16 +45,23 @@ typedef uint64_t daddr_t;
>  
>  struct arch_iommu
>  {
> -    u64 pgd_maddr;                 /* io page directory machine address */
> -    spinlock_t mapping_lock;            /* io page table lock */
> -    int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
> -    u64 iommu_bitmap;              /* bitmap of iommu(s) that the domain uses */
> -    struct list_head mapped_rmrrs;
> -
> -    /* amd iommu support */
> -    int paging_mode;
> -    struct page_info *root_table;
> -    struct guest_iommu *g_iommu;
> +    spinlock_t mapping_lock; /* io page table lock */
> +
> +    union {
> +        /* Intel VT-d */
> +        struct {
> +            u64 pgd_maddr; /* io page directory machine address */
> +            int agaw; /* adjusted guest address width, 0 is level 2 30-bit */
> +            u64 iommu_bitmap; /* bitmap of iommu(s) that the domain uses */
> +            struct list_head mapped_rmrrs;
> +        } vtd;
> +        /* AMD IOMMU */
> +        struct {
> +            int paging_mode;
> +            struct page_info *root_table;
> +            struct guest_iommu *g_iommu;
> +        } amd_iommu;
> +    };

The naming split here is weird.

Ideally we'd have struct {vtd,amd}_iommu in appropriate headers, and
this would be simply

union {
    struct vtd_iommu vtd;
    struct amd_iommu amd;
};

If this isn't trivial to arrange, can we at least s/amd_iommu/amd/ here ?

~Andrew
Paul Durrant July 24, 2020, 6:49 p.m. UTC | #2
> -----Original Message-----
> From: Andrew Cooper <andrew.cooper3@citrix.com>
> Sent: 24 July 2020 18:29
> To: Paul Durrant <paul@xen.org>; xen-devel@lists.xenproject.org
> Cc: Paul Durrant <pdurrant@amazon.com>; Lukasz Hawrylko <lukasz.hawrylko@linux.intel.com>; Jan Beulich
> <jbeulich@suse.com>; Wei Liu <wl@xen.org>; Roger Pau Monné <roger.pau@citrix.com>; Kevin Tian
> <kevin.tian@intel.com>
> Subject: Re: [PATCH 1/6] x86/iommu: re-arrange arch_iommu to separate common fields...
> 
> On 24/07/2020 17:46, Paul Durrant wrote:
> > diff --git a/xen/include/asm-x86/iommu.h b/xen/include/asm-x86/iommu.h
> > index 6c9d5e5632..a7add5208c 100644
> > --- a/xen/include/asm-x86/iommu.h
> > +++ b/xen/include/asm-x86/iommu.h
> > @@ -45,16 +45,23 @@ typedef uint64_t daddr_t;
> >
> >  struct arch_iommu
> >  {
> > -    u64 pgd_maddr;                 /* io page directory machine address */
> > -    spinlock_t mapping_lock;            /* io page table lock */
> > -    int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
> > -    u64 iommu_bitmap;              /* bitmap of iommu(s) that the domain uses */
> > -    struct list_head mapped_rmrrs;
> > -
> > -    /* amd iommu support */
> > -    int paging_mode;
> > -    struct page_info *root_table;
> > -    struct guest_iommu *g_iommu;
> > +    spinlock_t mapping_lock; /* io page table lock */
> > +
> > +    union {
> > +        /* Intel VT-d */
> > +        struct {
> > +            u64 pgd_maddr; /* io page directory machine address */
> > +            int agaw; /* adjusted guest address width, 0 is level 2 30-bit */
> > +            u64 iommu_bitmap; /* bitmap of iommu(s) that the domain uses */
> > +            struct list_head mapped_rmrrs;
> > +        } vtd;
> > +        /* AMD IOMMU */
> > +        struct {
> > +            int paging_mode;
> > +            struct page_info *root_table;
> > +            struct guest_iommu *g_iommu;
> > +        } amd_iommu;
> > +    };
> 
> The naming split here is weird.
> 
> Ideally we'd have struct {vtd,amd}_iommu in appropriate headers, and
> this would be simply
> 
> union {
>     struct vtd_iommu vtd;
>     struct amd_iommu amd;
> };
> 
> If this isn't trivial to arrange, can we at least s/amd_iommu/amd/ here ?

I was in two minds. I tried to look for a TLA for the AMD IOMMU and 'amd' seemed a little too non-descript. I don't really mind though if there's a strong preference to shorten it.
I can certainly try moving the struct definitions into the implementation headers.

  Paul

> 
> ~Andrew
Jan Beulich July 26, 2020, 8:13 a.m. UTC | #3
On 24.07.2020 20:49, Paul Durrant wrote:
>> From: Andrew Cooper <andrew.cooper3@citrix.com>
>> Sent: 24 July 2020 18:29
>>
>> On 24/07/2020 17:46, Paul Durrant wrote:
>>> diff --git a/xen/include/asm-x86/iommu.h b/xen/include/asm-x86/iommu.h
>>> index 6c9d5e5632..a7add5208c 100644
>>> --- a/xen/include/asm-x86/iommu.h
>>> +++ b/xen/include/asm-x86/iommu.h
>>> @@ -45,16 +45,23 @@ typedef uint64_t daddr_t;
>>>
>>>  struct arch_iommu
>>>  {
>>> -    u64 pgd_maddr;                 /* io page directory machine address */
>>> -    spinlock_t mapping_lock;            /* io page table lock */
>>> -    int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
>>> -    u64 iommu_bitmap;              /* bitmap of iommu(s) that the domain uses */
>>> -    struct list_head mapped_rmrrs;
>>> -
>>> -    /* amd iommu support */
>>> -    int paging_mode;
>>> -    struct page_info *root_table;
>>> -    struct guest_iommu *g_iommu;
>>> +    spinlock_t mapping_lock; /* io page table lock */
>>> +
>>> +    union {
>>> +        /* Intel VT-d */
>>> +        struct {
>>> +            u64 pgd_maddr; /* io page directory machine address */
>>> +            int agaw; /* adjusted guest address width, 0 is level 2 30-bit */
>>> +            u64 iommu_bitmap; /* bitmap of iommu(s) that the domain uses */
>>> +            struct list_head mapped_rmrrs;
>>> +        } vtd;
>>> +        /* AMD IOMMU */
>>> +        struct {
>>> +            int paging_mode;
>>> +            struct page_info *root_table;
>>> +            struct guest_iommu *g_iommu;
>>> +        } amd_iommu;
>>> +    };
>>
>> The naming split here is weird.
>>
>> Ideally we'd have struct {vtd,amd}_iommu in appropriate headers, and
>> this would be simply
>>
>> union {
>>     struct vtd_iommu vtd;
>>     struct amd_iommu amd;
>> };
>>
>> If this isn't trivial to arrange, can we at least s/amd_iommu/amd/ here ?
> 
> I was in two minds. I tried to look for a TLA for the AMD IOMMU and 'amd' seemed a little too non-descript. I don't really mind though if there's a strong preference to shorten it.

+1 for shortening in some way. Even amd_vi would already be better imo,
albeit I'm with Andrew and would think just amd is fine here (and
matches how things are in the file system structure).

While at it, may I ask that you also switch the plain "int" fields to
"unsigned int" - I think that's doable for both of them.

Jan
Durrant, Paul July 27, 2020, 9:30 a.m. UTC | #4
> -----Original Message-----
> From: Jan Beulich <jbeulich@suse.com>
> Sent: 26 July 2020 09:14
> To: paul@xen.org
> Cc: 'Andrew Cooper' <andrew.cooper3@citrix.com>; xen-devel@lists.xenproject.org; Durrant, Paul
> <pdurrant@amazon.co.uk>; 'Lukasz Hawrylko' <lukasz.hawrylko@linux.intel.com>; 'Wei Liu' <wl@xen.org>;
> 'Roger Pau Monné' <roger.pau@citrix.com>; 'Kevin Tian' <kevin.tian@intel.com>
> Subject: RE: [EXTERNAL] [PATCH 1/6] x86/iommu: re-arrange arch_iommu to separate common fields...
> 
> CAUTION: This email originated from outside of the organization. Do not click links or open
> attachments unless you can confirm the sender and know the content is safe.
> 
> 
> 
> On 24.07.2020 20:49, Paul Durrant wrote:
> >> From: Andrew Cooper <andrew.cooper3@citrix.com>
> >> Sent: 24 July 2020 18:29
> >>
> >> On 24/07/2020 17:46, Paul Durrant wrote:
> >>> diff --git a/xen/include/asm-x86/iommu.h b/xen/include/asm-x86/iommu.h
> >>> index 6c9d5e5632..a7add5208c 100644
> >>> --- a/xen/include/asm-x86/iommu.h
> >>> +++ b/xen/include/asm-x86/iommu.h
> >>> @@ -45,16 +45,23 @@ typedef uint64_t daddr_t;
> >>>
> >>>  struct arch_iommu
> >>>  {
> >>> -    u64 pgd_maddr;                 /* io page directory machine address */
> >>> -    spinlock_t mapping_lock;            /* io page table lock */
> >>> -    int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
> >>> -    u64 iommu_bitmap;              /* bitmap of iommu(s) that the domain uses */
> >>> -    struct list_head mapped_rmrrs;
> >>> -
> >>> -    /* amd iommu support */
> >>> -    int paging_mode;
> >>> -    struct page_info *root_table;
> >>> -    struct guest_iommu *g_iommu;
> >>> +    spinlock_t mapping_lock; /* io page table lock */
> >>> +
> >>> +    union {
> >>> +        /* Intel VT-d */
> >>> +        struct {
> >>> +            u64 pgd_maddr; /* io page directory machine address */
> >>> +            int agaw; /* adjusted guest address width, 0 is level 2 30-bit */
> >>> +            u64 iommu_bitmap; /* bitmap of iommu(s) that the domain uses */
> >>> +            struct list_head mapped_rmrrs;
> >>> +        } vtd;
> >>> +        /* AMD IOMMU */
> >>> +        struct {
> >>> +            int paging_mode;
> >>> +            struct page_info *root_table;
> >>> +            struct guest_iommu *g_iommu;
> >>> +        } amd_iommu;
> >>> +    };
> >>
> >> The naming split here is weird.
> >>
> >> Ideally we'd have struct {vtd,amd}_iommu in appropriate headers, and
> >> this would be simply
> >>
> >> union {
> >>     struct vtd_iommu vtd;
> >>     struct amd_iommu amd;
> >> };
> >>
> >> If this isn't trivial to arrange, can we at least s/amd_iommu/amd/ here ?
> >
> > I was in two minds. I tried to look for a TLA for the AMD IOMMU and 'amd' seemed a little too non-
> descript. I don't really mind though if there's a strong preference to shorten it.
> 
> +1 for shortening in some way. Even amd_vi would already be better imo,
> albeit I'm with Andrew and would think just amd is fine here (and
> matches how things are in the file system structure).
> 

OK, I'll shorten to 'amd'.

> While at it, may I ask that you also switch the plain "int" fields to
> "unsigned int" - I think that's doable for both of them.
> 

Sure, I can do that.

  Paul

> Jan
diff mbox series

Patch

diff --git a/xen/arch/x86/tboot.c b/xen/arch/x86/tboot.c
index 320e06f129..e66b0940c4 100644
--- a/xen/arch/x86/tboot.c
+++ b/xen/arch/x86/tboot.c
@@ -230,8 +230,8 @@  static void tboot_gen_domain_integrity(const uint8_t key[TB_KEY_SIZE],
         {
             const struct domain_iommu *dio = dom_iommu(d);
 
-            update_iommu_mac(&ctx, dio->arch.pgd_maddr,
-                             agaw_to_level(dio->arch.agaw));
+            update_iommu_mac(&ctx, dio->arch.vtd.pgd_maddr,
+                             agaw_to_level(dio->arch.vtd.agaw));
         }
     }
 
diff --git a/xen/drivers/passthrough/amd/iommu_guest.c b/xen/drivers/passthrough/amd/iommu_guest.c
index 014a72a54b..26819e82a8 100644
--- a/xen/drivers/passthrough/amd/iommu_guest.c
+++ b/xen/drivers/passthrough/amd/iommu_guest.c
@@ -50,12 +50,12 @@  static uint16_t guest_bdf(struct domain *d, uint16_t machine_bdf)
 
 static inline struct guest_iommu *domain_iommu(struct domain *d)
 {
-    return dom_iommu(d)->arch.g_iommu;
+    return dom_iommu(d)->arch.amd_iommu.g_iommu;
 }
 
 static inline struct guest_iommu *vcpu_iommu(struct vcpu *v)
 {
-    return dom_iommu(v->domain)->arch.g_iommu;
+    return dom_iommu(v->domain)->arch.amd_iommu.g_iommu;
 }
 
 static void guest_iommu_enable(struct guest_iommu *iommu)
@@ -823,7 +823,7 @@  int guest_iommu_init(struct domain* d)
     guest_iommu_reg_init(iommu);
     iommu->mmio_base = ~0ULL;
     iommu->domain = d;
-    hd->arch.g_iommu = iommu;
+    hd->arch.amd_iommu.g_iommu = iommu;
 
     tasklet_init(&iommu->cmd_buffer_tasklet, guest_iommu_process_command, d);
 
@@ -845,5 +845,5 @@  void guest_iommu_destroy(struct domain *d)
     tasklet_kill(&iommu->cmd_buffer_tasklet);
     xfree(iommu);
 
-    dom_iommu(d)->arch.g_iommu = NULL;
+    dom_iommu(d)->arch.amd_iommu.g_iommu = NULL;
 }
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 93e96cd69c..06c564968c 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -180,8 +180,8 @@  static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,
     struct page_info *table;
     const struct domain_iommu *hd = dom_iommu(d);
 
-    table = hd->arch.root_table;
-    level = hd->arch.paging_mode;
+    table = hd->arch.amd_iommu.root_table;
+    level = hd->arch.amd_iommu.paging_mode;
 
     BUG_ON( table == NULL || level < 1 || level > 6 );
 
@@ -325,7 +325,7 @@  int amd_iommu_unmap_page(struct domain *d, dfn_t dfn,
 
     spin_lock(&hd->arch.mapping_lock);
 
-    if ( !hd->arch.root_table )
+    if ( !hd->arch.amd_iommu.root_table )
     {
         spin_unlock(&hd->arch.mapping_lock);
         return 0;
@@ -450,7 +450,7 @@  int __init amd_iommu_quarantine_init(struct domain *d)
     unsigned int level = amd_iommu_get_paging_mode(end_gfn);
     struct amd_iommu_pte *table;
 
-    if ( hd->arch.root_table )
+    if ( hd->arch.amd_iommu.root_table )
     {
         ASSERT_UNREACHABLE();
         return 0;
@@ -458,11 +458,11 @@  int __init amd_iommu_quarantine_init(struct domain *d)
 
     spin_lock(&hd->arch.mapping_lock);
 
-    hd->arch.root_table = alloc_amd_iommu_pgtable();
-    if ( !hd->arch.root_table )
+    hd->arch.amd_iommu.root_table = alloc_amd_iommu_pgtable();
+    if ( !hd->arch.amd_iommu.root_table )
         goto out;
 
-    table = __map_domain_page(hd->arch.root_table);
+    table = __map_domain_page(hd->arch.amd_iommu.root_table);
     while ( level )
     {
         struct page_info *pg;
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 8d6309cc8c..c386dc4387 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -92,7 +92,8 @@  static void amd_iommu_setup_domain_device(
     u8 bus = pdev->bus;
     const struct domain_iommu *hd = dom_iommu(domain);
 
-    BUG_ON( !hd->arch.root_table || !hd->arch.paging_mode ||
+    BUG_ON( !hd->arch.amd_iommu.root_table ||
+            !hd->arch.amd_iommu.paging_mode ||
             !iommu->dev_table.buffer );
 
     if ( iommu_hwdom_passthrough && is_hardware_domain(domain) )
@@ -111,8 +112,8 @@  static void amd_iommu_setup_domain_device(
 
         /* bind DTE to domain page-tables */
         amd_iommu_set_root_page_table(
-            dte, page_to_maddr(hd->arch.root_table), domain->domain_id,
-            hd->arch.paging_mode, valid);
+            dte, page_to_maddr(hd->arch.amd_iommu.root_table),
+            domain->domain_id, hd->arch.amd_iommu.paging_mode, valid);
 
         /* Undo what amd_iommu_disable_domain_device() may have done. */
         ivrs_dev = &get_ivrs_mappings(iommu->seg)[req_id];
@@ -132,8 +133,8 @@  static void amd_iommu_setup_domain_device(
                         "root table = %#"PRIx64", "
                         "domain = %d, paging mode = %d\n",
                         req_id, pdev->type,
-                        page_to_maddr(hd->arch.root_table),
-                        domain->domain_id, hd->arch.paging_mode);
+                        page_to_maddr(hd->arch.amd_iommu.root_table),
+                        domain->domain_id, hd->arch.amd_iommu.paging_mode);
     }
 
     spin_unlock_irqrestore(&iommu->lock, flags);
@@ -207,10 +208,10 @@  static int iov_enable_xt(void)
 
 int amd_iommu_alloc_root(struct domain_iommu *hd)
 {
-    if ( unlikely(!hd->arch.root_table) )
+    if ( unlikely(!hd->arch.amd_iommu.root_table) )
     {
-        hd->arch.root_table = alloc_amd_iommu_pgtable();
-        if ( !hd->arch.root_table )
+        hd->arch.amd_iommu.root_table = alloc_amd_iommu_pgtable();
+        if ( !hd->arch.amd_iommu.root_table )
             return -ENOMEM;
     }
 
@@ -240,7 +241,7 @@  static int amd_iommu_domain_init(struct domain *d)
      *   physical address space we give it, but this isn't known yet so use 4
      *   unilaterally.
      */
-    hd->arch.paging_mode = amd_iommu_get_paging_mode(
+    hd->arch.amd_iommu.paging_mode = amd_iommu_get_paging_mode(
         is_hvm_domain(d)
         ? 1ul << (DEFAULT_DOMAIN_ADDRESS_WIDTH - PAGE_SHIFT)
         : get_upper_mfn_bound() + 1);
@@ -306,7 +307,7 @@  static void amd_iommu_disable_domain_device(const struct domain *domain,
         AMD_IOMMU_DEBUG("Disable: device id = %#x, "
                         "domain = %d, paging mode = %d\n",
                         req_id,  domain->domain_id,
-                        dom_iommu(domain)->arch.paging_mode);
+                        dom_iommu(domain)->arch.amd_iommu.paging_mode);
     }
     spin_unlock_irqrestore(&iommu->lock, flags);
 
@@ -422,10 +423,11 @@  static void deallocate_iommu_page_tables(struct domain *d)
     struct domain_iommu *hd = dom_iommu(d);
 
     spin_lock(&hd->arch.mapping_lock);
-    if ( hd->arch.root_table )
+    if ( hd->arch.amd_iommu.root_table )
     {
-        deallocate_next_page_table(hd->arch.root_table, hd->arch.paging_mode);
-        hd->arch.root_table = NULL;
+        deallocate_next_page_table(hd->arch.amd_iommu.root_table,
+                                   hd->arch.amd_iommu.paging_mode);
+        hd->arch.amd_iommu.root_table = NULL;
     }
     spin_unlock(&hd->arch.mapping_lock);
 }
@@ -605,11 +607,12 @@  static void amd_dump_p2m_table(struct domain *d)
 {
     const struct domain_iommu *hd = dom_iommu(d);
 
-    if ( !hd->arch.root_table )
+    if ( !hd->arch.amd_iommu.root_table )
         return;
 
-    printk("p2m table has %d levels\n", hd->arch.paging_mode);
-    amd_dump_p2m_table_level(hd->arch.root_table, hd->arch.paging_mode, 0, 0);
+    printk("p2m table has %d levels\n", hd->arch.amd_iommu.paging_mode);
+    amd_dump_p2m_table_level(hd->arch.amd_iommu.root_table,
+                             hd->arch.amd_iommu.paging_mode, 0, 0);
 }
 
 static const struct iommu_ops __initconstrel _iommu_ops = {
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 01dc444771..ac1373fb99 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -257,20 +257,20 @@  static u64 bus_to_context_maddr(struct vtd_iommu *iommu, u8 bus)
 static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr, int alloc)
 {
     struct domain_iommu *hd = dom_iommu(domain);
-    int addr_width = agaw_to_width(hd->arch.agaw);
+    int addr_width = agaw_to_width(hd->arch.vtd.agaw);
     struct dma_pte *parent, *pte = NULL;
-    int level = agaw_to_level(hd->arch.agaw);
+    int level = agaw_to_level(hd->arch.vtd.agaw);
     int offset;
     u64 pte_maddr = 0;
 
     addr &= (((u64)1) << addr_width) - 1;
     ASSERT(spin_is_locked(&hd->arch.mapping_lock));
-    if ( !hd->arch.pgd_maddr &&
+    if ( !hd->arch.vtd.pgd_maddr &&
          (!alloc ||
-          ((hd->arch.pgd_maddr = alloc_pgtable_maddr(1, hd->node)) == 0)) )
+          ((hd->arch.vtd.pgd_maddr = alloc_pgtable_maddr(1, hd->node)) == 0)) )
         goto out;
 
-    parent = (struct dma_pte *)map_vtd_domain_page(hd->arch.pgd_maddr);
+    parent = (struct dma_pte *)map_vtd_domain_page(hd->arch.vtd.pgd_maddr);
     while ( level > 1 )
     {
         offset = address_level_offset(addr, level);
@@ -593,7 +593,7 @@  static int __must_check iommu_flush_iotlb(struct domain *d, dfn_t dfn,
     {
         iommu = drhd->iommu;
 
-        if ( !test_bit(iommu->index, &hd->arch.iommu_bitmap) )
+        if ( !test_bit(iommu->index, &hd->arch.vtd.iommu_bitmap) )
             continue;
 
         flush_dev_iotlb = !!find_ats_dev_drhd(iommu);
@@ -1281,7 +1281,10 @@  void __init iommu_free(struct acpi_drhd_unit *drhd)
 
 static int intel_iommu_domain_init(struct domain *d)
 {
-    dom_iommu(d)->arch.agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
+    struct domain_iommu *hd = dom_iommu(d);
+
+    hd->arch.vtd.agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
+    INIT_LIST_HEAD(&hd->arch.vtd.mapped_rmrrs);
 
     return 0;
 }
@@ -1381,10 +1384,10 @@  int domain_context_mapping_one(
         spin_lock(&hd->arch.mapping_lock);
 
         /* Ensure we have pagetables allocated down to leaf PTE. */
-        if ( hd->arch.pgd_maddr == 0 )
+        if ( hd->arch.vtd.pgd_maddr == 0 )
         {
             addr_to_dma_page_maddr(domain, 0, 1);
-            if ( hd->arch.pgd_maddr == 0 )
+            if ( hd->arch.vtd.pgd_maddr == 0 )
             {
             nomem:
                 spin_unlock(&hd->arch.mapping_lock);
@@ -1395,7 +1398,7 @@  int domain_context_mapping_one(
         }
 
         /* Skip top levels of page tables for 2- and 3-level DRHDs. */
-        pgd_maddr = hd->arch.pgd_maddr;
+        pgd_maddr = hd->arch.vtd.pgd_maddr;
         for ( agaw = level_to_agaw(4);
               agaw != level_to_agaw(iommu->nr_pt_levels);
               agaw-- )
@@ -1449,7 +1452,7 @@  int domain_context_mapping_one(
     if ( rc > 0 )
         rc = 0;
 
-    set_bit(iommu->index, &hd->arch.iommu_bitmap);
+    set_bit(iommu->index, &hd->arch.vtd.iommu_bitmap);
 
     unmap_vtd_domain_page(context_entries);
 
@@ -1727,7 +1730,7 @@  static int domain_context_unmap(struct domain *domain, u8 devfn,
     {
         int iommu_domid;
 
-        clear_bit(iommu->index, &dom_iommu(domain)->arch.iommu_bitmap);
+        clear_bit(iommu->index, &dom_iommu(domain)->arch.vtd.iommu_bitmap);
 
         iommu_domid = domain_iommu_domid(domain, iommu);
         if ( iommu_domid == -1 )
@@ -1752,7 +1755,7 @@  static void iommu_domain_teardown(struct domain *d)
     if ( list_empty(&acpi_drhd_units) )
         return;
 
-    list_for_each_entry_safe ( mrmrr, tmp, &hd->arch.mapped_rmrrs, list )
+    list_for_each_entry_safe ( mrmrr, tmp, &hd->arch.vtd.mapped_rmrrs, list )
     {
         list_del(&mrmrr->list);
         xfree(mrmrr);
@@ -1764,8 +1767,9 @@  static void iommu_domain_teardown(struct domain *d)
         return;
 
     spin_lock(&hd->arch.mapping_lock);
-    iommu_free_pagetable(hd->arch.pgd_maddr, agaw_to_level(hd->arch.agaw));
-    hd->arch.pgd_maddr = 0;
+    iommu_free_pagetable(hd->arch.vtd.pgd_maddr,
+                         agaw_to_level(hd->arch.vtd.agaw));
+    hd->arch.vtd.pgd_maddr = 0;
     spin_unlock(&hd->arch.mapping_lock);
 }
 
@@ -1905,7 +1909,7 @@  static void iommu_set_pgd(struct domain *d)
     mfn_t pgd_mfn;
 
     pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
-    dom_iommu(d)->arch.pgd_maddr =
+    dom_iommu(d)->arch.vtd.pgd_maddr =
         pagetable_get_paddr(pagetable_from_mfn(pgd_mfn));
 }
 
@@ -1925,7 +1929,7 @@  static int rmrr_identity_mapping(struct domain *d, bool_t map,
      * No need to acquire hd->arch.mapping_lock: Both insertion and removal
      * get done while holding pcidevs_lock.
      */
-    list_for_each_entry( mrmrr, &hd->arch.mapped_rmrrs, list )
+    list_for_each_entry( mrmrr, &hd->arch.vtd.mapped_rmrrs, list )
     {
         if ( mrmrr->base == rmrr->base_address &&
              mrmrr->end == rmrr->end_address )
@@ -1972,7 +1976,7 @@  static int rmrr_identity_mapping(struct domain *d, bool_t map,
     mrmrr->base = rmrr->base_address;
     mrmrr->end = rmrr->end_address;
     mrmrr->count = 1;
-    list_add_tail(&mrmrr->list, &hd->arch.mapped_rmrrs);
+    list_add_tail(&mrmrr->list, &hd->arch.vtd.mapped_rmrrs);
 
     return 0;
 }
@@ -2671,8 +2675,9 @@  static void vtd_dump_p2m_table(struct domain *d)
         return;
 
     hd = dom_iommu(d);
-    printk("p2m table has %d levels\n", agaw_to_level(hd->arch.agaw));
-    vtd_dump_p2m_table_level(hd->arch.pgd_maddr, agaw_to_level(hd->arch.agaw), 0, 0);
+    printk("p2m table has %d levels\n", agaw_to_level(hd->arch.vtd.agaw));
+    vtd_dump_p2m_table_level(hd->arch.vtd.pgd_maddr,
+                             agaw_to_level(hd->arch.vtd.agaw), 0, 0);
 }
 
 static int __init intel_iommu_quarantine_init(struct domain *d)
@@ -2683,7 +2688,7 @@  static int __init intel_iommu_quarantine_init(struct domain *d)
     unsigned int level = agaw_to_level(agaw);
     int rc;
 
-    if ( hd->arch.pgd_maddr )
+    if ( hd->arch.vtd.pgd_maddr )
     {
         ASSERT_UNREACHABLE();
         return 0;
@@ -2691,11 +2696,11 @@  static int __init intel_iommu_quarantine_init(struct domain *d)
 
     spin_lock(&hd->arch.mapping_lock);
 
-    hd->arch.pgd_maddr = alloc_pgtable_maddr(1, hd->node);
-    if ( !hd->arch.pgd_maddr )
+    hd->arch.vtd.pgd_maddr = alloc_pgtable_maddr(1, hd->node);
+    if ( !hd->arch.vtd.pgd_maddr )
         goto out;
 
-    parent = map_vtd_domain_page(hd->arch.pgd_maddr);
+    parent = map_vtd_domain_page(hd->arch.vtd.pgd_maddr);
     while ( level )
     {
         uint64_t maddr;
diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
index 3d7670e8c6..a12109a1de 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -139,7 +139,6 @@  int arch_iommu_domain_init(struct domain *d)
     struct domain_iommu *hd = dom_iommu(d);
 
     spin_lock_init(&hd->arch.mapping_lock);
-    INIT_LIST_HEAD(&hd->arch.mapped_rmrrs);
 
     return 0;
 }
diff --git a/xen/include/asm-x86/iommu.h b/xen/include/asm-x86/iommu.h
index 6c9d5e5632..a7add5208c 100644
--- a/xen/include/asm-x86/iommu.h
+++ b/xen/include/asm-x86/iommu.h
@@ -45,16 +45,23 @@  typedef uint64_t daddr_t;
 
 struct arch_iommu
 {
-    u64 pgd_maddr;                 /* io page directory machine address */
-    spinlock_t mapping_lock;            /* io page table lock */
-    int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
-    u64 iommu_bitmap;              /* bitmap of iommu(s) that the domain uses */
-    struct list_head mapped_rmrrs;
-
-    /* amd iommu support */
-    int paging_mode;
-    struct page_info *root_table;
-    struct guest_iommu *g_iommu;
+    spinlock_t mapping_lock; /* io page table lock */
+
+    union {
+        /* Intel VT-d */
+        struct {
+            u64 pgd_maddr; /* io page directory machine address */
+            int agaw; /* adjusted guest address width, 0 is level 2 30-bit */
+            u64 iommu_bitmap; /* bitmap of iommu(s) that the domain uses */
+            struct list_head mapped_rmrrs;
+        } vtd;
+        /* AMD IOMMU */
+        struct {
+            int paging_mode;
+            struct page_info *root_table;
+            struct guest_iommu *g_iommu;
+        } amd_iommu;
+    };
 };
 
 extern struct iommu_ops iommu_ops;