
[V6,1/4] hw/i386: Introduce AMD IOMMU

Message ID 1456078260-6669-2-git-send-email-davidkiarie4@gmail.com (mailing list archive)
State New, archived

Commit Message

David Kiarie Feb. 21, 2016, 6:10 p.m. UTC
Add AMD IOMMU emulation to QEMU in addition to the Intel IOMMU.
The IOMMU does basic translation and error checking, and has a
minimal IOTLB implementation.

Signed-off-by: David Kiarie <davidkiarie4@gmail.com>
---
 hw/i386/Makefile.objs |    1 +
 hw/i386/amd_iommu.c   | 1432 +++++++++++++++++++++++++++++++++++++++++++++++++
 hw/i386/amd_iommu.h   |  395 ++++++++++++++
 include/hw/pci/pci.h  |    2 +
 4 files changed, 1830 insertions(+)
 create mode 100644 hw/i386/amd_iommu.c
 create mode 100644 hw/i386/amd_iommu.h

Comments

Marcel Apfelbaum Feb. 25, 2016, 3:43 p.m. UTC | #1
On 02/21/2016 08:10 PM, David Kiarie wrote:
> Add AMD IOMMU emulation to QEMU in addition to the Intel IOMMU.
> The IOMMU does basic translation and error checking, and has a
> minimal IOTLB implementation.

Hi,

>
> Signed-off-by: David Kiarie <davidkiarie4@gmail.com>
> ---
>   hw/i386/Makefile.objs |    1 +
>   hw/i386/amd_iommu.c   | 1432 +++++++++++++++++++++++++++++++++++++++++++++++++
>   hw/i386/amd_iommu.h   |  395 ++++++++++++++
>   include/hw/pci/pci.h  |    2 +
>   4 files changed, 1830 insertions(+)
>   create mode 100644 hw/i386/amd_iommu.c
>   create mode 100644 hw/i386/amd_iommu.h
>
> diff --git a/hw/i386/Makefile.objs b/hw/i386/Makefile.objs
> index b52d5b8..2f1a265 100644
> --- a/hw/i386/Makefile.objs
> +++ b/hw/i386/Makefile.objs
> @@ -3,6 +3,7 @@ obj-y += multiboot.o
>   obj-y += pc.o pc_piix.o pc_q35.o
>   obj-y += pc_sysfw.o
>   obj-y += intel_iommu.o
> +obj-y += amd_iommu.o
>   obj-$(CONFIG_XEN) += ../xenpv/ xen/
>
>   obj-y += kvmvapic.o
> diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c
> new file mode 100644
> index 0000000..3dac043
> --- /dev/null
> +++ b/hw/i386/amd_iommu.c
> @@ -0,0 +1,1432 @@
> +/*
> + * QEMU emulation of AMD IOMMU (AMD-Vi)
> + *
> + * Copyright (C) 2011 Eduard - Gabriel Munteanu
> + * Copyright (C) 2015 David Kiarie, <davidkiarie4@gmail.com>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License as published by
> + * the Free Software Foundation; either version 2 of the License, or
> + * (at your option) any later version.
> +
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> + * GNU General Public License for more details.
> +
> + * You should have received a copy of the GNU General Public License along
> + * with this program; if not, see <http://www.gnu.org/licenses/>.
> + *
> + * Cache implementation inspired by hw/i386/intel_iommu.c
> + *
> + */
> +#include "hw/i386/amd_iommu.h"
> +
> +/*#define DEBUG_AMD_IOMMU*/
> +#ifdef DEBUG_AMD_IOMMU
> +enum {
> +    DEBUG_GENERAL, DEBUG_CAPAB, DEBUG_MMIO, DEBUG_ELOG,
> +    DEBUG_CACHE, DEBUG_COMMAND, DEBUG_MMU
> +};
> +
> +#define IOMMU_DBGBIT(x)   (1 << DEBUG_##x)
> +static int iommu_dbgflags = IOMMU_DBGBIT(MMIO);
> +
> +#define IOMMU_DPRINTF(what, fmt, ...) do { \
> +    if (iommu_dbgflags & IOMMU_DBGBIT(what)) { \
> +        fprintf(stderr, "(amd-iommu)%s: " fmt "\n", __func__, \
> +                ## __VA_ARGS__); } \
> +    } while (0)
> +#else
> +#define IOMMU_DPRINTF(what, fmt, ...) do {} while (0)
> +#endif
> +
> +typedef struct AMDIOMMUAddressSpace {
> +    uint8_t bus_num;            /* bus number                           */
> +    uint8_t devfn;              /* device function                      */
> +    AMDIOMMUState *iommu_state; /* IOMMU - one per machine              */
> +    MemoryRegion iommu;         /* Device's iommu region                */
> +    AddressSpace as;            /* device's corresponding address space */
> +} AMDIOMMUAddressSpace;
> +
> +/* IOMMU cache entry */
> +typedef struct IOMMUIOTLBEntry {
> +    uint64_t gfn;
> +    uint16_t domid;
> +    uint64_t devid;
> +    uint64_t perms;
> +    uint64_t translated_addr;
> +} IOMMUIOTLBEntry;
> +
> +/* configure MMIO registers at startup/reset */
> +static void amd_iommu_set_quad(AMDIOMMUState *s, hwaddr addr, uint64_t val,
> +                               uint64_t romask, uint64_t w1cmask)
> +{
> +    stq_le_p(&s->mmior[addr], val);
> +    stq_le_p(&s->romask[addr], romask);
> +    stq_le_p(&s->w1cmask[addr], w1cmask);
> +}
> +
> +static uint16_t amd_iommu_readw(AMDIOMMUState *s, hwaddr addr)
> +{
> +    return lduw_le_p(&s->mmior[addr]);
> +}
> +
> +static uint32_t amd_iommu_readl(AMDIOMMUState *s, hwaddr addr)
> +{
> +    return ldl_le_p(&s->mmior[addr]);
> +}
> +
> +static uint64_t amd_iommu_readq(AMDIOMMUState *s, hwaddr addr)
> +{
> +    return ldq_le_p(&s->mmior[addr]);
> +}
> +
> +/* internal write */
> +static void amd_iommu_writeq_raw(AMDIOMMUState *s, uint64_t val, hwaddr addr)
> +{
> +    stq_le_p(&s->mmior[addr], val);
> +}
> +
> +/* external write */
> +static void amd_iommu_writew(AMDIOMMUState *s, hwaddr addr, uint16_t val)
> +{
> +    uint16_t romask = lduw_le_p(&s->romask[addr]);
> +    uint16_t w1cmask = lduw_le_p(&s->w1cmask[addr]);
> +    uint16_t oldval = lduw_le_p(&s->mmior[addr]);
> +    stw_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask & oldval));
> +}
> +
> +static void amd_iommu_writel(AMDIOMMUState *s, hwaddr addr, uint32_t val)
> +{
> +    uint32_t romask = ldl_le_p(&s->romask[addr]);
> +    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
> +    uint32_t oldval = ldl_le_p(&s->mmior[addr]);
> +    stl_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask & oldval));
> +}
> +
> +static void amd_iommu_writeq(AMDIOMMUState *s, hwaddr addr, uint64_t val)
> +{
> +    uint64_t romask = ldq_le_p(&s->romask[addr]);
> +    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
> +    uint32_t oldval = ldq_le_p(&s->mmior[addr]);
> +    stq_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask & oldval));
> +}
> +
> +static void amd_iommu_log_event(AMDIOMMUState *s, uint16_t *evt)
> +{
> +    /* event logging not enabled */
> +    if (!s->evtlog_enabled || *(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS] |
> +        IOMMU_MMIO_STATUS_EVT_OVF) {
> +        return;
> +    }
> +
> +    /* event log buffer full */
> +    if (s->evtlog_tail >= s->evtlog_len) {
> +        *(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS] |= IOMMU_MMIO_STATUS_EVT_OVF;
> +        /* generate interrupt */
> +        msi_notify(&s->dev, 0);
> +    }
> +
> +    if (dma_memory_write(&address_space_memory, s->evtlog_len + s->evtlog_tail,
> +       &evt, IOMMU_EVENT_LEN)) {
> +        IOMMU_DPRINTF(ELOG, "error: fail to write at address 0x%"PRIx64
> +                      " + offset 0x%"PRIx32, s->evtlog, s->evtlog_tail);
> +    }
> +
> +     s->evtlog_tail += IOMMU_EVENT_LEN;
> +     *(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS] |= IOMMU_MMIO_STATUS_COMP_INT;
> +}
> +
> +/* log an error encountered page-walking
> + *
> + * @addr: virtual address in translation request
> + */
> +static void amd_iommu_page_fault(AMDIOMMUState *s, uint16_t devid,
> +                                 dma_addr_t addr, uint16_t info)
> +{
> +    IOMMU_DPRINTF(ELOG, "");
> +
> +    uint16_t evt[8];
> +
> +    info |= IOMMU_EVENT_IOPF_I;
> +
> +    /* encode information */
> +    *(uint16_t *)&evt[0] = devid;
> +    *(uint16_t *)&evt[3] = info;
> +    *(uint64_t *)&evt[4] = cpu_to_le64(addr);
> +
> +    /* log a page fault */
> +    amd_iommu_log_event(s, evt);
> +
> +    /* Abort the translation */
> +    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
> +            PCI_STATUS_SIG_TARGET_ABORT);
> +}
> +/*
> + * log a master abort accessing device table
> + *  @devtab : address of device table entry
> + *  @info : error flags
> + */
> +static void amd_iommu_log_devtab_error(AMDIOMMUState *s, uint16_t devid,
> +                                       dma_addr_t devtab, uint16_t info)
> +{
> +
> +    IOMMU_DPRINTF(ELOG, "");
> +
> +    uint16_t evt[8];
> +
> +    info |= IOMMU_EVENT_DEV_TAB_HW_ERROR;
> +
> +    /* encode information */
> +    *(uint16_t *)&evt[0] = devid;
> +    *(uint8_t *)&evt[3]  = info;
> +    *(uint64_t *)&evt[4] = cpu_to_le64(devtab);
> +
> +    amd_iommu_log_event(s, evt);
> +
> +    /* Abort the translation */
> +    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
> +            PCI_STATUS_SIG_TARGET_ABORT);
> +
> +}
> +
> +/* log a master abort encountered during a page-walk
> + *  @addr : address that couldn't be accessed
> + */
> +static void amd_iommu_log_pagetab_error(AMDIOMMUState *s, uint16_t devid,
> +                                        dma_addr_t addr, uint16_t info)
> +{
> +    IOMMU_DPRINTF(ELOG, "");
> +
> +    uint16_t evt[8];
> +
> +    info |= IOMMU_EVENT_PAGE_TAB_HW_ERROR;
> +
> +    /* encode information */
> +    *(uint16_t *)&evt[0] = devid;
> +    *(uint8_t *)&evt[3]  = info;
> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
> +
> +    amd_iommu_log_event(s, evt);
> +
> +    /* Abort the translation */
> +    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
> +            PCI_STATUS_SIG_TARGET_ABORT);
> +
> +}
> +
> +/* log an event trying to access command buffer
> + *   @addr : address that couldn't be accessed
> + */
> +static void amd_iommu_log_command_error(AMDIOMMUState *s, dma_addr_t addr)
> +{
> +    IOMMU_DPRINTF(ELOG, "");
> +
> +    uint16_t evt[8];
> +
> +    /* encode information */
> +    *(uint8_t *)&evt[3]  = (uint8_t)IOMMU_EVENT_COMMAND_HW_ERROR;
> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
> +
> +    amd_iommu_log_event(s, evt);
> +
> +    /* Abort the translation */
> +    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
> +            PCI_STATUS_SIG_TARGET_ABORT);
> +}
> +
> +/* log an illegal comand event
> + *   @addr : address of illegal command
> + */
> +static void amd_iommu_log_illegalcom_error(AMDIOMMUState *s, uint16_t info,
> +                                           dma_addr_t addr)
> +{
> +    IOMMU_DPRINTF(ELOG, "");
> +
> +    uint16_t evt[8];
> +
> +    /* encode information */
> +    *(uint8_t *)&evt[3]  = (uint8_t)IOMMU_EVENT_ILLEGAL_COMMAND_ERROR;
> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);

Can you please use a macro instead of the literal 3?
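For example, something like (the name is just a suggestion):

    #define IOMMU_EVENT_ADDR_RSHIFT  3

    *(uint64_t *)&evt[4] = cpu_to_le64(addr) >> IOMMU_EVENT_ADDR_RSHIFT;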

> +
> +    amd_iommu_log_event(s, evt);
> +}
> +
> +/* log an error accessing device table
> + *
> + *  @devid : device owning the table entry
> + *  @devtab : address of device table entry
> + *  @info : error flags
> + */
> +static void amd_iommu_log_illegaldevtab_error(AMDIOMMUState *s, uint16_t devid,
> +                                              dma_addr_t addr, uint16_t info)
> +{
> +    IOMMU_DPRINTF(ELOG, "");
> +
> +    uint16_t evt[8];
> +
> +    info |= IOMMU_EVENT_ILLEGAL_DEVTAB_ENTRY;
> +
> +    *(uint16_t *)&evt[0] = devid;
> +    *(uint8_t *)&evt[3]  = info;
> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
> +
> +    amd_iommu_log_event(s, evt);
> +}

It seems that all the log functions do the same thing:
create an event, log it, and optionally set PCI_STATUS_SIG_TARGET_ABORT.

I would consider uniting them in a single function. (not a must)
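Roughly something like this (an untested sketch, names are just suggestions; the
small per-event differences, e.g. whether the address is shifted by 3, would
still have to be handled by the callers):

    static void amd_iommu_log_common(AMDIOMMUState *s, uint16_t devid,
                                     uint16_t info, uint64_t addr,
                                     bool target_abort)
    {
        uint16_t evt[8] = {0};

        /* encode information */
        *(uint16_t *)&evt[0] = devid;
        *(uint16_t *)&evt[3] = info;
        *(uint64_t *)&evt[4] = cpu_to_le64(addr);

        amd_iommu_log_event(s, evt);

        if (target_abort) {
            pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
                                       PCI_STATUS_SIG_TARGET_ABORT);
        }
    }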

> +
> +static gboolean amd_iommu_uint64_equal(gconstpointer v1, gconstpointer v2)
> +{
> +    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
> +}
> +
> +static guint amd_iommu_uint64_hash(gconstpointer v)
> +{
> +    return (guint)*(const uint64_t *)v;
> +}
> +
> +static IOMMUIOTLBEntry *amd_iommu_iotlb_lookup(AMDIOMMUState *s, hwaddr addr,
> +                                               uint64_t devid)
> +{
> +    uint64_t key = (addr >> IOMMU_PAGE_SHIFT_4K) |
> +                   ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
> +    return g_hash_table_lookup(s->iotlb, &key);
> +}
> +
> +static void amd_iommu_iotlb_reset(AMDIOMMUState *s)
> +{
> +    assert(s->iotlb);
> +    g_hash_table_remove_all(s->iotlb);
> +}
> +
> +static gboolean amd_iommu_iotlb_remove_by_devid(gpointer key, gpointer value,
> +                                                gpointer user_data)
> +{
> +    IOMMUIOTLBEntry *entry = (IOMMUIOTLBEntry *)value;
> +    uint16_t devid = *(uint16_t *)user_data;
> +    return entry->devid == devid;
> +}
> +
> +static void amd_iommu_iotlb_remove_page(AMDIOMMUState *s, hwaddr addr,
> +                                        uint64_t devid)
> +{
> +    uint64_t key = (addr >> IOMMU_PAGE_SHIFT_4K) |
> +                   ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
> +    g_hash_table_remove(s->iotlb, &key);
> +}
> +
> +/* extract device id */
> +static inline uint16_t devid_extract(uint8_t *cmd)
> +{
> +    return (uint16_t)cmd[2] & IOMMU_INVAL_DEV_ID_MASK;
> +}
> +
> +static void amd_iommu_invalidate_iotlb(AMDIOMMUState *s, uint64_t *cmd)
> +{
> +    uint16_t devid = devid_extract((uint8_t *)cmd);
> +    /* if invalidation of more than one page requested */
> +    if (IOMMU_INVAL_ALL(cmd[0])) {
> +        g_hash_table_foreach_remove(s->iotlb, amd_iommu_iotlb_remove_by_devid,
> +                                    &devid);
> +    } else {
> +        hwaddr addr = (hwaddr)(cmd[1] & IOMMU_INVAL_ADDR_MASK);
> +        amd_iommu_iotlb_remove_page(s, addr, devid);
> +    }
> +}
> +
> +static void amd_iommu_update_iotlb(AMDIOMMUState *s, uint16_t devid,
> +                                   uint64_t gpa, uint64_t spa, uint64_t perms,
> +                                   uint16_t domid)
> +{
> +    IOMMUIOTLBEntry *entry = g_malloc(sizeof(*entry));
> +    uint64_t *key = g_malloc(sizeof(key));
> +    uint64_t gfn = gpa >> IOMMU_PAGE_SHIFT_4K;
> +
> +    IOMMU_DPRINTF(CACHE, " update iotlb devid: %02x:%02x.%x gpa 0x%"PRIx64
> +                  " hpa 0x%"PRIx64, PCI_BUS_NUM(devid), PCI_SLOT(devid),
> +                  PCI_FUNC(devid), gpa, spa);
> +
> +    if (g_hash_table_size(s->iotlb) >= IOMMU_IOTLB_MAX_SIZE) {
> +        IOMMU_DPRINTF(CACHE, "iotlb exceeds size limit - reset");
> +        amd_iommu_iotlb_reset(s);
> +    }
> +
> +    entry->gfn = gfn;
> +    entry->domid = domid;
> +    entry->perms = perms;
> +    entry->translated_addr = spa;
> +    *key = gfn | ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
> +    g_hash_table_replace(s->iotlb, key, entry);
> +}
> +
> +/* execute a completion wait command */
> +static void amd_iommu_completion_wait(AMDIOMMUState *s, uint8_t *cmd)
> +{
> +    IOMMU_DPRINTF(COMMAND, "");
> +    unsigned int addr;
> +
> +    /* completion store */
> +    if (cmd[0] & IOMMU_COM_COMPLETION_STORE_MASK) {
> +        addr = le64_to_cpu(*(uint64_t *)cmd) & IOMMU_COM_STORE_ADDRESS_MASK;
> +        if (dma_memory_write(&address_space_memory, addr, cmd + 8, 8)) {
> +            IOMMU_DPRINTF(ELOG, "error: fail to write at address 0%x"PRIx64,
> +                          addr);
> +        }
> +    }
> +
> +    /* set completion interrupt */
> +    if (cmd[0] & IOMMU_COM_COMPLETION_INTR) {
> +        s->mmior[IOMMU_MMIO_STATUS] |= IOMMU_MMIO_STATUS_COMP_INT;
> +    }
> +}
> +
> +/* get command type */
> +static uint8_t opcode(uint8_t *cmd)
> +{
> +    return cmd[IOMMU_CMDBUF_ID_BYTE] >> IOMMU_CMDBUF_ID_RSHIFT;
> +}
> +
> +/* linux seems to be using reserved bits so I just log without abortig bug */

I couldn't quite understand the comment

> +static void iommu_inval_devtab_entry(AMDIOMMUState *s, uint8_t *cmd,
> +                                     uint8_t type)
> +{
> +    IOMMU_DPRINTF(COMMAND, "");
> +
> +    /* This command should invalidate internal caches of which there isn't */
> +    if (*(uint64_t *)&cmd[0] & IOMMU_CMD_INVAL_DEV_RSVD ||
> +            *(uint64_t *)&cmd[1]) {
> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
> +    }
> +#ifdef DEBUG_AMD_IOMMU
> +    uint16_t devid = devid_extract(cmd);
> +#endif
> +    IOMMU_DPRINTF(COMMAND, "device table entry for devid: %02x:%02x.%x"
> +                  "invalidated", PCI_BUS_NUM(devid), PCI_SLOT(devid),
> +                  PCI_FUNC(devid));
> +}
> +
> +static void iommu_completion_wait(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
> +{
> +    IOMMU_DPRINTF(COMMAND, "");
> +
> +    if (*(uint32_t *)&cmd[1] & IOMMU_COMPLETION_WAIT_RSVD) {
> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
> +    }
> +    /* pretend to wait for command execution to complete */
> +    IOMMU_DPRINTF(COMMAND, "completion wait requested with store address 0x%"
> +                  PRIx64 " and store data 0x%"PRIx64, (cmd[0] &
> +                  IOMMU_COM_STORE_ADDRESS_MASK), *(uint64_t *)(cmd + 8));
> +    amd_iommu_completion_wait(s, cmd);
> +}
> +
> +static void iommu_complete_ppr(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
> +{
> +    IOMMU_DPRINTF(COMMAND, "");
> +
> +    if ((*(uint64_t *)&cmd[0] & IOMMU_COMPLETE_PPR_RQ_RSVD) ||
> +       *(uint64_t *)&cmd[1] & 0xffff000000000000) {


Can you please document this mask?
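For example, a named constant next to the other command masks in amd_iommu.h
(name is just a suggestion, please check it against the spec):

    /* bits this implementation treats as reserved in the second quadword */
    #define IOMMU_COMPLETE_PPR_HIGH_RSVD  0xffff000000000000ULL

so the check becomes:

    if ((*(uint64_t *)&cmd[0] & IOMMU_COMPLETE_PPR_RQ_RSVD) ||
        (*(uint64_t *)&cmd[1] & IOMMU_COMPLETE_PPR_HIGH_RSVD)) {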

> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
> +    }
> +
> +    IOMMU_DPRINTF(COMMAND, "Execution of PPR queue requested");
> +}
> +
> +static void iommu_inval_all(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
> +{
> +    IOMMU_DPRINTF(COMMAND, "");
> +
> +    if ((*(uint64_t *)&cmd[0] & IOMMU_INVAL_IOMMU_ALL_RSVD) ||
> +       *(uint64_t *)&cmd[1]) {
> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
> +    }
> +
> +    amd_iommu_iotlb_reset(s);
> +    IOMMU_DPRINTF(COMMAND, "Invalidation of all IOMMU cache requested");
> +}
> +
> +static inline uint16_t domid_extract(uint64_t *cmd)
> +{
> +    return (uint16_t)cmd[0] & IOMMU_INVAL_PAGES_DOMID;
> +}
> +
> +static gboolean amd_iommu_iotlb_remove_by_domid(gpointer key, gpointer value,
> +                                                gpointer user_data)
> +{
> +    IOMMUIOTLBEntry *entry = (IOMMUIOTLBEntry *)value;
> +    uint16_t domid = *(uint16_t *)user_data;
> +    return entry->domid == domid;
> +}
> +
> +/* we don't have devid - we can't remove pages by address */
> +static void iommu_inval_pages(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
> +{
> +    IOMMU_DPRINTF(COMMAND, "");
> +    uint16_t domid = domid_extract((uint64_t *)cmd);
> +
> +    if (*(uint64_t *)&cmd[0] & IOMMU_INVAL_IOMMU_PAGES_RSVD ||
> +       *(uint32_t *)&cmd[1] & 0x00000ff0) {
> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
> +    }
> +
> +    g_hash_table_foreach_remove(s->iotlb, amd_iommu_iotlb_remove_by_domid,
> +                                &domid);
> +
> +    IOMMU_DPRINTF(COMMAND, "IOMMU pages for domain 0x%"PRIx16 "invalidated",
> +                  domid);
> +}
> +
> +static void iommu_prefetch_pages(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
> +{
> +    IOMMU_DPRINTF(COMMAND, "");
> +
> +    if ((*(uint64_t *)&cmd[0] & IOMMU_PRF_IOMMU_PAGES_RSVD) ||
> +       (*(uint32_t *)&cmd[1] & 0x00000fd4)) {

Same here: maybe you can name the mask, so we can follow the spec more easily.
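E.g. something like:

    #define IOMMU_PRF_IOMMU_PAGES_HIGH_RSVD  0x00000fd4

(with whatever name actually matches the spec), so this check reads the same
way as the other command handlers.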

> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
> +    }
> +
> +    IOMMU_DPRINTF(COMMAND, "Pre-fetch of IOMMU pages requested");
> +}
> +
> +static void iommu_inval_inttable(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
> +{
> +    IOMMU_DPRINTF(COMMAND, "");
> +
> +    if ((*(uint64_t *)&cmd[0] & IOMMU_INVAL_INTR_TABLE_RSVD) ||
> +       *(uint64_t *)&cmd[1]) {
> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
> +        return;
> +    }
> +
> +    IOMMU_DPRINTF(COMMAND, "interrupt table invalidated");
> +}
> +
> +static void iommu_inval_iotlb(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
> +{
> +    IOMMU_DPRINTF(COMMAND, "");
> +
> +    if (*(uint32_t *)&cmd[2] & IOMMU_INVAL_IOTLB_PAGES_RSVD) {
> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
> +        return;
> +    }
> +
> +    amd_iommu_invalidate_iotlb(s, (uint64_t *)cmd);
> +    IOMMU_DPRINTF(COMMAND, "IOTLB pages invalidated");
> +}
> +
> +/* not honouring reserved bits is regarded as an illegal command */
> +static void amd_iommu_cmdbuf_exec(AMDIOMMUState *s)
> +{
> +    IOMMU_DPRINTF(COMMAND, "");
> +
> +    uint8_t type;
> +    uint8_t cmd[IOMMU_COMMAND_SIZE];
> +
> +    memset(cmd, 0, IOMMU_COMMAND_SIZE);
> +
> +    if (dma_memory_read(&address_space_memory, s->cmdbuf + s->cmdbuf_head, cmd,
> +       IOMMU_COMMAND_SIZE)) {
> +        IOMMU_DPRINTF(COMMAND, "error: fail to access memory at 0x%"PRIx64
> +                      " + 0x%"PRIu8, s->cmdbuf, s->cmdbuf_head);
> +        amd_iommu_log_command_error(s, s->cmdbuf + s->cmdbuf_head);
> +        return;
> +    }
> +
> +    type = opcode(cmd);
> +
> +    switch (type) {
> +    case IOMMU_CMD_COMPLETION_WAIT:
> +        iommu_completion_wait(s, cmd, type);
> +        break;
> +
> +    case IOMMU_CMD_INVAL_DEVTAB_ENTRY:
> +        iommu_inval_devtab_entry(s, cmd, type);
> +        break;
> +
> +    case IOMMU_CMD_INVAL_IOMMU_PAGES:
> +        iommu_inval_pages(s, cmd, type);
> +        break;
> +
> +    case IOMMU_CMD_INVAL_IOTLB_PAGES:
> +        iommu_inval_iotlb(s, cmd, type);
> +        break;
> +
> +    case IOMMU_CMD_INVAL_INTR_TABLE:
> +        iommu_inval_inttable(s, cmd, type);
> +        break;
> +
> +    case IOMMU_CMD_PREFETCH_IOMMU_PAGES:
> +        iommu_prefetch_pages(s, cmd, type);
> +        break;
> +
> +    case IOMMU_CMD_COMPLETE_PPR_REQUEST:
> +        iommu_complete_ppr(s, cmd, type);
> +        break;
> +
> +    case IOMMU_CMD_INVAL_IOMMU_ALL:
> +        iommu_inval_all(s, cmd, type);
> +        break;
> +
> +    default:
> +        IOMMU_DPRINTF(COMMAND, "unhandled command %d", type);
> +        /* log illegal command */
> +        amd_iommu_log_illegalcom_error(s, type,
> +                                       s->cmdbuf + s->cmdbuf_head);
> +        break;
> +    }
> +
> +}
> +
> +static void amd_iommu_cmdbuf_run(AMDIOMMUState *s)
> +{
> +    IOMMU_DPRINTF(COMMAND, "");
> +
> +    uint64_t *mmio_cmdbuf_head = (uint64_t *)(s->mmior +
> +                                 IOMMU_MMIO_COMMAND_HEAD);
> +
> +    if (!s->cmdbuf_enabled) {
> +        IOMMU_DPRINTF(COMMAND, "error: IOMMU trying to execute commands with "
> +                      "command buffer disabled. IOMMU control value 0x%"PRIx64,
> +                      amd_iommu_readq(s, IOMMU_MMIO_CONTROL));
> +        return;
> +    }
> +
> +    while (s->cmdbuf_head != s->cmdbuf_tail) {
> +        /* check if there is work to do. */
> +        IOMMU_DPRINTF(COMMAND, "command buffer head at 0x%"PRIx32 " command "
> +                      "buffer tail at 0x%"PRIx32" command buffer base at 0x%"
> +                      PRIx64, s->cmdbuf_head, s->cmdbuf_tail, s->cmdbuf);
> +         amd_iommu_cmdbuf_exec(s);
> +         s->cmdbuf_head += IOMMU_COMMAND_SIZE;
> +         amd_iommu_writeq_raw(s, s->cmdbuf_head, IOMMU_MMIO_COMMAND_HEAD);
> +
> +        /* wrap head pointer */
> +        if (s->cmdbuf_head >= s->cmdbuf_len * IOMMU_COMMAND_SIZE) {
> +            s->cmdbuf_head = 0;
> +        }
> +    }
> +
> +    *mmio_cmdbuf_head = cpu_to_le64(s->cmdbuf_head);
> +}
> +
> +/* System Software might never read from some of this fields but anyways */
> +static uint64_t amd_iommu_mmio_read(void *opaque, hwaddr addr, unsigned size)
> +{
> +    AMDIOMMUState *s = opaque;
> +
> +    uint64_t val = -1;

The above might work, but it looks a little weird
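Maybe make the intent explicit, e.g.:

    uint64_t val = UINT64_MAX;    /* returned for unsupported access sizes */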

> +    if (addr + size > IOMMU_MMIO_SIZE) {
> +        IOMMU_DPRINTF(MMIO, "error: addr outside region: max 0x%"PRIX64
> +                      ", got 0x%"PRIx64 " %d", (uint64_t)IOMMU_MMIO_SIZE, addr,
> +                      size);
> +        return (uint64_t)-1;
> +    }
> +
> +    if (size == 2) {
> +        val = amd_iommu_readw(s, addr);
> +    } else if (size == 4) {
> +        val = amd_iommu_readl(s, addr);
> +    } else if (size == 8) {
> +        val = amd_iommu_readq(s, addr);
> +    }
> +
> +    switch (addr & ~0x07) {
> +    case IOMMU_MMIO_DEVICE_TABLE:
> +        IOMMU_DPRINTF(MMIO, "MMIO_DEVICE_TABLE read addr 0x%"PRIx64
> +                      ", size %d offset 0x%"PRIx64, addr, size,
> +                       addr & ~0x07);
> +        break;
> +
> +    case IOMMU_MMIO_COMMAND_BASE:
> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_BASE read addr 0x%"PRIx64
> +                      ", size %d offset 0x%"PRIx64, addr, size,
> +                      addr & ~0x07);
> +        break;
> +
> +    case IOMMU_MMIO_EVENT_BASE:
> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_BASE read addr 0x%"PRIx64
> +                      ", size %d offset 0x%"PRIx64, addr, size,
> +                      addr & ~0x07);
> +        break;
> +
> +    case IOMMU_MMIO_CONTROL:
> +        IOMMU_DPRINTF(MMIO, "MMIO_CONTROL read addr 0x%"PRIx64
> +                      ", size %d offset 0x%"PRIx64, addr, size,
> +                       addr & ~0x07);
> +        break;
> +
> +    case IOMMU_MMIO_EXCL_BASE:
> +        IOMMU_DPRINTF(MMIO, "MMIO_EXCL_BASE read addr 0x%"PRIx64
> +                      ", size %d offset 0x%"PRIx64, addr, size,
> +                      addr & ~0x07);
> +        break;
> +
> +    case IOMMU_MMIO_EXCL_LIMIT:
> +        IOMMU_DPRINTF(MMIO, "MMIO_EXCL_LIMIT read addr 0x%"PRIx64
> +                      ", size %d offset 0x%"PRIx64, addr, size,
> +                      addr & ~0x07);
> +        break;
> +
> +    case IOMMU_MMIO_COMMAND_HEAD:
> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_HEAD read addr 0x%"PRIx64
> +                      ", size %d offset 0x%"PRIx64, addr, size,
> +                      addr & ~0x07);
> +        break;
> +
> +    case IOMMU_MMIO_COMMAND_TAIL:
> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_TAIL read addr 0x%"PRIx64
> +                      ", size %d offset 0x%"PRIx64, addr, size,
> +                      addr & ~0x07);
> +        break;
> +
> +    case IOMMU_MMIO_EVENT_HEAD:
> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_HEAD read addr 0x%"PRIx64
> +                      ", size %d offset 0x%"PRIx64, addr, size,
> +                      addr & ~0x07);
> +        break;
> +
> +    case IOMMU_MMIO_EVENT_TAIL:
> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_TAIL read addr 0x%"PRIx64
> +                      ", size %d offset 0x%"PRIx64, addr, size,
> +                      addr & ~0x07);
> +        break;
> +
> +    case IOMMU_MMIO_STATUS:
> +        IOMMU_DPRINTF(MMIO, "MMIO_STATUS read addr 0x%"PRIx64
> +                      ", size %d offset 0x%"PRIx64, addr, size,
> +                      addr & ~0x07);
> +        break;
> +
> +    case IOMMU_MMIO_EXT_FEATURES:
> +        IOMMU_DPRINTF(MMU, "MMIO_EXT_FEATURES read addr 0x%"PRIx64
> +                      ", size %d offset 0x%"PRIx64 "value 0x%"PRIx64,
> +                      addr, size, addr & ~0x07, val);
> +        break;
> +
> +    default:
> +        IOMMU_DPRINTF(MMIO, "UNHANDLED MMIO read addr 0x%"PRIx64
> +                      ", size %d offset 0x%"PRIx64, addr, size,
> +                       addr & ~0x07);
> +    }
> +    return val;
> +}
> +
> +static void iommu_handle_control_write(AMDIOMMUState *s)
> +{
> +    IOMMU_DPRINTF(COMMAND, "");
> +    /*
> +     * read whatever is already written in case
> +     * software is writing in chucks less than 8 bytes
> +     */
> +    unsigned long control = amd_iommu_readq(s, IOMMU_MMIO_CONTROL);
> +    s->enabled = !!(control & IOMMU_MMIO_CONTROL_IOMMUEN);
> +
> +    s->ats_enabled = !!(control & IOMMU_MMIO_CONTROL_HTTUNEN);
> +    s->evtlog_enabled = s->enabled && !!(control &
> +                        IOMMU_MMIO_CONTROL_EVENTLOGEN);
> +
> +    s->evtlog_intr = !!(control & IOMMU_MMIO_CONTROL_EVENTINTEN);
> +    s->completion_wait_intr = !!(control & IOMMU_MMIO_CONTROL_COMWAITINTEN);
> +    s->cmdbuf_enabled = s->enabled && !!(control &
> +                        IOMMU_MMIO_CONTROL_CMDBUFLEN);
> +
> +    /* update the flags depending on the control register */
> +    if (s->cmdbuf_enabled) {
> +        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) |=
> +            IOMMU_MMIO_STATUS_CMDBUF_RUN;
> +    } else {
> +        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) &=
> +            ~IOMMU_MMIO_STATUS_CMDBUF_RUN;
> +    }
> +    if (s->evtlog_enabled) {
> +        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) |=
> +            IOMMU_MMIO_STATUS_EVT_RUN;
> +    } else {
> +        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) &=
> +            ~IOMMU_MMIO_STATUS_EVT_RUN;
> +    }
> +
> +    IOMMU_DPRINTF(COMMAND, "MMIO_STATUS state 0x%"PRIx64, control);
> +
> +    amd_iommu_cmdbuf_run(s);
> +}
> +
> +static inline void iommu_handle_devtab_write(AMDIOMMUState *s)
> +
> +{
> +    IOMMU_DPRINTF(COMMAND, "");
> +
> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_DEVICE_TABLE);
> +    s->devtab = (dma_addr_t)(val & IOMMU_MMIO_DEVTAB_BASE_MASK);
> +
> +    /* set device table length */
> +    s->devtab_len = ((val & IOMMU_MMIO_DEVTAB_SIZE_MASK) + 1 *
> +                    (IOMMU_MMIO_DEVTAB_SIZE_UNIT /
> +                     IOMMU_MMIO_DEVTAB_ENTRY_SIZE));
> +}
> +
> +static inline void iommu_handle_cmdhead_write(AMDIOMMUState *s)
> +{
> +    IOMMU_DPRINTF(COMMAND, "");
> +
> +    s->cmdbuf_head = (dma_addr_t)amd_iommu_readq(s, IOMMU_MMIO_COMMAND_HEAD)
> +                     & IOMMU_MMIO_CMDBUF_HEAD_MASK;
> +    amd_iommu_cmdbuf_run(s);
> +}
> +
> +static inline void iommu_handle_cmdbase_write(AMDIOMMUState *s)
> +{
> +    IOMMU_DPRINTF(COMMAND, "");
> +
> +    s->cmdbuf = (dma_addr_t)amd_iommu_readq(s, IOMMU_MMIO_COMMAND_BASE)
> +                & IOMMU_MMIO_CMDBUF_BASE_MASK;
> +    s->cmdbuf_len = 1UL << (s->mmior[IOMMU_MMIO_CMDBUF_SIZE_BYTE]
> +                    & IOMMU_MMIO_CMDBUF_SIZE_MASK);
> +    s->cmdbuf_head = s->cmdbuf_tail = 0;
> +
> +}
> +
> +static inline void iommu_handle_cmdtail_write(AMDIOMMUState *s)
> +{
> +    s->cmdbuf_tail = amd_iommu_readq(s, IOMMU_MMIO_COMMAND_TAIL)
> +                     & IOMMU_MMIO_CMDBUF_TAIL_MASK;
> +    amd_iommu_cmdbuf_run(s);
> +}
> +
> +static inline void iommu_handle_excllim_write(AMDIOMMUState *s)
> +{
> +    IOMMU_DPRINTF(COMMAND, "");
> +
> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EXCL_LIMIT);
> +    s->excl_limit = (val & IOMMU_MMIO_EXCL_LIMIT_MASK) |
> +                    IOMMU_MMIO_EXCL_LIMIT_LOW;
> +}
> +
> +static inline void iommu_handle_evtbase_write(AMDIOMMUState *s)
> +{
> +    IOMMU_DPRINTF(COMMAND, "");
> +
> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EVENT_BASE);
> +    s->evtlog = val & IOMMU_MMIO_EVTLOG_BASE_MASK;
> +    s->evtlog_len = 1UL << (*(uint64_t *)&s->mmior[IOMMU_MMIO_EVTLOG_SIZE_BYTE]
> +                    & IOMMU_MMIO_EVTLOG_SIZE_MASK);
> +}
> +
> +static inline void iommu_handle_evttail_write(AMDIOMMUState *s)
> +{
> +    IOMMU_DPRINTF(COMMAND, "");
> +
> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EVENT_TAIL);
> +    s->evtlog_tail = val & IOMMU_MMIO_EVTLOG_TAIL_MASK;
> +}
> +
> +static inline void iommu_handle_evthead_write(AMDIOMMUState *s)
> +{
> +    IOMMU_DPRINTF(COMMAND, "");
> +
> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EVENT_HEAD);
> +    s->evtlog_head = val & IOMMU_MMIO_EVTLOG_HEAD_MASK;
> +}
> +
> +static inline void iommu_handle_pprbase_write(AMDIOMMUState *s)
> +{
> +    IOMMU_DPRINTF(COMMAND, "");
> +
> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_PPR_BASE);
> +    s->ppr_log = val & IOMMU_MMIO_PPRLOG_BASE_MASK;
> +    s->pprlog_len = 1UL << (*(uint64_t *)&s->mmior[IOMMU_MMIO_PPRLOG_SIZE_BYTE]
> +                    & IOMMU_MMIO_PPRLOG_SIZE_MASK);
> +}
> +
> +static inline void iommu_handle_pprhead_write(AMDIOMMUState *s)
> +{
> +    IOMMU_DPRINTF(COMMAND, "");
> +
> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_PPR_HEAD);
> +    s->pprlog_head = val & IOMMU_MMIO_PPRLOG_HEAD_MASK;
> +}
> +
> +static inline void iommu_handle_pprtail_write(AMDIOMMUState *s)
> +{
> +    IOMMU_DPRINTF(COMMAND, "");
> +
> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_PPR_TAIL);
> +    s->pprlog_tail = val & IOMMU_MMIO_PPRLOG_TAIL_MASK;
> +}
> +
> +/* FIXME: something might go wrong if System Software writes in chunks
> + * of one byte but linux writes in chunks of 4 bytes so currently it
> + * works correctly with linux but will definitely be busted if software
> + * reads/writes 8 bytes
> + */
> +static void amd_iommu_mmio_write(void *opaque, hwaddr addr, uint64_t val,
> +                                 unsigned size)
> +{
> +
> +    IOMMU_DPRINTF(COMMAND, "");
> +
> +    AMDIOMMUState *s = opaque;
> +    unsigned long offset = addr & 0x07;
> +
> +    if (addr + size > IOMMU_MMIO_SIZE) {
> +        IOMMU_DPRINTF(MMIO, "error: addr outside region: max 0x%"PRIx64
> +                      ", got 0x%"PRIx64 " %d", (uint64_t)IOMMU_MMIO_SIZE, addr,
> +                      size);
> +        return;
> +    }
> +
> +    switch (addr & ~0x07) {
> +    case IOMMU_MMIO_CONTROL:
> +        if (size == 2) {
> +            amd_iommu_writew(s, addr, val);
> +        } else if (size == 4) {
> +            amd_iommu_writel(s, addr,  val);
> +        } else if (size == 8) {
> +            amd_iommu_writeq(s, addr, val);
> +        }
> +
> +        IOMMU_DPRINTF(COMMAND, "MMIO_CONTROL write addr 0x%"PRIx64
> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
> +                      addr, size, val, offset);
> +        iommu_handle_control_write(s);
> +        break;
> +
> +    case IOMMU_MMIO_DEVICE_TABLE:
> +        IOMMU_DPRINTF(MMIO, "MMIO_DEVICE_TABLE write addr 0x%"PRIx64
> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
> +                      addr, size, val, offset);
> +        if (size == 2) {
> +            amd_iommu_writew(s, addr, val);
> +        } else if (size == 4) {
> +            amd_iommu_writel(s, addr, val);
> +        } else if (size == 8) {
> +            amd_iommu_writeq(s, addr, val);
> +        }
> +
> +       /*  set device table address
> +        *   This also suffers from inability to tell whether software
> +        *   is done writing
> +        */
> +
> +        if (offset || (size == 8)) {
> +            iommu_handle_devtab_write(s);
> +        }
> +        break;
> +
> +    case IOMMU_MMIO_COMMAND_HEAD:
> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_HEAD write addr 0x%"PRIx64
> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
> +                      addr, size, val, offset);
> +        if (size == 2) {
> +            amd_iommu_writew(s, addr, val);
> +        } else if (size == 4) {
> +            amd_iommu_writel(s, addr, val);
> +        } else if (size == 8) {
> +            amd_iommu_writeq(s, addr, val);
> +        }
> +
> +        iommu_handle_cmdhead_write(s);
> +        break;
> +
> +    case IOMMU_MMIO_COMMAND_BASE:
> +        IOMMU_DPRINTF(COMMAND, "MMIO_COMMAND_BASE write addr 0x%"PRIx64
> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
> +                      addr, size, val, offset);
> +        if (size == 2) {
> +            amd_iommu_writew(s, addr, val);
> +        } else if (size == 4) {
> +            amd_iommu_writel(s, addr, val);
> +        } else if (size == 8) {
> +            amd_iommu_writeq(s, addr, val);
> +        }
> +
> +        /* FIXME - make sure System Software has finished writing incase
> +         * it writes in chucks less than 8 bytes in a robust way.As for
> +         * now, this hacks works for the linux driver
> +         */
> +        if (offset || (size == 8)) {
> +            iommu_handle_cmdbase_write(s);
> +        }
> +        break;
> +
> +    case IOMMU_MMIO_COMMAND_TAIL:
> +        IOMMU_DPRINTF(COMMAND, "MMIO_COMMAND_TAIL write addr 0x%"PRIx64
> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
> +                      addr, size, val, offset);
> +        if (size == 2) {
> +            amd_iommu_writew(s, addr, val);
> +        } else if (size == 4) {
> +            amd_iommu_writel(s, addr, val);
> +        } else if (size == 8) {
> +            amd_iommu_writeq(s, addr, val);
> +        }
> +        iommu_handle_cmdtail_write(s);
> +        break;
> +
> +    case IOMMU_MMIO_EVENT_BASE:
> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_BASE write addr 0x%"PRIx64
> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
> +                      addr, size, val, offset);
> +        if (size == 2) {
> +            amd_iommu_writew(s, addr, val);
> +        } else if (size == 4) {
> +            amd_iommu_writel(s, addr, val);
> +        } else if (size == 8) {
> +            amd_iommu_writeq(s, addr, val);
> +        }
> +        iommu_handle_evtbase_write(s);
> +        break;
> +
> +    case IOMMU_MMIO_EVENT_HEAD:
> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_HEAD write addr 0x%"PRIx64
> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
> +                      addr, size, val, offset);
> +        if (size == 2) {
> +            amd_iommu_writew(s, addr, val);
> +        } else if (size == 4) {
> +            amd_iommu_writel(s, addr, val);
> +        } else if (size == 8) {
> +            amd_iommu_writeq(s, addr, val);
> +        }
> +        iommu_handle_evthead_write(s);
> +        break;
> +
> +    case IOMMU_MMIO_EVENT_TAIL:
> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_TAIL write addr 0x%"PRIx64
> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
> +                      addr, size, val, offset);
> +        if (size == 2) {
> +            amd_iommu_writew(s, addr, val);
> +        } else if (size == 4) {
> +            amd_iommu_writel(s, addr, val);
> +        } else if (size == 8) {
> +            amd_iommu_writeq(s, addr, val);
> +        }
> +        iommu_handle_evttail_write(s);
> +        break;
> +
> +    case IOMMU_MMIO_EXCL_LIMIT:
> +        IOMMU_DPRINTF(MMIO, "MMIO_EXCL_LIMIT write addr 0x%"PRIx64
> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
> +                      addr, size, val, offset);
> +        if (size == 2) {
> +            amd_iommu_writew(s, addr, val);
> +        } else if (size == 4) {
> +            amd_iommu_writel(s, addr, val);
> +        } else if (size == 8) {
> +            amd_iommu_writeq(s, addr, val);
> +        }
> +        iommu_handle_excllim_write(s);
> +        break;
> +
> +        /* PPR log base - unused for now */
> +    case IOMMU_MMIO_PPR_BASE:
> +        IOMMU_DPRINTF(MMIO, "MMIO_PPR_BASE write addr 0x%"PRIx64
> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
> +                      addr, size, val, offset);
> +        if (size == 2) {
> +            amd_iommu_writew(s, addr, val);
> +        } else if (size == 4) {
> +            amd_iommu_writel(s, addr, val);
> +        } else if (size == 8) {
> +            amd_iommu_writeq(s, addr, val);
> +        }
> +        iommu_handle_pprbase_write(s);
> +        break;
> +        /* PPR log head - also unused for now */
> +    case IOMMU_MMIO_PPR_HEAD:
> +        IOMMU_DPRINTF(MMIO, "MMIO_PPR_HEAD write addr 0x%"PRIx64
> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
> +                       addr, size, val, offset);
> +        if (size == 2) {
> +            amd_iommu_writew(s, addr, val);
> +        } else if (size == 4) {
> +            amd_iommu_writel(s, addr, val);
> +        } else if (size == 8) {
> +            amd_iommu_writeq(s, addr, val);
> +        }
> +        iommu_handle_pprhead_write(s);
> +        break;
> +        /* PPR log tail - unused for now */
> +    case IOMMU_MMIO_PPR_TAIL:
> +        IOMMU_DPRINTF(MMIO, "MMIO_PPR_TAIL write addr 0x%"PRIx64
> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
> +                      addr, size, val, offset);
> +        if (size == 2) {
> +            amd_iommu_writew(s, addr, val);
> +        } else if (size == 4) {
> +            amd_iommu_writel(s, addr, val);
> +        } else if (size == 8) {
> +            amd_iommu_writeq(s, addr, val);
> +        }
> +        iommu_handle_pprtail_write(s);
> +        break;
> +
> +        /* ignore write to ext_features */
> +    default:
> +        IOMMU_DPRINTF(MMIO, "UNHANDLED MMIO write addr 0x%"PRIx64
> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
> +                      addr, size, val, offset);
> +    }
> +
> +}
> +
> +static inline uint64_t amd_iommu_get_perms(uint64_t entry)
> +{
> +    return (entry & (IOMMU_DEV_PERM_READ | IOMMU_DEV_PERM_WRITE)) >>
> +           IOMMU_DEV_PERM_SHIFT;
> +}
> +
> +AddressSpace *bridge_host_amd_iommu(PCIBus *bus, void *opaque, int devfn)
> +{
> +    AMDIOMMUState *s = opaque;
> +    AMDIOMMUAddressSpace **iommu_as;
> +    int bus_num = pci_bus_num(bus);
> +
> +    /* just in case */

This comment troubles me; do we need the assert?

> +    assert(0 <= bus_num && bus_num <= PCI_BUS_MAX);

bus_num < PCI_BUS_MAX, right?

> +    assert(0 <= devfn && devfn <= PCI_DEVFN_MAX);

same with devfn I suppose.
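i.e. presumably:

    assert(bus_num < PCI_BUS_MAX);
    assert(devfn < PCI_DEVFN_MAX);

(assuming both constants are exclusive upper bounds).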

> +
> +    iommu_as = s->address_spaces[bus_num];
> +
> +    /* allocate memory during the first run */
> +    if (!iommu_as) {

Why lazy init? We can do that at AMDIOMMUState init, right?

> +        iommu_as = g_malloc0(sizeof(AMDIOMMUAddressSpace *) * PCI_DEVFN_MAX);
> +        s->address_spaces[bus_num] = iommu_as;
> +    }
> +
> +    /* set up IOMMU region */
> +    if (!iommu_as[devfn]) {
> +        iommu_as[devfn] = g_malloc0(sizeof(AMDIOMMUAddressSpace));

same here

> +        iommu_as[devfn]->bus_num = (uint8_t)bus_num;
> +        iommu_as[devfn]->devfn = (uint8_t)devfn;
> +        iommu_as[devfn]->iommu_state = s;
> +
> +        memory_region_init_iommu(&iommu_as[devfn]->iommu, OBJECT(s),
> +                                 &s->iommu_ops, "amd-iommu", UINT64_MAX);
> +        address_space_init(&iommu_as[devfn]->as, &iommu_as[devfn]->iommu,
> +                           "amd-iommu");
> +    }
> +    return &iommu_as[devfn]->as;
> +}
> +
> +/* validate a page table entry */
> +static bool amd_iommu_validate_dte(AMDIOMMUState *s, uint16_t devid,
> +                                   uint64_t *dte)
> +{
> +    if ((dte[0] & IOMMU_DTE_LOWER_QUAD_RESERVED)
> +        || (dte[1] & IOMMU_DTE_MIDDLE_QUAD_RESERVED)
> +        || (dte[2] & IOMMU_DTE_UPPER_QUAD_RESERVED) || dte[3]) {
> +        amd_iommu_log_illegaldevtab_error(s, devid,
> +                                s->devtab + devid * IOMMU_DEVTAB_ENTRY_SIZE, 0);
> +        return false;
> +    }
> +
> +    return dte[0] & IOMMU_DEV_VALID && (dte[0] & IOMMU_DEV_TRANSLATION_VALID)
> +           && (dte[0] & IOMMU_DEV_PT_ROOT_MASK);
> +}
> +
> +/* get a device table entry given the devid */
> +static bool amd_iommu_get_dte(AMDIOMMUState *s, int devid, uint64_t *entry)
> +{
> +    uint32_t offset = devid * IOMMU_DEVTAB_ENTRY_SIZE;
> +
> +    IOMMU_DPRINTF(MMU, "Device Table at 0x%"PRIx64, s->devtab);
> +
> +    if (dma_memory_read(&address_space_memory, s->devtab + offset, entry,
> +                        IOMMU_DEVTAB_ENTRY_SIZE)) {
> +        IOMMU_DPRINTF(MMU, "error: fail to access Device Entry devtab 0x%"PRIx64
> +                      "offset 0x%"PRIx32, s->devtab, offset);
> +        /* log ever accessing dte */
> +        amd_iommu_log_devtab_error(s, devid, s->devtab + offset, 0);
> +        return false;
> +    }
> +
> +    if (!amd_iommu_validate_dte(s, devid, entry)) {
> +        IOMMU_DPRINTF(MMU,
> +                      "Pte entry at 0x%"PRIx64" is invalid", entry[0]);
> +        return false;
> +    }
> +
> +    return true;
> +}
> +
> +/* get pte translation mode */
> +static inline uint8_t get_pte_translation_mode(uint64_t pte)
> +{
> +    return (pte >> IOMMU_DEV_MODE_RSHIFT) & IOMMU_DEV_MODE_MASK;
> +}
> +
> +static int amd_iommu_page_walk(AMDIOMMUAddressSpace *as, uint64_t *dte,
> +                               IOMMUTLBEntry *ret, unsigned perms,
> +                               hwaddr addr)
> +{
> +    uint8_t level, oldlevel;
> +    unsigned present;
> +    uint64_t pte, pte_addr;
> +    uint64_t pte_perms;
> +    pte = dte[0];
> +
> +    level = get_pte_translation_mode(pte);
> +
> +    if (level >= 7 || level == 0) {
> +        IOMMU_DPRINTF(MMU, "error: translation level 0x%"PRIu8 " detected"
> +                      "while translating 0x%"PRIx64, level, addr);
> +        return -1;
> +    }
> +
> +    while (level > 0) {
> +        pte_perms = amd_iommu_get_perms(pte);
> +        present = pte & 1;
> +        if (!present || perms != (perms & pte_perms)) {
> +            amd_iommu_page_fault(as->iommu_state, as->devfn, addr, perms);
> +            IOMMU_DPRINTF(MMU, "error: page fault accessing virtual addr 0x%"
> +                          PRIx64, addr);
> +            return -1;
> +        }
> +
> +        /* go to the next lower level */
> +        pte_addr = pte & IOMMU_DEV_PT_ROOT_MASK;
> +        /* add offset and load pte */
> +        pte_addr += ((addr >> (3 + 9 * level)) & 0x1FF) << 3;
> +        pte = ldq_phys(&address_space_memory, pte_addr);
> +        oldlevel = level;
> +        level = get_pte_translation_mode(pte);
> +
> +        /* PT is corrupted or not there */
> +        if (level != oldlevel - 1) {
> +            return -1;
> +        }
> +    }
> +
> +    ret->iova = addr & IOMMU_PAGE_MASK_4K;
> +    ret->translated_addr = (pte & IOMMU_DEV_PT_ROOT_MASK) & IOMMU_PAGE_MASK_4K;
> +    ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
> +    ret->perm = IOMMU_RW;
> +    return 0;
> +}
> +
> +/* TODO : Mark addresses as Accessed and Dirty */

If you don't mark addresses as dirty, can't this cause the sporadic errors
in arbitrary programs that Jan talked about?

> +static void amd_iommu_do_translate(AMDIOMMUAddressSpace *as, hwaddr addr,
> +                                   bool is_write, IOMMUTLBEntry *ret)
> +{
> +    AMDIOMMUState *s = as->iommu_state;
> +    uint16_t devid = PCI_DEVID(as->bus_num, as->devfn);
> +    IOMMUIOTLBEntry *iotlb_entry;
> +    uint8_t err;
> +    uint64_t entry[4];
> +
> +    /* try getting a cache entry first */
> +    iotlb_entry = amd_iommu_iotlb_lookup(s, addr, as->devfn);
> +
> +    if (iotlb_entry) {
> +        IOMMU_DPRINTF(CACHE, "hit  iotlb devid: %02x:%02x.%x gpa 0x%"PRIx64
> +                      " hpa 0x%"PRIx64, PCI_BUS_NUM(devid), PCI_SLOT(devid),
> +                      PCI_FUNC(devid), addr, iotlb_entry->translated_addr);
> +        ret->iova = addr & IOMMU_PAGE_MASK_4K;
> +        ret->translated_addr = iotlb_entry->translated_addr;
> +        ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
> +        ret->perm = iotlb_entry->perms;
> +        return;
> +    } else {

you return from the if clause so you don't need the else
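i.e. the rest of the function can simply be dedented:

    if (iotlb_entry) {
        /* ... fill in ret from the cached entry ... */
        return;
    }

    if (!amd_iommu_get_dte(s, devid, entry)) {
        goto out;
    }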

> +        if (!amd_iommu_get_dte(s, devid, entry)) {

Isn't it an error if you did not find the device id?

> +            goto out;
> +        }
> +
> +        err = amd_iommu_page_walk(as, entry, ret,
> +                                  is_write ? IOMMU_PERM_WRITE : IOMMU_PERM_READ,
> +                                  addr);
> +        if (err) {
> +            IOMMU_DPRINTF(MMU, "error: hardware error accessing page tables"
> +                          " while translating addr 0x%"PRIx64, addr);
> +            amd_iommu_log_pagetab_error(s, as->devfn, addr, 0);
> +            goto out;
> +        }
> +
> +        amd_iommu_update_iotlb(s, as->devfn, addr, ret->translated_addr,
> +                               ret->perm, entry[1] & IOMMU_DEV_DOMID_ID_MASK);
> +        return;
> +    }
> +
> +out:
> +    ret->iova = addr;
> +    ret->translated_addr = addr & IOMMU_PAGE_MASK_4K;
> +    ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
> +    ret->perm = IOMMU_RW;
> +    return;

you don't need the above return

> +}
> +
> +static IOMMUTLBEntry amd_iommu_translate(MemoryRegion *iommu, hwaddr addr,
> +                                         bool is_write)
> +{
> +    IOMMU_DPRINTF(GENERAL, "");
> +
> +    AMDIOMMUAddressSpace *as = container_of(iommu, AMDIOMMUAddressSpace, iommu);
> +    AMDIOMMUState *s = as->iommu_state;
> +
> +    IOMMUTLBEntry ret = {
> +        .target_as = &address_space_memory,
> +        .iova = addr,
> +        .translated_addr = 0,
> +        .addr_mask = ~(hwaddr)0,
> +        .perm = IOMMU_NONE,
> +    };
> +
> +    if (!s->enabled) {
> +        /* IOMMU disabled - corresponds to iommu=off not
> +         * failure to provide any parameter
> +         */
> +        ret.iova = addr & IOMMU_PAGE_MASK_4K;
> +        ret.translated_addr = addr & IOMMU_PAGE_MASK_4K;
> +        ret.addr_mask = ~IOMMU_PAGE_MASK_4K;
> +        ret.perm = IOMMU_RW;
> +        return ret;
> +    }
> +
> +    amd_iommu_do_translate(as, addr, is_write, &ret);
> +    IOMMU_DPRINTF(MMU, "devid: %02x:%02x.%x gpa 0x%"PRIx64 " hpa 0x%"PRIx64,
> +                  as->bus_num, PCI_SLOT(as->devfn), PCI_FUNC(as->devfn), addr,
> +                  ret.translated_addr);
> +
> +    return ret;
> +}
> +
> +static const MemoryRegionOps mmio_mem_ops = {
> +    .read = amd_iommu_mmio_read,
> +    .write = amd_iommu_mmio_write,
> +    .endianness = DEVICE_LITTLE_ENDIAN,
> +    .impl = {
> +        .min_access_size = 1,
> +        .max_access_size = 8,
> +        .unaligned = false,
> +    },
> +    .valid = {
> +        .min_access_size = 1,
> +        .max_access_size = 8,
> +    }
> +};
> +
> +static void amd_iommu_init(AMDIOMMUState *s)
> +{
> +    printf("amd_iommu_init");

you should use the debug macro here
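i.e.:

    IOMMU_DPRINTF(GENERAL, "");

(the macro already prints the function name), or just drop the printf
altogether.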

> +
> +    amd_iommu_iotlb_reset(s);
> +
> +    s->iommu_ops.translate = amd_iommu_translate;
> +
> +    s->devtab_len = 0;
> +    s->cmdbuf_len = 0;
> +    s->cmdbuf_head = 0;
> +    s->cmdbuf_tail = 0;
> +    s->evtlog_head = 0;
> +    s->evtlog_tail = 0;
> +    s->excl_enabled = false;
> +    s->excl_allow = false;
> +    s->mmio_enabled = false;
> +    s->enabled = false;
> +    s->ats_enabled = false;
> +    s->cmdbuf_enabled = false;
> +
> +    /* reset MMIO */
> +    memset(s->mmior, 0, IOMMU_MMIO_SIZE);
> +    amd_iommu_set_quad(s, IOMMU_MMIO_EXT_FEATURES, IOMMU_EXT_FEATURES,
> +            0xffffffffffffffef, 0);
> +    amd_iommu_set_quad(s, IOMMU_MMIO_STATUS, 0, 0x98, 0x67);
> +    /* reset device ident */
> +    pci_config_set_vendor_id(s->dev.config, PCI_VENDOR_ID_AMD);
> +    pci_config_set_device_id(s->dev.config, PCI_DEVICE_ID_RD890_IOMMU);
> +    pci_config_set_prog_interface(s->dev.config, 00);
> +    pci_config_set_class(s->dev.config, 0x0806);
> +
> +    /* reset IOMMU specific capabilities  */
> +    pci_set_long(s->dev.config + s->capab_offset, IOMMU_CAPAB_FEATURES);
> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_BAR_LOW,
> +                 s->mmio.addr & ~(0xffff0000));
> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_BAR_HIGH,
> +                (s->mmio.addr & ~(0xffff)) >> 16);
> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_RANGE,
> +                 0xff000000);
> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_MISC, 0);
> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_MISC,
> +            IOMMU_MAX_PH_ADDR | IOMMU_MAX_GVA_ADDR | IOMMU_MAX_VA_ADDR);

Are all the capabilities read-write? Otherwise you need to set the wmask
to indicate which fields are writable.
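Something like (assuming the capability is meant to be read-only to the guest):

    memset(s->dev.wmask + s->capab_offset, 0, IOMMU_CAPAB_SIZE);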

> +}
> +
> +static void amd_iommu_reset(DeviceState *dev)
> +{
> +    AMDIOMMUState *s = AMD_IOMMU_DEVICE(dev);
> +
> +    amd_iommu_init(s);
> +}
> +
> +static void amd_iommu_realize(PCIDevice *dev, Error **error)
> +{
> +    AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);
> +
> +    s->iotlb = g_hash_table_new_full(amd_iommu_uint64_hash,
> +                                     amd_iommu_uint64_equal, g_free, g_free);
> +
> +    s->capab_offset = pci_add_capability(dev, IOMMU_CAPAB_ID_SEC, 0,
> +                                         IOMMU_CAPAB_SIZE);
> +
> +    /* add msi and hypertransport capabilities */
> +    pci_add_capability(&s->dev, PCI_CAP_ID_MSI, 0, IOMMU_CAPAB_REG_SIZE);
> +    pci_add_capability(&s->dev, PCI_CAP_ID_HT, 0, IOMMU_CAPAB_REG_SIZE);
> +
> +    amd_iommu_init(s);
> +
> +    /* set up MMIO */
> +    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "mmio",
> +                          IOMMU_MMIO_SIZE);
> +
> +    if (s->mmio.addr == IOMMU_BASE_ADDR) {

I don't understand why this is needed here. realize is called only once in the init
process, and you set it a few lines below.

> +        return;
> +    }
> +
> +    s->mmio.addr = IOMMU_BASE_ADDR;
> +    memory_region_add_subregion(get_system_memory(), IOMMU_BASE_ADDR, &s->mmio);
> +}
> +
> +static const VMStateDescription vmstate_amd_iommu = {
> +    .name = "amd-iommu",
> +    .fields  = (VMStateField[]) {
> +        VMSTATE_PCI_DEVICE(dev, AMDIOMMUState),
> +        VMSTATE_END_OF_LIST()
> +    }
> +};
> +
> +static Property amd_iommu_properties[] = {
> +    DEFINE_PROP_UINT32("version", AMDIOMMUState, version, 2),
> +    DEFINE_PROP_END_OF_LIST(),
> +};
> +
> +static void amd_iommu_uninit(PCIDevice *dev)
> +{
> +    AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);
> +    amd_iommu_iotlb_reset(s);

At this point you also need to clean up the memory regions you use.
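E.g. something along the lines of (untested):

    memory_region_del_subregion(get_system_memory(), &s->mmio);
    g_hash_table_destroy(s->iotlb);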

> +}
> +
> +static void amd_iommu_class_init(ObjectClass *klass, void* data)
> +{
> +    DeviceClass *dc = DEVICE_CLASS(klass);
> +    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
> +
> +    k->realize = amd_iommu_realize;
> +    k->exit = amd_iommu_uninit;
> +
> +    dc->reset = amd_iommu_reset;
> +    dc->vmsd = &vmstate_amd_iommu;
> +    dc->props = amd_iommu_properties;
> +}
> +
> +static const TypeInfo amd_iommu = {
> +    .name = TYPE_AMD_IOMMU_DEVICE,
> +    .parent = TYPE_PCI_DEVICE,
> +    .instance_size = sizeof(AMDIOMMUState),
> +    .class_init = amd_iommu_class_init
> +};
> +
> +static void amd_iommu_register_types(void)
> +{
> +    type_register_static(&amd_iommu);
> +}
> +
> +type_init(amd_iommu_register_types);
> diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h
> new file mode 100644
> index 0000000..7d317e1
> --- /dev/null
> +++ b/hw/i386/amd_iommu.h
> @@ -0,0 +1,395 @@
> +/*
> + * QEMU emulation of an AMD IOMMU (AMD-Vi)
> + *
> + * Copyright (C) 2011 Eduard - Gabriel Munteanu
> + * Copyright (C) 2015 David Kiarie, <davidkiarie4@gmail.com>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License as published by
> + * the Free Software Foundation; either version 2 of the License, or
> + * (at your option) any later version.
> +
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> + * GNU General Public License for more details.
> +
> + * You should have received a copy of the GNU General Public License along
> + * with this program; if not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#ifndef AMD_IOMMU_H_
> +#define AMD_IOMMU_H_
> +
> +#include "hw/hw.h"
> +#include "hw/pci/pci.h"
> +#include "hw/pci/msi.h"
> +#include "hw/sysbus.h"
> +#include "sysemu/dma.h"
> +
> +/* Capability registers */
> +#define IOMMU_CAPAB_HEADER            0x00
> +#define   IOMMU_CAPAB_REV_TYPE        0x02
> +#define   IOMMU_CAPAB_FLAGS           0x03
> +#define IOMMU_CAPAB_BAR_LOW           0x04
> +#define IOMMU_CAPAB_BAR_HIGH          0x08
> +#define IOMMU_CAPAB_RANGE             0x0C
> +#define IOMMU_CAPAB_MISC              0x10
> +#define IOMMU_CAPAB_MISC1             0x14
> +
> +#define IOMMU_CAPAB_SIZE              0x18
> +#define IOMMU_CAPAB_REG_SIZE          0x04
> +
> +/* Capability header data */
> +#define IOMMU_CAPAB_ID_SEC            0xf
> +#define IOMMU_CAPAB_FLAT_EXT          (1 << 28)
> +#define IOMMU_CAPAB_EFR_SUP           (1 << 27)
> +#define IOMMU_CAPAB_FLAG_NPCACHE      (1 << 26)
> +#define IOMMU_CAPAB_FLAG_HTTUNNEL     (1 << 25)
> +#define IOMMU_CAPAB_FLAG_IOTLBSUP     (1 << 24)
> +#define IOMMU_CAPAB_INIT_REV          (1 << 19)
> +#define IOMMU_CAPAB_INIT_TYPE         (3 << 16)
> +#define IOMMU_CAPAB_INIT_REV_TYPE     (IOMMU_CAPAB_REV | IOMMU_CAPAB_TYPE)
> +#define IOMMU_CAPAB_INIT_FLAGS        (IOMMU_CAPAB_FLAG_NPCACHE | \
> +                                       IOMMU_CAPAB_FLAG_HTTUNNEL)
> +#define IOMMU_CAPAB_INIT_MISC         ((64 << 15) | (48 << 8))
> +#define IOMMU_CAPAB_BAR_MASK          (~((1UL << 14) - 1))
> +
> +/* MMIO registers */
> +#define IOMMU_MMIO_DEVICE_TABLE       0x0000
> +#define IOMMU_MMIO_COMMAND_BASE       0x0008
> +#define IOMMU_MMIO_EVENT_BASE         0x0010
> +#define IOMMU_MMIO_CONTROL            0x0018
> +#define IOMMU_MMIO_EXCL_BASE          0x0020
> +#define IOMMU_MMIO_EXCL_LIMIT         0x0028
> +#define IOMMU_MMIO_EXT_FEATURES       0x0030
> +#define IOMMU_MMIO_COMMAND_HEAD       0x2000
> +#define IOMMU_MMIO_COMMAND_TAIL       0x2008
> +#define IOMMU_MMIO_EVENT_HEAD         0x2010
> +#define IOMMU_MMIO_EVENT_TAIL         0x2018
> +#define IOMMU_MMIO_STATUS             0x2020
> +#define IOMMU_MMIO_PPR_BASE           0x0038
> +#define IOMMU_MMIO_PPR_HEAD           0x2030
> +#define IOMMU_MMIO_PPR_TAIL           0x2038
> +
> +#define IOMMU_MMIO_SIZE               0x4000
> +
> +#define IOMMU_MMIO_DEVTAB_SIZE_MASK   ((1ULL << 12) - 1)
> +#define IOMMU_MMIO_DEVTAB_BASE_MASK   (((1ULL << 52) - 1) & ~ \
> +                                       IOMMU_MMIO_DEVTAB_SIZE_MASK)
> +#define IOMMU_MMIO_DEVTAB_ENTRY_SIZE  32
> +#define IOMMU_MMIO_DEVTAB_SIZE_UNIT   4096
> +
> +/* some of these are similar but kept separate just for readability */
> +#define IOMMU_MMIO_CMDBUF_SIZE_BYTE       (IOMMU_MMIO_COMMAND_BASE + 7)
> +#define IOMMU_MMIO_CMDBUF_SIZE_MASK       0x0F
> +#define IOMMU_MMIO_CMDBUF_BASE_MASK       IOMMU_MMIO_DEVTAB_BASE_MASK
> +#define IOMMU_MMIO_CMDBUF_DEFAULT_SIZE    8
> +#define IOMMU_MMIO_CMDBUF_HEAD_MASK       (((1ULL << 19) - 1) & ~0x0F)
> +#define IOMMU_MMIO_CMDBUF_TAIL_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
> +
> +#define IOMMU_MMIO_EVTLOG_SIZE_BYTE       (IOMMU_MMIO_EVENT_BASE + 7)
> +#define IOMMU_MMIO_EVTLOG_SIZE_MASK       IOMMU_MMIO_CMDBUF_SIZE_MASK
> +#define IOMMU_MMIO_EVTLOG_BASE_MASK       IOMMU_MMIO_CMDBUF_BASE_MASK
> +#define IOMMU_MMIO_EVTLOG_DEFAULT_SIZE    IOMMU_MMIO_CMDBUF_DEFAULT_SIZE
> +#define IOMMU_MMIO_EVTLOG_HEAD_MASK       (((1ULL << 19) - 1) & ~0x0F)
> +#define IOMMU_MMIO_EVTLOG_TAIL_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
> +
> +#define IOMMU_MMIO_PPRLOG_SIZE_BYTE       (IOMMU_MMIO_EVENT_BASE + 7)
> +#define IOMMU_MMIO_PPRLOG_HEAD_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
> +#define IOMMU_MMIO_PPRLOG_TAIL_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
> +#define IOMMU_MMIO_PPRLOG_BASE_MASK       IOMMU_MMIO_EVTLOG_BASE_MASK
> +#define IOMMU_MMIO_PPRLOG_SIZE_MASK       IOMMU_MMIO_EVTLOG_SIZE_MASK
> +
> +#define IOMMU_MMIO_EXCL_BASE_MASK         IOMMU_MMIO_DEVTAB_BASE_MASK
> +#define IOMMU_MMIO_EXCL_ENABLED_MASK      (1ULL << 0)
> +#define IOMMU_MMIO_EXCL_ALLOW_MASK        (1ULL << 1)
> +#define IOMMU_MMIO_EXCL_LIMIT_MASK        IOMMU_MMIO_DEVTAB_BASE_MASK
> +#define IOMMU_MMIO_EXCL_LIMIT_LOW         0xFFF
> +
> +/* mmio control register flags */
> +#define IOMMU_MMIO_CONTROL_IOMMUEN        (1ULL << 0)
> +#define IOMMU_MMIO_CONTROL_HTTUNEN        (1ULL << 1)
> +#define IOMMU_MMIO_CONTROL_EVENTLOGEN     (1ULL << 2)
> +#define IOMMU_MMIO_CONTROL_EVENTINTEN     (1ULL << 3)
> +#define IOMMU_MMIO_CONTROL_COMWAITINTEN   (1ULL << 4)
> +#define IOMMU_MMIO_CONTROL_PASSPW         (1ULL << 7)
> +#define IOMMU_MMIO_CONTROL_REPASSPW       (1ULL << 9)
> +#define IOMMU_MMIO_CONTROL_COHERENT       (1ULL << 10)
> +#define IOMMU_MMIO_CONTROL_ISOC           (1ULL << 11)
> +#define IOMMU_MMIO_CONTROL_CMDBUFLEN      (1ULL << 12)
> +#define IOMMU_MMIO_CONTROL_PPRLOGEN       (1ULL << 13)
> +#define IOMMU_MMIO_CONTROL_PPRINTEN       (1ULL << 14)
> +#define IOMMU_MMIO_CONTROL_PPREN          (1ULL << 15)
> +#define IOMMU_MMIO_CONTROL_GAEN           (1ULL << 16)
> +#define IOMMU_MMIO_CONTROL_GTEN           (1ULL << 17)
> +
> +/* MMIO status register bits */
> +#define IOMMU_MMIO_STATUS_PPR_OVFE    (1 << 18)
> +#define IOMMU_MMIO_STATUS_PPR_OVFEB   (1 << 17)
> +#define IOMMU_MMIO_STATUS_EVT_ACTIVE  (1 << 16)
> +#define IOMMU_MMIO_STATUS_EVT_OVFB    (1 << 15)
> +#define IOMMU_MMIO_STATUS_PPR_ACTIVE  (1 << 12)
> +#define IOMMU_MMIO_STATUS_PPR_OVFB    (1 << 11)
> +#define IOMMU_MMIO_STATUS_GA_INT      (1 << 10)
> +#define IOMMU_MMIO_STATUS_GA_RUN      (1 << 9)
> +#define IOMMU_MMIO_STATUS_GA_OVF      (1 << 8)
> +#define IOMMU_MMIO_STATUS_PPR_RUN     (1 << 7)
> +#define IOMMU_MMIO_STATUS_PPR_INT     (1 << 6)
> +#define IOMMU_MMIO_STATUS_PPR_OVF     (1 << 5)
> +#define IOMMU_MMIO_STATUS_CMDBUF_RUN  (1 << 4)
> +#define IOMMU_MMIO_STATUS_EVT_RUN     (1 << 3)
> +#define IOMMU_MMIO_STATUS_COMP_INT    (1 << 2)
> +#define IOMMU_MMIO_STATUS_EVT_INT     (1 << 1)
> +#define IOMMU_MMIO_STATUS_EVT_OVF     (1 << 0)
> +
> +#define IOMMU_CMDBUF_ID_BYTE              0x07
> +#define IOMMU_CMDBUF_ID_RSHIFT            4
> +
> +#define IOMMU_CMD_COMPLETION_WAIT         0x01
> +#define IOMMU_CMD_INVAL_DEVTAB_ENTRY      0x02
> +#define IOMMU_CMD_INVAL_IOMMU_PAGES       0x03
> +#define IOMMU_CMD_INVAL_IOTLB_PAGES       0x04
> +#define IOMMU_CMD_INVAL_INTR_TABLE        0x05
> +#define IOMMU_CMD_PREFETCH_IOMMU_PAGES    0x06
> +#define IOMMU_CMD_COMPLETE_PPR_REQUEST    0x07
> +#define IOMMU_CMD_INVAL_IOMMU_ALL         0x08
> +
> +#define IOMMU_DEVTAB_ENTRY_SIZE           32
> +
> +/* Device table entry bits 0:63 */
> +#define IOMMU_DEV_VALID                   (1ULL << 0)
> +#define IOMMU_DEV_TRANSLATION_VALID       (1ULL << 1)
> +#define IOMMU_DEV_MODE_MASK               0x7
> +#define IOMMU_DEV_MODE_RSHIFT             9
> +#define IOMMU_DEV_PT_ROOT_MASK            0xFFFFFFFFFF000
> +#define IOMMU_DEV_PT_ROOT_RSHIFT          12
> +#define IOMMU_DEV_PERM_SHIFT              61
> +#define IOMMU_DEV_PERM_READ               (1ULL << 61)
> +#define IOMMU_DEV_PERM_WRITE              (1ULL << 62)
> +
> +/* Device table entry bits 64:127 */
> +#define IOMMU_DEV_DOMID_ID_MASK          ((1ULL << 16) - 1)
> +#define IOMMU_DEV_IOTLB_SUPPORT           (1ULL << 17)
> +#define IOMMU_DEV_SUPPRESS_PF             (1ULL << 18)
> +#define IOMMU_DEV_SUPPRESS_ALL_PF         (1ULL << 19)
> +#define IOMMU_DEV_IOCTL_MASK              (~3)
> +#define IOMMU_DEV_IOCTL_RSHIFT            20
> +#define   IOMMU_DEV_IOCTL_DENY            0
> +#define   IOMMU_DEV_IOCTL_PASSTHROUGH     1
> +#define   IOMMU_DEV_IOCTL_TRANSLATE       2
> +#define IOMMU_DEV_CACHE                   (1ULL << 37)
> +#define IOMMU_DEV_SNOOP_DISABLE           (1ULL << 38)
> +#define IOMMU_DEV_EXCL                    (1ULL << 39)
> +
> +/* Event codes and flags, as stored in the info field */
> +#define IOMMU_EVENT_ILLEGAL_DEVTAB_ENTRY  (0x1U << 12)
> +#define IOMMU_EVENT_IOPF                  (0x2U << 12)
> +#define   IOMMU_EVENT_IOPF_I              (1U << 3)
> +#define   IOMMU_EVENT_IOPF_PR             (1U << 4)
> +#define   IOMMU_EVENT_IOPF_RW             (1U << 5)
> +#define   IOMMU_EVENT_IOPF_PE             (1U << 6)
> +#define   IOMMU_EVENT_IOPF_RZ             (1U << 7)
> +#define   IOMMU_EVENT_IOPF_TR             (1U << 8)
> +#define IOMMU_EVENT_DEV_TAB_HW_ERROR      (0x3U << 12)
> +#define IOMMU_EVENT_PAGE_TAB_HW_ERROR     (0x4U << 12)
> +#define IOMMU_EVENT_ILLEGAL_COMMAND_ERROR (0x5U << 12)
> +#define IOMMU_EVENT_COMMAND_HW_ERROR      (0x6U << 12)
> +#define IOMMU_EVENT_IOTLB_INV_TIMEOUT     (0x7U << 12)
> +#define IOMMU_EVENT_INVALID_DEV_REQUEST   (0x8U << 12)
> +
> +#define IOMMU_EVENT_LEN                   16
> +#define IOMMU_PERM_READ             (1 << 0)
> +#define IOMMU_PERM_WRITE            (1 << 1)
> +#define IOMMU_PERM_RW               (IOMMU_PERM_READ | IOMMU_PERM_WRITE)
> +
> +/* AMD RD890 Chipset */
> +#define PCI_DEVICE_ID_RD890_IOMMU   0x20

We keep the PCI IDs in include/hw/pci/pci_ids.h.

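For example, something along these lines (the exact name and placement in
pci_ids.h are only a suggestion):

    /* include/hw/pci/pci_ids.h */
    #define PCI_DEVICE_ID_AMD_RD890_IOMMU    0x0020

so this header only keeps the IOMMU-specific constants.
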
> +
> +#define IOMMU_FEATURE_PREFETCH            (1ULL << 0)
> +#define IOMMU_FEATURE_PPR                 (1ULL << 1)
> +#define IOMMU_FEATURE_NX                  (1ULL << 3)
> +#define IOMMU_FEATURE_GT                  (1ULL << 4)
> +#define IOMMU_FEATURE_IA                  (1ULL << 6)
> +#define IOMMU_FEATURE_GA                  (1ULL << 7)
> +#define IOMMU_FEATURE_HE                  (1ULL << 8)
> +#define IOMMU_FEATURE_PC                  (1ULL << 9)
> +
> +/* reserved DTE bits */
> +#define IOMMU_DTE_LOWER_QUAD_RESERVED  0x80300000000000fc
> +#define IOMMU_DTE_MIDDLE_QUAD_RESERVED 0x0000000000000100
> +#define IOMMU_DTE_UPPER_QUAD_RESERVED  0x08f0000000000000
> +
> +/* IOMMU paging mode */
> +#define IOMMU_GATS_MODE                 (6ULL <<  12)
> +#define IOMMU_HATS_MODE                 (6ULL <<  10)
> +
> +/* PCI SIG constants */
> +#define PCI_BUS_MAX 256
> +#define PCI_SLOT_MAX 32
> +#define PCI_FUNC_MAX 8
> +#define PCI_DEVFN_MAX 256

Maybe we can move the PCI macros to include/hw/pci/pci.h; they are not IOMMU specific.

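For example (same values, just relocated; a sketch only):

    /* include/hw/pci/pci.h */
    #define PCI_BUS_MAX      256
    #define PCI_SLOT_MAX     32
    #define PCI_FUNC_MAX     8
    #define PCI_DEVFN_MAX    256
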
> +
> +/* IOTLB */
> +#define IOMMU_IOTLB_MAX_SIZE 1024
> +#define IOMMU_DEVID_SHIFT    36
> +
> +/* extended feature support */
> +#define IOMMU_EXT_FEATURES (IOMMU_FEATURE_PREFETCH | IOMMU_FEATURE_PPR | \
> +        IOMMU_FEATURE_NX | IOMMU_FEATURE_IA | IOMMU_FEATURE_GT | \
> +        IOMMU_FEATURE_GA | IOMMU_FEATURE_HE | IOMMU_GATS_MODE | \
> +        IOMMU_HATS_MODE)
> +
> +/* capabilities header */
> +#define IOMMU_CAPAB_FEATURES (IOMMU_CAPAB_FLAT_EXT | \
> +        IOMMU_CAPAB_FLAG_NPCACHE | IOMMU_CAPAB_FLAG_IOTLBSUP \
> +        | IOMMU_CAPAB_ID_SEC | IOMMU_CAPAB_INIT_TYPE | \
> +        IOMMU_CAPAB_FLAG_HTTUNNEL |  IOMMU_CAPAB_EFR_SUP)
> +
> +/* command constants */
> +#define IOMMU_COM_STORE_ADDRESS_MASK 0xffffffffffff8
> +#define IOMMU_COM_COMPLETION_STORE_MASK 0x1
> +#define IOMMU_COM_COMPLETION_INTR 0x2
> +#define IOMMU_COM_COMPLETION_DATA_OFF 0x8
> +#define IOMMU_COMMAND_SIZE 0x10
> +
> +/* IOMMU default address */
> +#define IOMMU_BASE_ADDR 0xfed80000
> +
> +/* page management constants */
> +#define IOMMU_PAGE_SHIFT 12
> +#define IOMMU_PAGE_SIZE  (1ULL << IOMMU_PAGE_SHIFT)
> +
> +#define IOMMU_PAGE_SHIFT_4K 12
> +#define IOMMU_PAGE_MASK_4K  (~((1ULL << IOMMU_PAGE_SHIFT_4K) - 1))
> +#define IOMMU_PAGE_SHIFT_2M 21
> +#define IOMMU_PAGE_MASK_2M  (~((1ULL << IOMMU_PAGE_SHIFT_2M) - 1))
> +#define IOMMU_PAGE_SHIFT_1G 30
> +#define IOMMU_PAGE_MASK_1G (~((1ULL << IOMMU_PAGE_SHIFT_1G) - 1))
> +
> +#define IOMMU_MAX_VA_ADDR          (48UL << 5)
> +#define IOMMU_MAX_PH_ADDR          (40UL << 8)
> +#define IOMMU_MAX_GVA_ADDR         (48UL << 15)
> +
> +/* invalidation command device id */
> +#define IOMMU_INVAL_DEV_ID_SHIFT  32
> +#define IOMMU_INVAL_DEV_ID_MASK   (~((1UL << IOMMU_INVAL_DEV_ID_SHIFT) - 1))
> +
> +/* invalidation address */
> +#define IOMMU_INVAL_ADDR_MASK_SHIFT 12
> +#define IOMMU_INVAL_ADDR_MASK     (~((1UL << IOMMU_INVAL_ADDR_MASK_SHIFT) - 1))
> +
> +/* invalidation S bit mask */
> +#define IOMMU_INVAL_ALL(val) ((val) & (0x1))
> +
> +/* reserved bits */
> +#define IOMMU_COMPLETION_WAIT_RSVD    0x0ff000000
> +#define IOMMU_CMD_INVAL_DEV_RSVD      0xffff00000fffffff
> +#define IOMMU_INVAL_IOMMU_PAGES_RSVD  0xfff000000fff0000
> +#define IOMMU_INVAL_IOTLB_PAGES_RSVD  0x00000ff4
> +#define IOMMU_INVAL_INTR_TABLE_RSVD   0xffff00000fffffff
> +#define IOMMU_PRF_IOMMU_PAGES_RSVD    0x00ff00000ff00000
> +#define IOMMU_COMPLETE_PPR_RQ_RSVD    0xffff00000ff00000
> +#define IOMMU_INVAL_IOMMU_ALL_RSVD    0x0fffffff00000000
> +
> +/* command masks - inval iommu pages */
> +#define IOMMU_INVAL_PAGES_PASID       (~((1UL << 20) - 1))
> +#define IOMMU_INVAL_PAGES_DOMID       (((1UL << 16) - 1) << 32)
> +#define IOMMU_INVAL_PAGES_ADDRESS     (~((1UL << 12) - 1))
> +#define IOMMU_INVAL_PAGES_SBIT        (1UL << 0)
> +#define IOMMU_INVAL_PAGES_PDE         (1UL << 1)
> +#define IOMMU_INVAL_PAGES_GN          (1UL << 2)
> +
> +/* masks - inval iotlb pages */
> +#define IOMMU_INVAL_IOTLB_DEVID       (~((1UL << 16) - 1))
> +#define IOMMU_INVAL_IOTLB_PASID_LOW   (0xff << 15)
> +#define IOMMU_INVAL_IOTLB_MAXPEND     (0xff << 23)
> +#define IOMMU_INVAL_IOTLB_QUEUEID     (~((1UL << 16) - 1))
> +#define IOMMU_INVAL_IOTLB_PASID_HIGH  (0xff << 46)
> +#define IOMMU_INVAL_IOTLB_GN          IOMMU_INVAL_PAGES_GN
> +#define IOMMU_INVAL_IOTLB_S           IOMMU_INVAL_PAGES_SBIT
> +#define IOMMU_INVAL_IOTLB_ADDRESS     IOMMU_INVAL_PAGES_ADDRESS
> +#define IOMMU_INVAL_IOTLB_MAKEPASID(low, high)
> +
> +/* masks - prefetch pages   */
> +#define IOMMU_PREFETCH_PAGES_DEVID     IOMMU_INVAL_IOTLB_DEVID
> +#define IOMMU_PREFETCH_PAGES_PFCOUNT   IOMMU_INVAL_IOTLB_MAXPEND
> +
> +#define TYPE_AMD_IOMMU_DEVICE "amd-iommu"
> +#define AMD_IOMMU_DEVICE(obj)\
> +    OBJECT_CHECK(AMDIOMMUState, (obj), TYPE_AMD_IOMMU_DEVICE)
> +
> +#define AMD_IOMMU_STR "amd"
> +
> +typedef struct AMDIOMMUAddressSpace AMDIOMMUAddressSpace;
> +
> +typedef struct AMDIOMMUState {
> +    PCIDevice dev;               /* The PCI device itself        */
> +
> +    uint32_t version;
> +
> +    uint32_t capab_offset;       /* capability offset pointer    */
> +    uint64_t mmio_addr;
> +    uint8_t *capab;              /* capabilities registers       */
> +
> +    bool enabled;                /* IOMMU enabled                */
> +    bool ats_enabled;            /* address translation enabled  */
> +    bool cmdbuf_enabled;         /* command buffer enabled       */
> +    bool evtlog_enabled;         /* event log enabled            */
> +    bool excl_enabled;
> +
> +    dma_addr_t devtab;           /* base address device table    */
> +    size_t devtab_len;           /* device table length          */
> +
> +    dma_addr_t cmdbuf;           /* command buffer base address  */
> +    uint64_t cmdbuf_len;         /* command buffer length        */
> +    uint32_t cmdbuf_head;        /* current IOMMU read position  */
> +    uint32_t cmdbuf_tail;        /* next Software write position */
> +    bool completion_wait_intr;
> +
> +    dma_addr_t evtlog;           /* base address event log       */
> +    bool evtlog_intr;
> +    uint32_t evtlog_len;         /* event log length             */
> +    uint32_t evtlog_head;        /* current IOMMU write position */
> +    uint32_t evtlog_tail;        /* current Software read position */
> +
> +    /* unused for now */

I suggest removing, for now, whatever is not used.

> +    dma_addr_t excl_base;        /* base DVA - IOMMU exclusion range */
> +    dma_addr_t excl_limit;       /* limit of IOMMU exclusion range   */
> +    bool excl_allow;             /* translate accesses to the exclusion range */
> +    bool excl_enable;            /* exclusion range enabled          */
> +
> +    dma_addr_t ppr_log;          /* base address ppr log */
> +    uint32_t pprlog_len;         /* ppr log len  */
> +    uint32_t pprlog_head;        /* ppr log head */
> +    uint32_t pprlog_tail;        /* ppr log tail */
> +
> +    MemoryRegion mmio;           /* MMIO region                  */
> +    uint8_t mmior[IOMMU_MMIO_SIZE];    /* read/write MMIO              */
> +    uint8_t w1cmask[IOMMU_MMIO_SIZE];  /* read/write 1 clear mask      */
> +    uint8_t romask[IOMMU_MMIO_SIZE];   /* MMIO read/only mask          */
> +    bool mmio_enabled;
> +
> +    /* IOMMU function */
> +    MemoryRegionIOMMUOps iommu_ops;
> +
> +    /* for each served device */
> +    AMDIOMMUAddressSpace **address_spaces[PCI_BUS_MAX];
> +
> +    /* IOTLB */
> +    GHashTable *iotlb;
> +} AMDIOMMUState;
> +
> +/*
> + * bridge_host_amd_iommu: setup an IOMMU function on a bus
> + *
> + * called for all PCI devices
> + *
> + * @bus: PCI bus to host the IOMMU
> + * @opaque: opaque pointer to AMDIOMMUState struct
> + * @devfn: PCI device function for which to set up the IOMMU region
> + *
> + */
> +AddressSpace *bridge_host_amd_iommu(PCIBus *bus, void *opaque, int devfn);
> +
> +#endif
> diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
> index dedf277..61deace 100644
> --- a/include/hw/pci/pci.h
> +++ b/include/hw/pci/pci.h
> @@ -15,6 +15,8 @@
>
>   /* PCI bus */
>
> +#define PCI_BUS_NUM(x)          (((x) >> 8) & 0xff)
> +#define PCI_DEVID(bus, devfn)   ((((uint16_t)(bus)) << 8) | (devfn))
>   #define PCI_DEVFN(slot, func)   ((((slot) & 0x1f) << 3) | ((func) & 0x07))
>   #define PCI_SLOT(devfn)         (((devfn) >> 3) & 0x1f)
>   #define PCI_FUNC(devfn)         ((devfn) & 0x07)
>


Thanks,
Marcel
David Kiarie Feb. 26, 2016, 6:23 a.m. UTC | #2
On Thu, Feb 25, 2016 at 6:43 PM, Marcel Apfelbaum <marcel@redhat.com> wrote:
> On 02/21/2016 08:10 PM, David Kiarie wrote:
>>
>> Add AMD IOMMU emulation to QEMU in addition to Intel IOMMU
>> The IOMMU does basic translation, error checking and has a
>> minimal IOTLB implementation
>
>
> Hi,
>
>>
>> Signed-off-by: David Kiarie <davidkiarie4@gmail.com>
>> ---
>>   hw/i386/Makefile.objs |    1 +
>>   hw/i386/amd_iommu.c   | 1432
>> +++++++++++++++++++++++++++++++++++++++++++++++++
>>   hw/i386/amd_iommu.h   |  395 ++++++++++++++
>>   include/hw/pci/pci.h  |    2 +
>>   4 files changed, 1830 insertions(+)
>>   create mode 100644 hw/i386/amd_iommu.c
>>   create mode 100644 hw/i386/amd_iommu.h
>>
>> diff --git a/hw/i386/Makefile.objs b/hw/i386/Makefile.objs
>> index b52d5b8..2f1a265 100644
>> --- a/hw/i386/Makefile.objs
>> +++ b/hw/i386/Makefile.objs
>> @@ -3,6 +3,7 @@ obj-y += multiboot.o
>>   obj-y += pc.o pc_piix.o pc_q35.o
>>   obj-y += pc_sysfw.o
>>   obj-y += intel_iommu.o
>> +obj-y += amd_iommu.o
>>   obj-$(CONFIG_XEN) += ../xenpv/ xen/
>>
>>   obj-y += kvmvapic.o
>> diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c
>> new file mode 100644
>> index 0000000..3dac043
>> --- /dev/null
>> +++ b/hw/i386/amd_iommu.c
>> @@ -0,0 +1,1432 @@
>> +/*
>> + * QEMU emulation of AMD IOMMU (AMD-Vi)
>> + *
>> + * Copyright (C) 2011 Eduard - Gabriel Munteanu
>> + * Copyright (C) 2015 David Kiarie, <davidkiarie4@gmail.com>
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License as published by
>> + * the Free Software Foundation; either version 2 of the License, or
>> + * (at your option) any later version.
>> +
>> + * This program is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
>> + * GNU General Public License for more details.
>> +
>> + * You should have received a copy of the GNU General Public License
>> along
>> + * with this program; if not, see <http://www.gnu.org/licenses/>.
>> + *
>> + * Cache implementation inspired by hw/i386/intel_iommu.c
>> + *
>> + */
>> +#include "hw/i386/amd_iommu.h"
>> +
>> +/*#define DEBUG_AMD_IOMMU*/
>> +#ifdef DEBUG_AMD_IOMMU
>> +enum {
>> +    DEBUG_GENERAL, DEBUG_CAPAB, DEBUG_MMIO, DEBUG_ELOG,
>> +    DEBUG_CACHE, DEBUG_COMMAND, DEBUG_MMU
>> +};
>> +
>> +#define IOMMU_DBGBIT(x)   (1 << DEBUG_##x)
>> +static int iommu_dbgflags = IOMMU_DBGBIT(MMIO);
>> +
>> +#define IOMMU_DPRINTF(what, fmt, ...) do { \
>> +    if (iommu_dbgflags & IOMMU_DBGBIT(what)) { \
>> +        fprintf(stderr, "(amd-iommu)%s: " fmt "\n", __func__, \
>> +                ## __VA_ARGS__); } \
>> +    } while (0)
>> +#else
>> +#define IOMMU_DPRINTF(what, fmt, ...) do {} while (0)
>> +#endif
>> +
>> +typedef struct AMDIOMMUAddressSpace {
>> +    uint8_t bus_num;            /* bus number
>> */
>> +    uint8_t devfn;              /* device function
>> */
>> +    AMDIOMMUState *iommu_state; /* IOMMU - one per machine
>> */
>> +    MemoryRegion iommu;         /* Device's iommu region
>> */
>> +    AddressSpace as;            /* device's corresponding address space
>> */
>> +} AMDIOMMUAddressSpace;
>> +
>> +/* IOMMU cache entry */
>> +typedef struct IOMMUIOTLBEntry {
>> +    uint64_t gfn;
>> +    uint16_t domid;
>> +    uint64_t devid;
>> +    uint64_t perms;
>> +    uint64_t translated_addr;
>> +} IOMMUIOTLBEntry;
>> +
>> +/* configure MMIO registers at startup/reset */
>> +static void amd_iommu_set_quad(AMDIOMMUState *s, hwaddr addr, uint64_t
>> val,
>> +                               uint64_t romask, uint64_t w1cmask)
>> +{
>> +    stq_le_p(&s->mmior[addr], val);
>> +    stq_le_p(&s->romask[addr], romask);
>> +    stq_le_p(&s->w1cmask[addr], w1cmask);
>> +}
>> +
>> +static uint16_t amd_iommu_readw(AMDIOMMUState *s, hwaddr addr)
>> +{
>> +    return lduw_le_p(&s->mmior[addr]);
>> +}
>> +
>> +static uint32_t amd_iommu_readl(AMDIOMMUState *s, hwaddr addr)
>> +{
>> +    return ldl_le_p(&s->mmior[addr]);
>> +}
>> +
>> +static uint64_t amd_iommu_readq(AMDIOMMUState *s, hwaddr addr)
>> +{
>> +    return ldq_le_p(&s->mmior[addr]);
>> +}
>> +
>> +/* internal write */
>> +static void amd_iommu_writeq_raw(AMDIOMMUState *s, uint64_t val, hwaddr
>> addr)
>> +{
>> +    stq_le_p(&s->mmior[addr], val);
>> +}
>> +
>> +/* external write */
>> +static void amd_iommu_writew(AMDIOMMUState *s, hwaddr addr, uint16_t val)
>> +{
>> +    uint16_t romask = lduw_le_p(&s->romask[addr]);
>> +    uint16_t w1cmask = lduw_le_p(&s->w1cmask[addr]);
>> +    uint16_t oldval = lduw_le_p(&s->mmior[addr]);
>> +    stw_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask &
>> oldval));
>> +}
>> +
>> +static void amd_iommu_writel(AMDIOMMUState *s, hwaddr addr, uint32_t val)
>> +{
>> +    uint32_t romask = ldl_le_p(&s->romask[addr]);
>> +    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
>> +    uint32_t oldval = ldl_le_p(&s->mmior[addr]);
>> +    stl_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask &
>> oldval));
>> +}
>> +
>> +static void amd_iommu_writeq(AMDIOMMUState *s, hwaddr addr, uint64_t val)
>> +{
>> +    uint64_t romask = ldq_le_p(&s->romask[addr]);
>> +    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
>> +    uint32_t oldval = ldq_le_p(&s->mmior[addr]);
>> +    stq_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask &
>> oldval));
>> +}
>> +
>> +static void amd_iommu_log_event(AMDIOMMUState *s, uint16_t *evt)
>> +{
>> +    /* event logging not enabled */
>> +    if (!s->evtlog_enabled || *(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS] |
>> +        IOMMU_MMIO_STATUS_EVT_OVF) {
>> +        return;
>> +    }
>> +
>> +    /* event log buffer full */
>> +    if (s->evtlog_tail >= s->evtlog_len) {
>> +        *(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS] |=
>> IOMMU_MMIO_STATUS_EVT_OVF;
>> +        /* generate interrupt */
>> +        msi_notify(&s->dev, 0);
>> +    }
>> +
>> +    if (dma_memory_write(&address_space_memory, s->evtlog_len +
>> s->evtlog_tail,
>> +       &evt, IOMMU_EVENT_LEN)) {
>> +        IOMMU_DPRINTF(ELOG, "error: fail to write at address 0x%"PRIx64
>> +                      " + offset 0x%"PRIx32, s->evtlog, s->evtlog_tail);
>> +    }
>> +
>> +     s->evtlog_tail += IOMMU_EVENT_LEN;
>> +     *(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS] |=
>> IOMMU_MMIO_STATUS_COMP_INT;
>> +}
>> +
>> +/* log an error encountered page-walking
>> + *
>> + * @addr: virtual address in translation request
>> + */
>> +static void amd_iommu_page_fault(AMDIOMMUState *s, uint16_t devid,
>> +                                 dma_addr_t addr, uint16_t info)
>> +{
>> +    IOMMU_DPRINTF(ELOG, "");
>> +
>> +    uint16_t evt[8];
>> +
>> +    info |= IOMMU_EVENT_IOPF_I;
>> +
>> +    /* encode information */
>> +    *(uint16_t *)&evt[0] = devid;
>> +    *(uint16_t *)&evt[3] = info;
>> +    *(uint64_t *)&evt[4] = cpu_to_le64(addr);
>> +
>> +    /* log a page fault */
>> +    amd_iommu_log_event(s, evt);
>> +
>> +    /* Abort the translation */
>> +    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
>> +            PCI_STATUS_SIG_TARGET_ABORT);
>> +}
>> +/*
>> + * log a master abort accessing device table
>> + *  @devtab : address of device table entry
>> + *  @info : error flags
>> + */
>> +static void amd_iommu_log_devtab_error(AMDIOMMUState *s, uint16_t devid,
>> +                                       dma_addr_t devtab, uint16_t info)
>> +{
>> +
>> +    IOMMU_DPRINTF(ELOG, "");
>> +
>> +    uint16_t evt[8];
>> +
>> +    info |= IOMMU_EVENT_DEV_TAB_HW_ERROR;
>> +
>> +    /* encode information */
>> +    *(uint16_t *)&evt[0] = devid;
>> +    *(uint8_t *)&evt[3]  = info;
>> +    *(uint64_t *)&evt[4] = cpu_to_le64(devtab);
>> +
>> +    amd_iommu_log_event(s, evt);
>> +
>> +    /* Abort the translation */
>> +    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
>> +            PCI_STATUS_SIG_TARGET_ABORT);
>> +
>> +}
>> +
>> +/* log a master abort encountered during a page-walk
>> + *  @addr : address that couldn't be accessed
>> + */
>> +static void amd_iommu_log_pagetab_error(AMDIOMMUState *s, uint16_t devid,
>> +                                        dma_addr_t addr, uint16_t info)
>> +{
>> +    IOMMU_DPRINTF(ELOG, "");
>> +
>> +    uint16_t evt[8];
>> +
>> +    info |= IOMMU_EVENT_PAGE_TAB_HW_ERROR;
>> +
>> +    /* encode information */
>> +    *(uint16_t *)&evt[0] = devid;
>> +    *(uint8_t *)&evt[3]  = info;
>> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
>> +
>> +    amd_iommu_log_event(s, evt);
>> +
>> +    /* Abort the translation */
>> +    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
>> +            PCI_STATUS_SIG_TARGET_ABORT);
>> +
>> +}
>> +
>> +/* log an event trying to access command buffer
>> + *   @addr : address that couldn't be accessed
>> + */
>> +static void amd_iommu_log_command_error(AMDIOMMUState *s, dma_addr_t
>> addr)
>> +{
>> +    IOMMU_DPRINTF(ELOG, "");
>> +
>> +    uint16_t evt[8];
>> +
>> +    /* encode information */
>> +    *(uint8_t *)&evt[3]  = (uint8_t)IOMMU_EVENT_COMMAND_HW_ERROR;
>> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
>> +
>> +    amd_iommu_log_event(s, evt);
>> +
>> +    /* Abort the translation */
>> +    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
>> +            PCI_STATUS_SIG_TARGET_ABORT);
>> +}
>> +
>> +/* log an illegal command event
>> + *   @addr : address of illegal command
>> + */
>> +static void amd_iommu_log_illegalcom_error(AMDIOMMUState *s, uint16_t
>> info,
>> +                                           dma_addr_t addr)
>> +{
>> +    IOMMU_DPRINTF(ELOG, "");
>> +
>> +    uint16_t evt[8];
>> +
>> +    /* encode information */
>> +    *(uint8_t *)&evt[3]  = (uint8_t)IOMMU_EVENT_ILLEGAL_COMMAND_ERROR;
>> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
>
>
> Can you please use a macro instead of the literal 3?
>
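Something like the following would do (the macro name is only a suggestion):

    #define IOMMU_EVENT_ADDR_RSHIFT   3

    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> IOMMU_EVENT_ADDR_RSHIFT);
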
>> +
>> +    amd_iommu_log_event(s, evt);
>> +}
>> +
>> +/* log an error accessing device table
>> + *
>> + *  @devid : device owning the table entry
>> + *  @devtab : address of device table entry
>> + *  @info : error flags
>> + */
>> +static void amd_iommu_log_illegaldevtab_error(AMDIOMMUState *s, uint16_t
>> devid,
>> +                                              dma_addr_t addr, uint16_t
>> info)
>> +{
>> +    IOMMU_DPRINTF(ELOG, "");
>> +
>> +    uint16_t evt[8];
>> +
>> +    info |= IOMMU_EVENT_ILLEGAL_DEVTAB_ENTRY;
>> +
>> +    *(uint16_t *)&evt[0] = devid;
>> +    *(uint8_t *)&evt[3]  = info;
>> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
>> +
>> +    amd_iommu_log_event(s, evt);
>> +}
>
>
> It seems that all the log functions do the same:
> create an event, log it and optionally set PCI_STATUS_SIG_TARGET_ABORT
>
> I would consider to unite them in the same function. (not a must)
>
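An untested sketch of what a common helper could look like (name and exact
signature are only suggestions; callers keep their own event code in 'info'
and decide whether the address needs the >> 3 shift and the target abort):

    static void amd_iommu_log_and_abort(AMDIOMMUState *s, uint16_t devid,
                                        uint64_t addr, uint16_t info,
                                        bool abort_translation)
    {
        uint16_t evt[8];

        /* encode information */
        *(uint16_t *)&evt[0] = devid;
        *(uint16_t *)&evt[3] = info;
        *(uint64_t *)&evt[4] = cpu_to_le64(addr);

        amd_iommu_log_event(s, evt);

        if (abort_translation) {
            pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
                                       PCI_STATUS_SIG_TARGET_ABORT);
        }
    }
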
>> +
>> +static gboolean amd_iommu_uint64_equal(gconstpointer v1, gconstpointer
>> v2)
>> +{
>> +    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
>> +}
>> +
>> +static guint amd_iommu_uint64_hash(gconstpointer v)
>> +{
>> +    return (guint)*(const uint64_t *)v;
>> +}
>> +
>> +static IOMMUIOTLBEntry *amd_iommu_iotlb_lookup(AMDIOMMUState *s, hwaddr
>> addr,
>> +                                               uint64_t devid)
>> +{
>> +    uint64_t key = (addr >> IOMMU_PAGE_SHIFT_4K) |
>> +                   ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
>> +    return g_hash_table_lookup(s->iotlb, &key);
>> +}
>> +
>> +static void amd_iommu_iotlb_reset(AMDIOMMUState *s)
>> +{
>> +    assert(s->iotlb);
>> +    g_hash_table_remove_all(s->iotlb);
>> +}
>> +
>> +static gboolean amd_iommu_iotlb_remove_by_devid(gpointer key, gpointer
>> value,
>> +                                                gpointer user_data)
>> +{
>> +    IOMMUIOTLBEntry *entry = (IOMMUIOTLBEntry *)value;
>> +    uint16_t devid = *(uint16_t *)user_data;
>> +    return entry->devid == devid;
>> +}
>> +
>> +static void amd_iommu_iotlb_remove_page(AMDIOMMUState *s, hwaddr addr,
>> +                                        uint64_t devid)
>> +{
>> +    uint64_t key = (addr >> IOMMU_PAGE_SHIFT_4K) |
>> +                   ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
>> +    g_hash_table_remove(s->iotlb, &key);
>> +}
>> +
>> +/* extract device id */
>> +static inline uint16_t devid_extract(uint8_t *cmd)
>> +{
>> +    return (uint16_t)cmd[2] & IOMMU_INVAL_DEV_ID_MASK;
>> +}
>> +
>> +static void amd_iommu_invalidate_iotlb(AMDIOMMUState *s, uint64_t *cmd)
>> +{
>> +    uint16_t devid = devid_extract((uint8_t *)cmd);
>> +    /* if invalidation of more than one page requested */
>> +    if (IOMMU_INVAL_ALL(cmd[0])) {
>> +        g_hash_table_foreach_remove(s->iotlb,
>> amd_iommu_iotlb_remove_by_devid,
>> +                                    &devid);
>> +    } else {
>> +        hwaddr addr = (hwaddr)(cmd[1] & IOMMU_INVAL_ADDR_MASK);
>> +        amd_iommu_iotlb_remove_page(s, addr, devid);
>> +    }
>> +}
>> +
>> +static void amd_iommu_update_iotlb(AMDIOMMUState *s, uint16_t devid,
>> +                                   uint64_t gpa, uint64_t spa, uint64_t
>> perms,
>> +                                   uint16_t domid)
>> +{
>> +    IOMMUIOTLBEntry *entry = g_malloc(sizeof(*entry));
>> +    uint64_t *key = g_malloc(sizeof(key));
>> +    uint64_t gfn = gpa >> IOMMU_PAGE_SHIFT_4K;
>> +
>> +    IOMMU_DPRINTF(CACHE, " update iotlb devid: %02x:%02x.%x gpa
>> 0x%"PRIx64
>> +                  " hpa 0x%"PRIx64, PCI_BUS_NUM(devid), PCI_SLOT(devid),
>> +                  PCI_FUNC(devid), gpa, spa);
>> +
>> +    if (g_hash_table_size(s->iotlb) >= IOMMU_IOTLB_MAX_SIZE) {
>> +        IOMMU_DPRINTF(CACHE, "iotlb exceeds size limit - reset");
>> +        amd_iommu_iotlb_reset(s);
>> +    }
>> +
>> +    entry->gfn = gfn;
>> +    entry->domid = domid;
>> +    entry->perms = perms;
>> +    entry->translated_addr = spa;
>> +    *key = gfn | ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
>> +    g_hash_table_replace(s->iotlb, key, entry);
>> +}
>> +
>> +/* execute a completion wait command */
>> +static void amd_iommu_completion_wait(AMDIOMMUState *s, uint8_t *cmd)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +    unsigned int addr;
>> +
>> +    /* completion store */
>> +    if (cmd[0] & IOMMU_COM_COMPLETION_STORE_MASK) {
>> +        addr = le64_to_cpu(*(uint64_t *)cmd) &
>> IOMMU_COM_STORE_ADDRESS_MASK;
>> +        if (dma_memory_write(&address_space_memory, addr, cmd + 8, 8)) {
>> +            IOMMU_DPRINTF(ELOG, "error: fail to write at address
>> 0%x"PRIx64,
>> +                          addr);
>> +        }
>> +    }
>> +
>> +    /* set completion interrupt */
>> +    if (cmd[0] & IOMMU_COM_COMPLETION_INTR) {
>> +        s->mmior[IOMMU_MMIO_STATUS] |= IOMMU_MMIO_STATUS_COMP_INT;
>> +    }
>> +}
>> +
>> +/* get command type */
>> +static uint8_t opcode(uint8_t *cmd)
>> +{
>> +    return cmd[IOMMU_CMDBUF_ID_BYTE] >> IOMMU_CMDBUF_ID_RSHIFT;
>> +}
>> +
>> +/* linux seems to be using reserved bits so I just log without abortig
>> bug */
>
>
> I couldn't quite understand the comment
>
>> +static void iommu_inval_devtab_entry(AMDIOMMUState *s, uint8_t *cmd,
>> +                                     uint8_t type)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    /* This command should invalidate internal caches of which there
>> isn't */
>> +    if (*(uint64_t *)&cmd[0] & IOMMU_CMD_INVAL_DEV_RSVD ||
>> +            *(uint64_t *)&cmd[1]) {
>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>> s->cmdbuf_head);
>> +    }
>> +#ifdef DEBUG_AMD_IOMMU
>> +    uint16_t devid = devid_extract(cmd);
>> +#endif
>> +    IOMMU_DPRINTF(COMMAND, "device table entry for devid: %02x:%02x.%x"
>> +                  "invalidated", PCI_BUS_NUM(devid), PCI_SLOT(devid),
>> +                  PCI_FUNC(devid));
>> +}
>> +
>> +static void iommu_completion_wait(AMDIOMMUState *s, uint8_t *cmd, uint8_t
>> type)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    if (*(uint32_t *)&cmd[1] & IOMMU_COMPLETION_WAIT_RSVD) {
>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>> s->cmdbuf_head);
>> +    }
>> +    /* pretend to wait for command execution to complete */
>> +    IOMMU_DPRINTF(COMMAND, "completion wait requested with store address
>> 0x%"
>> +                  PRIx64 " and store data 0x%"PRIx64, (cmd[0] &
>> +                  IOMMU_COM_STORE_ADDRESS_MASK), *(uint64_t *)(cmd + 8));
>> +    amd_iommu_completion_wait(s, cmd);
>> +}
>> +
>> +static void iommu_complete_ppr(AMDIOMMUState *s, uint8_t *cmd, uint8_t
>> type)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    if ((*(uint64_t *)&cmd[0] & IOMMU_COMPLETE_PPR_RQ_RSVD) ||
>> +       *(uint64_t *)&cmd[1] & 0xffff000000000000) {
>
>
>
> Can you please document this mask?
>
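A named constant would make it easier to map the check back to the spec,
e.g. (name is only a suggestion):

    #define IOMMU_COMPLETE_PPR_RQ_HIGH_RSVD   0xffff000000000000ULL
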
>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>> s->cmdbuf_head);
>> +    }
>> +
>> +    IOMMU_DPRINTF(COMMAND, "Execution of PPR queue requested");
>> +}
>> +
>> +static void iommu_inval_all(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    if ((*(uint64_t *)&cmd[0] & IOMMU_INVAL_IOMMU_ALL_RSVD) ||
>> +       *(uint64_t *)&cmd[1]) {
>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>> s->cmdbuf_head);
>> +    }
>> +
>> +    amd_iommu_iotlb_reset(s);
>> +    IOMMU_DPRINTF(COMMAND, "Invalidation of all IOMMU cache requested");
>> +}
>> +
>> +static inline uint16_t domid_extract(uint64_t *cmd)
>> +{
>> +    return (uint16_t)cmd[0] & IOMMU_INVAL_PAGES_DOMID;
>> +}
>> +
>> +static gboolean amd_iommu_iotlb_remove_by_domid(gpointer key, gpointer
>> value,
>> +                                                gpointer user_data)
>> +{
>> +    IOMMUIOTLBEntry *entry = (IOMMUIOTLBEntry *)value;
>> +    uint16_t domid = *(uint16_t *)user_data;
>> +    return entry->domid == domid;
>> +}
>> +
>> +/* we don't have devid - we can't remove pages by address */
>> +static void iommu_inval_pages(AMDIOMMUState *s, uint8_t *cmd, uint8_t
>> type)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +    uint16_t domid = domid_extract((uint64_t *)cmd);
>> +
>> +    if (*(uint64_t *)&cmd[0] & IOMMU_INVAL_IOMMU_PAGES_RSVD ||
>> +       *(uint32_t *)&cmd[1] & 0x00000ff0) {
>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>> s->cmdbuf_head);
>> +    }
>> +
>> +    g_hash_table_foreach_remove(s->iotlb,
>> amd_iommu_iotlb_remove_by_domid,
>> +                                &domid);
>> +
>> +    IOMMU_DPRINTF(COMMAND, "IOMMU pages for domain 0x%"PRIx16
>> "invalidated",
>> +                  domid);
>> +}
>> +
>> +static void iommu_prefetch_pages(AMDIOMMUState *s, uint8_t *cmd, uint8_t
>> type)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    if ((*(uint64_t *)&cmd[0] & IOMMU_PRF_IOMMU_PAGES_RSVD) ||
>> +       (*(uint32_t *)&cmd[1] & 0x00000fd4)) {
>
>
> Same here, maybe you can name the mask, so it is easier to follow the spec.
>
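Same idea here, e.g. (name is only a suggestion):

    #define IOMMU_PRF_IOMMU_PAGES_HIGH_RSVD   0x00000fd4
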
>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>> s->cmdbuf_head);
>> +    }
>> +
>> +    IOMMU_DPRINTF(COMMAND, "Pre-fetch of IOMMU pages requested");
>> +}
>> +
>> +static void iommu_inval_inttable(AMDIOMMUState *s, uint8_t *cmd, uint8_t
>> type)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    if ((*(uint64_t *)&cmd[0] & IOMMU_INVAL_INTR_TABLE_RSVD) ||
>> +       *(uint64_t *)&cmd[1]) {
>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>> s->cmdbuf_head);
>> +        return;
>> +    }
>> +
>> +    IOMMU_DPRINTF(COMMAND, "interrupt table invalidated");
>> +}
>> +
>> +static void iommu_inval_iotlb(AMDIOMMUState *s, uint8_t *cmd, uint8_t
>> type)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    if (*(uint32_t *)&cmd[2] & IOMMU_INVAL_IOTLB_PAGES_RSVD) {
>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>> s->cmdbuf_head);
>> +        return;
>> +    }
>> +
>> +    amd_iommu_invalidate_iotlb(s, (uint64_t *)cmd);
>> +    IOMMU_DPRINTF(COMMAND, "IOTLB pages invalidated");
>> +}
>> +
>> +/* not honouring reserved bits is regarded as an illegal command */
>> +static void amd_iommu_cmdbuf_exec(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    uint8_t type;
>> +    uint8_t cmd[IOMMU_COMMAND_SIZE];
>> +
>> +    memset(cmd, 0, IOMMU_COMMAND_SIZE);
>> +
>> +    if (dma_memory_read(&address_space_memory, s->cmdbuf +
>> s->cmdbuf_head, cmd,
>> +       IOMMU_COMMAND_SIZE)) {
>> +        IOMMU_DPRINTF(COMMAND, "error: fail to access memory at
>> 0x%"PRIx64
>> +                      " + 0x%"PRIu8, s->cmdbuf, s->cmdbuf_head);
>> +        amd_iommu_log_command_error(s, s->cmdbuf + s->cmdbuf_head);
>> +        return;
>> +    }
>> +
>> +    type = opcode(cmd);
>> +
>> +    switch (type) {
>> +    case IOMMU_CMD_COMPLETION_WAIT:
>> +        iommu_completion_wait(s, cmd, type);
>> +        break;
>> +
>> +    case IOMMU_CMD_INVAL_DEVTAB_ENTRY:
>> +        iommu_inval_devtab_entry(s, cmd, type);
>> +        break;
>> +
>> +    case IOMMU_CMD_INVAL_IOMMU_PAGES:
>> +        iommu_inval_pages(s, cmd, type);
>> +        break;
>> +
>> +    case IOMMU_CMD_INVAL_IOTLB_PAGES:
>> +        iommu_inval_iotlb(s, cmd, type);
>> +        break;
>> +
>> +    case IOMMU_CMD_INVAL_INTR_TABLE:
>> +        iommu_inval_inttable(s, cmd, type);
>> +        break;
>> +
>> +    case IOMMU_CMD_PREFETCH_IOMMU_PAGES:
>> +        iommu_prefetch_pages(s, cmd, type);
>> +        break;
>> +
>> +    case IOMMU_CMD_COMPLETE_PPR_REQUEST:
>> +        iommu_complete_ppr(s, cmd, type);
>> +        break;
>> +
>> +    case IOMMU_CMD_INVAL_IOMMU_ALL:
>> +        iommu_inval_all(s, cmd, type);
>> +        break;
>> +
>> +    default:
>> +        IOMMU_DPRINTF(COMMAND, "unhandled command %d", type);
>> +        /* log illegal command */
>> +        amd_iommu_log_illegalcom_error(s, type,
>> +                                       s->cmdbuf + s->cmdbuf_head);
>> +        break;
>> +    }
>> +
>> +}
>> +
>> +static void amd_iommu_cmdbuf_run(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    uint64_t *mmio_cmdbuf_head = (uint64_t *)(s->mmior +
>> +                                 IOMMU_MMIO_COMMAND_HEAD);
>> +
>> +    if (!s->cmdbuf_enabled) {
>> +        IOMMU_DPRINTF(COMMAND, "error: IOMMU trying to execute commands
>> with "
>> +                      "command buffer disabled. IOMMU control value
>> 0x%"PRIx64,
>> +                      amd_iommu_readq(s, IOMMU_MMIO_CONTROL));
>> +        return;
>> +    }
>> +
>> +    while (s->cmdbuf_head != s->cmdbuf_tail) {
>> +        /* check if there is work to do. */
>> +        IOMMU_DPRINTF(COMMAND, "command buffer head at 0x%"PRIx32 "
>> command "
>> +                      "buffer tail at 0x%"PRIx32" command buffer base at
>> 0x%"
>> +                      PRIx64, s->cmdbuf_head, s->cmdbuf_tail, s->cmdbuf);
>> +         amd_iommu_cmdbuf_exec(s);
>> +         s->cmdbuf_head += IOMMU_COMMAND_SIZE;
>> +         amd_iommu_writeq_raw(s, s->cmdbuf_head,
>> IOMMU_MMIO_COMMAND_HEAD);
>> +
>> +        /* wrap head pointer */
>> +        if (s->cmdbuf_head >= s->cmdbuf_len * IOMMU_COMMAND_SIZE) {
>> +            s->cmdbuf_head = 0;
>> +        }
>> +    }
>> +
>> +    *mmio_cmdbuf_head = cpu_to_le64(s->cmdbuf_head);
>> +}
>> +
>> +/* System Software might never read from some of these fields, but anyway
>> */
>> +static uint64_t amd_iommu_mmio_read(void *opaque, hwaddr addr, unsigned
>> size)
>> +{
>> +    AMDIOMMUState *s = opaque;
>> +
>> +    uint64_t val = -1;
>
>
> The above might work, but it looks a little weird
>
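For instance, the intent might read more clearly as (sketch):

    uint64_t val = UINT64_MAX;    /* default for unhandled sizes/errors */
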
>> +    if (addr + size > IOMMU_MMIO_SIZE) {
>> +        IOMMU_DPRINTF(MMIO, "error: addr outside region: max 0x%"PRIX64
>> +                      ", got 0x%"PRIx64 " %d", (uint64_t)IOMMU_MMIO_SIZE,
>> addr,
>> +                      size);
>> +        return (uint64_t)-1;
>> +    }
>> +
>> +    if (size == 2) {
>> +        val = amd_iommu_readw(s, addr);
>> +    } else if (size == 4) {
>> +        val = amd_iommu_readl(s, addr);
>> +    } else if (size == 8) {
>> +        val = amd_iommu_readq(s, addr);
>> +    }
>> +
>> +    switch (addr & ~0x07) {
>> +    case IOMMU_MMIO_DEVICE_TABLE:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_DEVICE_TABLE read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                       addr & ~0x07);
>> +        break;
>> +
>> +    case IOMMU_MMIO_COMMAND_BASE:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_BASE read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                      addr & ~0x07);
>> +        break;
>> +
>> +    case IOMMU_MMIO_EVENT_BASE:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_BASE read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                      addr & ~0x07);
>> +        break;
>> +
>> +    case IOMMU_MMIO_CONTROL:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_CONTROL read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                       addr & ~0x07);
>> +        break;
>> +
>> +    case IOMMU_MMIO_EXCL_BASE:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_EXCL_BASE read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                      addr & ~0x07);
>> +        break;
>> +
>> +    case IOMMU_MMIO_EXCL_LIMIT:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_EXCL_LIMIT read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                      addr & ~0x07);
>> +        break;
>> +
>> +    case IOMMU_MMIO_COMMAND_HEAD:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_HEAD read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                      addr & ~0x07);
>> +        break;
>> +
>> +    case IOMMU_MMIO_COMMAND_TAIL:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_TAIL read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                      addr & ~0x07);
>> +        break;
>> +
>> +    case IOMMU_MMIO_EVENT_HEAD:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_HEAD read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                      addr & ~0x07);
>> +        break;
>> +
>> +    case IOMMU_MMIO_EVENT_TAIL:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_TAIL read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                      addr & ~0x07);
>> +        break;
>> +
>> +    case IOMMU_MMIO_STATUS:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_STATUS read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                      addr & ~0x07);
>> +        break;
>> +
>> +    case IOMMU_MMIO_EXT_FEATURES:
>> +        IOMMU_DPRINTF(MMU, "MMIO_EXT_FEATURES read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64 "value 0x%"PRIx64,
>> +                      addr, size, addr & ~0x07, val);
>> +        break;
>> +
>> +    default:
>> +        IOMMU_DPRINTF(MMIO, "UNHANDLED MMIO read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                       addr & ~0x07);
>> +    }
>> +    return val;
>> +}
>> +
>> +static void iommu_handle_control_write(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +    /*
>> +     * read whatever is already written in case
>> +     * software is writing in chunks of less than 8 bytes
>> +     */
>> +    unsigned long control = amd_iommu_readq(s, IOMMU_MMIO_CONTROL);
>> +    s->enabled = !!(control & IOMMU_MMIO_CONTROL_IOMMUEN);
>> +
>> +    s->ats_enabled = !!(control & IOMMU_MMIO_CONTROL_HTTUNEN);
>> +    s->evtlog_enabled = s->enabled && !!(control &
>> +                        IOMMU_MMIO_CONTROL_EVENTLOGEN);
>> +
>> +    s->evtlog_intr = !!(control & IOMMU_MMIO_CONTROL_EVENTINTEN);
>> +    s->completion_wait_intr = !!(control &
>> IOMMU_MMIO_CONTROL_COMWAITINTEN);
>> +    s->cmdbuf_enabled = s->enabled && !!(control &
>> +                        IOMMU_MMIO_CONTROL_CMDBUFLEN);
>> +
>> +    /* update the flags depending on the control register */
>> +    if (s->cmdbuf_enabled) {
>> +        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) |=
>> +            IOMMU_MMIO_STATUS_CMDBUF_RUN;
>> +    } else {
>> +        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) &=
>> +            ~IOMMU_MMIO_STATUS_CMDBUF_RUN;
>> +    }
>> +    if (s->evtlog_enabled) {
>> +        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) |=
>> +            IOMMU_MMIO_STATUS_EVT_RUN;
>> +    } else {
>> +        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) &=
>> +            ~IOMMU_MMIO_STATUS_EVT_RUN;
>> +    }
>> +
>> +    IOMMU_DPRINTF(COMMAND, "MMIO_STATUS state 0x%"PRIx64, control);
>> +
>> +    amd_iommu_cmdbuf_run(s);
>> +}
>> +
>> +static inline void iommu_handle_devtab_write(AMDIOMMUState *s)
>> +
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_DEVICE_TABLE);
>> +    s->devtab = (dma_addr_t)(val & IOMMU_MMIO_DEVTAB_BASE_MASK);
>> +
>> +    /* set device table length */
>> +    s->devtab_len = ((val & IOMMU_MMIO_DEVTAB_SIZE_MASK) + 1 *
>> +                    (IOMMU_MMIO_DEVTAB_SIZE_UNIT /
>> +                     IOMMU_MMIO_DEVTAB_ENTRY_SIZE));
>> +}
>> +
>> +static inline void iommu_handle_cmdhead_write(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    s->cmdbuf_head = (dma_addr_t)amd_iommu_readq(s,
>> IOMMU_MMIO_COMMAND_HEAD)
>> +                     & IOMMU_MMIO_CMDBUF_HEAD_MASK;
>> +    amd_iommu_cmdbuf_run(s);
>> +}
>> +
>> +static inline void iommu_handle_cmdbase_write(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    s->cmdbuf = (dma_addr_t)amd_iommu_readq(s, IOMMU_MMIO_COMMAND_BASE)
>> +                & IOMMU_MMIO_CMDBUF_BASE_MASK;
>> +    s->cmdbuf_len = 1UL << (s->mmior[IOMMU_MMIO_CMDBUF_SIZE_BYTE]
>> +                    & IOMMU_MMIO_CMDBUF_SIZE_MASK);
>> +    s->cmdbuf_head = s->cmdbuf_tail = 0;
>> +
>> +}
>> +
>> +static inline void iommu_handle_cmdtail_write(AMDIOMMUState *s)
>> +{
>> +    s->cmdbuf_tail = amd_iommu_readq(s, IOMMU_MMIO_COMMAND_TAIL)
>> +                     & IOMMU_MMIO_CMDBUF_TAIL_MASK;
>> +    amd_iommu_cmdbuf_run(s);
>> +}
>> +
>> +static inline void iommu_handle_excllim_write(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EXCL_LIMIT);
>> +    s->excl_limit = (val & IOMMU_MMIO_EXCL_LIMIT_MASK) |
>> +                    IOMMU_MMIO_EXCL_LIMIT_LOW;
>> +}
>> +
>> +static inline void iommu_handle_evtbase_write(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EVENT_BASE);
>> +    s->evtlog = val & IOMMU_MMIO_EVTLOG_BASE_MASK;
>> +    s->evtlog_len = 1UL << (*(uint64_t
>> *)&s->mmior[IOMMU_MMIO_EVTLOG_SIZE_BYTE]
>> +                    & IOMMU_MMIO_EVTLOG_SIZE_MASK);
>> +}
>> +
>> +static inline void iommu_handle_evttail_write(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EVENT_TAIL);
>> +    s->evtlog_tail = val & IOMMU_MMIO_EVTLOG_TAIL_MASK;
>> +}
>> +
>> +static inline void iommu_handle_evthead_write(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EVENT_HEAD);
>> +    s->evtlog_head = val & IOMMU_MMIO_EVTLOG_HEAD_MASK;
>> +}
>> +
>> +static inline void iommu_handle_pprbase_write(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_PPR_BASE);
>> +    s->ppr_log = val & IOMMU_MMIO_PPRLOG_BASE_MASK;
>> +    s->pprlog_len = 1UL << (*(uint64_t
>> *)&s->mmior[IOMMU_MMIO_PPRLOG_SIZE_BYTE]
>> +                    & IOMMU_MMIO_PPRLOG_SIZE_MASK);
>> +}
>> +
>> +static inline void iommu_handle_pprhead_write(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_PPR_HEAD);
>> +    s->pprlog_head = val & IOMMU_MMIO_PPRLOG_HEAD_MASK;
>> +}
>> +
>> +static inline void iommu_handle_pprtail_write(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_PPR_TAIL);
>> +    s->pprlog_tail = val & IOMMU_MMIO_PPRLOG_TAIL_MASK;
>> +}
>> +
>> +/* FIXME: something might go wrong if System Software writes in chunks
>> + * of one byte but linux writes in chunks of 4 bytes so currently it
>> + * works correctly with linux but will definitely be busted if software
>> + * reads/writes 8 bytes
>> + */
>> +static void amd_iommu_mmio_write(void *opaque, hwaddr addr, uint64_t val,
>> +                                 unsigned size)
>> +{
>> +
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    AMDIOMMUState *s = opaque;
>> +    unsigned long offset = addr & 0x07;
>> +
>> +    if (addr + size > IOMMU_MMIO_SIZE) {
>> +        IOMMU_DPRINTF(MMIO, "error: addr outside region: max 0x%"PRIx64
>> +                      ", got 0x%"PRIx64 " %d", (uint64_t)IOMMU_MMIO_SIZE,
>> addr,
>> +                      size);
>> +        return;
>> +    }
>> +
>> +    switch (addr & ~0x07) {
>> +    case IOMMU_MMIO_CONTROL:
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr,  val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +
>> +        IOMMU_DPRINTF(COMMAND, "MMIO_CONTROL write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +        iommu_handle_control_write(s);
>> +        break;
>> +
>> +    case IOMMU_MMIO_DEVICE_TABLE:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_DEVICE_TABLE write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr, val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +
>> +       /*  set device table address
>> +        *   This also suffers from inability to tell whether software
>> +        *   is done writing
>> +        */
>> +
>> +        if (offset || (size == 8)) {
>> +            iommu_handle_devtab_write(s);
>> +        }
>> +        break;
>> +
>> +    case IOMMU_MMIO_COMMAND_HEAD:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_HEAD write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr, val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +
>> +        iommu_handle_cmdhead_write(s);
>> +        break;
>> +
>> +    case IOMMU_MMIO_COMMAND_BASE:
>> +        IOMMU_DPRINTF(COMMAND, "MMIO_COMMAND_BASE write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr, val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +
>> +        /* FIXME - make sure System Software has finished writing, in case
>> +         * it writes in chunks of less than 8 bytes, in a robust way. As for
>> +         * now, this hack works for the Linux driver
>> +         */
>> +        if (offset || (size == 8)) {
>> +            iommu_handle_cmdbase_write(s);
>> +        }
>> +        break;
>> +
>> +    case IOMMU_MMIO_COMMAND_TAIL:
>> +        IOMMU_DPRINTF(COMMAND, "MMIO_COMMAND_TAIL write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr, val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +        iommu_handle_cmdtail_write(s);
>> +        break;
>> +
>> +    case IOMMU_MMIO_EVENT_BASE:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_BASE write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr, val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +        iommu_handle_evtbase_write(s);
>> +        break;
>> +
>> +    case IOMMU_MMIO_EVENT_HEAD:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_HEAD write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr, val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +        iommu_handle_evthead_write(s);
>> +        break;
>> +
>> +    case IOMMU_MMIO_EVENT_TAIL:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_TAIL write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr, val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +        iommu_handle_evttail_write(s);
>> +        break;
>> +
>> +    case IOMMU_MMIO_EXCL_LIMIT:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_EXCL_LIMIT write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr, val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +        iommu_handle_excllim_write(s);
>> +        break;
>> +
>> +        /* PPR log base - unused for now */
>> +    case IOMMU_MMIO_PPR_BASE:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_PPR_BASE write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr, val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +        iommu_handle_pprbase_write(s);
>> +        break;
>> +        /* PPR log head - also unused for now */
>> +    case IOMMU_MMIO_PPR_HEAD:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_PPR_HEAD write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                       addr, size, val, offset);
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr, val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +        iommu_handle_pprhead_write(s);
>> +        break;
>> +        /* PPR log tail - unused for now */
>> +    case IOMMU_MMIO_PPR_TAIL:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_PPR_TAIL write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr, val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +        iommu_handle_pprtail_write(s);
>> +        break;
>> +
>> +        /* ignore write to ext_features */
>> +    default:
>> +        IOMMU_DPRINTF(MMIO, "UNHANDLED MMIO write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +    }
>> +
>> +}
>> +
>> +static inline uint64_t amd_iommu_get_perms(uint64_t entry)
>> +{
>> +    return (entry & (IOMMU_DEV_PERM_READ | IOMMU_DEV_PERM_WRITE)) >>
>> +           IOMMU_DEV_PERM_SHIFT;
>> +}
>> +
>> +AddressSpace *bridge_host_amd_iommu(PCIBus *bus, void *opaque, int devfn)
>> +{
>> +    AMDIOMMUState *s = opaque;
>> +    AMDIOMMUAddressSpace **iommu_as;
>> +    int bus_num = pci_bus_num(bus);
>> +
>> +    /* just in case */
>
>
> This comment troubles me, do we need the assert?
>
>> +    assert(0 <= bus_num && bus_num <= PCI_BUS_MAX);
>
>
> bus_num < PCI_BUS_MAX, right ?
>
>> +    assert(0 <= devfn && devfn <= PCI_DEVFN_MAX);
>
>
> same with devfn I suppose.
>
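Presumably both checks are meant to be exclusive upper bounds, i.e. something
like:

    assert(bus_num < PCI_BUS_MAX);
    assert(devfn >= 0 && devfn < PCI_DEVFN_MAX);
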
>> +
>> +    iommu_as = s->address_spaces[bus_num];
>> +
>> +    /* allocate memory during the first run */
>> +    if (!iommu_as) {
>
>
> Why lazy init? We can do that at AMDIOMMUState init, right?
>
>> +        iommu_as = g_malloc0(sizeof(AMDIOMMUAddressSpace *) *
>> PCI_DEVFN_MAX);
>> +        s->address_spaces[bus_num] = iommu_as;
>> +    }
>> +
>> +    /* set up IOMMU region */
>> +    if (!iommu_as[devfn]) {
>> +        iommu_as[devfn] = g_malloc0(sizeof(AMDIOMMUAddressSpace));
>
>
> same here
>
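
A sketch of what moving the allocation to init time could look like, assuming
a flat PCI_BUS_MAX x PCI_DEVFN_MAX table is acceptable (the helper name is
invented here, not part of this patch):

    /* hypothetical helper, called once from realize/init */
    static void amd_iommu_alloc_address_spaces(AMDIOMMUState *s)
    {
        int bus;

        for (bus = 0; bus < PCI_BUS_MAX; bus++) {
            s->address_spaces[bus] =
                g_malloc0(sizeof(AMDIOMMUAddressSpace *) * PCI_DEVFN_MAX);
        }
    }

bridge_host_amd_iommu() would then only have to set up the per-devfn region.
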
>> +        iommu_as[devfn]->bus_num = (uint8_t)bus_num;
>> +        iommu_as[devfn]->devfn = (uint8_t)devfn;
>> +        iommu_as[devfn]->iommu_state = s;
>> +
>> +        memory_region_init_iommu(&iommu_as[devfn]->iommu, OBJECT(s),
>> +                                 &s->iommu_ops, "amd-iommu", UINT64_MAX);
>> +        address_space_init(&iommu_as[devfn]->as, &iommu_as[devfn]->iommu,
>> +                           "amd-iommu");
>> +    }
>> +    return &iommu_as[devfn]->as;
>> +}
>> +
>> +/* validate a page table entry */
>> +static bool amd_iommu_validate_dte(AMDIOMMUState *s, uint16_t devid,
>> +                                   uint64_t *dte)
>> +{
>> +    if ((dte[0] & IOMMU_DTE_LOWER_QUAD_RESERVED)
>> +        || (dte[1] & IOMMU_DTE_MIDDLE_QUAD_RESERVED)
>> +        || (dte[2] & IOMMU_DTE_UPPER_QUAD_RESERVED) || dte[3]) {
>> +        amd_iommu_log_illegaldevtab_error(s, devid,
>> +                                s->devtab + devid *
>> IOMMU_DEVTAB_ENTRY_SIZE, 0);
>> +        return false;
>> +    }
>> +
>> +    return dte[0] & IOMMU_DEV_VALID && (dte[0] &
>> IOMMU_DEV_TRANSLATION_VALID)
>> +           && (dte[0] & IOMMU_DEV_PT_ROOT_MASK);
>> +}
>> +
>> +/* get a device table entry given the devid */
>> +static bool amd_iommu_get_dte(AMDIOMMUState *s, int devid, uint64_t
>> *entry)
>> +{
>> +    uint32_t offset = devid * IOMMU_DEVTAB_ENTRY_SIZE;
>> +
>> +    IOMMU_DPRINTF(MMU, "Device Table at 0x%"PRIx64, s->devtab);
>> +
>> +    if (dma_memory_read(&address_space_memory, s->devtab + offset, entry,
>> +                        IOMMU_DEVTAB_ENTRY_SIZE)) {
>> +        IOMMU_DPRINTF(MMU, "error: fail to access Device Entry devtab
>> 0x%"PRIx64
>> +                      "offset 0x%"PRIx32, s->devtab, offset);
>> +        /* log ever accessing dte */
>> +        amd_iommu_log_devtab_error(s, devid, s->devtab + offset, 0);
>> +        return false;
>> +    }
>> +
>> +    if (!amd_iommu_validate_dte(s, devid, entry)) {
>> +        IOMMU_DPRINTF(MMU,
>> +                      "Pte entry at 0x%"PRIx64" is invalid", entry[0]);
>> +        return false;
>> +    }
>> +
>> +    return true;
>> +}
>> +
>> +/* get pte translation mode */
>> +static inline uint8_t get_pte_translation_mode(uint64_t pte)
>> +{
>> +    return (pte >> IOMMU_DEV_MODE_RSHIFT) & IOMMU_DEV_MODE_MASK;
>> +}
>> +
>> +static int amd_iommu_page_walk(AMDIOMMUAddressSpace *as, uint64_t *dte,
>> +                               IOMMUTLBEntry *ret, unsigned perms,
>> +                               hwaddr addr)
>> +{
>> +    uint8_t level, oldlevel;
>> +    unsigned present;
>> +    uint64_t pte, pte_addr;
>> +    uint64_t pte_perms;
>> +    pte = dte[0];
>> +
>> +    level = get_pte_translation_mode(pte);
>> +
>> +    if (level >= 7 || level == 0) {
>> +        IOMMU_DPRINTF(MMU, "error: translation level 0x%"PRIu8 "
>> detected"
>> +                      "while translating 0x%"PRIx64, level, addr);
>> +        return -1;
>> +    }
>> +
>> +    while (level > 0) {
>> +        pte_perms = amd_iommu_get_perms(pte);
>> +        present = pte & 1;
>> +        if (!present || perms != (perms & pte_perms)) {
>> +            amd_iommu_page_fault(as->iommu_state, as->devfn, addr,
>> perms);
>> +            IOMMU_DPRINTF(MMU, "error: page fault accessing virtual addr
>> 0x%"
>> +                          PRIx64, addr);
>> +            return -1;
>> +        }
>> +
>> +        /* go to the next lower level */
>> +        pte_addr = pte & IOMMU_DEV_PT_ROOT_MASK;
>> +        /* add offset and load pte */
>> +        pte_addr += ((addr >> (3 + 9 * level)) & 0x1FF) << 3;
>> +        pte = ldq_phys(&address_space_memory, pte_addr);
>> +        oldlevel = level;
>> +        level = get_pte_translation_mode(pte);
>> +
>> +        /* PT is corrupted or not there */
>> +        if (level != oldlevel - 1) {
>> +            return -1;
>> +        }
>> +    }
>> +
>> +    ret->iova = addr & IOMMU_PAGE_MASK_4K;
>> +    ret->translated_addr = (pte & IOMMU_DEV_PT_ROOT_MASK) &
>> IOMMU_PAGE_MASK_4K;
>> +    ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
>> +    ret->perm = IOMMU_RW;
>> +    return 0;
>> +}
>> +
>> +/* TODO : Mark addresses as Accessed and Dirty */
>
>
> If you don't mark addresses as dirty, can't this cause the sporadic errors
> of arbitrary programs Jan talked about?

I don't think this is the issue; I seem to be receiving wrong 'host
physical addresses' in the last few kernel versions. This issue is not
there in older kernels.

>
>> +static void amd_iommu_do_translate(AMDIOMMUAddressSpace *as, hwaddr addr,
>> +                                   bool is_write, IOMMUTLBEntry *ret)
>> +{
>> +    AMDIOMMUState *s = as->iommu_state;
>> +    uint16_t devid = PCI_DEVID(as->bus_num, as->devfn);
>> +    IOMMUIOTLBEntry *iotlb_entry;
>> +    uint8_t err;
>> +    uint64_t entry[4];
>> +
>> +    /* try getting a cache entry first */
>> +    iotlb_entry = amd_iommu_iotlb_lookup(s, addr, as->devfn);
>> +
>> +    if (iotlb_entry) {
>> +        IOMMU_DPRINTF(CACHE, "hit  iotlb devid: %02x:%02x.%x gpa
>> 0x%"PRIx64
>> +                      " hpa 0x%"PRIx64, PCI_BUS_NUM(devid),
>> PCI_SLOT(devid),
>> +                      PCI_FUNC(devid), addr,
>> iotlb_entry->translated_addr);
>> +        ret->iova = addr & IOMMU_PAGE_MASK_4K;
>> +        ret->translated_addr = iotlb_entry->translated_addr;
>> +        ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
>> +        ret->perm = iotlb_entry->perms;
>> +        return;
>> +    } else {
>
>
> you return from the if clause so you don't need the else
>
>> +        if (!amd_iommu_get_dte(s, devid, entry)) {
>
>
> Is it not an error if you did not find the device id?
>
>> +            goto out;
>> +        }
>> +
>> +        err = amd_iommu_page_walk(as, entry, ret,
>> +                                  is_write ? IOMMU_PERM_WRITE :
>> IOMMU_PERM_READ,
>> +                                  addr);
>> +        if (err) {
>> +            IOMMU_DPRINTF(MMU, "error: hardware error accessing page
>> tables"
>> +                          " while translating addr 0x%"PRIx64, addr);
>> +            amd_iommu_log_pagetab_error(s, as->devfn, addr, 0);
>> +            goto out;
>> +        }
>> +
>> +        amd_iommu_update_iotlb(s, as->devfn, addr, ret->translated_addr,
>> +                               ret->perm, entry[1] &
>> IOMMU_DEV_DOMID_ID_MASK);
>> +        return;
>> +    }
>> +
>> +out:
>> +    ret->iova = addr;
>> +    ret->translated_addr = addr & IOMMU_PAGE_MASK_4K;
>> +    ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
>> +    ret->perm = IOMMU_RW;
>> +    return;
>
>
> you don't need the above return
>
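
Taken together with the earlier note about the else branch, the body would
flatten to roughly this shape (sketch only, same logic as in the patch,
cache fill elided):

    if (iotlb_entry) {
        /* fill *ret from the cached entry, as above */
        return;
    }
    if (!amd_iommu_get_dte(s, devid, entry)) {
        goto out;
    }
    /* page walk and amd_iommu_update_iotlb() as in the patch */
    return;
out:
    ret->iova = addr;
    ret->translated_addr = addr & IOMMU_PAGE_MASK_4K;
    ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
    ret->perm = IOMMU_RW;
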
>> +}
>> +
>> +static IOMMUTLBEntry amd_iommu_translate(MemoryRegion *iommu, hwaddr
>> addr,
>> +                                         bool is_write)
>> +{
>> +    IOMMU_DPRINTF(GENERAL, "");
>> +
>> +    AMDIOMMUAddressSpace *as = container_of(iommu, AMDIOMMUAddressSpace,
>> iommu);
>> +    AMDIOMMUState *s = as->iommu_state;
>> +
>> +    IOMMUTLBEntry ret = {
>> +        .target_as = &address_space_memory,
>> +        .iova = addr,
>> +        .translated_addr = 0,
>> +        .addr_mask = ~(hwaddr)0,
>> +        .perm = IOMMU_NONE,
>> +    };
>> +
>> +    if (!s->enabled) {
>> +        /* IOMMU disabled - corresponds to iommu=off not
>> +         * failure to provide any parameter
>> +         */
>> +        ret.iova = addr & IOMMU_PAGE_MASK_4K;
>> +        ret.translated_addr = addr & IOMMU_PAGE_MASK_4K;
>> +        ret.addr_mask = ~IOMMU_PAGE_MASK_4K;
>> +        ret.perm = IOMMU_RW;
>> +        return ret;
>> +    }
>> +
>> +    amd_iommu_do_translate(as, addr, is_write, &ret);
>> +    IOMMU_DPRINTF(MMU, "devid: %02x:%02x.%x gpa 0x%"PRIx64 " hpa
>> 0x%"PRIx64,
>> +                  as->bus_num, PCI_SLOT(as->devfn), PCI_FUNC(as->devfn),
>> addr,
>> +                  ret.translated_addr);
>> +
>> +    return ret;
>> +}
>> +
>> +static const MemoryRegionOps mmio_mem_ops = {
>> +    .read = amd_iommu_mmio_read,
>> +    .write = amd_iommu_mmio_write,
>> +    .endianness = DEVICE_LITTLE_ENDIAN,
>> +    .impl = {
>> +        .min_access_size = 1,
>> +        .max_access_size = 8,
>> +        .unaligned = false,
>> +    },
>> +    .valid = {
>> +        .min_access_size = 1,
>> +        .max_access_size = 8,
>> +    }
>> +};
>> +
>> +static void amd_iommu_init(AMDIOMMUState *s)
>> +{
>> +    printf("amd_iommu_init");
>
>
> you should use the debug macro here
>
>> +
>> +    amd_iommu_iotlb_reset(s);
>> +
>> +    s->iommu_ops.translate = amd_iommu_translate;
>> +
>> +    s->devtab_len = 0;
>> +    s->cmdbuf_len = 0;
>> +    s->cmdbuf_head = 0;
>> +    s->cmdbuf_tail = 0;
>> +    s->evtlog_head = 0;
>> +    s->evtlog_tail = 0;
>> +    s->excl_enabled = false;
>> +    s->excl_allow = false;
>> +    s->mmio_enabled = false;
>> +    s->enabled = false;
>> +    s->ats_enabled = false;
>> +    s->cmdbuf_enabled = false;
>> +
>> +    /* reset MMIO */
>> +    memset(s->mmior, 0, IOMMU_MMIO_SIZE);
>> +    amd_iommu_set_quad(s, IOMMU_MMIO_EXT_FEATURES, IOMMU_EXT_FEATURES,
>> +            0xffffffffffffffef, 0);
>> +    amd_iommu_set_quad(s, IOMMU_MMIO_STATUS, 0, 0x98, 0x67);
>> +    /* reset device ident */
>> +    pci_config_set_vendor_id(s->dev.config, PCI_VENDOR_ID_AMD);
>> +    pci_config_set_device_id(s->dev.config, PCI_DEVICE_ID_RD890_IOMMU);
>> +    pci_config_set_prog_interface(s->dev.config, 00);
>> +    pci_config_set_class(s->dev.config, 0x0806);
>> +
>> +    /* reset IOMMU specific capabilities  */
>> +    pci_set_long(s->dev.config + s->capab_offset, IOMMU_CAPAB_FEATURES);
>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_BAR_LOW,
>> +                 s->mmio.addr & ~(0xffff0000));
>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_BAR_HIGH,
>> +                (s->mmio.addr & ~(0xffff)) >> 16);
>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_RANGE,
>> +                 0xff000000);
>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_MISC, 0);
>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_MISC,
>> +            IOMMU_MAX_PH_ADDR | IOMMU_MAX_GVA_ADDR | IOMMU_MAX_VA_ADDR);
>
>
> Are all the capabilities read-write? Otherwise you need to set the wmask
> to indicate what fields are writable.
>
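
For reference, making (say) the low BAR dword read-only from the guest
would be along these lines - the register chosen here is only illustrative:

    /* clear the writable-bits mask for that dword of config space */
    pci_set_long(s->dev.wmask + s->capab_offset + IOMMU_CAPAB_BAR_LOW, 0);
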
>> +}
>> +
>> +static void amd_iommu_reset(DeviceState *dev)
>> +{
>> +    AMDIOMMUState *s = AMD_IOMMU_DEVICE(dev);
>> +
>> +    amd_iommu_init(s);
>> +}
>> +
>> +static void amd_iommu_realize(PCIDevice *dev, Error **error)
>> +{
>> +    AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);
>> +
>> +    s->iotlb = g_hash_table_new_full(amd_iommu_uint64_hash,
>> +                                     amd_iommu_uint64_equal, g_free,
>> g_free);
>> +
>> +    s->capab_offset = pci_add_capability(dev, IOMMU_CAPAB_ID_SEC, 0,
>> +                                         IOMMU_CAPAB_SIZE);
>> +
>> +    /* add msi and hypertransport capabilities */
>> +    pci_add_capability(&s->dev, PCI_CAP_ID_MSI, 0, IOMMU_CAPAB_REG_SIZE);
>> +    pci_add_capability(&s->dev, PCI_CAP_ID_HT, 0, IOMMU_CAPAB_REG_SIZE);
>> +
>> +    amd_iommu_init(s);
>> +
>> +    /* set up MMIO */
>> +    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "mmio",
>> +                          IOMMU_MMIO_SIZE);
>> +
>> +    if (s->mmio.addr == IOMMU_BASE_ADDR) {
>
>
> I don't understand why this check is needed here. realize is called only
> once in the init process and you set it a few lines below.
>
>> +        return;
>> +    }
>> +
>> +    s->mmio.addr = IOMMU_BASE_ADDR;
>> +    memory_region_add_subregion(get_system_memory(), IOMMU_BASE_ADDR,
>> &s->mmio);
>> +}
>> +
>> +static const VMStateDescription vmstate_amd_iommu = {
>> +    .name = "amd-iommu",
>> +    .fields  = (VMStateField[]) {
>> +        VMSTATE_PCI_DEVICE(dev, AMDIOMMUState),
>> +        VMSTATE_END_OF_LIST()
>> +    }
>> +};
>> +
>> +static Property amd_iommu_properties[] = {
>> +    DEFINE_PROP_UINT32("version", AMDIOMMUState, version, 2),
>> +    DEFINE_PROP_END_OF_LIST(),
>> +};
>> +
>> +static void amd_iommu_uninit(PCIDevice *dev)
>> +{
>> +    AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);
>> +    amd_iommu_iotlb_reset(s);
>
>
> at this point you also need to clean up the memory regions you use.
>
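
A rough sketch of the extra cleanup being asked for (the exact ordering
would need checking against the memory API rules):

    static void amd_iommu_uninit(PCIDevice *dev)
    {
        AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);

        amd_iommu_iotlb_reset(s);
        g_hash_table_unref(s->iotlb);
        memory_region_del_subregion(get_system_memory(), &s->mmio);
    }
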
>> +}
>> +
>> +static void amd_iommu_class_init(ObjectClass *klass, void* data)
>> +{
>> +    DeviceClass *dc = DEVICE_CLASS(klass);
>> +    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
>> +
>> +    k->realize = amd_iommu_realize;
>> +    k->exit = amd_iommu_uninit;
>> +
>> +    dc->reset = amd_iommu_reset;
>> +    dc->vmsd = &vmstate_amd_iommu;
>> +    dc->props = amd_iommu_properties;
>> +}
>> +
>> +static const TypeInfo amd_iommu = {
>> +    .name = TYPE_AMD_IOMMU_DEVICE,
>> +    .parent = TYPE_PCI_DEVICE,
>> +    .instance_size = sizeof(AMDIOMMUState),
>> +    .class_init = amd_iommu_class_init
>> +};
>> +
>> +static void amd_iommu_register_types(void)
>> +{
>> +    type_register_static(&amd_iommu);
>> +}
>> +
>> +type_init(amd_iommu_register_types);
>> diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h
>> new file mode 100644
>> index 0000000..7d317e1
>> --- /dev/null
>> +++ b/hw/i386/amd_iommu.h
>> @@ -0,0 +1,395 @@
>> +/*
>> + * QEMU emulation of an AMD IOMMU (AMD-Vi)
>> + *
>> + * Copyright (C) 2011 Eduard - Gabriel Munteanu
>> + * Copyright (C) 2015 David Kiarie, <davidkiarie4@gmail.com>
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License as published by
>> + * the Free Software Foundation; either version 2 of the License, or
>> + * (at your option) any later version.
>> +
>> + * This program is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
>> + * GNU General Public License for more details.
>> +
>> + * You should have received a copy of the GNU General Public License
>> along
>> + * with this program; if not, see <http://www.gnu.org/licenses/>.
>> + */
>> +
>> +#ifndef AMD_IOMMU_H_
>> +#define AMD_IOMMU_H_
>> +
>> +#include "hw/hw.h"
>> +#include "hw/pci/pci.h"
>> +#include "hw/pci/msi.h"
>> +#include "hw/sysbus.h"
>> +#include "sysemu/dma.h"
>> +
>> +/* Capability registers */
>> +#define IOMMU_CAPAB_HEADER            0x00
>> +#define   IOMMU_CAPAB_REV_TYPE        0x02
>> +#define   IOMMU_CAPAB_FLAGS           0x03
>> +#define IOMMU_CAPAB_BAR_LOW           0x04
>> +#define IOMMU_CAPAB_BAR_HIGH          0x08
>> +#define IOMMU_CAPAB_RANGE             0x0C
>> +#define IOMMU_CAPAB_MISC              0x10
>> +#define IOMMU_CAPAB_MISC1             0x14
>> +
>> +#define IOMMU_CAPAB_SIZE              0x18
>> +#define IOMMU_CAPAB_REG_SIZE          0x04
>> +
>> +/* Capability header data */
>> +#define IOMMU_CAPAB_ID_SEC            0xf
>> +#define IOMMU_CAPAB_FLAT_EXT          (1 << 28)
>> +#define IOMMU_CAPAB_EFR_SUP           (1 << 27)
>> +#define IOMMU_CAPAB_FLAG_NPCACHE      (1 << 26)
>> +#define IOMMU_CAPAB_FLAG_HTTUNNEL     (1 << 25)
>> +#define IOMMU_CAPAB_FLAG_IOTLBSUP     (1 << 24)
>> +#define IOMMU_CAPAB_INIT_REV          (1 << 19)
>> +#define IOMMU_CAPAB_INIT_TYPE         (3 << 16)
>> +#define IOMMU_CAPAB_INIT_REV_TYPE     (IOMMU_CAPAB_REV |
>> IOMMU_CAPAB_TYPE)
>> +#define IOMMU_CAPAB_INIT_FLAGS        (IOMMU_CAPAB_FLAG_NPCACHE | \
>> +                                       IOMMU_CAPAB_FLAG_HTTUNNEL)
>> +#define IOMMU_CAPAB_INIT_MISC         ((64 << 15) | (48 << 8))
>> +#define IOMMU_CAPAB_BAR_MASK          (~((1UL << 14) - 1))
>> +
>> +/* MMIO registers */
>> +#define IOMMU_MMIO_DEVICE_TABLE       0x0000
>> +#define IOMMU_MMIO_COMMAND_BASE       0x0008
>> +#define IOMMU_MMIO_EVENT_BASE         0x0010
>> +#define IOMMU_MMIO_CONTROL            0x0018
>> +#define IOMMU_MMIO_EXCL_BASE          0x0020
>> +#define IOMMU_MMIO_EXCL_LIMIT         0x0028
>> +#define IOMMU_MMIO_EXT_FEATURES       0x0030
>> +#define IOMMU_MMIO_COMMAND_HEAD       0x2000
>> +#define IOMMU_MMIO_COMMAND_TAIL       0x2008
>> +#define IOMMU_MMIO_EVENT_HEAD         0x2010
>> +#define IOMMU_MMIO_EVENT_TAIL         0x2018
>> +#define IOMMU_MMIO_STATUS             0x2020
>> +#define IOMMU_MMIO_PPR_BASE           0x0038
>> +#define IOMMU_MMIO_PPR_HEAD           0x2030
>> +#define IOMMU_MMIO_PPR_TAIL           0x2038
>> +
>> +#define IOMMU_MMIO_SIZE               0x4000
>> +
>> +#define IOMMU_MMIO_DEVTAB_SIZE_MASK   ((1ULL << 12) - 1)
>> +#define IOMMU_MMIO_DEVTAB_BASE_MASK   (((1ULL << 52) - 1) & ~ \
>> +                                       IOMMU_MMIO_DEVTAB_SIZE_MASK)
>> +#define IOMMU_MMIO_DEVTAB_ENTRY_SIZE  32
>> +#define IOMMU_MMIO_DEVTAB_SIZE_UNIT   4096
>> +
>> +/* some of this are similar but just for readability */
>> +#define IOMMU_MMIO_CMDBUF_SIZE_BYTE       (IOMMU_MMIO_COMMAND_BASE + 7)
>> +#define IOMMU_MMIO_CMDBUF_SIZE_MASK       0x0F
>> +#define IOMMU_MMIO_CMDBUF_BASE_MASK       IOMMU_MMIO_DEVTAB_BASE_MASK
>> +#define IOMMU_MMIO_CMDBUF_DEFAULT_SIZE    8
>> +#define IOMMU_MMIO_CMDBUF_HEAD_MASK       (((1ULL << 19) - 1) & ~0x0F)
>> +#define IOMMU_MMIO_CMDBUF_TAIL_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
>> +
>> +#define IOMMU_MMIO_EVTLOG_SIZE_BYTE       (IOMMU_MMIO_EVENT_BASE + 7)
>> +#define IOMMU_MMIO_EVTLOG_SIZE_MASK       IOMMU_MMIO_CMDBUF_SIZE_MASK
>> +#define IOMMU_MMIO_EVTLOG_BASE_MASK       IOMMU_MMIO_CMDBUF_BASE_MASK
>> +#define IOMMU_MMIO_EVTLOG_DEFAULT_SIZE    IOMMU_MMIO_CMDBUF_DEFAULT_SIZE
>> +#define IOMMU_MMIO_EVTLOG_HEAD_MASK       (((1ULL << 19) - 1) & ~0x0F)
>> +#define IOMMU_MMIO_EVTLOG_TAIL_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
>> +
>> +#define IOMMU_MMIO_PPRLOG_SIZE_BYTE       (IOMMU_MMIO_EVENT_BASE + 7)
>> +#define IOMMU_MMIO_PPRLOG_HEAD_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
>> +#define IOMMU_MMIO_PPRLOG_TAIL_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
>> +#define IOMMU_MMIO_PPRLOG_BASE_MASK       IOMMU_MMIO_EVTLOG_BASE_MASK
>> +#define IOMMU_MMIO_PPRLOG_SIZE_MASK       IOMMU_MMIO_EVTLOG_SIZE_MASK
>> +
>> +#define IOMMU_MMIO_EXCL_BASE_MASK         IOMMU_MMIO_DEVTAB_BASE_MASK
>> +#define IOMMU_MMIO_EXCL_ENABLED_MASK      (1ULL << 0)
>> +#define IOMMU_MMIO_EXCL_ALLOW_MASK        (1ULL << 1)
>> +#define IOMMU_MMIO_EXCL_LIMIT_MASK        IOMMU_MMIO_DEVTAB_BASE_MASK
>> +#define IOMMU_MMIO_EXCL_LIMIT_LOW         0xFFF
>> +
>> +/* mmio control register flags */
>> +#define IOMMU_MMIO_CONTROL_IOMMUEN        (1ULL << 0)
>> +#define IOMMU_MMIO_CONTROL_HTTUNEN        (1ULL << 1)
>> +#define IOMMU_MMIO_CONTROL_EVENTLOGEN     (1ULL << 2)
>> +#define IOMMU_MMIO_CONTROL_EVENTINTEN     (1ULL << 3)
>> +#define IOMMU_MMIO_CONTROL_COMWAITINTEN   (1ULL << 4)
>> +#define IOMMU_MMIO_CONTROL_PASSPW         (1ULL << 7)
>> +#define IOMMU_MMIO_CONTROL_REPASSPW       (1ULL << 9)
>> +#define IOMMU_MMIO_CONTROL_COHERENT       (1ULL << 10)
>> +#define IOMMU_MMIO_CONTROL_ISOC           (1ULL << 11)
>> +#define IOMMU_MMIO_CONTROL_CMDBUFLEN      (1ULL << 12)
>> +#define IOMMU_MMIO_CONTROL_PPRLOGEN       (1ULL << 13)
>> +#define IOMMU_MMIO_CONTROL_PPRINTEN       (1ULL << 14)
>> +#define IOMMU_MMIO_CONTROL_PPREN          (1ULL << 15)
>> +#define IOMMU_MMIO_CONTROL_GAEN           (1ULL << 16)
>> +#define IOMMU_MMIO_CONTROL_GTEN           (1ULL << 17)
>> +
>> +/* MMIO status register bits */
>> +#define IOMMU_MMIO_STATUS_PPR_OVFE    (1 << 18)
>> +#define IOMMU_MMIO_STATUS_PPR_OVFEB   (1 << 17)
>> +#define IOMMU_MMIO_STATUS_EVT_ACTIVE  (1 << 16)
>> +#define IOMMU_MMIO_STATUS_EVT_OVFB    (1 << 15)
>> +#define IOMMU_MMIO_STATUS_PPR_ACTIVE  (1 << 12)
>> +#define IOMMU_MMIO_STATUS_PPR_OVFB    (1 << 11)
>> +#define IOMMU_MMIO_STATUS_GA_INT      (1 << 10)
>> +#define IOMMU_MMIO_STATUS_GA_RUN      (1 << 9)
>> +#define IOMMU_MMIO_STATUS_GA_OVF      (1 << 8)
>> +#define IOMMU_MMIO_STATUS_PPR_RUN     (1 << 7)
>> +#define IOMMU_MMIO_STATUS_PPR_INT     (1 << 6)
>> +#define IOMMU_MMIO_STATUS_PPR_OVF     (1 << 5)
>> +#define IOMMU_MMIO_STATUS_CMDBUF_RUN  (1 << 4)
>> +#define IOMMU_MMIO_STATUS_EVT_RUN     (1 << 3)
>> +#define IOMMU_MMIO_STATUS_COMP_INT    (1 << 2)
>> +#define IOMMU_MMIO_STATUS_EVT_INT     (1 << 1)
>> +#define IOMMU_MMIO_STATUS_EVT_OVF     (1 << 0)
>> +
>> +#define IOMMU_CMDBUF_ID_BYTE              0x07
>> +#define IOMMU_CMDBUF_ID_RSHIFT            4
>> +
>> +#define IOMMU_CMD_COMPLETION_WAIT         0x01
>> +#define IOMMU_CMD_INVAL_DEVTAB_ENTRY      0x02
>> +#define IOMMU_CMD_INVAL_IOMMU_PAGES       0x03
>> +#define IOMMU_CMD_INVAL_IOTLB_PAGES       0x04
>> +#define IOMMU_CMD_INVAL_INTR_TABLE        0x05
>> +#define IOMMU_CMD_PREFETCH_IOMMU_PAGES    0x06
>> +#define IOMMU_CMD_COMPLETE_PPR_REQUEST    0x07
>> +#define IOMMU_CMD_INVAL_IOMMU_ALL         0x08
>> +
>> +#define IOMMU_DEVTAB_ENTRY_SIZE           32
>> +
>> +/* Device table entry bits 0:63 */
>> +#define IOMMU_DEV_VALID                   (1ULL << 0)
>> +#define IOMMU_DEV_TRANSLATION_VALID       (1ULL << 1)
>> +#define IOMMU_DEV_MODE_MASK               0x7
>> +#define IOMMU_DEV_MODE_RSHIFT             9
>> +#define IOMMU_DEV_PT_ROOT_MASK            0xFFFFFFFFFF000
>> +#define IOMMU_DEV_PT_ROOT_RSHIFT          12
>> +#define IOMMU_DEV_PERM_SHIFT              61
>> +#define IOMMU_DEV_PERM_READ               (1ULL << 61)
>> +#define IOMMU_DEV_PERM_WRITE              (1ULL << 62)
>> +
>> +/* Device table entry bits 64:127 */
>> +#define IOMMU_DEV_DOMID_ID_MASK          ((1ULL << 16) - 1)
>> +#define IOMMU_DEV_IOTLB_SUPPORT           (1ULL << 17)
>> +#define IOMMU_DEV_SUPPRESS_PF             (1ULL << 18)
>> +#define IOMMU_DEV_SUPPRESS_ALL_PF         (1ULL << 19)
>> +#define IOMMU_DEV_IOCTL_MASK              (~3)
>> +#define IOMMU_DEV_IOCTL_RSHIFT            20
>> +#define   IOMMU_DEV_IOCTL_DENY            0
>> +#define   IOMMU_DEV_IOCTL_PASSTHROUGH     1
>> +#define   IOMMU_DEV_IOCTL_TRANSLATE       2
>> +#define IOMMU_DEV_CACHE                   (1ULL << 37)
>> +#define IOMMU_DEV_SNOOP_DISABLE           (1ULL << 38)
>> +#define IOMMU_DEV_EXCL                    (1ULL << 39)
>> +
>> +/* Event codes and flags, as stored in the info field */
>> +#define IOMMU_EVENT_ILLEGAL_DEVTAB_ENTRY  (0x1U << 12)
>> +#define IOMMU_EVENT_IOPF                  (0x2U << 12)
>> +#define   IOMMU_EVENT_IOPF_I              (1U << 3)
>> +#define   IOMMU_EVENT_IOPF_PR             (1U << 4)
>> +#define   IOMMU_EVENT_IOPF_RW             (1U << 5)
>> +#define   IOMMU_EVENT_IOPF_PE             (1U << 6)
>> +#define   IOMMU_EVENT_IOPF_RZ             (1U << 7)
>> +#define   IOMMU_EVENT_IOPF_TR             (1U << 8)
>> +#define IOMMU_EVENT_DEV_TAB_HW_ERROR      (0x3U << 12)
>> +#define IOMMU_EVENT_PAGE_TAB_HW_ERROR     (0x4U << 12)
>> +#define IOMMU_EVENT_ILLEGAL_COMMAND_ERROR (0x5U << 12)
>> +#define IOMMU_EVENT_COMMAND_HW_ERROR      (0x6U << 12)
>> +#define IOMMU_EVENT_IOTLB_INV_TIMEOUT     (0x7U << 12)
>> +#define IOMMU_EVENT_INVALID_DEV_REQUEST   (0x8U << 12)
>> +
>> +#define IOMMU_EVENT_LEN                   16
>> +#define IOMMU_PERM_READ             (1 << 0)
>> +#define IOMMU_PERM_WRITE            (1 << 1)
>> +#define IOMMU_PERM_RW               (IOMMU_PERM_READ | IOMMU_PERM_WRITE)
>> +
>> +/* AMD RD890 Chipset */
>> +#define PCI_DEVICE_ID_RD890_IOMMU   0x20
>
>
> We keep the pci ids in include/hw/pci/pci_ids.h
>
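
i.e. the define would simply move there, keeping the value used in this
patch (the macro name would follow the existing naming in that header):

    /* in include/hw/pci/pci_ids.h */
    #define PCI_DEVICE_ID_AMD_IOMMU_RD890   0x20
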
>> +
>> +#define IOMMU_FEATURE_PREFETCH            (1ULL << 0)
>> +#define IOMMU_FEATURE_PPR                 (1ULL << 1)
>> +#define IOMMU_FEATURE_NX                  (1ULL << 3)
>> +#define IOMMU_FEATURE_GT                  (1ULL << 4)
>> +#define IOMMU_FEATURE_IA                  (1ULL << 6)
>> +#define IOMMU_FEATURE_GA                  (1ULL << 7)
>> +#define IOMMU_FEATURE_HE                  (1ULL << 8)
>> +#define IOMMU_FEATURE_PC                  (1ULL << 9)
>> +
>> +/* reserved DTE bits */
>> +#define IOMMU_DTE_LOWER_QUAD_RESERVED  0x80300000000000fc
>> +#define IOMMU_DTE_MIDDLE_QUAD_RESERVED 0x0000000000000100
>> +#define IOMMU_DTE_UPPER_QUAD_RESERVED  0x08f0000000000000
>> +
>> +/* IOMMU paging mode */
>> +#define IOMMU_GATS_MODE                 (6ULL <<  12)
>> +#define IOMMU_HATS_MODE                 (6ULL <<  10)
>> +
>> +/* PCI SIG constants */
>> +#define PCI_BUS_MAX 256
>> +#define PCI_SLOT_MAX 32
>> +#define PCI_FUNC_MAX 8
>> +#define PCI_DEVFN_MAX 256
>
>
> Maybe we can move the PCI macros to include/hw/pci/pci.h, since those are
> not IOMMU specific.
>
>> +
>> +/* IOTLB */
>> +#define IOMMU_IOTLB_MAX_SIZE 1024
>> +#define IOMMU_DEVID_SHIFT    36
>> +
>> +/* extended feature support */
>> +#define IOMMU_EXT_FEATURES (IOMMU_FEATURE_PREFETCH | IOMMU_FEATURE_PPR |
>> \
>> +        IOMMU_FEATURE_NX | IOMMU_FEATURE_IA | IOMMU_FEATURE_GT | \
>> +        IOMMU_FEATURE_GA | IOMMU_FEATURE_HE | IOMMU_GATS_MODE | \
>> +        IOMMU_HATS_MODE)
>> +
>> +/* capabilities header */
>> +#define IOMMU_CAPAB_FEATURES (IOMMU_CAPAB_FLAT_EXT | \
>> +        IOMMU_CAPAB_FLAG_NPCACHE | IOMMU_CAPAB_FLAG_IOTLBSUP \
>> +        | IOMMU_CAPAB_ID_SEC | IOMMU_CAPAB_INIT_TYPE | \
>> +        IOMMU_CAPAB_FLAG_HTTUNNEL |  IOMMU_CAPAB_EFR_SUP)
>> +
>> +/* command constants */
>> +#define IOMMU_COM_STORE_ADDRESS_MASK 0xffffffffffff8
>> +#define IOMMU_COM_COMPLETION_STORE_MASK 0x1
>> +#define IOMMU_COM_COMPLETION_INTR 0x2
>> +#define IOMMU_COM_COMPLETION_DATA_OFF 0x8
>> +#define IOMMU_COMMAND_SIZE 0x10
>> +
>> +/* IOMMU default address */
>> +#define IOMMU_BASE_ADDR 0xfed80000
>> +
>> +/* page management constants */
>> +#define IOMMU_PAGE_SHIFT 12
>> +#define IOMMU_PAGE_SIZE  (1ULL << IOMMU_PAGE_SHIFT)
>> +
>> +#define IOMMU_PAGE_SHIFT_4K 12
>> +#define IOMMU_PAGE_MASK_4K  (~((1ULL << IOMMU_PAGE_SHIFT_4K) - 1))
>> +#define IOMMU_PAGE_SHIFT_2M 21
>> +#define IOMMU_PAGE_MASK_2M  (~((1ULL << IOMMU_PAGE_SHIFT_2M) - 1))
>> +#define IOMMU_PAGE_SHIFT_1G 30
>> +#define IOMMU_PAGE_MASK_1G (~((1ULL << IOMMU_PAGE_SHIFT_1G) - 1))
>> +
>> +#define IOMMU_MAX_VA_ADDR          (48UL << 5)
>> +#define IOMMU_MAX_PH_ADDR          (40UL << 8)
>> +#define IOMMU_MAX_GVA_ADDR         (48UL << 15)
>> +
>> +/* invalidation command device id */
>> +#define IOMMU_INVAL_DEV_ID_SHIFT  32
>> +#define IOMMU_INVAL_DEV_ID_MASK   (~((1UL << IOMMU_INVAL_DEV_ID_SHIFT) -
>> 1))
David Kiarie March 2, 2016, 4 a.m. UTC | #3
On Fri, Feb 26, 2016 at 9:23 AM, David Kiarie <davidkiarie4@gmail.com> wrote:
> On Thu, Feb 25, 2016 at 6:43 PM, Marcel Apfelbaum <marcel@redhat.com> wrote:
>> On 02/21/2016 08:10 PM, David Kiarie wrote:
>>>
>>> Add AMD IOMMU emulaton to Qemu in addition to Intel IOMMU
>>> The IOMMU does basic translation, error checking and has a
>>> mininal IOTLB implementation
>>
>>
>> Hi,
>>
>>>
>>> Signed-off-by: David Kiarie <davidkiarie4@gmail.com>
>>> ---
>>>   hw/i386/Makefile.objs |    1 +
>>>   hw/i386/amd_iommu.c   | 1432
>>> +++++++++++++++++++++++++++++++++++++++++++++++++
>>>   hw/i386/amd_iommu.h   |  395 ++++++++++++++
>>>   include/hw/pci/pci.h  |    2 +
>>>   4 files changed, 1830 insertions(+)
>>>   create mode 100644 hw/i386/amd_iommu.c
>>>   create mode 100644 hw/i386/amd_iommu.h
>>>
>>> diff --git a/hw/i386/Makefile.objs b/hw/i386/Makefile.objs
>>> index b52d5b8..2f1a265 100644
>>> --- a/hw/i386/Makefile.objs
>>> +++ b/hw/i386/Makefile.objs
>>> @@ -3,6 +3,7 @@ obj-y += multiboot.o
>>>   obj-y += pc.o pc_piix.o pc_q35.o
>>>   obj-y += pc_sysfw.o
>>>   obj-y += intel_iommu.o
>>> +obj-y += amd_iommu.o
>>>   obj-$(CONFIG_XEN) += ../xenpv/ xen/
>>>
>>>   obj-y += kvmvapic.o
>>> diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c
>>> new file mode 100644
>>> index 0000000..3dac043
>>> --- /dev/null
>>> +++ b/hw/i386/amd_iommu.c
>>> @@ -0,0 +1,1432 @@
>>> +/*
>>> + * QEMU emulation of AMD IOMMU (AMD-Vi)
>>> + *
>>> + * Copyright (C) 2011 Eduard - Gabriel Munteanu
>>> + * Copyright (C) 2015 David Kiarie, <davidkiarie4@gmail.com>
>>> + *
>>> + * This program is free software; you can redistribute it and/or modify
>>> + * it under the terms of the GNU General Public License as published by
>>> + * the Free Software Foundation; either version 2 of the License, or
>>> + * (at your option) any later version.
>>> +
>>> + * This program is distributed in the hope that it will be useful,
>>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
>>> + * GNU General Public License for more details.
>>> +
>>> + * You should have received a copy of the GNU General Public License
>>> along
>>> + * with this program; if not, see <http://www.gnu.org/licenses/>.
>>> + *
>>> + * Cache implementation inspired by hw/i386/intel_iommu.c
>>> + *
>>> + */
>>> +#include "hw/i386/amd_iommu.h"
>>> +
>>> +/*#define DEBUG_AMD_IOMMU*/
>>> +#ifdef DEBUG_AMD_IOMMU
>>> +enum {
>>> +    DEBUG_GENERAL, DEBUG_CAPAB, DEBUG_MMIO, DEBUG_ELOG,
>>> +    DEBUG_CACHE, DEBUG_COMMAND, DEBUG_MMU
>>> +};
>>> +
>>> +#define IOMMU_DBGBIT(x)   (1 << DEBUG_##x)
>>> +static int iommu_dbgflags = IOMMU_DBGBIT(MMIO);
>>> +
>>> +#define IOMMU_DPRINTF(what, fmt, ...) do { \
>>> +    if (iommu_dbgflags & IOMMU_DBGBIT(what)) { \
>>> +        fprintf(stderr, "(amd-iommu)%s: " fmt "\n", __func__, \
>>> +                ## __VA_ARGS__); } \
>>> +    } while (0)
>>> +#else
>>> +#define IOMMU_DPRINTF(what, fmt, ...) do {} while (0)
>>> +#endif
>>> +
>>> +typedef struct AMDIOMMUAddressSpace {
>>> +    uint8_t bus_num;            /* bus number
>>> */
>>> +    uint8_t devfn;              /* device function
>>> */
>>> +    AMDIOMMUState *iommu_state; /* IOMMU - one per machine
>>> */
>>> +    MemoryRegion iommu;         /* Device's iommu region
>>> */
>>> +    AddressSpace as;            /* device's corresponding address space
>>> */
>>> +} AMDIOMMUAddressSpace;
>>> +
>>> +/* IOMMU cache entry */
>>> +typedef struct IOMMUIOTLBEntry {
>>> +    uint64_t gfn;
>>> +    uint16_t domid;
>>> +    uint64_t devid;
>>> +    uint64_t perms;
>>> +    uint64_t translated_addr;
>>> +} IOMMUIOTLBEntry;
>>> +
>>> +/* configure MMIO registers at startup/reset */
>>> +static void amd_iommu_set_quad(AMDIOMMUState *s, hwaddr addr, uint64_t
>>> val,
>>> +                               uint64_t romask, uint64_t w1cmask)
>>> +{
>>> +    stq_le_p(&s->mmior[addr], val);
>>> +    stq_le_p(&s->romask[addr], romask);
>>> +    stq_le_p(&s->w1cmask[addr], w1cmask);
>>> +}
>>> +
>>> +static uint16_t amd_iommu_readw(AMDIOMMUState *s, hwaddr addr)
>>> +{
>>> +    return lduw_le_p(&s->mmior[addr]);
>>> +}
>>> +
>>> +static uint32_t amd_iommu_readl(AMDIOMMUState *s, hwaddr addr)
>>> +{
>>> +    return ldl_le_p(&s->mmior[addr]);
>>> +}
>>> +
>>> +static uint64_t amd_iommu_readq(AMDIOMMUState *s, hwaddr addr)
>>> +{
>>> +    return ldq_le_p(&s->mmior[addr]);
>>> +}
>>> +
>>> +/* internal write */
>>> +static void amd_iommu_writeq_raw(AMDIOMMUState *s, uint64_t val, hwaddr
>>> addr)
>>> +{
>>> +    stq_le_p(&s->mmior[addr], val);
>>> +}
>>> +
>>> +/* external write */
>>> +static void amd_iommu_writew(AMDIOMMUState *s, hwaddr addr, uint16_t val)
>>> +{
>>> +    uint16_t romask = lduw_le_p(&s->romask[addr]);
>>> +    uint16_t w1cmask = lduw_le_p(&s->w1cmask[addr]);
>>> +    uint16_t oldval = lduw_le_p(&s->mmior[addr]);
>>> +    stw_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask &
>>> oldval));
>>> +}
>>> +
>>> +static void amd_iommu_writel(AMDIOMMUState *s, hwaddr addr, uint32_t val)
>>> +{
>>> +    uint32_t romask = ldl_le_p(&s->romask[addr]);
>>> +    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
>>> +    uint32_t oldval = ldl_le_p(&s->mmior[addr]);
>>> +    stl_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask &
>>> oldval));
>>> +}
>>> +
>>> +static void amd_iommu_writeq(AMDIOMMUState *s, hwaddr addr, uint64_t val)
>>> +{
>>> +    uint64_t romask = ldq_le_p(&s->romask[addr]);
>>> +    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
>>> +    uint32_t oldval = ldq_le_p(&s->mmior[addr]);
>>> +    stq_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask &
>>> oldval));
>>> +}
>>> +
>>> +static void amd_iommu_log_event(AMDIOMMUState *s, uint16_t *evt)
>>> +{
>>> +    /* event logging not enabled */
>>> +    if (!s->evtlog_enabled || *(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS] |
>>> +        IOMMU_MMIO_STATUS_EVT_OVF) {
>>> +        return;
>>> +    }
>>> +
>>> +    /* event log buffer full */
>>> +    if (s->evtlog_tail >= s->evtlog_len) {
>>> +        *(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS] |=
>>> IOMMU_MMIO_STATUS_EVT_OVF;
>>> +        /* generate interrupt */
>>> +        msi_notify(&s->dev, 0);
>>> +    }
>>> +
>>> +    if (dma_memory_write(&address_space_memory, s->evtlog_len +
>>> s->evtlog_tail,
>>> +       &evt, IOMMU_EVENT_LEN)) {
>>> +        IOMMU_DPRINTF(ELOG, "error: fail to write at address 0x%"PRIx64
>>> +                      " + offset 0x%"PRIx32, s->evtlog, s->evtlog_tail);
>>> +    }
>>> +
>>> +     s->evtlog_tail += IOMMU_EVENT_LEN;
>>> +     *(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS] |=
>>> IOMMU_MMIO_STATUS_COMP_INT;
>>> +}
>>> +
>>> +/* log an error encountered page-walking
>>> + *
>>> + * @addr: virtual address in translation request
>>> + */
>>> +static void amd_iommu_page_fault(AMDIOMMUState *s, uint16_t devid,
>>> +                                 dma_addr_t addr, uint16_t info)
>>> +{
>>> +    IOMMU_DPRINTF(ELOG, "");
>>> +
>>> +    uint16_t evt[8];
>>> +
>>> +    info |= IOMMU_EVENT_IOPF_I;
>>> +
>>> +    /* encode information */
>>> +    *(uint16_t *)&evt[0] = devid;
>>> +    *(uint16_t *)&evt[3] = info;
>>> +    *(uint64_t *)&evt[4] = cpu_to_le64(addr);
>>> +
>>> +    /* log a page fault */
>>> +    amd_iommu_log_event(s, evt);
>>> +
>>> +    /* Abort the translation */
>>> +    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
>>> +            PCI_STATUS_SIG_TARGET_ABORT);
>>> +}
>>> +/*
>>> + * log a master abort accessing device table
>>> + *  @devtab : address of device table entry
>>> + *  @info : error flags
>>> + */
>>> +static void amd_iommu_log_devtab_error(AMDIOMMUState *s, uint16_t devid,
>>> +                                       dma_addr_t devtab, uint16_t info)
>>> +{
>>> +
>>> +    IOMMU_DPRINTF(ELOG, "");
>>> +
>>> +    uint16_t evt[8];
>>> +
>>> +    info |= IOMMU_EVENT_DEV_TAB_HW_ERROR;
>>> +
>>> +    /* encode information */
>>> +    *(uint16_t *)&evt[0] = devid;
>>> +    *(uint8_t *)&evt[3]  = info;
>>> +    *(uint64_t *)&evt[4] = cpu_to_le64(devtab);
>>> +
>>> +    amd_iommu_log_event(s, evt);
>>> +
>>> +    /* Abort the translation */
>>> +    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
>>> +            PCI_STATUS_SIG_TARGET_ABORT);
>>> +
>>> +}
>>> +
>>> +/* log a master abort encountered during a page-walk
>>> + *  @addr : address that couldn't be accessed
>>> + */
>>> +static void amd_iommu_log_pagetab_error(AMDIOMMUState *s, uint16_t devid,
>>> +                                        dma_addr_t addr, uint16_t info)
>>> +{
>>> +    IOMMU_DPRINTF(ELOG, "");
>>> +
>>> +    uint16_t evt[8];
>>> +
>>> +    info |= IOMMU_EVENT_PAGE_TAB_HW_ERROR;
>>> +
>>> +    /* encode information */
>>> +    *(uint16_t *)&evt[0] = devid;
>>> +    *(uint8_t *)&evt[3]  = info;
>>> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
>>> +
>>> +    amd_iommu_log_event(s, evt);
>>> +
>>> +    /* Abort the translation */
>>> +    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
>>> +            PCI_STATUS_SIG_TARGET_ABORT);
>>> +
>>> +}
>>> +
>>> +/* log an event trying to access command buffer
>>> + *   @addr : address that couldn't be accessed
>>> + */
>>> +static void amd_iommu_log_command_error(AMDIOMMUState *s, dma_addr_t
>>> addr)
>>> +{
>>> +    IOMMU_DPRINTF(ELOG, "");
>>> +
>>> +    uint16_t evt[8];
>>> +
>>> +    /* encode information */
>>> +    *(uint8_t *)&evt[3]  = (uint8_t)IOMMU_EVENT_COMMAND_HW_ERROR;
>>> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
>>> +
>>> +    amd_iommu_log_event(s, evt);
>>> +
>>> +    /* Abort the translation */
>>> +    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
>>> +            PCI_STATUS_SIG_TARGET_ABORT);
>>> +}
>>> +
>>> +/* log an illegal comand event
>>> + *   @addr : address of illegal command
>>> + */
>>> +static void amd_iommu_log_illegalcom_error(AMDIOMMUState *s, uint16_t
>>> info,
>>> +                                           dma_addr_t addr)
>>> +{
>>> +    IOMMU_DPRINTF(ELOG, "");
>>> +
>>> +    uint16_t evt[8];
>>> +
>>> +    /* encode information */
>>> +    *(uint8_t *)&evt[3]  = (uint8_t)IOMMU_EVENT_ILLEGAL_COMMAND_ERROR;
>>> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
>>
>>
>> Can you please use a macro instead of the literal 3?
>>
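
Something like this would do, with the name picked only for illustration:

    #define IOMMU_EVENT_ADDR_RSHIFT   3

    *(uint64_t *)&evt[4] = cpu_to_le64(addr) >> IOMMU_EVENT_ADDR_RSHIFT;
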
>>> +
>>> +    amd_iommu_log_event(s, evt);
>>> +}
>>> +
>>> +/* log an error accessing device table
>>> + *
>>> + *  @devid : device owning the table entry
>>> + *  @devtab : address of device table entry
>>> + *  @info : error flags
>>> + */
>>> +static void amd_iommu_log_illegaldevtab_error(AMDIOMMUState *s, uint16_t
>>> devid,
>>> +                                              dma_addr_t addr, uint16_t
>>> info)
>>> +{
>>> +    IOMMU_DPRINTF(ELOG, "");
>>> +
>>> +    uint16_t evt[8];
>>> +
>>> +    info |= IOMMU_EVENT_ILLEGAL_DEVTAB_ENTRY;
>>> +
>>> +    *(uint16_t *)&evt[0] = devid;
>>> +    *(uint8_t *)&evt[3]  = info;
>>> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
>>> +
>>> +    amd_iommu_log_event(s, evt);
>>> +}
>>
>>
>> It seems that all the log functions do the same thing:
>> create an event, log it and optionally set PCI_STATUS_SIG_TARGET_ABORT.
>>
>> I would consider uniting them in the same function. (not a must)

I would prefer to keep the event code separate, but I could probably
add a macro. Currently we are logging a lot less information than we
should be, and once more information is logged a single shared function
could become a bit ugly.
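
One possible middle ground is a small helper for the common tail, with each
caller still encoding its own event (names invented here); functions that do
not abort would keep calling amd_iommu_log_event() directly:

    /* hypothetical helper: log an event, then signal a target abort */
    static void amd_iommu_log_and_abort(AMDIOMMUState *s, uint16_t *evt)
    {
        amd_iommu_log_event(s, evt);
        pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
                                   PCI_STATUS_SIG_TARGET_ABORT);
    }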

>>
>>> +
>>> +static gboolean amd_iommu_uint64_equal(gconstpointer v1, gconstpointer
>>> v2)
>>> +{
>>> +    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
>>> +}
>>> +
>>> +static guint amd_iommu_uint64_hash(gconstpointer v)
>>> +{
>>> +    return (guint)*(const uint64_t *)v;
>>> +}
>>> +
>>> +static IOMMUIOTLBEntry *amd_iommu_iotlb_lookup(AMDIOMMUState *s, hwaddr
>>> addr,
>>> +                                               uint64_t devid)
>>> +{
>>> +    uint64_t key = (addr >> IOMMU_PAGE_SHIFT_4K) |
>>> +                   ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
>>> +    return g_hash_table_lookup(s->iotlb, &key);
>>> +}
>>> +
>>> +static void amd_iommu_iotlb_reset(AMDIOMMUState *s)
>>> +{
>>> +    assert(s->iotlb);
>>> +    g_hash_table_remove_all(s->iotlb);
>>> +}
>>> +
>>> +static gboolean amd_iommu_iotlb_remove_by_devid(gpointer key, gpointer
>>> value,
>>> +                                                gpointer user_data)
>>> +{
>>> +    IOMMUIOTLBEntry *entry = (IOMMUIOTLBEntry *)value;
>>> +    uint16_t devid = *(uint16_t *)user_data;
>>> +    return entry->devid == devid;
>>> +}
>>> +
>>> +static void amd_iommu_iotlb_remove_page(AMDIOMMUState *s, hwaddr addr,
>>> +                                        uint64_t devid)
>>> +{
>>> +    uint64_t key = (addr >> IOMMU_PAGE_SHIFT_4K) |
>>> +                   ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
>>> +    g_hash_table_remove(s->iotlb, &key);
>>> +}
>>> +
>>> +/* extract device id */
>>> +static inline uint16_t devid_extract(uint8_t *cmd)
>>> +{
>>> +    return (uint16_t)cmd[2] & IOMMU_INVAL_DEV_ID_MASK;
>>> +}
>>> +
>>> +static void amd_iommu_invalidate_iotlb(AMDIOMMUState *s, uint64_t *cmd)
>>> +{
>>> +    uint16_t devid = devid_extract((uint8_t *)cmd);
>>> +    /* if invalidation of more than one page requested */
>>> +    if (IOMMU_INVAL_ALL(cmd[0])) {
>>> +        g_hash_table_foreach_remove(s->iotlb,
>>> amd_iommu_iotlb_remove_by_devid,
>>> +                                    &devid);
>>> +    } else {
>>> +        hwaddr addr = (hwaddr)(cmd[1] & IOMMU_INVAL_ADDR_MASK);
>>> +        amd_iommu_iotlb_remove_page(s, addr, devid);
>>> +    }
>>> +}
>>> +
>>> +static void amd_iommu_update_iotlb(AMDIOMMUState *s, uint16_t devid,
>>> +                                   uint64_t gpa, uint64_t spa, uint64_t
>>> perms,
>>> +                                   uint16_t domid)
>>> +{
>>> +    IOMMUIOTLBEntry *entry = g_malloc(sizeof(*entry));
>>> +    uint64_t *key = g_malloc(sizeof(key));
>>> +    uint64_t gfn = gpa >> IOMMU_PAGE_SHIFT_4K;
>>> +
>>> +    IOMMU_DPRINTF(CACHE, " update iotlb devid: %02x:%02x.%x gpa
>>> 0x%"PRIx64
>>> +                  " hpa 0x%"PRIx64, PCI_BUS_NUM(devid), PCI_SLOT(devid),
>>> +                  PCI_FUNC(devid), gpa, spa);
>>> +
>>> +    if (g_hash_table_size(s->iotlb) >= IOMMU_IOTLB_MAX_SIZE) {
>>> +        IOMMU_DPRINTF(CACHE, "iotlb exceeds size limit - reset");
>>> +        amd_iommu_iotlb_reset(s);
>>> +    }
>>> +
>>> +    entry->gfn = gfn;
>>> +    entry->domid = domid;
>>> +    entry->perms = perms;
>>> +    entry->translated_addr = spa;
>>> +    *key = gfn | ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
>>> +    g_hash_table_replace(s->iotlb, key, entry);
>>> +}
>>> +
>>> +/* execute a completion wait command */
>>> +static void amd_iommu_completion_wait(AMDIOMMUState *s, uint8_t *cmd)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +    unsigned int addr;
>>> +
>>> +    /* completion store */
>>> +    if (cmd[0] & IOMMU_COM_COMPLETION_STORE_MASK) {
>>> +        addr = le64_to_cpu(*(uint64_t *)cmd) &
>>> IOMMU_COM_STORE_ADDRESS_MASK;
>>> +        if (dma_memory_write(&address_space_memory, addr, cmd + 8, 8)) {
>>> +            IOMMU_DPRINTF(ELOG, "error: fail to write at address
>>> 0%x"PRIx64,
>>> +                          addr);
>>> +        }
>>> +    }
>>> +
>>> +    /* set completion interrupt */
>>> +    if (cmd[0] & IOMMU_COM_COMPLETION_INTR) {
>>> +        s->mmior[IOMMU_MMIO_STATUS] |= IOMMU_MMIO_STATUS_COMP_INT;
>>> +    }
>>> +}
>>> +
>>> +/* get command type */
>>> +static uint8_t opcode(uint8_t *cmd)
>>> +{
>>> +    return cmd[IOMMU_CMDBUF_ID_BYTE] >> IOMMU_CMDBUF_ID_RSHIFT;
>>> +}
>>> +
>>> +/* linux seems to be using reserved bits so I just log without abortig
>>> bug */
>>
>>
>> I couldn't quite understand the comment
>>
>>> +static void iommu_inval_devtab_entry(AMDIOMMUState *s, uint8_t *cmd,
>>> +                                     uint8_t type)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    /* This command should invalidate internal caches of which there
>>> isn't */
>>> +    if (*(uint64_t *)&cmd[0] & IOMMU_CMD_INVAL_DEV_RSVD ||
>>> +            *(uint64_t *)&cmd[1]) {
>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>>> s->cmdbuf_head);
>>> +    }
>>> +#ifdef DEBUG_AMD_IOMMU
>>> +    uint16_t devid = devid_extract(cmd);
>>> +#endif
>>> +    IOMMU_DPRINTF(COMMAND, "device table entry for devid: %02x:%02x.%x"
>>> +                  "invalidated", PCI_BUS_NUM(devid), PCI_SLOT(devid),
>>> +                  PCI_FUNC(devid));
>>> +}
>>> +
>>> +static void iommu_completion_wait(AMDIOMMUState *s, uint8_t *cmd, uint8_t
>>> type)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    if (*(uint32_t *)&cmd[1] & IOMMU_COMPLETION_WAIT_RSVD) {
>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>>> s->cmdbuf_head);
>>> +    }
>>> +    /* pretend to wait for command execution to complete */
>>> +    IOMMU_DPRINTF(COMMAND, "completion wait requested with store address
>>> 0x%"
>>> +                  PRIx64 " and store data 0x%"PRIx64, (cmd[0] &
>>> +                  IOMMU_COM_STORE_ADDRESS_MASK), *(uint64_t *)(cmd + 8));
>>> +    amd_iommu_completion_wait(s, cmd);
>>> +}
>>> +
>>> +static void iommu_complete_ppr(AMDIOMMUState *s, uint8_t *cmd, uint8_t
>>> type)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    if ((*(uint64_t *)&cmd[0] & IOMMU_COMPLETE_PPR_RQ_RSVD) ||
>>> +       *(uint64_t *)&cmd[1] & 0xffff000000000000) {
>>
>>
>>
>> Can you please document this mask?
>>
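
For example (the macro name is only a suggestion, following the *_RSVD
naming used elsewhere in the header):

    /* bits 48-63 of the second quadword, treated as reserved by the check */
    #define IOMMU_COMPLETE_PPR_HIGH_RSVD   0xffff000000000000ULL
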
>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>>> s->cmdbuf_head);
>>> +    }
>>> +
>>> +    IOMMU_DPRINTF(COMMAND, "Execution of PPR queue requested");
>>> +}
>>> +
>>> +static void iommu_inval_all(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    if ((*(uint64_t *)&cmd[0] & IOMMU_INVAL_IOMMU_ALL_RSVD) ||
>>> +       *(uint64_t *)&cmd[1]) {
>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>>> s->cmdbuf_head);
>>> +    }
>>> +
>>> +    amd_iommu_iotlb_reset(s);
>>> +    IOMMU_DPRINTF(COMMAND, "Invalidation of all IOMMU cache requested");
>>> +}
>>> +
>>> +static inline uint16_t domid_extract(uint64_t *cmd)
>>> +{
>>> +    return (uint16_t)cmd[0] & IOMMU_INVAL_PAGES_DOMID;
>>> +}
>>> +
>>> +static gboolean amd_iommu_iotlb_remove_by_domid(gpointer key, gpointer
>>> value,
>>> +                                                gpointer user_data)
>>> +{
>>> +    IOMMUIOTLBEntry *entry = (IOMMUIOTLBEntry *)value;
>>> +    uint16_t domid = *(uint16_t *)user_data;
>>> +    return entry->domid == domid;
>>> +}
>>> +
>>> +/* we don't have devid - we can't remove pages by address */
>>> +static void iommu_inval_pages(AMDIOMMUState *s, uint8_t *cmd, uint8_t
>>> type)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +    uint16_t domid = domid_extract((uint64_t *)cmd);
>>> +
>>> +    if (*(uint64_t *)&cmd[0] & IOMMU_INVAL_IOMMU_PAGES_RSVD ||
>>> +       *(uint32_t *)&cmd[1] & 0x00000ff0) {
>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>>> s->cmdbuf_head);
>>> +    }
>>> +
>>> +    g_hash_table_foreach_remove(s->iotlb,
>>> amd_iommu_iotlb_remove_by_domid,
>>> +                                &domid);
>>> +
>>> +    IOMMU_DPRINTF(COMMAND, "IOMMU pages for domain 0x%"PRIx16
>>> "invalidated",
>>> +                  domid);
>>> +}
>>> +
>>> +static void iommu_prefetch_pages(AMDIOMMUState *s, uint8_t *cmd, uint8_t
>>> type)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    if ((*(uint64_t *)&cmd[0] & IOMMU_PRF_IOMMU_PAGES_RSVD) ||
>>> +       (*(uint32_t *)&cmd[1] & 0x00000fd4)) {
>>
>>
>> Same here, maybe you can name the mask, so we can follow the spec more
>> easily.
>>
>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>>> s->cmdbuf_head);
>>> +    }
>>> +
>>> +    IOMMU_DPRINTF(COMMAND, "Pre-fetch of IOMMU pages requested");
>>> +}
>>> +
>>> +static void iommu_inval_inttable(AMDIOMMUState *s, uint8_t *cmd, uint8_t
>>> type)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    if ((*(uint64_t *)&cmd[0] & IOMMU_INVAL_INTR_TABLE_RSVD) ||
>>> +       *(uint64_t *)&cmd[1]) {
>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>>> s->cmdbuf_head);
>>> +        return;
>>> +    }
>>> +
>>> +    IOMMU_DPRINTF(COMMAND, "interrupt table invalidated");
>>> +}
>>> +
>>> +static void iommu_inval_iotlb(AMDIOMMUState *s, uint8_t *cmd, uint8_t
>>> type)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    if (*(uint32_t *)&cmd[2] & IOMMU_INVAL_IOTLB_PAGES_RSVD) {
>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>>> s->cmdbuf_head);
>>> +        return;
>>> +    }
>>> +
>>> +    amd_iommu_invalidate_iotlb(s, (uint64_t *)cmd);
>>> +    IOMMU_DPRINTF(COMMAND, "IOTLB pages invalidated");
>>> +}
>>> +
>>> +/* not honouring reserved bits is regarded as an illegal command */
>>> +static void amd_iommu_cmdbuf_exec(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    uint8_t type;
>>> +    uint8_t cmd[IOMMU_COMMAND_SIZE];
>>> +
>>> +    memset(cmd, 0, IOMMU_COMMAND_SIZE);
>>> +
>>> +    if (dma_memory_read(&address_space_memory, s->cmdbuf +
>>> s->cmdbuf_head, cmd,
>>> +       IOMMU_COMMAND_SIZE)) {
>>> +        IOMMU_DPRINTF(COMMAND, "error: fail to access memory at
>>> 0x%"PRIx64
>>> +                      " + 0x%"PRIu8, s->cmdbuf, s->cmdbuf_head);
>>> +        amd_iommu_log_command_error(s, s->cmdbuf + s->cmdbuf_head);
>>> +        return;
>>> +    }
>>> +
>>> +    type = opcode(cmd);
>>> +
>>> +    switch (type) {
>>> +    case IOMMU_CMD_COMPLETION_WAIT:
>>> +        iommu_completion_wait(s, cmd, type);
>>> +        break;
>>> +
>>> +    case IOMMU_CMD_INVAL_DEVTAB_ENTRY:
>>> +        iommu_inval_devtab_entry(s, cmd, type);
>>> +        break;
>>> +
>>> +    case IOMMU_CMD_INVAL_IOMMU_PAGES:
>>> +        iommu_inval_pages(s, cmd, type);
>>> +        break;
>>> +
>>> +    case IOMMU_CMD_INVAL_IOTLB_PAGES:
>>> +        iommu_inval_iotlb(s, cmd, type);
>>> +        break;
>>> +
>>> +    case IOMMU_CMD_INVAL_INTR_TABLE:
>>> +        iommu_inval_inttable(s, cmd, type);
>>> +        break;
>>> +
>>> +    case IOMMU_CMD_PREFETCH_IOMMU_PAGES:
>>> +        iommu_prefetch_pages(s, cmd, type);
>>> +        break;
>>> +
>>> +    case IOMMU_CMD_COMPLETE_PPR_REQUEST:
>>> +        iommu_complete_ppr(s, cmd, type);
>>> +        break;
>>> +
>>> +    case IOMMU_CMD_INVAL_IOMMU_ALL:
>>> +        iommu_inval_all(s, cmd, type);
>>> +        break;
>>> +
>>> +    default:
>>> +        IOMMU_DPRINTF(COMMAND, "unhandled command %d", type);
>>> +        /* log illegal command */
>>> +        amd_iommu_log_illegalcom_error(s, type,
>>> +                                       s->cmdbuf + s->cmdbuf_head);
>>> +        break;
>>> +    }
>>> +
>>> +}
>>> +
>>> +static void amd_iommu_cmdbuf_run(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    uint64_t *mmio_cmdbuf_head = (uint64_t *)(s->mmior +
>>> +                                 IOMMU_MMIO_COMMAND_HEAD);
>>> +
>>> +    if (!s->cmdbuf_enabled) {
>>> +        IOMMU_DPRINTF(COMMAND, "error: IOMMU trying to execute commands
>>> with "
>>> +                      "command buffer disabled. IOMMU control value
>>> 0x%"PRIx64,
>>> +                      amd_iommu_readq(s, IOMMU_MMIO_CONTROL));
>>> +        return;
>>> +    }
>>> +
>>> +    while (s->cmdbuf_head != s->cmdbuf_tail) {
>>> +        /* check if there is work to do. */
>>> +        IOMMU_DPRINTF(COMMAND, "command buffer head at 0x%"PRIx32 "
>>> command "
>>> +                      "buffer tail at 0x%"PRIx32" command buffer base at
>>> 0x%"
>>> +                      PRIx64, s->cmdbuf_head, s->cmdbuf_tail, s->cmdbuf);
>>> +         amd_iommu_cmdbuf_exec(s);
>>> +         s->cmdbuf_head += IOMMU_COMMAND_SIZE;
>>> +         amd_iommu_writeq_raw(s, s->cmdbuf_head,
>>> IOMMU_MMIO_COMMAND_HEAD);
>>> +
>>> +        /* wrap head pointer */
>>> +        if (s->cmdbuf_head >= s->cmdbuf_len * IOMMU_COMMAND_SIZE) {
>>> +            s->cmdbuf_head = 0;
>>> +        }
>>> +    }
>>> +
>>> +    *mmio_cmdbuf_head = cpu_to_le64(s->cmdbuf_head);
>>> +}
>>> +
>>> +/* System Software might never read from some of this fields but anyways
>>> */
>>> +static uint64_t amd_iommu_mmio_read(void *opaque, hwaddr addr, unsigned
>>> size)
>>> +{
>>> +    AMDIOMMUState *s = opaque;
>>> +
>>> +    uint64_t val = -1;
>>
>>
>> The above might work, but it looks a little weird
>>
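
A more explicit spelling of the same initial value would be:

    uint64_t val = UINT64_MAX;    /* returned unchanged for unhandled sizes */
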
>>> +    if (addr + size > IOMMU_MMIO_SIZE) {
>>> +        IOMMU_DPRINTF(MMIO, "error: addr outside region: max 0x%"PRIX64
>>> +                      ", got 0x%"PRIx64 " %d", (uint64_t)IOMMU_MMIO_SIZE,
>>> addr,
>>> +                      size);
>>> +        return (uint64_t)-1;
>>> +    }
>>> +
>>> +    if (size == 2) {
>>> +        val = amd_iommu_readw(s, addr);
>>> +    } else if (size == 4) {
>>> +        val = amd_iommu_readl(s, addr);
>>> +    } else if (size == 8) {
>>> +        val = amd_iommu_readq(s, addr);
>>> +    }
>>> +
>>> +    switch (addr & ~0x07) {
>>> +    case IOMMU_MMIO_DEVICE_TABLE:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_DEVICE_TABLE read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                       addr & ~0x07);
>>> +        break;
>>> +
>>> +    case IOMMU_MMIO_COMMAND_BASE:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_BASE read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                      addr & ~0x07);
>>> +        break;
>>> +
>>> +    case IOMMU_MMIO_EVENT_BASE:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_BASE read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                      addr & ~0x07);
>>> +        break;
>>> +
>>> +    case IOMMU_MMIO_CONTROL:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_CONTROL read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                       addr & ~0x07);
>>> +        break;
>>> +
>>> +    case IOMMU_MMIO_EXCL_BASE:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EXCL_BASE read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                      addr & ~0x07);
>>> +        break;
>>> +
>>> +    case IOMMU_MMIO_EXCL_LIMIT:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EXCL_LIMIT read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                      addr & ~0x07);
>>> +        break;
>>> +
>>> +    case IOMMU_MMIO_COMMAND_HEAD:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_HEAD read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                      addr & ~0x07);
>>> +        break;
>>> +
>>> +    case IOMMU_MMIO_COMMAND_TAIL:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_TAIL read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                      addr & ~0x07);
>>> +        break;
>>> +
>>> +    case IOMMU_MMIO_EVENT_HEAD:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_HEAD read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                      addr & ~0x07);
>>> +        break;
>>> +
>>> +    case IOMMU_MMIO_EVENT_TAIL:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_TAIL read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                      addr & ~0x07);
>>> +        break;
>>> +
>>> +    case IOMMU_MMIO_STATUS:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_STATUS read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                      addr & ~0x07);
>>> +        break;
>>> +
>>> +    case IOMMU_MMIO_EXT_FEATURES:
>>> +        IOMMU_DPRINTF(MMU, "MMIO_EXT_FEATURES read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64 "value 0x%"PRIx64,
>>> +                      addr, size, addr & ~0x07, val);
>>> +        break;
>>> +
>>> +    default:
>>> +        IOMMU_DPRINTF(MMIO, "UNHANDLED MMIO read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                       addr & ~0x07);
>>> +    }
>>> +    return val;
>>> +}
>>> +
>>> +static void iommu_handle_control_write(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +    /*
>>> +     * read whatever is already written in case
>>> +     * software is writing in chucks less than 8 bytes
>>> +     */
>>> +    unsigned long control = amd_iommu_readq(s, IOMMU_MMIO_CONTROL);
>>> +    s->enabled = !!(control & IOMMU_MMIO_CONTROL_IOMMUEN);
>>> +
>>> +    s->ats_enabled = !!(control & IOMMU_MMIO_CONTROL_HTTUNEN);
>>> +    s->evtlog_enabled = s->enabled && !!(control &
>>> +                        IOMMU_MMIO_CONTROL_EVENTLOGEN);
>>> +
>>> +    s->evtlog_intr = !!(control & IOMMU_MMIO_CONTROL_EVENTINTEN);
>>> +    s->completion_wait_intr = !!(control &
>>> IOMMU_MMIO_CONTROL_COMWAITINTEN);
>>> +    s->cmdbuf_enabled = s->enabled && !!(control &
>>> +                        IOMMU_MMIO_CONTROL_CMDBUFLEN);
>>> +
>>> +    /* update the flags depending on the control register */
>>> +    if (s->cmdbuf_enabled) {
>>> +        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) |=
>>> +            IOMMU_MMIO_STATUS_CMDBUF_RUN;
>>> +    } else {
>>> +        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) &=
>>> +            ~IOMMU_MMIO_STATUS_CMDBUF_RUN;
>>> +    }
>>> +    if (s->evtlog_enabled) {
>>> +        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) |=
>>> +            IOMMU_MMIO_STATUS_EVT_RUN;
>>> +    } else {
>>> +        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) &=
>>> +            ~IOMMU_MMIO_STATUS_EVT_RUN;
>>> +    }
>>> +
>>> +    IOMMU_DPRINTF(COMMAND, "MMIO_STATUS state 0x%"PRIx64, control);
>>> +
>>> +    amd_iommu_cmdbuf_run(s);
>>> +}
>>> +
>>> +static inline void iommu_handle_devtab_write(AMDIOMMUState *s)
>>> +
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_DEVICE_TABLE);
>>> +    s->devtab = (dma_addr_t)(val & IOMMU_MMIO_DEVTAB_BASE_MASK);
>>> +
>>> +    /* set device table length */
>>> +    s->devtab_len = ((val & IOMMU_MMIO_DEVTAB_SIZE_MASK) + 1 *
>>> +                    (IOMMU_MMIO_DEVTAB_SIZE_UNIT /
>>> +                     IOMMU_MMIO_DEVTAB_ENTRY_SIZE));
>>> +}
>>> +
>>> +static inline void iommu_handle_cmdhead_write(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    s->cmdbuf_head = (dma_addr_t)amd_iommu_readq(s,
>>> IOMMU_MMIO_COMMAND_HEAD)
>>> +                     & IOMMU_MMIO_CMDBUF_HEAD_MASK;
>>> +    amd_iommu_cmdbuf_run(s);
>>> +}
>>> +
>>> +static inline void iommu_handle_cmdbase_write(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    s->cmdbuf = (dma_addr_t)amd_iommu_readq(s, IOMMU_MMIO_COMMAND_BASE)
>>> +                & IOMMU_MMIO_CMDBUF_BASE_MASK;
>>> +    s->cmdbuf_len = 1UL << (s->mmior[IOMMU_MMIO_CMDBUF_SIZE_BYTE]
>>> +                    & IOMMU_MMIO_CMDBUF_SIZE_MASK);
>>> +    s->cmdbuf_head = s->cmdbuf_tail = 0;
>>> +
>>> +}
>>> +
>>> +static inline void iommu_handle_cmdtail_write(AMDIOMMUState *s)
>>> +{
>>> +    s->cmdbuf_tail = amd_iommu_readq(s, IOMMU_MMIO_COMMAND_TAIL)
>>> +                     & IOMMU_MMIO_CMDBUF_TAIL_MASK;
>>> +    amd_iommu_cmdbuf_run(s);
>>> +}
>>> +
>>> +static inline void iommu_handle_excllim_write(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EXCL_LIMIT);
>>> +    s->excl_limit = (val & IOMMU_MMIO_EXCL_LIMIT_MASK) |
>>> +                    IOMMU_MMIO_EXCL_LIMIT_LOW;
>>> +}
>>> +
>>> +static inline void iommu_handle_evtbase_write(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EVENT_BASE);
>>> +    s->evtlog = val & IOMMU_MMIO_EVTLOG_BASE_MASK;
>>> +    s->evtlog_len = 1UL << (*(uint64_t *)&s->mmior[IOMMU_MMIO_EVTLOG_SIZE_BYTE]
>>> +                    & IOMMU_MMIO_EVTLOG_SIZE_MASK);
>>> +}
>>> +
>>> +static inline void iommu_handle_evttail_write(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EVENT_TAIL);
>>> +    s->evtlog_tail = val & IOMMU_MMIO_EVTLOG_TAIL_MASK;
>>> +}
>>> +
>>> +static inline void iommu_handle_evthead_write(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EVENT_HEAD);
>>> +    s->evtlog_head = val & IOMMU_MMIO_EVTLOG_HEAD_MASK;
>>> +}
>>> +
>>> +static inline void iommu_handle_pprbase_write(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_PPR_BASE);
>>> +    s->ppr_log = val & IOMMU_MMIO_PPRLOG_BASE_MASK;
>>> +    s->pprlog_len = 1UL << (*(uint64_t *)&s->mmior[IOMMU_MMIO_PPRLOG_SIZE_BYTE]
>>> +                    & IOMMU_MMIO_PPRLOG_SIZE_MASK);
>>> +}
>>> +
>>> +static inline void iommu_handle_pprhead_write(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_PPR_HEAD);
>>> +    s->pprlog_head = val & IOMMU_MMIO_PPRLOG_HEAD_MASK;
>>> +}
>>> +
>>> +static inline void iommu_handle_pprtail_write(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_PPR_TAIL);
>>> +    s->pprlog_tail = val & IOMMU_MMIO_PPRLOG_TAIL_MASK;
>>> +}
>>> +
>>> +/* FIXME: something might go wrong if System Software writes in chunks
>>> + * of one byte but linux writes in chunks of 4 bytes so currently it
>>> + * works correctly with linux but will definitely be busted if software
>>> + * reads/writes 8 bytes
>>> + */
>>> +static void amd_iommu_mmio_write(void *opaque, hwaddr addr, uint64_t val,
>>> +                                 unsigned size)
>>> +{
>>> +
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    AMDIOMMUState *s = opaque;
>>> +    unsigned long offset = addr & 0x07;
>>> +
>>> +    if (addr + size > IOMMU_MMIO_SIZE) {
>>> +        IOMMU_DPRINTF(MMIO, "error: addr outside region: max 0x%"PRIx64
>>> +                      ", got 0x%"PRIx64 " %d", (uint64_t)IOMMU_MMIO_SIZE,
>>> addr,
>>> +                      size);
>>> +        return;
>>> +    }
>>> +
>>> +    switch (addr & ~0x07) {
>>> +    case IOMMU_MMIO_CONTROL:
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr,  val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +
>>> +        IOMMU_DPRINTF(COMMAND, "MMIO_CONTROL write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +        iommu_handle_control_write(s);
>>> +        break;
>>> +
>>> +    case IOMMU_MMIO_DEVICE_TABLE:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_DEVICE_TABLE write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr, val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +
>>> +       /*  set device table address
>>> +        *   This also suffers from inability to tell whether software
>>> +        *   is done writing
>>> +        */
>>> +
>>> +        if (offset || (size == 8)) {
>>> +            iommu_handle_devtab_write(s);
>>> +        }
>>> +        break;
>>> +
>>> +    case IOMMU_MMIO_COMMAND_HEAD:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_HEAD write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr, val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +
>>> +        iommu_handle_cmdhead_write(s);
>>> +        break;
>>> +
>>> +    case IOMMU_MMIO_COMMAND_BASE:
>>> +        IOMMU_DPRINTF(COMMAND, "MMIO_COMMAND_BASE write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr, val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +
>>> +        /* FIXME - make sure System Software has finished writing incase
>>> +         * it writes in chunks less than 8 bytes in a robust way. As for
>>> +         * now, this hack works for the linux driver
>>> +         */
>>> +        if (offset || (size == 8)) {
>>> +            iommu_handle_cmdbase_write(s);
>>> +        }
>>> +        break;
>>> +
>>> +    case IOMMU_MMIO_COMMAND_TAIL:
>>> +        IOMMU_DPRINTF(COMMAND, "MMIO_COMMAND_TAIL write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr, val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +        iommu_handle_cmdtail_write(s);
>>> +        break;
>>> +
>>> +    case IOMMU_MMIO_EVENT_BASE:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_BASE write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr, val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +        iommu_handle_evtbase_write(s);
>>> +        break;
>>> +
>>> +    case IOMMU_MMIO_EVENT_HEAD:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_HEAD write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr, val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +        iommu_handle_evthead_write(s);
>>> +        break;
>>> +
>>> +    case IOMMU_MMIO_EVENT_TAIL:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_TAIL write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr, val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +        iommu_handle_evttail_write(s);
>>> +        break;
>>> +
>>> +    case IOMMU_MMIO_EXCL_LIMIT:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EXCL_LIMIT write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr, val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +        iommu_handle_excllim_write(s);
>>> +        break;
>>> +
>>> +        /* PPR log base - unused for now */
>>> +    case IOMMU_MMIO_PPR_BASE:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_PPR_BASE write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr, val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +        iommu_handle_pprbase_write(s);
>>> +        break;
>>> +        /* PPR log head - also unused for now */
>>> +    case IOMMU_MMIO_PPR_HEAD:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_PPR_HEAD write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                       addr, size, val, offset);
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr, val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +        iommu_handle_pprhead_write(s);
>>> +        break;
>>> +        /* PPR log tail - unused for now */
>>> +    case IOMMU_MMIO_PPR_TAIL:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_PPR_TAIL write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr, val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +        iommu_handle_pprtail_write(s);
>>> +        break;
>>> +
>>> +        /* ignore write to ext_features */
>>> +    default:
>>> +        IOMMU_DPRINTF(MMIO, "UNHANDLED MMIO write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +    }
>>> +
>>> +}
>>> +
>>> +static inline uint64_t amd_iommu_get_perms(uint64_t entry)
>>> +{
>>> +    return (entry & (IOMMU_DEV_PERM_READ | IOMMU_DEV_PERM_WRITE)) >>
>>> +           IOMMU_DEV_PERM_SHIFT;
>>> +}
>>> +
>>> +AddressSpace *bridge_host_amd_iommu(PCIBus *bus, void *opaque, int devfn)
>>> +{
>>> +    AMDIOMMUState *s = opaque;
>>> +    AMDIOMMUAddressSpace **iommu_as;
>>> +    int bus_num = pci_bus_num(bus);
>>> +
>>> +    /* just in case */
>>
>>
>> This comment troubles me, do we need the assert?

In case the bus_num or devfn is invalid. Anyway, I could get rid of
this assert.
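
If the checks stay, a minimal sketch with exclusive upper bounds, as
suggested (untested):

    assert(bus_num >= 0 && bus_num < PCI_BUS_MAX);
    assert(devfn >= 0 && devfn < PCI_DEVFN_MAX);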

>>
>>> +    assert(0 <= bus_num && bus_num <= PCI_BUS_MAX);
>>
>>
>> bus_num < PCI_BUS_MAX, right ?
>>
>>> +    assert(0 <= devfn && devfn <= PCI_DEVFN_MAX);
>>
>>
>> same with devfn I suppose.
>>
>>> +
>>> +    iommu_as = s->address_spaces[bus_num];
>>> +
>>> +    /* allocate memory during the first run */
>>> +    if (!iommu_as) {
>>
>>
>> Why lazy init? We can do that at AMDIOMMUState init, right?

This code has to be called for all emulated devices when the bus is
initialized. If you have it in AMDIOMMUState init, it will only be
called for the one or two devices already initialized.
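
To illustrate, a rough sketch of why the callback runs per device,
assuming the machine code keeps registering it through the existing
pci_setup_iommu() hook (the helper name below is hypothetical):

    static void amd_iommu_attach(PCIBus *bus, AMDIOMMUState *s)
    {
        /* QEMU invokes bridge_host_amd_iommu() for each device on the
         * bus as it is realized, so the per-bus/per-devfn arrays can
         * only be filled in lazily from inside that callback.
         */
        pci_setup_iommu(bus, bridge_host_amd_iommu, s);
    }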

>>
>>> +        iommu_as = g_malloc0(sizeof(AMDIOMMUAddressSpace *) * PCI_DEVFN_MAX);
>>> +        s->address_spaces[bus_num] = iommu_as;
>>> +    }
>>> +
>>> +    /* set up IOMMU region */
>>> +    if (!iommu_as[devfn]) {
>>> +        iommu_as[devfn] = g_malloc0(sizeof(AMDIOMMUAddressSpace));
>>
>>
>> same here
>>
>>> +        iommu_as[devfn]->bus_num = (uint8_t)bus_num;
>>> +        iommu_as[devfn]->devfn = (uint8_t)devfn;
>>> +        iommu_as[devfn]->iommu_state = s;
>>> +
>>> +        memory_region_init_iommu(&iommu_as[devfn]->iommu, OBJECT(s),
>>> +                                 &s->iommu_ops, "amd-iommu", UINT64_MAX);
>>> +        address_space_init(&iommu_as[devfn]->as, &iommu_as[devfn]->iommu,
>>> +                           "amd-iommu");
>>> +    }
>>> +    return &iommu_as[devfn]->as;
>>> +}
>>> +
>>> +/* validate a page table entry */
>>> +static bool amd_iommu_validate_dte(AMDIOMMUState *s, uint16_t devid,
>>> +                                   uint64_t *dte)
>>> +{
>>> +    if ((dte[0] & IOMMU_DTE_LOWER_QUAD_RESERVED)
>>> +        || (dte[1] & IOMMU_DTE_MIDDLE_QUAD_RESERVED)
>>> +        || (dte[2] & IOMMU_DTE_UPPER_QUAD_RESERVED) || dte[3]) {
>>> +        amd_iommu_log_illegaldevtab_error(s, devid,
>>> +                                s->devtab + devid * IOMMU_DEVTAB_ENTRY_SIZE, 0);
>>> +        return false;
>>> +    }
>>> +
>>> +    return dte[0] & IOMMU_DEV_VALID && (dte[0] & IOMMU_DEV_TRANSLATION_VALID)
>>> +           && (dte[0] & IOMMU_DEV_PT_ROOT_MASK);
>>> +}
>>> +
>>> +/* get a device table entry given the devid */
>>> +static bool amd_iommu_get_dte(AMDIOMMUState *s, int devid, uint64_t *entry)
>>> +{
>>> +    uint32_t offset = devid * IOMMU_DEVTAB_ENTRY_SIZE;
>>> +
>>> +    IOMMU_DPRINTF(MMU, "Device Table at 0x%"PRIx64, s->devtab);
>>> +
>>> +    if (dma_memory_read(&address_space_memory, s->devtab + offset, entry,
>>> +                        IOMMU_DEVTAB_ENTRY_SIZE)) {
>>> +        IOMMU_DPRINTF(MMU, "error: fail to access Device Entry devtab
>>> 0x%"PRIx64
>>> +                      "offset 0x%"PRIx32, s->devtab, offset);
>>> +        /* log ever accessing dte */
>>> +        amd_iommu_log_devtab_error(s, devid, s->devtab + offset, 0);
>>> +        return false;
>>> +    }
>>> +
>>> +    if (!amd_iommu_validate_dte(s, devid, entry)) {
>>> +        IOMMU_DPRINTF(MMU,
>>> +                      "Pte entry at 0x%"PRIx64" is invalid", entry[0]);
>>> +        return false;
>>> +    }
>>> +
>>> +    return true;
>>> +}
>>> +
>>> +/* get pte translation mode */
>>> +static inline uint8_t get_pte_translation_mode(uint64_t pte)
>>> +{
>>> +    return (pte >> IOMMU_DEV_MODE_RSHIFT) & IOMMU_DEV_MODE_MASK;
>>> +}
>>> +
>>> +static int amd_iommu_page_walk(AMDIOMMUAddressSpace *as, uint64_t *dte,
>>> +                               IOMMUTLBEntry *ret, unsigned perms,
>>> +                               hwaddr addr)
>>> +{
>>> +    uint8_t level, oldlevel;
>>> +    unsigned present;
>>> +    uint64_t pte, pte_addr;
>>> +    uint64_t pte_perms;
>>> +    pte = dte[0];
>>> +
>>> +    level = get_pte_translation_mode(pte);
>>> +
>>> +    if (level >= 7 || level == 0) {
>>> +        IOMMU_DPRINTF(MMU, "error: translation level 0x%"PRIu8 "
>>> detected"
>>> +                      "while translating 0x%"PRIx64, level, addr);
>>> +        return -1;
>>> +    }
>>> +
>>> +    while (level > 0) {
>>> +        pte_perms = amd_iommu_get_perms(pte);
>>> +        present = pte & 1;
>>> +        if (!present || perms != (perms & pte_perms)) {
>>> +            amd_iommu_page_fault(as->iommu_state, as->devfn, addr, perms);
>>> +            IOMMU_DPRINTF(MMU, "error: page fault accessing virtual addr 0x%"
>>> +                          PRIx64, addr);
>>> +            return -1;
>>> +        }
>>> +
>>> +        /* go to the next lower level */
>>> +        pte_addr = pte & IOMMU_DEV_PT_ROOT_MASK;
>>> +        /* add offset and load pte */
>>> +        pte_addr += ((addr >> (3 + 9 * level)) & 0x1FF) << 3;
>>> +        pte = ldq_phys(&address_space_memory, pte_addr);
>>> +        oldlevel = level;
>>> +        level = get_pte_translation_mode(pte);
>>> +
>>> +        /* PT is corrupted or not there */
>>> +        if (level != oldlevel - 1) {
>>> +            return -1;
>>> +        }
>>> +    }
>>> +
>>> +    ret->iova = addr & IOMMU_PAGE_MASK_4K;
>>> +    ret->translated_addr = (pte & IOMMU_DEV_PT_ROOT_MASK) & IOMMU_PAGE_MASK_4K;
>>> +    ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
>>> +    ret->perm = IOMMU_RW;
>>> +    return 0;
>>> +}
>>> +
>>> +/* TODO : Mark addresses as Accessed and Dirty */
>>
>>
>> If you don't mark addresses as dirty, can't this cause the sporadic errors
>> of arbitrary programs Jan talked about?
>
> I don't think this is the issue; I seem to be receiving wrong 'host
> physical addresses' in the last few kernel versions. This issue is not
> there in older kernels.
>
>>
>>> +static void amd_iommu_do_translate(AMDIOMMUAddressSpace *as, hwaddr addr,
>>> +                                   bool is_write, IOMMUTLBEntry *ret)
>>> +{
>>> +    AMDIOMMUState *s = as->iommu_state;
>>> +    uint16_t devid = PCI_DEVID(as->bus_num, as->devfn);
>>> +    IOMMUIOTLBEntry *iotlb_entry;
>>> +    uint8_t err;
>>> +    uint64_t entry[4];
>>> +
>>> +    /* try getting a cache entry first */
>>> +    iotlb_entry = amd_iommu_iotlb_lookup(s, addr, as->devfn);
>>> +
>>> +    if (iotlb_entry) {
>>> +        IOMMU_DPRINTF(CACHE, "hit  iotlb devid: %02x:%02x.%x gpa 0x%"PRIx64
>>> +                      " hpa 0x%"PRIx64, PCI_BUS_NUM(devid), PCI_SLOT(devid),
>>> +                      PCI_FUNC(devid), addr, iotlb_entry->translated_addr);
>>> +        ret->iova = addr & IOMMU_PAGE_MASK_4K;
>>> +        ret->translated_addr = iotlb_entry->translated_addr;
>>> +        ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
>>> +        ret->perm = iotlb_entry->perms;
>>> +        return;
>>> +    } else {
>>
>>
>> you return from the if clause so you don't need the else
>>
>>> +        if (!amd_iommu_get_dte(s, devid, entry)) {
>>
>>
>> Is it not an error if you did not find the device id?
>>
>>> +            goto out;
>>> +        }
>>> +
>>> +        err = amd_iommu_page_walk(as, entry, ret,
>>> +                                  is_write ? IOMMU_PERM_WRITE : IOMMU_PERM_READ,
>>> +                                  addr);
>>> +        if (err) {
>>> +            IOMMU_DPRINTF(MMU, "error: hardware error accessing page
>>> tables"
>>> +                          " while translating addr 0x%"PRIx64, addr);
>>> +            amd_iommu_log_pagetab_error(s, as->devfn, addr, 0);
>>> +            goto out;
>>> +        }
>>> +
>>> +        amd_iommu_update_iotlb(s, as->devfn, addr, ret->translated_addr,
>>> +                               ret->perm, entry[1] & IOMMU_DEV_DOMID_ID_MASK);
>>> +        return;
>>> +    }
>>> +
>>> +out:
>>> +    ret->iova = addr;
>>> +    ret->translated_addr = addr & IOMMU_PAGE_MASK_4K;
>>> +    ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
>>> +    ret->perm = IOMMU_RW;
>>> +    return;
>>
>>
>> you don't need the above return
>>
>>> +}
>>> +
>>> +static IOMMUTLBEntry amd_iommu_translate(MemoryRegion *iommu, hwaddr addr,
>>> +                                         bool is_write)
>>> +{
>>> +    IOMMU_DPRINTF(GENERAL, "");
>>> +
>>> +    AMDIOMMUAddressSpace *as = container_of(iommu, AMDIOMMUAddressSpace, iommu);
>>> +    AMDIOMMUState *s = as->iommu_state;
>>> +
>>> +    IOMMUTLBEntry ret = {
>>> +        .target_as = &address_space_memory,
>>> +        .iova = addr,
>>> +        .translated_addr = 0,
>>> +        .addr_mask = ~(hwaddr)0,
>>> +        .perm = IOMMU_NONE,
>>> +    };
>>> +
>>> +    if (!s->enabled) {
>>> +        /* IOMMU disabled - corresponds to iommu=off not
>>> +         * failure to provide any parameter
>>> +         */
>>> +        ret.iova = addr & IOMMU_PAGE_MASK_4K;
>>> +        ret.translated_addr = addr & IOMMU_PAGE_MASK_4K;
>>> +        ret.addr_mask = ~IOMMU_PAGE_MASK_4K;
>>> +        ret.perm = IOMMU_RW;
>>> +        return ret;
>>> +    }
>>> +
>>> +    amd_iommu_do_translate(as, addr, is_write, &ret);
>>> +    IOMMU_DPRINTF(MMU, "devid: %02x:%02x.%x gpa 0x%"PRIx64 " hpa
>>> 0x%"PRIx64,
>>> +                  as->bus_num, PCI_SLOT(as->devfn), PCI_FUNC(as->devfn),
>>> addr,
>>> +                  ret.translated_addr);
>>> +
>>> +    return ret;
>>> +}
>>> +
>>> +static const MemoryRegionOps mmio_mem_ops = {
>>> +    .read = amd_iommu_mmio_read,
>>> +    .write = amd_iommu_mmio_write,
>>> +    .endianness = DEVICE_LITTLE_ENDIAN,
>>> +    .impl = {
>>> +        .min_access_size = 1,
>>> +        .max_access_size = 8,
>>> +        .unaligned = false,
>>> +    },
>>> +    .valid = {
>>> +        .min_access_size = 1,
>>> +        .max_access_size = 8,
>>> +    }
>>> +};
>>> +
>>> +static void amd_iommu_init(AMDIOMMUState *s)
>>> +{
>>> +    printf("amd_iommu_init");
>>
>>
>> you should use the debug macro here
>>
>>> +
>>> +    amd_iommu_iotlb_reset(s);
>>> +
>>> +    s->iommu_ops.translate = amd_iommu_translate;
>>> +
>>> +    s->devtab_len = 0;
>>> +    s->cmdbuf_len = 0;
>>> +    s->cmdbuf_head = 0;
>>> +    s->cmdbuf_tail = 0;
>>> +    s->evtlog_head = 0;
>>> +    s->evtlog_tail = 0;
>>> +    s->excl_enabled = false;
>>> +    s->excl_allow = false;
>>> +    s->mmio_enabled = false;
>>> +    s->enabled = false;
>>> +    s->ats_enabled = false;
>>> +    s->cmdbuf_enabled = false;
>>> +
>>> +    /* reset MMIO */
>>> +    memset(s->mmior, 0, IOMMU_MMIO_SIZE);
>>> +    amd_iommu_set_quad(s, IOMMU_MMIO_EXT_FEATURES, IOMMU_EXT_FEATURES,
>>> +            0xffffffffffffffef, 0);
>>> +    amd_iommu_set_quad(s, IOMMU_MMIO_STATUS, 0, 0x98, 0x67);
>>> +    /* reset device ident */
>>> +    pci_config_set_vendor_id(s->dev.config, PCI_VENDOR_ID_AMD);
>>> +    pci_config_set_device_id(s->dev.config, PCI_DEVICE_ID_RD890_IOMMU);
>>> +    pci_config_set_prog_interface(s->dev.config, 00);
>>> +    pci_config_set_class(s->dev.config, 0x0806);
>>> +
>>> +    /* reset IOMMU specific capabilities  */
>>> +    pci_set_long(s->dev.config + s->capab_offset, IOMMU_CAPAB_FEATURES);
>>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_BAR_LOW,
>>> +                 s->mmio.addr & ~(0xffff0000));
>>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_BAR_HIGH,
>>> +                (s->mmio.addr & ~(0xffff)) >> 16);
>>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_RANGE,
>>> +                 0xff000000);
>>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_MISC, 0);
>>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_MISC,
>>> +            IOMMU_MAX_PH_ADDR | IOMMU_MAX_GVA_ADDR | IOMMU_MAX_VA_ADDR);
>>
>>
>> All the capabilities are read-write? Otherwise you need to set the wmask
>> to indicate what fields are writable.
>>
>>> +}
>>> +
>>> +static void amd_iommu_reset(DeviceState *dev)
>>> +{
>>> +    AMDIOMMUState *s = AMD_IOMMU_DEVICE(dev);
>>> +
>>> +    amd_iommu_init(s);
>>> +}
>>> +
>>> +static void amd_iommu_realize(PCIDevice *dev, Error **error)
>>> +{
>>> +    AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);
>>> +
>>> +    s->iotlb = g_hash_table_new_full(amd_iommu_uint64_hash,
>>> +                                     amd_iommu_uint64_equal, g_free,
>>> g_free);
>>> +
>>> +    s->capab_offset = pci_add_capability(dev, IOMMU_CAPAB_ID_SEC, 0,
>>> +                                         IOMMU_CAPAB_SIZE);
>>> +
>>> +    /* add msi and hypertransport capabilities */
>>> +    pci_add_capability(&s->dev, PCI_CAP_ID_MSI, 0, IOMMU_CAPAB_REG_SIZE);
>>> +    pci_add_capability(&s->dev, PCI_CAP_ID_HT, 0, IOMMU_CAPAB_REG_SIZE);
>>> +
>>> +    amd_iommu_init(s);
>>> +
>>> +    /* set up MMIO */
>>> +    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "mmio",
>>> +                          IOMMU_MMIO_SIZE);
>>> +
>>> +    if (s->mmio.addr == IOMMU_BASE_ADDR) {
>>
>>
>> I don't understand why this is needed here. realize is called only once
>> in the init process and you set it a few lines below.
>>
>>> +        return;
>>> +    }
>>> +
>>> +    s->mmio.addr = IOMMU_BASE_ADDR;
>>> +    memory_region_add_subregion(get_system_memory(), IOMMU_BASE_ADDR, &s->mmio);
>>> +}
>>> +
>>> +static const VMStateDescription vmstate_amd_iommu = {
>>> +    .name = "amd-iommu",
>>> +    .fields  = (VMStateField[]) {
>>> +        VMSTATE_PCI_DEVICE(dev, AMDIOMMUState),
>>> +        VMSTATE_END_OF_LIST()
>>> +    }
>>> +};
>>> +
>>> +static Property amd_iommu_properties[] = {
>>> +    DEFINE_PROP_UINT32("version", AMDIOMMUState, version, 2),
>>> +    DEFINE_PROP_END_OF_LIST(),
>>> +};
>>> +
>>> +static void amd_iommu_uninit(PCIDevice *dev)
>>> +{
>>> +    AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);
>>> +    amd_iommu_iotlb_reset(s);
>>
>>
>> at this point you also need to clean up the memory regions you use.
>>
>>> +}
>>> +
>>> +static void amd_iommu_class_init(ObjectClass *klass, void* data)
>>> +{
>>> +    DeviceClass *dc = DEVICE_CLASS(klass);
>>> +    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
>>> +
>>> +    k->realize = amd_iommu_realize;
>>> +    k->exit = amd_iommu_uninit;
>>> +
>>> +    dc->reset = amd_iommu_reset;
>>> +    dc->vmsd = &vmstate_amd_iommu;
>>> +    dc->props = amd_iommu_properties;
>>> +}
>>> +
>>> +static const TypeInfo amd_iommu = {
>>> +    .name = TYPE_AMD_IOMMU_DEVICE,
>>> +    .parent = TYPE_PCI_DEVICE,
>>> +    .instance_size = sizeof(AMDIOMMUState),
>>> +    .class_init = amd_iommu_class_init
>>> +};
>>> +
>>> +static void amd_iommu_register_types(void)
>>> +{
>>> +    type_register_static(&amd_iommu);
>>> +}
>>> +
>>> +type_init(amd_iommu_register_types);
>>> diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h
>>> new file mode 100644
>>> index 0000000..7d317e1
>>> --- /dev/null
>>> +++ b/hw/i386/amd_iommu.h
>>> @@ -0,0 +1,395 @@
>>> +/*
>>> + * QEMU emulation of an AMD IOMMU (AMD-Vi)
>>> + *
>>> + * Copyright (C) 2011 Eduard - Gabriel Munteanu
>>> + * Copyright (C) 2015 David Kiarie, <davidkiarie4@gmail.com>
>>> + *
>>> + * This program is free software; you can redistribute it and/or modify
>>> + * it under the terms of the GNU General Public License as published by
>>> + * the Free Software Foundation; either version 2 of the License, or
>>> + * (at your option) any later version.
>>> +
>>> + * This program is distributed in the hope that it will be useful,
>>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
>>> + * GNU General Public License for more details.
>>> +
>>> + * You should have received a copy of the GNU General Public License
>>> along
>>> + * with this program; if not, see <http://www.gnu.org/licenses/>.
>>> + */
>>> +
>>> +#ifndef AMD_IOMMU_H_
>>> +#define AMD_IOMMU_H_
>>> +
>>> +#include "hw/hw.h"
>>> +#include "hw/pci/pci.h"
>>> +#include "hw/pci/msi.h"
>>> +#include "hw/sysbus.h"
>>> +#include "sysemu/dma.h"
>>> +
>>> +/* Capability registers */
>>> +#define IOMMU_CAPAB_HEADER            0x00
>>> +#define   IOMMU_CAPAB_REV_TYPE        0x02
>>> +#define   IOMMU_CAPAB_FLAGS           0x03
>>> +#define IOMMU_CAPAB_BAR_LOW           0x04
>>> +#define IOMMU_CAPAB_BAR_HIGH          0x08
>>> +#define IOMMU_CAPAB_RANGE             0x0C
>>> +#define IOMMU_CAPAB_MISC              0x10
>>> +#define IOMMU_CAPAB_MISC1             0x14
>>> +
>>> +#define IOMMU_CAPAB_SIZE              0x18
>>> +#define IOMMU_CAPAB_REG_SIZE          0x04
>>> +
>>> +/* Capability header data */
>>> +#define IOMMU_CAPAB_ID_SEC            0xf
>>> +#define IOMMU_CAPAB_FLAT_EXT          (1 << 28)
>>> +#define IOMMU_CAPAB_EFR_SUP           (1 << 27)
>>> +#define IOMMU_CAPAB_FLAG_NPCACHE      (1 << 26)
>>> +#define IOMMU_CAPAB_FLAG_HTTUNNEL     (1 << 25)
>>> +#define IOMMU_CAPAB_FLAG_IOTLBSUP     (1 << 24)
>>> +#define IOMMU_CAPAB_INIT_REV          (1 << 19)
>>> +#define IOMMU_CAPAB_INIT_TYPE         (3 << 16)
>>> +#define IOMMU_CAPAB_INIT_REV_TYPE     (IOMMU_CAPAB_REV | IOMMU_CAPAB_TYPE)
>>> +#define IOMMU_CAPAB_INIT_FLAGS        (IOMMU_CAPAB_FLAG_NPCACHE | \
>>> +                                       IOMMU_CAPAB_FLAG_HTTUNNEL)
>>> +#define IOMMU_CAPAB_INIT_MISC         ((64 << 15) | (48 << 8))
>>> +#define IOMMU_CAPAB_BAR_MASK          (~((1UL << 14) - 1))
>>> +
>>> +/* MMIO registers */
>>> +#define IOMMU_MMIO_DEVICE_TABLE       0x0000
>>> +#define IOMMU_MMIO_COMMAND_BASE       0x0008
>>> +#define IOMMU_MMIO_EVENT_BASE         0x0010
>>> +#define IOMMU_MMIO_CONTROL            0x0018
>>> +#define IOMMU_MMIO_EXCL_BASE          0x0020
>>> +#define IOMMU_MMIO_EXCL_LIMIT         0x0028
>>> +#define IOMMU_MMIO_EXT_FEATURES       0x0030
>>> +#define IOMMU_MMIO_COMMAND_HEAD       0x2000
>>> +#define IOMMU_MMIO_COMMAND_TAIL       0x2008
>>> +#define IOMMU_MMIO_EVENT_HEAD         0x2010
>>> +#define IOMMU_MMIO_EVENT_TAIL         0x2018
>>> +#define IOMMU_MMIO_STATUS             0x2020
>>> +#define IOMMU_MMIO_PPR_BASE           0x0038
>>> +#define IOMMU_MMIO_PPR_HEAD           0x2030
>>> +#define IOMMU_MMIO_PPR_TAIL           0x2038
>>> +
>>> +#define IOMMU_MMIO_SIZE               0x4000
>>> +
>>> +#define IOMMU_MMIO_DEVTAB_SIZE_MASK   ((1ULL << 12) - 1)
>>> +#define IOMMU_MMIO_DEVTAB_BASE_MASK   (((1ULL << 52) - 1) & ~ \
>>> +                                       IOMMU_MMIO_DEVTAB_SIZE_MASK)
>>> +#define IOMMU_MMIO_DEVTAB_ENTRY_SIZE  32
>>> +#define IOMMU_MMIO_DEVTAB_SIZE_UNIT   4096
>>> +
>>> +/* some of these are similar but defined separately just for readability */
>>> +#define IOMMU_MMIO_CMDBUF_SIZE_BYTE       (IOMMU_MMIO_COMMAND_BASE + 7)
>>> +#define IOMMU_MMIO_CMDBUF_SIZE_MASK       0x0F
>>> +#define IOMMU_MMIO_CMDBUF_BASE_MASK       IOMMU_MMIO_DEVTAB_BASE_MASK
>>> +#define IOMMU_MMIO_CMDBUF_DEFAULT_SIZE    8
>>> +#define IOMMU_MMIO_CMDBUF_HEAD_MASK       (((1ULL << 19) - 1) & ~0x0F)
>>> +#define IOMMU_MMIO_CMDBUF_TAIL_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
>>> +
>>> +#define IOMMU_MMIO_EVTLOG_SIZE_BYTE       (IOMMU_MMIO_EVENT_BASE + 7)
>>> +#define IOMMU_MMIO_EVTLOG_SIZE_MASK       IOMMU_MMIO_CMDBUF_SIZE_MASK
>>> +#define IOMMU_MMIO_EVTLOG_BASE_MASK       IOMMU_MMIO_CMDBUF_BASE_MASK
>>> +#define IOMMU_MMIO_EVTLOG_DEFAULT_SIZE    IOMMU_MMIO_CMDBUF_DEFAULT_SIZE
>>> +#define IOMMU_MMIO_EVTLOG_HEAD_MASK       (((1ULL << 19) - 1) & ~0x0F)
>>> +#define IOMMU_MMIO_EVTLOG_TAIL_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
>>> +
>>> +#define IOMMU_MMIO_PPRLOG_SIZE_BYTE       (IOMMU_MMIO_EVENT_BASE + 7)
>>> +#define IOMMU_MMIO_PPRLOG_HEAD_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
>>> +#define IOMMU_MMIO_PPRLOG_TAIL_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
>>> +#define IOMMU_MMIO_PPRLOG_BASE_MASK       IOMMU_MMIO_EVTLOG_BASE_MASK
>>> +#define IOMMU_MMIO_PPRLOG_SIZE_MASK       IOMMU_MMIO_EVTLOG_SIZE_MASK
>>> +
>>> +#define IOMMU_MMIO_EXCL_BASE_MASK         IOMMU_MMIO_DEVTAB_BASE_MASK
>>> +#define IOMMU_MMIO_EXCL_ENABLED_MASK      (1ULL << 0)
>>> +#define IOMMU_MMIO_EXCL_ALLOW_MASK        (1ULL << 1)
>>> +#define IOMMU_MMIO_EXCL_LIMIT_MASK        IOMMU_MMIO_DEVTAB_BASE_MASK
>>> +#define IOMMU_MMIO_EXCL_LIMIT_LOW         0xFFF
>>> +
>>> +/* mmio control register flags */
>>> +#define IOMMU_MMIO_CONTROL_IOMMUEN        (1ULL << 0)
>>> +#define IOMMU_MMIO_CONTROL_HTTUNEN        (1ULL << 1)
>>> +#define IOMMU_MMIO_CONTROL_EVENTLOGEN     (1ULL << 2)
>>> +#define IOMMU_MMIO_CONTROL_EVENTINTEN     (1ULL << 3)
>>> +#define IOMMU_MMIO_CONTROL_COMWAITINTEN   (1ULL << 4)
>>> +#define IOMMU_MMIO_CONTROL_PASSPW         (1ULL << 7)
>>> +#define IOMMU_MMIO_CONTROL_REPASSPW       (1ULL << 9)
>>> +#define IOMMU_MMIO_CONTROL_COHERENT       (1ULL << 10)
>>> +#define IOMMU_MMIO_CONTROL_ISOC           (1ULL << 11)
>>> +#define IOMMU_MMIO_CONTROL_CMDBUFLEN      (1ULL << 12)
>>> +#define IOMMU_MMIO_CONTROL_PPRLOGEN       (1ULL << 13)
>>> +#define IOMMU_MMIO_CONTROL_PPRINTEN       (1ULL << 14)
>>> +#define IOMMU_MMIO_CONTROL_PPREN          (1ULL << 15)
>>> +#define IOMMU_MMIO_CONTROL_GAEN           (1ULL << 16)
>>> +#define IOMMU_MMIO_CONTROL_GTEN           (1ULL << 17)
>>> +
>>> +/* MMIO status register bits */
>>> +#define IOMMU_MMIO_STATUS_PPR_OVFE    (1 << 18)
>>> +#define IOMMU_MMIO_STATUS_PPR_OVFEB   (1 << 17)
>>> +#define IOMMU_MMIO_STATUS_EVT_ACTIVE  (1 << 16)
>>> +#define IOMMU_MMIO_STATUS_EVT_OVFB    (1 << 15)
>>> +#define IOMMU_MMIO_STATUS_PPR_ACTIVE  (1 << 12)
>>> +#define IOMMU_MMIO_STATUS_PPR_OVFB    (1 << 11)
>>> +#define IOMMU_MMIO_STATUS_GA_INT      (1 << 10)
>>> +#define IOMMU_MMIO_STATUS_GA_RUN      (1 << 9)
>>> +#define IOMMU_MMIO_STATUS_GA_OVF      (1 << 8)
>>> +#define IOMMU_MMIO_STATUS_PPR_RUN     (1 << 7)
>>> +#define IOMMU_MMIO_STATUS_PPR_INT     (1 << 6)
>>> +#define IOMMU_MMIO_STATUS_PPR_OVF     (1 << 5)
>>> +#define IOMMU_MMIO_STATUS_CMDBUF_RUN  (1 << 4)
>>> +#define IOMMU_MMIO_STATUS_EVT_RUN     (1 << 3)
>>> +#define IOMMU_MMIO_STATUS_COMP_INT    (1 << 2)
>>> +#define IOMMU_MMIO_STATUS_EVT_INT     (1 << 1)
>>> +#define IOMMU_MMIO_STATUS_EVT_OVF     (1 << 0)
>>> +
>>> +#define IOMMU_CMDBUF_ID_BYTE              0x07
>>> +#define IOMMU_CMDBUF_ID_RSHIFT            4
>>> +
>>> +#define IOMMU_CMD_COMPLETION_WAIT         0x01
>>> +#define IOMMU_CMD_INVAL_DEVTAB_ENTRY      0x02
>>> +#define IOMMU_CMD_INVAL_IOMMU_PAGES       0x03
>>> +#define IOMMU_CMD_INVAL_IOTLB_PAGES       0x04
>>> +#define IOMMU_CMD_INVAL_INTR_TABLE        0x05
>>> +#define IOMMU_CMD_PREFETCH_IOMMU_PAGES    0x06
>>> +#define IOMMU_CMD_COMPLETE_PPR_REQUEST    0x07
>>> +#define IOMMU_CMD_INVAL_IOMMU_ALL         0x08
>>> +
>>> +#define IOMMU_DEVTAB_ENTRY_SIZE           32
>>> +
>>> +/* Device table entry bits 0:63 */
>>> +#define IOMMU_DEV_VALID                   (1ULL << 0)
>>> +#define IOMMU_DEV_TRANSLATION_VALID       (1ULL << 1)
>>> +#define IOMMU_DEV_MODE_MASK               0x7
>>> +#define IOMMU_DEV_MODE_RSHIFT             9
>>> +#define IOMMU_DEV_PT_ROOT_MASK            0xFFFFFFFFFF000
>>> +#define IOMMU_DEV_PT_ROOT_RSHIFT          12
>>> +#define IOMMU_DEV_PERM_SHIFT              61
>>> +#define IOMMU_DEV_PERM_READ               (1ULL << 61)
>>> +#define IOMMU_DEV_PERM_WRITE              (1ULL << 62)
>>> +
>>> +/* Device table entry bits 64:127 */
>>> +#define IOMMU_DEV_DOMID_ID_MASK          ((1ULL << 16) - 1)
>>> +#define IOMMU_DEV_IOTLB_SUPPORT           (1ULL << 17)
>>> +#define IOMMU_DEV_SUPPRESS_PF             (1ULL << 18)
>>> +#define IOMMU_DEV_SUPPRESS_ALL_PF         (1ULL << 19)
>>> +#define IOMMU_DEV_IOCTL_MASK              (~3)
>>> +#define IOMMU_DEV_IOCTL_RSHIFT            20
>>> +#define   IOMMU_DEV_IOCTL_DENY            0
>>> +#define   IOMMU_DEV_IOCTL_PASSTHROUGH     1
>>> +#define   IOMMU_DEV_IOCTL_TRANSLATE       2
>>> +#define IOMMU_DEV_CACHE                   (1ULL << 37)
>>> +#define IOMMU_DEV_SNOOP_DISABLE           (1ULL << 38)
>>> +#define IOMMU_DEV_EXCL                    (1ULL << 39)
>>> +
>>> +/* Event codes and flags, as stored in the info field */
>>> +#define IOMMU_EVENT_ILLEGAL_DEVTAB_ENTRY  (0x1U << 12)
>>> +#define IOMMU_EVENT_IOPF                  (0x2U << 12)
>>> +#define   IOMMU_EVENT_IOPF_I              (1U << 3)
>>> +#define   IOMMU_EVENT_IOPF_PR             (1U << 4)
>>> +#define   IOMMU_EVENT_IOPF_RW             (1U << 5)
>>> +#define   IOMMU_EVENT_IOPF_PE             (1U << 6)
>>> +#define   IOMMU_EVENT_IOPF_RZ             (1U << 7)
>>> +#define   IOMMU_EVENT_IOPF_TR             (1U << 8)
>>> +#define IOMMU_EVENT_DEV_TAB_HW_ERROR      (0x3U << 12)
>>> +#define IOMMU_EVENT_PAGE_TAB_HW_ERROR     (0x4U << 12)
>>> +#define IOMMU_EVENT_ILLEGAL_COMMAND_ERROR (0x5U << 12)
>>> +#define IOMMU_EVENT_COMMAND_HW_ERROR      (0x6U << 12)
>>> +#define IOMMU_EVENT_IOTLB_INV_TIMEOUT     (0x7U << 12)
>>> +#define IOMMU_EVENT_INVALID_DEV_REQUEST   (0x8U << 12)
>>> +
>>> +#define IOMMU_EVENT_LEN                   16
>>> +#define IOMMU_PERM_READ             (1 << 0)
>>> +#define IOMMU_PERM_WRITE            (1 << 1)
>>> +#define IOMMU_PERM_RW               (IOMMU_PERM_READ | IOMMU_PERM_WRITE)
>>> +
>>> +/* AMD RD890 Chipset */
>>> +#define PCI_DEVICE_ID_RD890_IOMMU   0x20


>>
>>
>> We keep the pci ids in include/hw/pci/pci_ids.h

This is a dummy device id I use for the IOMMU - the IOMMU doesn't have a
specific device id. There's a device id in the Linux include files for a
certain AMD IOMMU, but it makes the IOMMU seem to be on a non-existent
bus, so I don't use it.

>>
>>> +
>>> +#define IOMMU_FEATURE_PREFETCH            (1ULL << 0)
>>> +#define IOMMU_FEATURE_PPR                 (1ULL << 1)
>>> +#define IOMMU_FEATURE_NX                  (1ULL << 3)
>>> +#define IOMMU_FEATURE_GT                  (1ULL << 4)
>>> +#define IOMMU_FEATURE_IA                  (1ULL << 6)
>>> +#define IOMMU_FEATURE_GA                  (1ULL << 7)
>>> +#define IOMMU_FEATURE_HE                  (1ULL << 8)
>>> +#define IOMMU_FEATURE_PC                  (1ULL << 9)
>>> +
>>> +/* reserved DTE bits */
>>> +#define IOMMU_DTE_LOWER_QUAD_RESERVED  0x80300000000000fc
>>> +#define IOMMU_DTE_MIDDLE_QUAD_RESERVED 0x0000000000000100
>>> +#define IOMMU_DTE_UPPER_QUAD_RESERVED  0x08f0000000000000
>>> +
>>> +/* IOMMU paging mode */
>>> +#define IOMMU_GATS_MODE                 (6ULL <<  12)
>>> +#define IOMMU_HATS_MODE                 (6ULL <<  10)
>>> +
>>> +/* PCI SIG constants */
>>> +#define PCI_BUS_MAX 256
>>> +#define PCI_SLOT_MAX 32
>>> +#define PCI_FUNC_MAX 8
>>> +#define PCI_DEVFN_MAX 256
>>
>>
>> Maybe we can move the PCI macros to include/hw/pci/pci.h, those are not
>> IOMMU specific.

Yeah, these are PCI macros, but they are not copied from Linux, while
the macros in pci.h seem to have been copied from Linux.
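
For illustration only, a sketch of how they might look if moved, keeping
the names used in this patch (PCI_DEVFN_MAX follows from the standard
devfn = slot << 3 | func encoding):

    /* include/hw/pci/pci.h */
    #define PCI_BUS_MAX    256
    #define PCI_SLOT_MAX   32
    #define PCI_FUNC_MAX   8
    #define PCI_DEVFN_MAX  (PCI_SLOT_MAX * PCI_FUNC_MAX)   /* 256 */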

>>
>>> +
>>> +/* IOTLB */
>>> +#define IOMMU_IOTLB_MAX_SIZE 1024
>>> +#define IOMMU_DEVID_SHIFT    36
>>> +
>>> +/* extended feature support */
>>> +#define IOMMU_EXT_FEATURES (IOMMU_FEATURE_PREFETCH | IOMMU_FEATURE_PPR | \
>>> +        IOMMU_FEATURE_NX | IOMMU_FEATURE_IA | IOMMU_FEATURE_GT | \
>>> +        IOMMU_FEATURE_GA | IOMMU_FEATURE_HE | IOMMU_GATS_MODE | \
>>> +        IOMMU_HATS_MODE)
>>> +
>>> +/* capabilities header */
>>> +#define IOMMU_CAPAB_FEATURES (IOMMU_CAPAB_FLAT_EXT | \
>>> +        IOMMU_CAPAB_FLAG_NPCACHE | IOMMU_CAPAB_FLAG_IOTLBSUP \
>>> +        | IOMMU_CAPAB_ID_SEC | IOMMU_CAPAB_INIT_TYPE | \
>>> +        IOMMU_CAPAB_FLAG_HTTUNNEL |  IOMMU_CAPAB_EFR_SUP)
>>> +
>>> +/* command constants */
>>> +#define IOMMU_COM_STORE_ADDRESS_MASK 0xffffffffffff8
>>> +#define IOMMU_COM_COMPLETION_STORE_MASK 0x1
>>> +#define IOMMU_COM_COMPLETION_INTR 0x2
>>> +#define IOMMU_COM_COMPLETION_DATA_OFF 0x8
>>> +#define IOMMU_COMMAND_SIZE 0x10
>>> +
>>> +/* IOMMU default address */
>>> +#define IOMMU_BASE_ADDR 0xfed80000
>>> +
>>> +/* page management constants */
>>> +#define IOMMU_PAGE_SHIFT 12
>>> +#define IOMMU_PAGE_SIZE  (1ULL << IOMMU_PAGE_SHIFT)
>>> +
>>> +#define IOMMU_PAGE_SHIFT_4K 12
>>> +#define IOMMU_PAGE_MASK_4K  (~((1ULL << IOMMU_PAGE_SHIFT_4K) - 1))
>>> +#define IOMMU_PAGE_SHIFT_2M 21
>>> +#define IOMMU_PAGE_MASK_2M  (~((1ULL << IOMMU_PAGE_SHIFT_2M) - 1))
>>> +#define IOMMU_PAGE_SHIFT_1G 30
>>> +#define IOMMU_PAGE_MASK_1G (~((1ULL << IOMMU_PAGE_SHIFT_1G) - 1))
>>> +
>>> +#define IOMMU_MAX_VA_ADDR          (48UL << 5)
>>> +#define IOMMU_MAX_PH_ADDR          (40UL << 8)
>>> +#define IOMMU_MAX_GVA_ADDR         (48UL << 15)
>>> +
>>> +/* invalidation command device id */
>>> +#define IOMMU_INVAL_DEV_ID_SHIFT  32
>>> +#define IOMMU_INVAL_DEV_ID_MASK   (~((1UL << IOMMU_INVAL_DEV_ID_SHIFT) - 1))
David Kiarie March 2, 2016, 4:08 a.m. UTC | #4
On Wed, Mar 2, 2016 at 7:00 AM, David Kiarie <davidkiarie4@gmail.com> wrote:
> On Fri, Feb 26, 2016 at 9:23 AM, David Kiarie <davidkiarie4@gmail.com> wrote:
>> On Thu, Feb 25, 2016 at 6:43 PM, Marcel Apfelbaum <marcel@redhat.com> wrote:
>>> On 02/21/2016 08:10 PM, David Kiarie wrote:
>>>>
>>>> Add AMD IOMMU emulaton to Qemu in addition to Intel IOMMU
>>>> The IOMMU does basic translation, error checking and has a
>>>> mininal IOTLB implementation
>>>
>>>
>>> Hi,
>>>
>>>>
>>>> Signed-off-by: David Kiarie <davidkiarie4@gmail.com>
>>>> ---
>>>>   hw/i386/Makefile.objs |    1 +
>>>>   hw/i386/amd_iommu.c   | 1432
>>>> +++++++++++++++++++++++++++++++++++++++++++++++++
>>>>   hw/i386/amd_iommu.h   |  395 ++++++++++++++
>>>>   include/hw/pci/pci.h  |    2 +
>>>>   4 files changed, 1830 insertions(+)
>>>>   create mode 100644 hw/i386/amd_iommu.c
>>>>   create mode 100644 hw/i386/amd_iommu.h
>>>>
>>>> diff --git a/hw/i386/Makefile.objs b/hw/i386/Makefile.objs
>>>> index b52d5b8..2f1a265 100644
>>>> --- a/hw/i386/Makefile.objs
>>>> +++ b/hw/i386/Makefile.objs
>>>> @@ -3,6 +3,7 @@ obj-y += multiboot.o
>>>>   obj-y += pc.o pc_piix.o pc_q35.o
>>>>   obj-y += pc_sysfw.o
>>>>   obj-y += intel_iommu.o
>>>> +obj-y += amd_iommu.o
>>>>   obj-$(CONFIG_XEN) += ../xenpv/ xen/
>>>>
>>>>   obj-y += kvmvapic.o
>>>> diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c
>>>> new file mode 100644
>>>> index 0000000..3dac043
>>>> --- /dev/null
>>>> +++ b/hw/i386/amd_iommu.c
>>>> @@ -0,0 +1,1432 @@
>>>> +/*
>>>> + * QEMU emulation of AMD IOMMU (AMD-Vi)
>>>> + *
>>>> + * Copyright (C) 2011 Eduard - Gabriel Munteanu
>>>> + * Copyright (C) 2015 David Kiarie, <davidkiarie4@gmail.com>
>>>> + *
>>>> + * This program is free software; you can redistribute it and/or modify
>>>> + * it under the terms of the GNU General Public License as published by
>>>> + * the Free Software Foundation; either version 2 of the License, or
>>>> + * (at your option) any later version.
>>>> +
>>>> + * This program is distributed in the hope that it will be useful,
>>>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>>>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
>>>> + * GNU General Public License for more details.
>>>> +
>>>> + * You should have received a copy of the GNU General Public License
>>>> along
>>>> + * with this program; if not, see <http://www.gnu.org/licenses/>.
>>>> + *
>>>> + * Cache implementation inspired by hw/i386/intel_iommu.c
>>>> + *
>>>> + */
>>>> +#include "hw/i386/amd_iommu.h"
>>>> +
>>>> +/*#define DEBUG_AMD_IOMMU*/
>>>> +#ifdef DEBUG_AMD_IOMMU
>>>> +enum {
>>>> +    DEBUG_GENERAL, DEBUG_CAPAB, DEBUG_MMIO, DEBUG_ELOG,
>>>> +    DEBUG_CACHE, DEBUG_COMMAND, DEBUG_MMU
>>>> +};
>>>> +
>>>> +#define IOMMU_DBGBIT(x)   (1 << DEBUG_##x)
>>>> +static int iommu_dbgflags = IOMMU_DBGBIT(MMIO);
>>>> +
>>>> +#define IOMMU_DPRINTF(what, fmt, ...) do { \
>>>> +    if (iommu_dbgflags & IOMMU_DBGBIT(what)) { \
>>>> +        fprintf(stderr, "(amd-iommu)%s: " fmt "\n", __func__, \
>>>> +                ## __VA_ARGS__); } \
>>>> +    } while (0)
>>>> +#else
>>>> +#define IOMMU_DPRINTF(what, fmt, ...) do {} while (0)
>>>> +#endif
>>>> +
>>>> +typedef struct AMDIOMMUAddressSpace {
>>>> +    uint8_t bus_num;            /* bus number
>>>> */
>>>> +    uint8_t devfn;              /* device function
>>>> */
>>>> +    AMDIOMMUState *iommu_state; /* IOMMU - one per machine
>>>> */
>>>> +    MemoryRegion iommu;         /* Device's iommu region
>>>> */
>>>> +    AddressSpace as;            /* device's corresponding address space
>>>> */
>>>> +} AMDIOMMUAddressSpace;
>>>> +
>>>> +/* IOMMU cache entry */
>>>> +typedef struct IOMMUIOTLBEntry {
>>>> +    uint64_t gfn;
>>>> +    uint16_t domid;
>>>> +    uint64_t devid;
>>>> +    uint64_t perms;
>>>> +    uint64_t translated_addr;
>>>> +} IOMMUIOTLBEntry;
>>>> +
>>>> +/* configure MMIO registers at startup/reset */
>>>> +static void amd_iommu_set_quad(AMDIOMMUState *s, hwaddr addr, uint64_t
>>>> val,
>>>> +                               uint64_t romask, uint64_t w1cmask)
>>>> +{
>>>> +    stq_le_p(&s->mmior[addr], val);
>>>> +    stq_le_p(&s->romask[addr], romask);
>>>> +    stq_le_p(&s->w1cmask[addr], w1cmask);
>>>> +}
>>>> +
>>>> +static uint16_t amd_iommu_readw(AMDIOMMUState *s, hwaddr addr)
>>>> +{
>>>> +    return lduw_le_p(&s->mmior[addr]);
>>>> +}
>>>> +
>>>> +static uint32_t amd_iommu_readl(AMDIOMMUState *s, hwaddr addr)
>>>> +{
>>>> +    return ldl_le_p(&s->mmior[addr]);
>>>> +}
>>>> +
>>>> +static uint64_t amd_iommu_readq(AMDIOMMUState *s, hwaddr addr)
>>>> +{
>>>> +    return ldq_le_p(&s->mmior[addr]);
>>>> +}
>>>> +
>>>> +/* internal write */
>>>> +static void amd_iommu_writeq_raw(AMDIOMMUState *s, uint64_t val, hwaddr
>>>> addr)
>>>> +{
>>>> +    stq_le_p(&s->mmior[addr], val);
>>>> +}
>>>> +
>>>> +/* external write */
>>>> +static void amd_iommu_writew(AMDIOMMUState *s, hwaddr addr, uint16_t val)
>>>> +{
>>>> +    uint16_t romask = lduw_le_p(&s->romask[addr]);
>>>> +    uint16_t w1cmask = lduw_le_p(&s->w1cmask[addr]);
>>>> +    uint16_t oldval = lduw_le_p(&s->mmior[addr]);
>>>> +    stw_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask &
>>>> oldval));
>>>> +}
>>>> +
>>>> +static void amd_iommu_writel(AMDIOMMUState *s, hwaddr addr, uint32_t val)
>>>> +{
>>>> +    uint32_t romask = ldl_le_p(&s->romask[addr]);
>>>> +    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
>>>> +    uint32_t oldval = ldl_le_p(&s->mmior[addr]);
>>>> +    stl_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask &
>>>> oldval));
>>>> +}
>>>> +
>>>> +static void amd_iommu_writeq(AMDIOMMUState *s, hwaddr addr, uint64_t val)
>>>> +{
>>>> +    uint64_t romask = ldq_le_p(&s->romask[addr]);
>>>> +    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
>>>> +    uint32_t oldval = ldq_le_p(&s->mmior[addr]);
>>>> +    stq_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask &
>>>> oldval));
>>>> +}
>>>> +
>>>> +static void amd_iommu_log_event(AMDIOMMUState *s, uint16_t *evt)
>>>> +{
>>>> +    /* event logging not enabled */
>>>> +    if (!s->evtlog_enabled || *(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS] &
>>>> +        IOMMU_MMIO_STATUS_EVT_OVF) {
>>>> +        return;
>>>> +    }
>>>> +
>>>> +    /* event log buffer full */
>>>> +    if (s->evtlog_tail >= s->evtlog_len) {
>>>> +        *(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS] |=
>>>> IOMMU_MMIO_STATUS_EVT_OVF;
>>>> +        /* generate interrupt */
>>>> +        msi_notify(&s->dev, 0);
>>>> +    }
>>>> +
>>>> +    if (dma_memory_write(&address_space_memory, s->evtlog + s->evtlog_tail,
>>>> +       evt, IOMMU_EVENT_LEN)) {
>>>> +        IOMMU_DPRINTF(ELOG, "error: fail to write at address 0x%"PRIx64
>>>> +                      " + offset 0x%"PRIx32, s->evtlog, s->evtlog_tail);
>>>> +    }
>>>> +
>>>> +     s->evtlog_tail += IOMMU_EVENT_LEN;
>>>> +     *(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS] |=
>>>> IOMMU_MMIO_STATUS_COMP_INT;
>>>> +}
>>>> +
>>>> +/* log an error encountered page-walking
>>>> + *
>>>> + * @addr: virtual address in translation request
>>>> + */
>>>> +static void amd_iommu_page_fault(AMDIOMMUState *s, uint16_t devid,
>>>> +                                 dma_addr_t addr, uint16_t info)
>>>> +{
>>>> +    IOMMU_DPRINTF(ELOG, "");
>>>> +
>>>> +    uint16_t evt[8];
>>>> +
>>>> +    info |= IOMMU_EVENT_IOPF_I;
>>>> +
>>>> +    /* encode information */
>>>> +    *(uint16_t *)&evt[0] = devid;
>>>> +    *(uint16_t *)&evt[3] = info;
>>>> +    *(uint64_t *)&evt[4] = cpu_to_le64(addr);
>>>> +
>>>> +    /* log a page fault */
>>>> +    amd_iommu_log_event(s, evt);
>>>> +
>>>> +    /* Abort the translation */
>>>> +    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
>>>> +            PCI_STATUS_SIG_TARGET_ABORT);
>>>> +}
>>>> +/*
>>>> + * log a master abort accessing device table
>>>> + *  @devtab : address of device table entry
>>>> + *  @info : error flags
>>>> + */
>>>> +static void amd_iommu_log_devtab_error(AMDIOMMUState *s, uint16_t devid,
>>>> +                                       dma_addr_t devtab, uint16_t info)
>>>> +{
>>>> +
>>>> +    IOMMU_DPRINTF(ELOG, "");
>>>> +
>>>> +    uint16_t evt[8];
>>>> +
>>>> +    info |= IOMMU_EVENT_DEV_TAB_HW_ERROR;
>>>> +
>>>> +    /* encode information */
>>>> +    *(uint16_t *)&evt[0] = devid;
>>>> +    *(uint8_t *)&evt[3]  = info;
>>>> +    *(uint64_t *)&evt[4] = cpu_to_le64(devtab);
>>>> +
>>>> +    amd_iommu_log_event(s, evt);
>>>> +
>>>> +    /* Abort the translation */
>>>> +    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
>>>> +            PCI_STATUS_SIG_TARGET_ABORT);
>>>> +
>>>> +}
>>>> +
>>>> +/* log a master abort encountered during a page-walk
>>>> + *  @addr : address that couldn't be accessed
>>>> + */
>>>> +static void amd_iommu_log_pagetab_error(AMDIOMMUState *s, uint16_t devid,
>>>> +                                        dma_addr_t addr, uint16_t info)
>>>> +{
>>>> +    IOMMU_DPRINTF(ELOG, "");
>>>> +
>>>> +    uint16_t evt[8];
>>>> +
>>>> +    info |= IOMMU_EVENT_PAGE_TAB_HW_ERROR;
>>>> +
>>>> +    /* encode information */
>>>> +    *(uint16_t *)&evt[0] = devid;
>>>> +    *(uint8_t *)&evt[3]  = info;
>>>> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
>>>> +
>>>> +    amd_iommu_log_event(s, evt);
>>>> +
>>>> +    /* Abort the translation */
>>>> +    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
>>>> +            PCI_STATUS_SIG_TARGET_ABORT);
>>>> +
>>>> +}
>>>> +
>>>> +/* log an event trying to access command buffer
>>>> + *   @addr : address that couldn't be accessed
>>>> + */
>>>> +static void amd_iommu_log_command_error(AMDIOMMUState *s, dma_addr_t
>>>> addr)
>>>> +{
>>>> +    IOMMU_DPRINTF(ELOG, "");
>>>> +
>>>> +    uint16_t evt[8];
>>>> +
>>>> +    /* encode information */
>>>> +    *(uint8_t *)&evt[3]  = (uint8_t)IOMMU_EVENT_COMMAND_HW_ERROR;
>>>> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
>>>> +
>>>> +    amd_iommu_log_event(s, evt);
>>>> +
>>>> +    /* Abort the translation */
>>>> +    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
>>>> +            PCI_STATUS_SIG_TARGET_ABORT);
>>>> +}
>>>> +
>>>> +/* log an illegal command event
>>>> + *   @addr : address of illegal command
>>>> + */
>>>> +static void amd_iommu_log_illegalcom_error(AMDIOMMUState *s, uint16_t
>>>> info,
>>>> +                                           dma_addr_t addr)
>>>> +{
>>>> +    IOMMU_DPRINTF(ELOG, "");
>>>> +
>>>> +    uint16_t evt[8];
>>>> +
>>>> +    /* encode information */
>>>> +    *(uint8_t *)&evt[3]  = (uint8_t)IOMMU_EVENT_ILLEGAL_COMMAND_ERROR;
>>>> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
>>>
>>>
>>> Can you please use a macro instead of the literal 3?
>>>
>>>> +
>>>> +    amd_iommu_log_event(s, evt);
>>>> +}
>>>> +
>>>> +/* log an error accessing device table
>>>> + *
>>>> + *  @devid : device owning the table entry
>>>> + *  @devtab : address of device table entry
>>>> + *  @info : error flags
>>>> + */
>>>> +static void amd_iommu_log_illegaldevtab_error(AMDIOMMUState *s, uint16_t
>>>> devid,
>>>> +                                              dma_addr_t addr, uint16_t
>>>> info)
>>>> +{
>>>> +    IOMMU_DPRINTF(ELOG, "");
>>>> +
>>>> +    uint16_t evt[8];
>>>> +
>>>> +    info |= IOMMU_EVENT_ILLEGAL_DEVTAB_ENTRY;
>>>> +
>>>> +    *(uint16_t *)&evt[0] = devid;
>>>> +    *(uint8_t *)&evt[3]  = info;
>>>> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
>>>> +
>>>> +    amd_iommu_log_event(s, evt);
>>>> +}
>>>
>>>
>>> It seems that all the log functions do the same:
>>> create an event, log it and optionally set PCI_STATUS_SIG_TARGET_ABORT
>>>
>>> I would consider uniting them in the same function. (not a must)
>
> I would prefer to leave the event code separate, but I could probably
> add a macro. Currently we are logging a lot less information than we
> should be logging, and with the logging of more information it could
> become a bit ugly.
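
For instance, a minimal sketch of such a helper (hypothetical name,
untested; the real code would encode more fields, and some events use
different field widths):

    static void amd_iommu_log_and_abort(AMDIOMMUState *s, uint16_t devid,
                                        uint16_t info, uint64_t addr,
                                        bool abort_translation)
    {
        uint16_t evt[8] = { 0 };

        /* layout shared by the current log helpers:
         * device id, error flags and the faulting address
         */
        *(uint16_t *)&evt[0] = devid;
        *(uint16_t *)&evt[3] = info;
        *(uint64_t *)&evt[4] = cpu_to_le64(addr);

        amd_iommu_log_event(s, evt);

        if (abort_translation) {
            pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
                                       PCI_STATUS_SIG_TARGET_ABORT);
        }
    }
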
>
>>>
>>>> +
>>>> +static gboolean amd_iommu_uint64_equal(gconstpointer v1, gconstpointer
>>>> v2)
>>>> +{
>>>> +    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
>>>> +}
>>>> +
>>>> +static guint amd_iommu_uint64_hash(gconstpointer v)
>>>> +{
>>>> +    return (guint)*(const uint64_t *)v;
>>>> +}
>>>> +
>>>> +static IOMMUIOTLBEntry *amd_iommu_iotlb_lookup(AMDIOMMUState *s, hwaddr
>>>> addr,
>>>> +                                               uint64_t devid)
>>>> +{
>>>> +    uint64_t key = (addr >> IOMMU_PAGE_SHIFT_4K) |
>>>> +                   ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
>>>> +    return g_hash_table_lookup(s->iotlb, &key);
>>>> +}
>>>> +
>>>> +static void amd_iommu_iotlb_reset(AMDIOMMUState *s)
>>>> +{
>>>> +    assert(s->iotlb);
>>>> +    g_hash_table_remove_all(s->iotlb);
>>>> +}
>>>> +
>>>> +static gboolean amd_iommu_iotlb_remove_by_devid(gpointer key, gpointer
>>>> value,
>>>> +                                                gpointer user_data)
>>>> +{
>>>> +    IOMMUIOTLBEntry *entry = (IOMMUIOTLBEntry *)value;
>>>> +    uint16_t devid = *(uint16_t *)user_data;
>>>> +    return entry->devid == devid;
>>>> +}
>>>> +
>>>> +static void amd_iommu_iotlb_remove_page(AMDIOMMUState *s, hwaddr addr,
>>>> +                                        uint64_t devid)
>>>> +{
>>>> +    uint64_t key = (addr >> IOMMU_PAGE_SHIFT_4K) |
>>>> +                   ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
>>>> +    g_hash_table_remove(s->iotlb, &key);
>>>> +}
>>>> +
>>>> +/* extract device id */
>>>> +static inline uint16_t devid_extract(uint8_t *cmd)
>>>> +{
>>>> +    return (uint16_t)cmd[2] & IOMMU_INVAL_DEV_ID_MASK;
>>>> +}
>>>> +
>>>> +static void amd_iommu_invalidate_iotlb(AMDIOMMUState *s, uint64_t *cmd)
>>>> +{
>>>> +    uint16_t devid = devid_extract((uint8_t *)cmd);
>>>> +    /* if invalidation of more than one page requested */
>>>> +    if (IOMMU_INVAL_ALL(cmd[0])) {
>>>> +        g_hash_table_foreach_remove(s->iotlb,
>>>> amd_iommu_iotlb_remove_by_devid,
>>>> +                                    &devid);
>>>> +    } else {
>>>> +        hwaddr addr = (hwaddr)(cmd[1] & IOMMU_INVAL_ADDR_MASK);
>>>> +        amd_iommu_iotlb_remove_page(s, addr, devid);
>>>> +    }
>>>> +}
>>>> +
>>>> +static void amd_iommu_update_iotlb(AMDIOMMUState *s, uint16_t devid,
>>>> +                                   uint64_t gpa, uint64_t spa, uint64_t
>>>> perms,
>>>> +                                   uint16_t domid)
>>>> +{
>>>> +    IOMMUIOTLBEntry *entry = g_malloc(sizeof(*entry));
>>>> +    uint64_t *key = g_malloc(sizeof(key));
>>>> +    uint64_t gfn = gpa >> IOMMU_PAGE_SHIFT_4K;
>>>> +
>>>> +    IOMMU_DPRINTF(CACHE, " update iotlb devid: %02x:%02x.%x gpa
>>>> 0x%"PRIx64
>>>> +                  " hpa 0x%"PRIx64, PCI_BUS_NUM(devid), PCI_SLOT(devid),
>>>> +                  PCI_FUNC(devid), gpa, spa);
>>>> +
>>>> +    if (g_hash_table_size(s->iotlb) >= IOMMU_IOTLB_MAX_SIZE) {
>>>> +        IOMMU_DPRINTF(CACHE, "iotlb exceeds size limit - reset");
>>>> +        amd_iommu_iotlb_reset(s);
>>>> +    }
>>>> +
>>>> +    entry->gfn = gfn;
>>>> +    entry->domid = domid;
>>>> +    entry->perms = perms;
>>>> +    entry->translated_addr = spa;
>>>> +    *key = gfn | ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
>>>> +    g_hash_table_replace(s->iotlb, key, entry);
>>>> +}
>>>> +
>>>> +/* execute a completion wait command */
>>>> +static void amd_iommu_completion_wait(AMDIOMMUState *s, uint8_t *cmd)
>>>> +{
>>>> +    IOMMU_DPRINTF(COMMAND, "");
>>>> +    unsigned int addr;
>>>> +
>>>> +    /* completion store */
>>>> +    if (cmd[0] & IOMMU_COM_COMPLETION_STORE_MASK) {
>>>> +        addr = le64_to_cpu(*(uint64_t *)cmd) &
>>>> IOMMU_COM_STORE_ADDRESS_MASK;
>>>> +        if (dma_memory_write(&address_space_memory, addr, cmd + 8, 8)) {
>>>> +            IOMMU_DPRINTF(ELOG, "error: fail to write at address
>>>> 0%x"PRIx64,
>>>> +                          addr);
>>>> +        }
>>>> +    }
>>>> +
>>>> +    /* set completion interrupt */
>>>> +    if (cmd[0] & IOMMU_COM_COMPLETION_INTR) {
>>>> +        s->mmior[IOMMU_MMIO_STATUS] |= IOMMU_MMIO_STATUS_COMP_INT;
>>>> +    }
>>>> +}
>>>> +
>>>> +/* get command type */
>>>> +static uint8_t opcode(uint8_t *cmd)
>>>> +{
>>>> +    return cmd[IOMMU_CMDBUF_ID_BYTE] >> IOMMU_CMDBUF_ID_RSHIFT;
>>>> +}
>>>> +
>>>> +/* linux seems to be using reserved bits so I just log without aborting */
>>>
>>>
>>> I couldn't quite understand the comment
>>>
>>>> +static void iommu_inval_devtab_entry(AMDIOMMUState *s, uint8_t *cmd,
>>>> +                                     uint8_t type)
>>>> +{
>>>> +    IOMMU_DPRINTF(COMMAND, "");
>>>> +
>>>> +    /* This command should invalidate internal caches of which there
>>>> isn't */
>>>> +    if (*(uint64_t *)&cmd[0] & IOMMU_CMD_INVAL_DEV_RSVD ||
>>>> +            *(uint64_t *)&cmd[1]) {
>>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>>>> s->cmdbuf_head);
>>>> +    }
>>>> +#ifdef DEBUG_AMD_IOMMU
>>>> +    uint16_t devid = devid_extract(cmd);
>>>> +#endif
>>>> +    IOMMU_DPRINTF(COMMAND, "device table entry for devid: %02x:%02x.%x"
>>>> +                  "invalidated", PCI_BUS_NUM(devid), PCI_SLOT(devid),
>>>> +                  PCI_FUNC(devid));
>>>> +}
>>>> +
>>>> +static void iommu_completion_wait(AMDIOMMUState *s, uint8_t *cmd, uint8_t
>>>> type)
>>>> +{
>>>> +    IOMMU_DPRINTF(COMMAND, "");
>>>> +
>>>> +    if (*(uint32_t *)&cmd[1] & IOMMU_COMPLETION_WAIT_RSVD) {
>>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>>>> s->cmdbuf_head);
>>>> +    }
>>>> +    /* pretend to wait for command execution to complete */
>>>> +    IOMMU_DPRINTF(COMMAND, "completion wait requested with store address
>>>> 0x%"
>>>> +                  PRIx64 " and store data 0x%"PRIx64, (cmd[0] &
>>>> +                  IOMMU_COM_STORE_ADDRESS_MASK), *(uint64_t *)(cmd + 8));
>>>> +    amd_iommu_completion_wait(s, cmd);
>>>> +}
>>>> +
>>>> +static void iommu_complete_ppr(AMDIOMMUState *s, uint8_t *cmd, uint8_t
>>>> type)
>>>> +{
>>>> +    IOMMU_DPRINTF(COMMAND, "");
>>>> +
>>>> +    if ((*(uint64_t *)&cmd[0] & IOMMU_COMPLETE_PPR_RQ_RSVD) ||
>>>> +       *(uint64_t *)&cmd[1] & 0xffff000000000000) {
>>>
>>>
>>>
>>> Can you please document this mask?
>>>
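
Something along these lines would keep the check self-documenting - the
macro name is only a suggestion and the exact reserved-bit layout still
needs checking against the AMD-Vi spec:

    /* assumed: bits 48:63 of the command's second quadword are reserved */
    #define IOMMU_COMPLETE_PPR_RQ_HIGH_RSVD   0xffff000000000000ULL

    if ((*(uint64_t *)&cmd[0] & IOMMU_COMPLETE_PPR_RQ_RSVD) ||
        (*(uint64_t *)&cmd[1] & IOMMU_COMPLETE_PPR_RQ_HIGH_RSVD)) {
        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
    }
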
>>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>>>> s->cmdbuf_head);
>>>> +    }
>>>> +
>>>> +    IOMMU_DPRINTF(COMMAND, "Execution of PPR queue requested");
>>>> +}
>>>> +
>>>> +static void iommu_inval_all(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
>>>> +{
>>>> +    IOMMU_DPRINTF(COMMAND, "");
>>>> +
>>>> +    if ((*(uint64_t *)&cmd[0] & IOMMU_INVAL_IOMMU_ALL_RSVD) ||
>>>> +       *(uint64_t *)&cmd[1]) {
>>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>>>> s->cmdbuf_head);
>>>> +    }
>>>> +
>>>> +    amd_iommu_iotlb_reset(s);
>>>> +    IOMMU_DPRINTF(COMMAND, "Invalidation of all IOMMU cache requested");
>>>> +}
>>>> +
>>>> +static inline uint16_t domid_extract(uint64_t *cmd)
>>>> +{
>>>> +    return (uint16_t)cmd[0] & IOMMU_INVAL_PAGES_DOMID;
>>>> +}
>>>> +
>>>> +static gboolean amd_iommu_iotlb_remove_by_domid(gpointer key, gpointer
>>>> value,
>>>> +                                                gpointer user_data)
>>>> +{
>>>> +    IOMMUIOTLBEntry *entry = (IOMMUIOTLBEntry *)value;
>>>> +    uint16_t domid = *(uint16_t *)user_data;
>>>> +    return entry->domid == domid;
>>>> +}
>>>> +
>>>> +/* we don't have devid - we can't remove pages by address */
>>>> +static void iommu_inval_pages(AMDIOMMUState *s, uint8_t *cmd, uint8_t
>>>> type)
>>>> +{
>>>> +    IOMMU_DPRINTF(COMMAND, "");
>>>> +    uint16_t domid = domid_extract((uint64_t *)cmd);
>>>> +
>>>> +    if (*(uint64_t *)&cmd[0] & IOMMU_INVAL_IOMMU_PAGES_RSVD ||
>>>> +       *(uint32_t *)&cmd[1] & 0x00000ff0) {
>>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>>>> s->cmdbuf_head);
>>>> +    }
>>>> +
>>>> +    g_hash_table_foreach_remove(s->iotlb,
>>>> amd_iommu_iotlb_remove_by_domid,
>>>> +                                &domid);
>>>> +
>>>> +    IOMMU_DPRINTF(COMMAND, "IOMMU pages for domain 0x%"PRIx16
>>>> "invalidated",
>>>> +                  domid);
>>>> +}
>>>> +
>>>> +static void iommu_prefetch_pages(AMDIOMMUState *s, uint8_t *cmd, uint8_t
>>>> type)
>>>> +{
>>>> +    IOMMU_DPRINTF(COMMAND, "");
>>>> +
>>>> +    if ((*(uint64_t *)&cmd[0] & IOMMU_PRF_IOMMU_PAGES_RSVD) ||
>>>> +       (*(uint32_t *)&cmd[1] & 0x00000fd4)) {
>>>
>>>
>>> Here the same, maybe you can name the mask, so we can follow the
>>> spec more easily.
>>>
>>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>>>> s->cmdbuf_head);
>>>> +    }
>>>> +
>>>> +    IOMMU_DPRINTF(COMMAND, "Pre-fetch of IOMMU pages requested");
>>>> +}
>>>> +
>>>> +static void iommu_inval_inttable(AMDIOMMUState *s, uint8_t *cmd, uint8_t
>>>> type)
>>>> +{
>>>> +    IOMMU_DPRINTF(COMMAND, "");
>>>> +
>>>> +    if ((*(uint64_t *)&cmd[0] & IOMMU_INVAL_INTR_TABLE_RSVD) ||
>>>> +       *(uint64_t *)&cmd[1]) {
>>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>>>> s->cmdbuf_head);
>>>> +        return;
>>>> +    }
>>>> +
>>>> +    IOMMU_DPRINTF(COMMAND, "interrupt table invalidated");
>>>> +}
>>>> +
>>>> +static void iommu_inval_iotlb(AMDIOMMUState *s, uint8_t *cmd, uint8_t
>>>> type)
>>>> +{
>>>> +    IOMMU_DPRINTF(COMMAND, "");
>>>> +
>>>> +    if (*(uint32_t *)&cmd[2] & IOMMU_INVAL_IOTLB_PAGES_RSVD) {
>>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
>>>> s->cmdbuf_head);
>>>> +        return;
>>>> +    }
>>>> +
>>>> +    amd_iommu_invalidate_iotlb(s, (uint64_t *)cmd);
>>>> +    IOMMU_DPRINTF(COMMAND, "IOTLB pages invalidated");
>>>> +}
>>>> +
>>>> +/* not honouring reserved bits is regarded as an illegal command */
>>>> +static void amd_iommu_cmdbuf_exec(AMDIOMMUState *s)
>>>> +{
>>>> +    IOMMU_DPRINTF(COMMAND, "");
>>>> +
>>>> +    uint8_t type;
>>>> +    uint8_t cmd[IOMMU_COMMAND_SIZE];
>>>> +
>>>> +    memset(cmd, 0, IOMMU_COMMAND_SIZE);
>>>> +
>>>> +    if (dma_memory_read(&address_space_memory, s->cmdbuf +
>>>> s->cmdbuf_head, cmd,
>>>> +       IOMMU_COMMAND_SIZE)) {
>>>> +        IOMMU_DPRINTF(COMMAND, "error: fail to access memory at
>>>> 0x%"PRIx64
>>>> +                      " + 0x%"PRIu8, s->cmdbuf, s->cmdbuf_head);
>>>> +        amd_iommu_log_command_error(s, s->cmdbuf + s->cmdbuf_head);
>>>> +        return;
>>>> +    }
>>>> +
>>>> +    type = opcode(cmd);
>>>> +
>>>> +    switch (type) {
>>>> +    case IOMMU_CMD_COMPLETION_WAIT:
>>>> +        iommu_completion_wait(s, cmd, type);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_CMD_INVAL_DEVTAB_ENTRY:
>>>> +        iommu_inval_devtab_entry(s, cmd, type);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_CMD_INVAL_IOMMU_PAGES:
>>>> +        iommu_inval_pages(s, cmd, type);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_CMD_INVAL_IOTLB_PAGES:
>>>> +        iommu_inval_iotlb(s, cmd, type);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_CMD_INVAL_INTR_TABLE:
>>>> +        iommu_inval_inttable(s, cmd, type);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_CMD_PREFETCH_IOMMU_PAGES:
>>>> +        iommu_prefetch_pages(s, cmd, type);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_CMD_COMPLETE_PPR_REQUEST:
>>>> +        iommu_complete_ppr(s, cmd, type);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_CMD_INVAL_IOMMU_ALL:
>>>> +        iommu_inval_all(s, cmd, type);
>>>> +        break;
>>>> +
>>>> +    default:
>>>> +        IOMMU_DPRINTF(COMMAND, "unhandled command %d", type);
>>>> +        /* log illegal command */
>>>> +        amd_iommu_log_illegalcom_error(s, type,
>>>> +                                       s->cmdbuf + s->cmdbuf_head);
>>>> +        break;
>>>> +    }
>>>> +
>>>> +}
>>>> +
>>>> +static void amd_iommu_cmdbuf_run(AMDIOMMUState *s)
>>>> +{
>>>> +    IOMMU_DPRINTF(COMMAND, "");
>>>> +
>>>> +    uint64_t *mmio_cmdbuf_head = (uint64_t *)(s->mmior +
>>>> +                                 IOMMU_MMIO_COMMAND_HEAD);
>>>> +
>>>> +    if (!s->cmdbuf_enabled) {
>>>> +        IOMMU_DPRINTF(COMMAND, "error: IOMMU trying to execute commands
>>>> with "
>>>> +                      "command buffer disabled. IOMMU control value
>>>> 0x%"PRIx64,
>>>> +                      amd_iommu_readq(s, IOMMU_MMIO_CONTROL));
>>>> +        return;
>>>> +    }
>>>> +
>>>> +    while (s->cmdbuf_head != s->cmdbuf_tail) {
>>>> +        /* check if there is work to do. */
>>>> +        IOMMU_DPRINTF(COMMAND, "command buffer head at 0x%"PRIx32 "
>>>> command "
>>>> +                      "buffer tail at 0x%"PRIx32" command buffer base at
>>>> 0x%"
>>>> +                      PRIx64, s->cmdbuf_head, s->cmdbuf_tail, s->cmdbuf);
>>>> +         amd_iommu_cmdbuf_exec(s);
>>>> +         s->cmdbuf_head += IOMMU_COMMAND_SIZE;
>>>> +         amd_iommu_writeq_raw(s, s->cmdbuf_head,
>>>> IOMMU_MMIO_COMMAND_HEAD);
>>>> +
>>>> +        /* wrap head pointer */
>>>> +        if (s->cmdbuf_head >= s->cmdbuf_len * IOMMU_COMMAND_SIZE) {
>>>> +            s->cmdbuf_head = 0;
>>>> +        }
>>>> +    }
>>>> +
>>>> +    *mmio_cmdbuf_head = cpu_to_le64(s->cmdbuf_head);
>>>> +}
>>>> +
>>>> +/* System Software might never read from some of this fields but anyways
>>>> */
>>>> +static uint64_t amd_iommu_mmio_read(void *opaque, hwaddr addr, unsigned
>>>> size)
>>>> +{
>>>> +    AMDIOMMUState *s = opaque;
>>>> +
>>>> +    uint64_t val = -1;
>>>
>>>
>>> The above might work, but it looks a little weird
>>>
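
Spelling out the all-ones default avoids the implicit conversion and
reads less oddly, e.g.:

    uint64_t val = UINT64_MAX;    /* returned for invalid/unhandled reads */
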
>>>> +    if (addr + size > IOMMU_MMIO_SIZE) {
>>>> +        IOMMU_DPRINTF(MMIO, "error: addr outside region: max 0x%"PRIX64
>>>> +                      ", got 0x%"PRIx64 " %d", (uint64_t)IOMMU_MMIO_SIZE,
>>>> addr,
>>>> +                      size);
>>>> +        return (uint64_t)-1;
>>>> +    }
>>>> +
>>>> +    if (size == 2) {
>>>> +        val = amd_iommu_readw(s, addr);
>>>> +    } else if (size == 4) {
>>>> +        val = amd_iommu_readl(s, addr);
>>>> +    } else if (size == 8) {
>>>> +        val = amd_iommu_readq(s, addr);
>>>> +    }
>>>> +
>>>> +    switch (addr & ~0x07) {
>>>> +    case IOMMU_MMIO_DEVICE_TABLE:
>>>> +        IOMMU_DPRINTF(MMIO, "MMIO_DEVICE_TABLE read addr 0x%"PRIx64
>>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>>> +                       addr & ~0x07);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_MMIO_COMMAND_BASE:
>>>> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_BASE read addr 0x%"PRIx64
>>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>>> +                      addr & ~0x07);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_MMIO_EVENT_BASE:
>>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_BASE read addr 0x%"PRIx64
>>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>>> +                      addr & ~0x07);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_MMIO_CONTROL:
>>>> +        IOMMU_DPRINTF(MMIO, "MMIO_CONTROL read addr 0x%"PRIx64
>>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>>> +                       addr & ~0x07);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_MMIO_EXCL_BASE:
>>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EXCL_BASE read addr 0x%"PRIx64
>>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>>> +                      addr & ~0x07);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_MMIO_EXCL_LIMIT:
>>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EXCL_LIMIT read addr 0x%"PRIx64
>>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>>> +                      addr & ~0x07);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_MMIO_COMMAND_HEAD:
>>>> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_HEAD read addr 0x%"PRIx64
>>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>>> +                      addr & ~0x07);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_MMIO_COMMAND_TAIL:
>>>> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_TAIL read addr 0x%"PRIx64
>>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>>> +                      addr & ~0x07);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_MMIO_EVENT_HEAD:
>>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_HEAD read addr 0x%"PRIx64
>>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>>> +                      addr & ~0x07);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_MMIO_EVENT_TAIL:
>>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_TAIL read addr 0x%"PRIx64
>>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>>> +                      addr & ~0x07);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_MMIO_STATUS:
>>>> +        IOMMU_DPRINTF(MMIO, "MMIO_STATUS read addr 0x%"PRIx64
>>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>>> +                      addr & ~0x07);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_MMIO_EXT_FEATURES:
>>>> +        IOMMU_DPRINTF(MMU, "MMIO_EXT_FEATURES read addr 0x%"PRIx64
>>>> +                      ", size %d offset 0x%"PRIx64 "value 0x%"PRIx64,
>>>> +                      addr, size, addr & ~0x07, val);
>>>> +        break;
>>>> +
>>>> +    default:
>>>> +        IOMMU_DPRINTF(MMIO, "UNHANDLED MMIO read addr 0x%"PRIx64
>>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>>> +                       addr & ~0x07);
>>>> +    }
>>>> +    return val;
>>>> +}
>>>> +
>>>> +static void iommu_handle_control_write(AMDIOMMUState *s)
>>>> +{
>>>> +    IOMMU_DPRINTF(COMMAND, "");
>>>> +    /*
>>>> +     * read whatever is already written in case
>>>> +     * software is writing in chucks less than 8 bytes
>>>> +     */
>>>> +    unsigned long control = amd_iommu_readq(s, IOMMU_MMIO_CONTROL);
>>>> +    s->enabled = !!(control & IOMMU_MMIO_CONTROL_IOMMUEN);
>>>> +
>>>> +    s->ats_enabled = !!(control & IOMMU_MMIO_CONTROL_HTTUNEN);
>>>> +    s->evtlog_enabled = s->enabled && !!(control &
>>>> +                        IOMMU_MMIO_CONTROL_EVENTLOGEN);
>>>> +
>>>> +    s->evtlog_intr = !!(control & IOMMU_MMIO_CONTROL_EVENTINTEN);
>>>> +    s->completion_wait_intr = !!(control &
>>>> IOMMU_MMIO_CONTROL_COMWAITINTEN);
>>>> +    s->cmdbuf_enabled = s->enabled && !!(control &
>>>> +                        IOMMU_MMIO_CONTROL_CMDBUFLEN);
>>>> +
>>>> +    /* update the flags depending on the control register */
>>>> +    if (s->cmdbuf_enabled) {
>>>> +        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) |=
>>>> +            IOMMU_MMIO_STATUS_CMDBUF_RUN;
>>>> +    } else {
>>>> +        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) &=
>>>> +            ~IOMMU_MMIO_STATUS_CMDBUF_RUN;
>>>> +    }
>>>> +    if (s->evtlog_enabled) {
>>>> +        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) |=
>>>> +            IOMMU_MMIO_STATUS_EVT_RUN;
>>>> +    } else {
>>>> +        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) &=
>>>> +            ~IOMMU_MMIO_STATUS_EVT_RUN;
>>>> +    }
>>>> +
>>>> +    IOMMU_DPRINTF(COMMAND, "MMIO_STATUS state 0x%"PRIx64, control);
>>>> +
>>>> +    amd_iommu_cmdbuf_run(s);
>>>> +}
>>>> +
>>>> +static inline void iommu_handle_devtab_write(AMDIOMMUState *s)
>>>> +
>>>> +{
>>>> +    IOMMU_DPRINTF(COMMAND, "");
>>>> +
>>>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_DEVICE_TABLE);
>>>> +    s->devtab = (dma_addr_t)(val & IOMMU_MMIO_DEVTAB_BASE_MASK);
>>>> +
>>>> +    /* set device table length */
>>>> +    s->devtab_len = ((val & IOMMU_MMIO_DEVTAB_SIZE_MASK) + 1 *
>>>> +                    (IOMMU_MMIO_DEVTAB_SIZE_UNIT /
>>>> +                     IOMMU_MMIO_DEVTAB_ENTRY_SIZE));
>>>> +}
>>>> +
>>>> +static inline void iommu_handle_cmdhead_write(AMDIOMMUState *s)
>>>> +{
>>>> +    IOMMU_DPRINTF(COMMAND, "");
>>>> +
>>>> +    s->cmdbuf_head = (dma_addr_t)amd_iommu_readq(s,
>>>> IOMMU_MMIO_COMMAND_HEAD)
>>>> +                     & IOMMU_MMIO_CMDBUF_HEAD_MASK;
>>>> +    amd_iommu_cmdbuf_run(s);
>>>> +}
>>>> +
>>>> +static inline void iommu_handle_cmdbase_write(AMDIOMMUState *s)
>>>> +{
>>>> +    IOMMU_DPRINTF(COMMAND, "");
>>>> +
>>>> +    s->cmdbuf = (dma_addr_t)amd_iommu_readq(s, IOMMU_MMIO_COMMAND_BASE)
>>>> +                & IOMMU_MMIO_CMDBUF_BASE_MASK;
>>>> +    s->cmdbuf_len = 1UL << (s->mmior[IOMMU_MMIO_CMDBUF_SIZE_BYTE]
>>>> +                    & IOMMU_MMIO_CMDBUF_SIZE_MASK);
>>>> +    s->cmdbuf_head = s->cmdbuf_tail = 0;
>>>> +
>>>> +}
>>>> +
>>>> +static inline void iommu_handle_cmdtail_write(AMDIOMMUState *s)
>>>> +{
>>>> +    s->cmdbuf_tail = amd_iommu_readq(s, IOMMU_MMIO_COMMAND_TAIL)
>>>> +                     & IOMMU_MMIO_CMDBUF_TAIL_MASK;
>>>> +    amd_iommu_cmdbuf_run(s);
>>>> +}
>>>> +
>>>> +static inline void iommu_handle_excllim_write(AMDIOMMUState *s)
>>>> +{
>>>> +    IOMMU_DPRINTF(COMMAND, "");
>>>> +
>>>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EXCL_LIMIT);
>>>> +    s->excl_limit = (val & IOMMU_MMIO_EXCL_LIMIT_MASK) |
>>>> +                    IOMMU_MMIO_EXCL_LIMIT_LOW;
>>>> +}
>>>> +
>>>> +static inline void iommu_handle_evtbase_write(AMDIOMMUState *s)
>>>> +{
>>>> +    IOMMU_DPRINTF(COMMAND, "");
>>>> +
>>>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EVENT_BASE);
>>>> +    s->evtlog = val & IOMMU_MMIO_EVTLOG_BASE_MASK;
>>>> +    s->evtlog_len = 1UL << (*(uint64_t
>>>> *)&s->mmior[IOMMU_MMIO_EVTLOG_SIZE_BYTE]
>>>> +                    & IOMMU_MMIO_EVTLOG_SIZE_MASK);
>>>> +}
>>>> +
>>>> +static inline void iommu_handle_evttail_write(AMDIOMMUState *s)
>>>> +{
>>>> +    IOMMU_DPRINTF(COMMAND, "");
>>>> +
>>>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EVENT_TAIL);
>>>> +    s->evtlog_tail = val & IOMMU_MMIO_EVTLOG_TAIL_MASK;
>>>> +}
>>>> +
>>>> +static inline void iommu_handle_evthead_write(AMDIOMMUState *s)
>>>> +{
>>>> +    IOMMU_DPRINTF(COMMAND, "");
>>>> +
>>>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EVENT_HEAD);
>>>> +    s->evtlog_head = val & IOMMU_MMIO_EVTLOG_HEAD_MASK;
>>>> +}
>>>> +
>>>> +static inline void iommu_handle_pprbase_write(AMDIOMMUState *s)
>>>> +{
>>>> +    IOMMU_DPRINTF(COMMAND, "");
>>>> +
>>>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_PPR_BASE);
>>>> +    s->ppr_log = val & IOMMU_MMIO_PPRLOG_BASE_MASK;
>>>> +    s->pprlog_len = 1UL << (*(uint64_t
>>>> *)&s->mmior[IOMMU_MMIO_PPRLOG_SIZE_BYTE]
>>>> +                    & IOMMU_MMIO_PPRLOG_SIZE_MASK);
>>>> +}
>>>> +
>>>> +static inline void iommu_handle_pprhead_write(AMDIOMMUState *s)
>>>> +{
>>>> +    IOMMU_DPRINTF(COMMAND, "");
>>>> +
>>>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_PPR_HEAD);
>>>> +    s->pprlog_head = val & IOMMU_MMIO_PPRLOG_HEAD_MASK;
>>>> +}
>>>> +
>>>> +static inline void iommu_handle_pprtail_write(AMDIOMMUState *s)
>>>> +{
>>>> +    IOMMU_DPRINTF(COMMAND, "");
>>>> +
>>>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_PPR_TAIL);
>>>> +    s->pprlog_tail = val & IOMMU_MMIO_PPRLOG_TAIL_MASK;
>>>> +}
>>>> +
>>>> +/* FIXME: something might go wrong if System Software writes in chunks
>>>> + * of one byte but linux writes in chunks of 4 bytes so currently it
>>>> + * works correctly with linux but will definitely be busted if software
>>>> + * reads/writes 8 bytes
>>>> + */
>>>> +static void amd_iommu_mmio_write(void *opaque, hwaddr addr, uint64_t val,
>>>> +                                 unsigned size)
>>>> +{
>>>> +
>>>> +    IOMMU_DPRINTF(COMMAND, "");
>>>> +
>>>> +    AMDIOMMUState *s = opaque;
>>>> +    unsigned long offset = addr & 0x07;
>>>> +
>>>> +    if (addr + size > IOMMU_MMIO_SIZE) {
>>>> +        IOMMU_DPRINTF(MMIO, "error: addr outside region: max 0x%"PRIx64
>>>> +                      ", got 0x%"PRIx64 " %d", (uint64_t)IOMMU_MMIO_SIZE,
>>>> addr,
>>>> +                      size);
>>>> +        return;
>>>> +    }
>>>> +
>>>> +    switch (addr & ~0x07) {
>>>> +    case IOMMU_MMIO_CONTROL:
>>>> +        if (size == 2) {
>>>> +            amd_iommu_writew(s, addr, val);
>>>> +        } else if (size == 4) {
>>>> +            amd_iommu_writel(s, addr,  val);
>>>> +        } else if (size == 8) {
>>>> +            amd_iommu_writeq(s, addr, val);
>>>> +        }
>>>> +
>>>> +        IOMMU_DPRINTF(COMMAND, "MMIO_CONTROL write addr 0x%"PRIx64
>>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>>> +                      addr, size, val, offset);
>>>> +        iommu_handle_control_write(s);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_MMIO_DEVICE_TABLE:
>>>> +        IOMMU_DPRINTF(MMIO, "MMIO_DEVICE_TABLE write addr 0x%"PRIx64
>>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>>> +                      addr, size, val, offset);
>>>> +        if (size == 2) {
>>>> +            amd_iommu_writew(s, addr, val);
>>>> +        } else if (size == 4) {
>>>> +            amd_iommu_writel(s, addr, val);
>>>> +        } else if (size == 8) {
>>>> +            amd_iommu_writeq(s, addr, val);
>>>> +        }
>>>> +
>>>> +       /*  set device table address
>>>> +        *   This also suffers from inability to tell whether software
>>>> +        *   is done writing
>>>> +        */
>>>> +
>>>> +        if (offset || (size == 8)) {
>>>> +            iommu_handle_devtab_write(s);
>>>> +        }
>>>> +        break;
>>>> +
>>>> +    case IOMMU_MMIO_COMMAND_HEAD:
>>>> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_HEAD write addr 0x%"PRIx64
>>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>>> +                      addr, size, val, offset);
>>>> +        if (size == 2) {
>>>> +            amd_iommu_writew(s, addr, val);
>>>> +        } else if (size == 4) {
>>>> +            amd_iommu_writel(s, addr, val);
>>>> +        } else if (size == 8) {
>>>> +            amd_iommu_writeq(s, addr, val);
>>>> +        }
>>>> +
>>>> +        iommu_handle_cmdhead_write(s);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_MMIO_COMMAND_BASE:
>>>> +        IOMMU_DPRINTF(COMMAND, "MMIO_COMMAND_BASE write addr 0x%"PRIx64
>>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>>> +                      addr, size, val, offset);
>>>> +        if (size == 2) {
>>>> +            amd_iommu_writew(s, addr, val);
>>>> +        } else if (size == 4) {
>>>> +            amd_iommu_writel(s, addr, val);
>>>> +        } else if (size == 8) {
>>>> +            amd_iommu_writeq(s, addr, val);
>>>> +        }
>>>> +
>>>> +        /* FIXME - make sure System Software has finished writing incase
>>>> +         * it writes in chucks less than 8 bytes in a robust way.As for
>>>> +         * now, this hacks works for the linux driver
>>>> +         */
>>>> +        if (offset || (size == 8)) {
>>>> +            iommu_handle_cmdbase_write(s);
>>>> +        }
>>>> +        break;
>>>> +
>>>> +    case IOMMU_MMIO_COMMAND_TAIL:
>>>> +        IOMMU_DPRINTF(COMMAND, "MMIO_COMMAND_TAIL write addr 0x%"PRIx64
>>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>>> +                      addr, size, val, offset);
>>>> +        if (size == 2) {
>>>> +            amd_iommu_writew(s, addr, val);
>>>> +        } else if (size == 4) {
>>>> +            amd_iommu_writel(s, addr, val);
>>>> +        } else if (size == 8) {
>>>> +            amd_iommu_writeq(s, addr, val);
>>>> +        }
>>>> +        iommu_handle_cmdtail_write(s);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_MMIO_EVENT_BASE:
>>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_BASE write addr 0x%"PRIx64
>>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>>> +                      addr, size, val, offset);
>>>> +        if (size == 2) {
>>>> +            amd_iommu_writew(s, addr, val);
>>>> +        } else if (size == 4) {
>>>> +            amd_iommu_writel(s, addr, val);
>>>> +        } else if (size == 8) {
>>>> +            amd_iommu_writeq(s, addr, val);
>>>> +        }
>>>> +        iommu_handle_evtbase_write(s);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_MMIO_EVENT_HEAD:
>>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_HEAD write addr 0x%"PRIx64
>>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>>> +                      addr, size, val, offset);
>>>> +        if (size == 2) {
>>>> +            amd_iommu_writew(s, addr, val);
>>>> +        } else if (size == 4) {
>>>> +            amd_iommu_writel(s, addr, val);
>>>> +        } else if (size == 8) {
>>>> +            amd_iommu_writeq(s, addr, val);
>>>> +        }
>>>> +        iommu_handle_evthead_write(s);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_MMIO_EVENT_TAIL:
>>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_TAIL write addr 0x%"PRIx64
>>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>>> +                      addr, size, val, offset);
>>>> +        if (size == 2) {
>>>> +            amd_iommu_writew(s, addr, val);
>>>> +        } else if (size == 4) {
>>>> +            amd_iommu_writel(s, addr, val);
>>>> +        } else if (size == 8) {
>>>> +            amd_iommu_writeq(s, addr, val);
>>>> +        }
>>>> +        iommu_handle_evttail_write(s);
>>>> +        break;
>>>> +
>>>> +    case IOMMU_MMIO_EXCL_LIMIT:
>>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EXCL_LIMIT write addr 0x%"PRIx64
>>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>>> +                      addr, size, val, offset);
>>>> +        if (size == 2) {
>>>> +            amd_iommu_writew(s, addr, val);
>>>> +        } else if (size == 4) {
>>>> +            amd_iommu_writel(s, addr, val);
>>>> +        } else if (size == 8) {
>>>> +            amd_iommu_writeq(s, addr, val);
>>>> +        }
>>>> +        iommu_handle_excllim_write(s);
>>>> +        break;
>>>> +
>>>> +        /* PPR log base - unused for now */
>>>> +    case IOMMU_MMIO_PPR_BASE:
>>>> +        IOMMU_DPRINTF(MMIO, "MMIO_PPR_BASE write addr 0x%"PRIx64
>>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>>> +                      addr, size, val, offset);
>>>> +        if (size == 2) {
>>>> +            amd_iommu_writew(s, addr, val);
>>>> +        } else if (size == 4) {
>>>> +            amd_iommu_writel(s, addr, val);
>>>> +        } else if (size == 8) {
>>>> +            amd_iommu_writeq(s, addr, val);
>>>> +        }
>>>> +        iommu_handle_pprbase_write(s);
>>>> +        break;
>>>> +        /* PPR log head - also unused for now */
>>>> +    case IOMMU_MMIO_PPR_HEAD:
>>>> +        IOMMU_DPRINTF(MMIO, "MMIO_PPR_HEAD write addr 0x%"PRIx64
>>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>>> +                       addr, size, val, offset);
>>>> +        if (size == 2) {
>>>> +            amd_iommu_writew(s, addr, val);
>>>> +        } else if (size == 4) {
>>>> +            amd_iommu_writel(s, addr, val);
>>>> +        } else if (size == 8) {
>>>> +            amd_iommu_writeq(s, addr, val);
>>>> +        }
>>>> +        iommu_handle_pprhead_write(s);
>>>> +        break;
>>>> +        /* PPR log tail - unused for now */
>>>> +    case IOMMU_MMIO_PPR_TAIL:
>>>> +        IOMMU_DPRINTF(MMIO, "MMIO_PPR_TAIL write addr 0x%"PRIx64
>>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>>> +                      addr, size, val, offset);
>>>> +        if (size == 2) {
>>>> +            amd_iommu_writew(s, addr, val);
>>>> +        } else if (size == 4) {
>>>> +            amd_iommu_writel(s, addr, val);
>>>> +        } else if (size == 8) {
>>>> +            amd_iommu_writeq(s, addr, val);
>>>> +        }
>>>> +        iommu_handle_pprtail_write(s);
>>>> +        break;
>>>> +
>>>> +        /* ignore write to ext_features */
>>>> +    default:
>>>> +        IOMMU_DPRINTF(MMIO, "UNHANDLED MMIO write addr 0x%"PRIx64
>>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>>> +                      addr, size, val, offset);
>>>> +    }
>>>> +
>>>> +}
>>>> +
>>>> +static inline uint64_t amd_iommu_get_perms(uint64_t entry)
>>>> +{
>>>> +    return (entry & (IOMMU_DEV_PERM_READ | IOMMU_DEV_PERM_WRITE)) >>
>>>> +           IOMMU_DEV_PERM_SHIFT;
>>>> +}
>>>> +
>>>> +AddressSpace *bridge_host_amd_iommu(PCIBus *bus, void *opaque, int devfn)
>>>> +{
>>>> +    AMDIOMMUState *s = opaque;
>>>> +    AMDIOMMUAddressSpace **iommu_as;
>>>> +    int bus_num = pci_bus_num(bus);
>>>> +
>>>> +    /* just in case */
>>>
>>>
>>> This comment troubles me; do we need the assert?
>
> In case the bus_num or devfn is invalid. Anyway, I could get rid of
> this assert.
>
>>>
>>>> +    assert(0 <= bus_num && bus_num <= PCI_BUS_MAX);
>>>
>>>
>>> bus_num < PCI_BUS_MAX, right ?
>>>
>>>> +    assert(0 <= devfn && devfn <= PCI_DEVFN_MAX);
>>>
>>>
>>> same with devfn I suppose.
>>>
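
Since PCI_BUS_MAX and PCI_DEVFN_MAX are defined as counts, the checks
the comments above are pointing at would presumably be:

    assert(bus_num < PCI_BUS_MAX);   /* bus numbers run 0 .. PCI_BUS_MAX - 1 */
    assert(devfn < PCI_DEVFN_MAX);   /* devfn runs 0 .. PCI_DEVFN_MAX - 1 */
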
>>>> +
>>>> +    iommu_as = s->address_spaces[bus_num];
>>>> +
>>>> +    /* allocate memory during the first run */
>>>> +    if (!iommu_as) {
>>>
>>>
>>> Why lazy init? We can do that at AMDIOMMUState init, right?
>
> This code has to be called for all emulated devices when the bus is
> initialized. If you have it at AMDIOMMUState init it will only be
> called for the one or two devices already initialized.
>
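
For context, a rough sketch of how the hook ends up being called lazily,
assuming the rest of the series registers it with pci_setup_iommu() on the
root bus the way the Intel IOMMU code does (bus and iommu_state here are
just stand-ins for whatever the series actually passes):

    /* registration, somewhere in the machine/chipset init code */
    pci_setup_iommu(bus, bridge_host_amd_iommu, iommu_state);

    /* pci_device_iommu_address_space() then invokes the hook the first
     * time each device's DMA address space is looked up, which is why the
     * per-bus and per-devfn arrays can only be populated on demand */
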
>>>
>>>> +        iommu_as = g_malloc0(sizeof(AMDIOMMUAddressSpace *) *
>>>> PCI_DEVFN_MAX);
>>>> +        s->address_spaces[bus_num] = iommu_as;
>>>> +    }
>>>> +
>>>> +    /* set up IOMMU region */
>>>> +    if (!iommu_as[devfn]) {
>>>> +        iommu_as[devfn] = g_malloc0(sizeof(AMDIOMMUAddressSpace));
>>>
>>>
>>> same here
>>>
>>>> +        iommu_as[devfn]->bus_num = (uint8_t)bus_num;
>>>> +        iommu_as[devfn]->devfn = (uint8_t)devfn;
>>>> +        iommu_as[devfn]->iommu_state = s;
>>>> +
>>>> +        memory_region_init_iommu(&iommu_as[devfn]->iommu, OBJECT(s),
>>>> +                                 &s->iommu_ops, "amd-iommu", UINT64_MAX);
>>>> +        address_space_init(&iommu_as[devfn]->as, &iommu_as[devfn]->iommu,
>>>> +                           "amd-iommu");
>>>> +    }
>>>> +    return &iommu_as[devfn]->as;
>>>> +}
>>>> +
>>>> +/* validate a page table entry */
>>>> +static bool amd_iommu_validate_dte(AMDIOMMUState *s, uint16_t devid,
>>>> +                                   uint64_t *dte)
>>>> +{
>>>> +    if ((dte[0] & IOMMU_DTE_LOWER_QUAD_RESERVED)
>>>> +        || (dte[1] & IOMMU_DTE_MIDDLE_QUAD_RESERVED)
>>>> +        || (dte[2] & IOMMU_DTE_UPPER_QUAD_RESERVED) || dte[3]) {
>>>> +        amd_iommu_log_illegaldevtab_error(s, devid,
>>>> +                                s->devtab + devid *
>>>> IOMMU_DEVTAB_ENTRY_SIZE, 0);
>>>> +        return false;
>>>> +    }
>>>> +
>>>> +    return dte[0] & IOMMU_DEV_VALID && (dte[0] &
>>>> IOMMU_DEV_TRANSLATION_VALID)
>>>> +           && (dte[0] & IOMMU_DEV_PT_ROOT_MASK);
>>>> +}
>>>> +
>>>> +/* get a device table entry given the devid */
>>>> +static bool amd_iommu_get_dte(AMDIOMMUState *s, int devid, uint64_t
>>>> *entry)
>>>> +{
>>>> +    uint32_t offset = devid * IOMMU_DEVTAB_ENTRY_SIZE;
>>>> +
>>>> +    IOMMU_DPRINTF(MMU, "Device Table at 0x%"PRIx64, s->devtab);
>>>> +
>>>> +    if (dma_memory_read(&address_space_memory, s->devtab + offset, entry,
>>>> +                        IOMMU_DEVTAB_ENTRY_SIZE)) {
>>>> +        IOMMU_DPRINTF(MMU, "error: fail to access Device Entry devtab
>>>> 0x%"PRIx64
>>>> +                      "offset 0x%"PRIx32, s->devtab, offset);
>>>> +        /* log ever accessing dte */
>>>> +        amd_iommu_log_devtab_error(s, devid, s->devtab + offset, 0);
>>>> +        return false;
>>>> +    }
>>>> +
>>>> +    if (!amd_iommu_validate_dte(s, devid, entry)) {
>>>> +        IOMMU_DPRINTF(MMU,
>>>> +                      "Pte entry at 0x%"PRIx64" is invalid", entry[0]);
>>>> +        return false;
>>>> +    }
>>>> +
>>>> +    return true;
>>>> +}
>>>> +
>>>> +/* get pte translation mode */
>>>> +static inline uint8_t get_pte_translation_mode(uint64_t pte)
>>>> +{
>>>> +    return (pte >> IOMMU_DEV_MODE_RSHIFT) & IOMMU_DEV_MODE_MASK;
>>>> +}
>>>> +
>>>> +static int amd_iommu_page_walk(AMDIOMMUAddressSpace *as, uint64_t *dte,
>>>> +                               IOMMUTLBEntry *ret, unsigned perms,
>>>> +                               hwaddr addr)
>>>> +{
>>>> +    uint8_t level, oldlevel;
>>>> +    unsigned present;
>>>> +    uint64_t pte, pte_addr;
>>>> +    uint64_t pte_perms;
>>>> +    pte = dte[0];
>>>> +
>>>> +    level = get_pte_translation_mode(pte);
>>>> +
>>>> +    if (level >= 7 || level == 0) {
>>>> +        IOMMU_DPRINTF(MMU, "error: translation level 0x%"PRIu8 "
>>>> detected"
>>>> +                      "while translating 0x%"PRIx64, level, addr);
>>>> +        return -1;
>>>> +    }
>>>> +
>>>> +    while (level > 0) {
>>>> +        pte_perms = amd_iommu_get_perms(pte);
>>>> +        present = pte & 1;
>>>> +        if (!present || perms != (perms & pte_perms)) {
>>>> +            amd_iommu_page_fault(as->iommu_state, as->devfn, addr,
>>>> perms);
>>>> +            IOMMU_DPRINTF(MMU, "error: page fault accessing virtual addr
>>>> 0x%"
>>>> +                          PRIx64, addr);
>>>> +            return -1;
>>>> +        }
>>>> +
>>>> +        /* go to the next lower level */
>>>> +        pte_addr = pte & IOMMU_DEV_PT_ROOT_MASK;
>>>> +        /* add offset and load pte */
>>>> +        pte_addr += ((addr >> (3 + 9 * level)) & 0x1FF) << 3;
>>>> +        pte = ldq_phys(&address_space_memory, pte_addr);
>>>> +        oldlevel = level;
>>>> +        level = get_pte_translation_mode(pte);
>>>> +
>>>> +        /* PT is corrupted or not there */
>>>> +        if (level != oldlevel - 1) {
>>>> +            return -1;
>>>> +        }
>>>> +    }
>>>> +
>>>> +    ret->iova = addr & IOMMU_PAGE_MASK_4K;
>>>> +    ret->translated_addr = (pte & IOMMU_DEV_PT_ROOT_MASK) &
>>>> IOMMU_PAGE_MASK_4K;
>>>> +    ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
>>>> +    ret->perm = IOMMU_RW;
>>>> +    return 0;
>>>> +}
>>>> +
>>>> +/* TODO : Mark addresses as Accessed and Dirty */
>>>
>>>
>>> If you don't mark addresses as dirty, can't this cause the sporadic errors
>>> of arbitrary programs Jan talked about?
>>
>> I don't think this is the issue; I seem to be receiving wrong 'host
>> physical addresses' in the last few kernel versions. This issue is not
>> there in older kernels.
>>
>>>
>>>> +static void amd_iommu_do_translate(AMDIOMMUAddressSpace *as, hwaddr addr,
>>>> +                                   bool is_write, IOMMUTLBEntry *ret)
>>>> +{
>>>> +    AMDIOMMUState *s = as->iommu_state;
>>>> +    uint16_t devid = PCI_DEVID(as->bus_num, as->devfn);
>>>> +    IOMMUIOTLBEntry *iotlb_entry;
>>>> +    uint8_t err;
>>>> +    uint64_t entry[4];
>>>> +
>>>> +    /* try getting a cache entry first */
>>>> +    iotlb_entry = amd_iommu_iotlb_lookup(s, addr, as->devfn);
>>>> +
>>>> +    if (iotlb_entry) {
>>>> +        IOMMU_DPRINTF(CACHE, "hit  iotlb devid: %02x:%02x.%x gpa
>>>> 0x%"PRIx64
>>>> +                      " hpa 0x%"PRIx64, PCI_BUS_NUM(devid),
>>>> PCI_SLOT(devid),
>>>> +                      PCI_FUNC(devid), addr,
>>>> iotlb_entry->translated_addr);
>>>> +        ret->iova = addr & IOMMU_PAGE_MASK_4K;
>>>> +        ret->translated_addr = iotlb_entry->translated_addr;
>>>> +        ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
>>>> +        ret->perm = iotlb_entry->perms;
>>>> +        return;
>>>> +    } else {
>>>
>>>
>>> you return from the if clause so you don't need the else
>>>
>>>> +        if (!amd_iommu_get_dte(s, devid, entry)) {
>>>
>>>
>>> Isn't it an error if you did not find the device id?

I get the device id from QEMU, so I assume it's correct.

>>>
>>>> +            goto out;
>>>> +        }
>>>> +
>>>> +        err = amd_iommu_page_walk(as, entry, ret,
>>>> +                                  is_write ? IOMMU_PERM_WRITE :
>>>> IOMMU_PERM_READ,
>>>> +                                  addr);
>>>> +        if (err) {
>>>> +            IOMMU_DPRINTF(MMU, "error: hardware error accessing page
>>>> tables"
>>>> +                          " while translating addr 0x%"PRIx64, addr);
>>>> +            amd_iommu_log_pagetab_error(s, as->devfn, addr, 0);
>>>> +            goto out;
>>>> +        }
>>>> +
>>>> +        amd_iommu_update_iotlb(s, as->devfn, addr, ret->translated_addr,
>>>> +                               ret->perm, entry[1] &
>>>> IOMMU_DEV_DOMID_ID_MASK);
>>>> +        return;
>>>> +    }
>>>> +
>>>> +out:
>>>> +    ret->iova = addr;
>>>> +    ret->translated_addr = addr & IOMMU_PAGE_MASK_4K;
>>>> +    ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
>>>> +    ret->perm = IOMMU_RW;
>>>> +    return;
>>>
>>>
>>> you don't need the above return
>>>
>>>> +}
>>>> +
>>>> +static IOMMUTLBEntry amd_iommu_translate(MemoryRegion *iommu, hwaddr
>>>> addr,
>>>> +                                         bool is_write)
>>>> +{
>>>> +    IOMMU_DPRINTF(GENERAL, "");
>>>> +
>>>> +    AMDIOMMUAddressSpace *as = container_of(iommu, AMDIOMMUAddressSpace,
>>>> iommu);
>>>> +    AMDIOMMUState *s = as->iommu_state;
>>>> +
>>>> +    IOMMUTLBEntry ret = {
>>>> +        .target_as = &address_space_memory,
>>>> +        .iova = addr,
>>>> +        .translated_addr = 0,
>>>> +        .addr_mask = ~(hwaddr)0,
>>>> +        .perm = IOMMU_NONE,
>>>> +    };
>>>> +
>>>> +    if (!s->enabled) {
>>>> +        /* IOMMU disabled - corresponds to iommu=off not
>>>> +         * failure to provide any parameter
>>>> +         */
>>>> +        ret.iova = addr & IOMMU_PAGE_MASK_4K;
>>>> +        ret.translated_addr = addr & IOMMU_PAGE_MASK_4K;
>>>> +        ret.addr_mask = ~IOMMU_PAGE_MASK_4K;
>>>> +        ret.perm = IOMMU_RW;
>>>> +        return ret;
>>>> +    }
>>>> +
>>>> +    amd_iommu_do_translate(as, addr, is_write, &ret);
>>>> +    IOMMU_DPRINTF(MMU, "devid: %02x:%02x.%x gpa 0x%"PRIx64 " hpa
>>>> 0x%"PRIx64,
>>>> +                  as->bus_num, PCI_SLOT(as->devfn), PCI_FUNC(as->devfn),
>>>> addr,
>>>> +                  ret.translated_addr);
>>>> +
>>>> +    return ret;
>>>> +}
>>>> +
>>>> +static const MemoryRegionOps mmio_mem_ops = {
>>>> +    .read = amd_iommu_mmio_read,
>>>> +    .write = amd_iommu_mmio_write,
>>>> +    .endianness = DEVICE_LITTLE_ENDIAN,
>>>> +    .impl = {
>>>> +        .min_access_size = 1,
>>>> +        .max_access_size = 8,
>>>> +        .unaligned = false,
>>>> +    },
>>>> +    .valid = {
>>>> +        .min_access_size = 1,
>>>> +        .max_access_size = 8,
>>>> +    }
>>>> +};
>>>> +
>>>> +static void amd_iommu_init(AMDIOMMUState *s)
>>>> +{
>>>> +    printf("amd_iommu_init");
>>>
>>>
>>> you should use the debug macro here
>>>
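
i.e. matching the idiom used elsewhere in the file:

    IOMMU_DPRINTF(GENERAL, "");
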
>>>> +
>>>> +    amd_iommu_iotlb_reset(s);
>>>> +
>>>> +    s->iommu_ops.translate = amd_iommu_translate;
>>>> +
>>>> +    s->devtab_len = 0;
>>>> +    s->cmdbuf_len = 0;
>>>> +    s->cmdbuf_head = 0;
>>>> +    s->cmdbuf_tail = 0;
>>>> +    s->evtlog_head = 0;
>>>> +    s->evtlog_tail = 0;
>>>> +    s->excl_enabled = false;
>>>> +    s->excl_allow = false;
>>>> +    s->mmio_enabled = false;
>>>> +    s->enabled = false;
>>>> +    s->ats_enabled = false;
>>>> +    s->cmdbuf_enabled = false;
>>>> +
>>>> +    /* reset MMIO */
>>>> +    memset(s->mmior, 0, IOMMU_MMIO_SIZE);
>>>> +    amd_iommu_set_quad(s, IOMMU_MMIO_EXT_FEATURES, IOMMU_EXT_FEATURES,
>>>> +            0xffffffffffffffef, 0);
>>>> +    amd_iommu_set_quad(s, IOMMU_MMIO_STATUS, 0, 0x98, 0x67);
>>>> +    /* reset device ident */
>>>> +    pci_config_set_vendor_id(s->dev.config, PCI_VENDOR_ID_AMD);
>>>> +    pci_config_set_device_id(s->dev.config, PCI_DEVICE_ID_RD890_IOMMU);
>>>> +    pci_config_set_prog_interface(s->dev.config, 00);
>>>> +    pci_config_set_class(s->dev.config, 0x0806);
>>>> +
>>>> +    /* reset IOMMU specific capabilities  */
>>>> +    pci_set_long(s->dev.config + s->capab_offset, IOMMU_CAPAB_FEATURES);
>>>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_BAR_LOW,
>>>> +                 s->mmio.addr & ~(0xffff0000));
>>>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_BAR_HIGH,
>>>> +                (s->mmio.addr & ~(0xffff)) >> 16);
>>>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_RANGE,
>>>> +                 0xff000000);
>>>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_MISC, 0);
>>>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_MISC,
>>>> +            IOMMU_MAX_PH_ADDR | IOMMU_MAX_GVA_ADDR | IOMMU_MAX_VA_ADDR);
>>>
>>>
>>> All the capabilities are read-write? Otherwise you need to set the wmask
>>> to indicate what fields are writable.
>>>
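
If most of the capability block is meant to be read-only to the guest,
realize could do something like the following (a sketch using the patch's
own offsets; which bits, if any, should stay writable is still open):

    /* clear wmask for the whole IOMMU capability block so guest writes
     * to it are ignored; re-enable individual bits as needed */
    memset(dev->wmask + s->capab_offset, 0, IOMMU_CAPAB_SIZE);
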
>>>> +}
>>>> +
>>>> +static void amd_iommu_reset(DeviceState *dev)
>>>> +{
>>>> +    AMDIOMMUState *s = AMD_IOMMU_DEVICE(dev);
>>>> +
>>>> +    amd_iommu_init(s);
>>>> +}
>>>> +
>>>> +static void amd_iommu_realize(PCIDevice *dev, Error **error)
>>>> +{
>>>> +    AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);
>>>> +
>>>> +    s->iotlb = g_hash_table_new_full(amd_iommu_uint64_hash,
>>>> +                                     amd_iommu_uint64_equal, g_free,
>>>> g_free);
>>>> +
>>>> +    s->capab_offset = pci_add_capability(dev, IOMMU_CAPAB_ID_SEC, 0,
>>>> +                                         IOMMU_CAPAB_SIZE);
>>>> +
>>>> +    /* add msi and hypertransport capabilities */
>>>> +    pci_add_capability(&s->dev, PCI_CAP_ID_MSI, 0, IOMMU_CAPAB_REG_SIZE);
>>>> +    pci_add_capability(&s->dev, PCI_CAP_ID_HT, 0, IOMMU_CAPAB_REG_SIZE);
>>>> +
>>>> +    amd_iommu_init(s);
>>>> +
>>>> +    /* set up MMIO */
>>>> +    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "mmio",
>>>> +                          IOMMU_MMIO_SIZE);
>>>> +
>>>> +    if (s->mmio.addr == IOMMU_BASE_ADDR) {
>>>
>>>
>>> I don't understand why this is needed here. realize is called only once
>>> in the init process and you set it a few lines below.
>>>
>>>> +        return;
>>>> +    }
>>>> +
>>>> +    s->mmio.addr = IOMMU_BASE_ADDR;
>>>> +    memory_region_add_subregion(get_system_memory(), IOMMU_BASE_ADDR,
>>>> &s->mmio);
>>>> +}
>>>> +
>>>> +static const VMStateDescription vmstate_amd_iommu = {
>>>> +    .name = "amd-iommu",
>>>> +    .fields  = (VMStateField[]) {
>>>> +        VMSTATE_PCI_DEVICE(dev, AMDIOMMUState),
>>>> +        VMSTATE_END_OF_LIST()
>>>> +    }
>>>> +};
>>>> +
>>>> +static Property amd_iommu_properties[] = {
>>>> +    DEFINE_PROP_UINT32("version", AMDIOMMUState, version, 2),
>>>> +    DEFINE_PROP_END_OF_LIST(),
>>>> +};
>>>> +
>>>> +static void amd_iommu_uninit(PCIDevice *dev)
>>>> +{
>>>> +    AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);
>>>> +    amd_iommu_iotlb_reset(s);
>>>
>>>
>>> at this point you also need to clean up the memory regions you use.
>>>
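
Something along these lines, assuming amd_iommu_iotlb_reset() only empties
the hash table - the per-device IOMMU regions and address spaces created in
bridge_host_amd_iommu() would need similar treatment:

    static void amd_iommu_uninit(PCIDevice *dev)
    {
        AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);

        amd_iommu_iotlb_reset(s);
        g_hash_table_destroy(s->iotlb);  /* free the IOTLB table itself */
        /* drop the MMIO region that realize added to system memory */
        memory_region_del_subregion(get_system_memory(), &s->mmio);
    }
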
>>>> +}
>>>> +
>>>> +static void amd_iommu_class_init(ObjectClass *klass, void* data)
>>>> +{
>>>> +    DeviceClass *dc = DEVICE_CLASS(klass);
>>>> +    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
>>>> +
>>>> +    k->realize = amd_iommu_realize;
>>>> +    k->exit = amd_iommu_uninit;
>>>> +
>>>> +    dc->reset = amd_iommu_reset;
>>>> +    dc->vmsd = &vmstate_amd_iommu;
>>>> +    dc->props = amd_iommu_properties;
>>>> +}
>>>> +
>>>> +static const TypeInfo amd_iommu = {
>>>> +    .name = TYPE_AMD_IOMMU_DEVICE,
>>>> +    .parent = TYPE_PCI_DEVICE,
>>>> +    .instance_size = sizeof(AMDIOMMUState),
>>>> +    .class_init = amd_iommu_class_init
>>>> +};
>>>> +
>>>> +static void amd_iommu_register_types(void)
>>>> +{
>>>> +    type_register_static(&amd_iommu);
>>>> +}
>>>> +
>>>> +type_init(amd_iommu_register_types);
>>>> diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h
>>>> new file mode 100644
>>>> index 0000000..7d317e1
>>>> --- /dev/null
>>>> +++ b/hw/i386/amd_iommu.h
>>>> @@ -0,0 +1,395 @@
>>>> +/*
>>>> + * QEMU emulation of an AMD IOMMU (AMD-Vi)
>>>> + *
>>>> + * Copyright (C) 2011 Eduard - Gabriel Munteanu
>>>> + * Copyright (C) 2015 David Kiarie, <davidkiarie4@gmail.com>
>>>> + *
>>>> + * This program is free software; you can redistribute it and/or modify
>>>> + * it under the terms of the GNU General Public License as published by
>>>> + * the Free Software Foundation; either version 2 of the License, or
>>>> + * (at your option) any later version.
>>>> +
>>>> + * This program is distributed in the hope that it will be useful,
>>>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>>>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
>>>> + * GNU General Public License for more details.
>>>> +
>>>> + * You should have received a copy of the GNU General Public License
>>>> along
>>>> + * with this program; if not, see <http://www.gnu.org/licenses/>.
>>>> + */
>>>> +
>>>> +#ifndef AMD_IOMMU_H_
>>>> +#define AMD_IOMMU_H_
>>>> +
>>>> +#include "hw/hw.h"
>>>> +#include "hw/pci/pci.h"
>>>> +#include "hw/pci/msi.h"
>>>> +#include "hw/sysbus.h"
>>>> +#include "sysemu/dma.h"
>>>> +
>>>> +/* Capability registers */
>>>> +#define IOMMU_CAPAB_HEADER            0x00
>>>> +#define   IOMMU_CAPAB_REV_TYPE        0x02
>>>> +#define   IOMMU_CAPAB_FLAGS           0x03
>>>> +#define IOMMU_CAPAB_BAR_LOW           0x04
>>>> +#define IOMMU_CAPAB_BAR_HIGH          0x08
>>>> +#define IOMMU_CAPAB_RANGE             0x0C
>>>> +#define IOMMU_CAPAB_MISC              0x10
>>>> +#define IOMMU_CAPAB_MISC1             0x14
>>>> +
>>>> +#define IOMMU_CAPAB_SIZE              0x18
>>>> +#define IOMMU_CAPAB_REG_SIZE          0x04
>>>> +
>>>> +/* Capability header data */
>>>> +#define IOMMU_CAPAB_ID_SEC            0xf
>>>> +#define IOMMU_CAPAB_FLAT_EXT          (1 << 28)
>>>> +#define IOMMU_CAPAB_EFR_SUP           (1 << 27)
>>>> +#define IOMMU_CAPAB_FLAG_NPCACHE      (1 << 26)
>>>> +#define IOMMU_CAPAB_FLAG_HTTUNNEL     (1 << 25)
>>>> +#define IOMMU_CAPAB_FLAG_IOTLBSUP     (1 << 24)
>>>> +#define IOMMU_CAPAB_INIT_REV          (1 << 19)
>>>> +#define IOMMU_CAPAB_INIT_TYPE         (3 << 16)
>>>> +#define IOMMU_CAPAB_INIT_REV_TYPE     (IOMMU_CAPAB_REV |
>>>> IOMMU_CAPAB_TYPE)
>>>> +#define IOMMU_CAPAB_INIT_FLAGS        (IOMMU_CAPAB_FLAG_NPCACHE | \
>>>> +                                       IOMMU_CAPAB_FLAG_HTTUNNEL)
>>>> +#define IOMMU_CAPAB_INIT_MISC         ((64 << 15) | (48 << 8))
>>>> +#define IOMMU_CAPAB_BAR_MASK          (~((1UL << 14) - 1))
>>>> +
>>>> +/* MMIO registers */
>>>> +#define IOMMU_MMIO_DEVICE_TABLE       0x0000
>>>> +#define IOMMU_MMIO_COMMAND_BASE       0x0008
>>>> +#define IOMMU_MMIO_EVENT_BASE         0x0010
>>>> +#define IOMMU_MMIO_CONTROL            0x0018
>>>> +#define IOMMU_MMIO_EXCL_BASE          0x0020
>>>> +#define IOMMU_MMIO_EXCL_LIMIT         0x0028
>>>> +#define IOMMU_MMIO_EXT_FEATURES       0x0030
>>>> +#define IOMMU_MMIO_COMMAND_HEAD       0x2000
>>>> +#define IOMMU_MMIO_COMMAND_TAIL       0x2008
>>>> +#define IOMMU_MMIO_EVENT_HEAD         0x2010
>>>> +#define IOMMU_MMIO_EVENT_TAIL         0x2018
>>>> +#define IOMMU_MMIO_STATUS             0x2020
>>>> +#define IOMMU_MMIO_PPR_BASE           0x0038
>>>> +#define IOMMU_MMIO_PPR_HEAD           0x2030
>>>> +#define IOMMU_MMIO_PPR_TAIL           0x2038
>>>> +
>>>> +#define IOMMU_MMIO_SIZE               0x4000
>>>> +
>>>> +#define IOMMU_MMIO_DEVTAB_SIZE_MASK   ((1ULL << 12) - 1)
>>>> +#define IOMMU_MMIO_DEVTAB_BASE_MASK   (((1ULL << 52) - 1) & ~ \
>>>> +                                       IOMMU_MMIO_DEVTAB_SIZE_MASK)
>>>> +#define IOMMU_MMIO_DEVTAB_ENTRY_SIZE  32
>>>> +#define IOMMU_MMIO_DEVTAB_SIZE_UNIT   4096
>>>> +
>>>> +/* some of this are similar but just for readability */
>>>> +#define IOMMU_MMIO_CMDBUF_SIZE_BYTE       (IOMMU_MMIO_COMMAND_BASE + 7)
>>>> +#define IOMMU_MMIO_CMDBUF_SIZE_MASK       0x0F
>>>> +#define IOMMU_MMIO_CMDBUF_BASE_MASK       IOMMU_MMIO_DEVTAB_BASE_MASK
>>>> +#define IOMMU_MMIO_CMDBUF_DEFAULT_SIZE    8
>>>> +#define IOMMU_MMIO_CMDBUF_HEAD_MASK       (((1ULL << 19) - 1) & ~0x0F)
>>>> +#define IOMMU_MMIO_CMDBUF_TAIL_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
>>>> +
>>>> +#define IOMMU_MMIO_EVTLOG_SIZE_BYTE       (IOMMU_MMIO_EVENT_BASE + 7)
>>>> +#define IOMMU_MMIO_EVTLOG_SIZE_MASK       IOMMU_MMIO_CMDBUF_SIZE_MASK
>>>> +#define IOMMU_MMIO_EVTLOG_BASE_MASK       IOMMU_MMIO_CMDBUF_BASE_MASK
>>>> +#define IOMMU_MMIO_EVTLOG_DEFAULT_SIZE    IOMMU_MMIO_CMDBUF_DEFAULT_SIZE
>>>> +#define IOMMU_MMIO_EVTLOG_HEAD_MASK       (((1ULL << 19) - 1) & ~0x0F)
>>>> +#define IOMMU_MMIO_EVTLOG_TAIL_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
>>>> +
>>>> +#define IOMMU_MMIO_PPRLOG_SIZE_BYTE       (IOMMU_MMIO_EVENT_BASE + 7)
>>>> +#define IOMMU_MMIO_PPRLOG_HEAD_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
>>>> +#define IOMMU_MMIO_PPRLOG_TAIL_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
>>>> +#define IOMMU_MMIO_PPRLOG_BASE_MASK       IOMMU_MMIO_EVTLOG_BASE_MASK
>>>> +#define IOMMU_MMIO_PPRLOG_SIZE_MASK       IOMMU_MMIO_EVTLOG_SIZE_MASK
>>>> +
>>>> +#define IOMMU_MMIO_EXCL_BASE_MASK         IOMMU_MMIO_DEVTAB_BASE_MASK
>>>> +#define IOMMU_MMIO_EXCL_ENABLED_MASK      (1ULL << 0)
>>>> +#define IOMMU_MMIO_EXCL_ALLOW_MASK        (1ULL << 1)
>>>> +#define IOMMU_MMIO_EXCL_LIMIT_MASK        IOMMU_MMIO_DEVTAB_BASE_MASK
>>>> +#define IOMMU_MMIO_EXCL_LIMIT_LOW         0xFFF
>>>> +
>>>> +/* mmio control register flags */
>>>> +#define IOMMU_MMIO_CONTROL_IOMMUEN        (1ULL << 0)
>>>> +#define IOMMU_MMIO_CONTROL_HTTUNEN        (1ULL << 1)
>>>> +#define IOMMU_MMIO_CONTROL_EVENTLOGEN     (1ULL << 2)
>>>> +#define IOMMU_MMIO_CONTROL_EVENTINTEN     (1ULL << 3)
>>>> +#define IOMMU_MMIO_CONTROL_COMWAITINTEN   (1ULL << 4)
>>>> +#define IOMMU_MMIO_CONTROL_PASSPW         (1ULL << 7)
>>>> +#define IOMMU_MMIO_CONTROL_REPASSPW       (1ULL << 9)
>>>> +#define IOMMU_MMIO_CONTROL_COHERENT       (1ULL << 10)
>>>> +#define IOMMU_MMIO_CONTROL_ISOC           (1ULL << 11)
>>>> +#define IOMMU_MMIO_CONTROL_CMDBUFLEN      (1ULL << 12)
>>>> +#define IOMMU_MMIO_CONTROL_PPRLOGEN       (1ULL << 13)
>>>> +#define IOMMU_MMIO_CONTROL_PPRINTEN       (1ULL << 14)
>>>> +#define IOMMU_MMIO_CONTROL_PPREN          (1ULL << 15)
>>>> +#define IOMMU_MMIO_CONTROL_GAEN           (1ULL << 16)
>>>> +#define IOMMU_MMIO_CONTROL_GTEN           (1ULL << 17)
>>>> +
>>>> +/* MMIO status register bits */
>>>> +#define IOMMU_MMIO_STATUS_PPR_OVFE    (1 << 18)
>>>> +#define IOMMU_MMIO_STATUS_PPR_OVFEB   (1 << 17)
>>>> +#define IOMMU_MMIO_STATUS_EVT_ACTIVE  (1 << 16)
>>>> +#define IOMMU_MMIO_STATUS_EVT_OVFB    (1 << 15)
>>>> +#define IOMMU_MMIO_STATUS_PPR_ACTIVE  (1 << 12)
>>>> +#define IOMMU_MMIO_STATUS_PPR_OVFB    (1 << 11)
>>>> +#define IOMMU_MMIO_STATUS_GA_INT      (1 << 10)
>>>> +#define IOMMU_MMIO_STATUS_GA_RUN      (1 << 9)
>>>> +#define IOMMU_MMIO_STATUS_GA_OVF      (1 << 8)
>>>> +#define IOMMU_MMIO_STATUS_PPR_RUN     (1 << 7)
>>>> +#define IOMMU_MMIO_STATUS_PPR_INT     (1 << 6)
>>>> +#define IOMMU_MMIO_STATUS_PPR_OVF     (1 << 5)
>>>> +#define IOMMU_MMIO_STATUS_CMDBUF_RUN  (1 << 4)
>>>> +#define IOMMU_MMIO_STATUS_EVT_RUN     (1 << 3)
>>>> +#define IOMMU_MMIO_STATUS_COMP_INT    (1 << 2)
>>>> +#define IOMMU_MMIO_STATUS_EVT_INT     (1 << 1)
>>>> +#define IOMMU_MMIO_STATUS_EVT_OVF     (1 << 0)
>>>> +
>>>> +#define IOMMU_CMDBUF_ID_BYTE              0x07
>>>> +#define IOMMU_CMDBUF_ID_RSHIFT            4
>>>> +
>>>> +#define IOMMU_CMD_COMPLETION_WAIT         0x01
>>>> +#define IOMMU_CMD_INVAL_DEVTAB_ENTRY      0x02
>>>> +#define IOMMU_CMD_INVAL_IOMMU_PAGES       0x03
>>>> +#define IOMMU_CMD_INVAL_IOTLB_PAGES       0x04
>>>> +#define IOMMU_CMD_INVAL_INTR_TABLE        0x05
>>>> +#define IOMMU_CMD_PREFETCH_IOMMU_PAGES    0x06
>>>> +#define IOMMU_CMD_COMPLETE_PPR_REQUEST    0x07
>>>> +#define IOMMU_CMD_INVAL_IOMMU_ALL         0x08
>>>> +
>>>> +#define IOMMU_DEVTAB_ENTRY_SIZE           32
>>>> +
>>>> +/* Device table entry bits 0:63 */
>>>> +#define IOMMU_DEV_VALID                   (1ULL << 0)
>>>> +#define IOMMU_DEV_TRANSLATION_VALID       (1ULL << 1)
>>>> +#define IOMMU_DEV_MODE_MASK               0x7
>>>> +#define IOMMU_DEV_MODE_RSHIFT             9
>>>> +#define IOMMU_DEV_PT_ROOT_MASK            0xFFFFFFFFFF000
>>>> +#define IOMMU_DEV_PT_ROOT_RSHIFT          12
>>>> +#define IOMMU_DEV_PERM_SHIFT              61
>>>> +#define IOMMU_DEV_PERM_READ               (1ULL << 61)
>>>> +#define IOMMU_DEV_PERM_WRITE              (1ULL << 62)
>>>> +
>>>> +/* Device table entry bits 64:127 */
>>>> +#define IOMMU_DEV_DOMID_ID_MASK          ((1ULL << 16) - 1)
>>>> +#define IOMMU_DEV_IOTLB_SUPPORT           (1ULL << 17)
>>>> +#define IOMMU_DEV_SUPPRESS_PF             (1ULL << 18)
>>>> +#define IOMMU_DEV_SUPPRESS_ALL_PF         (1ULL << 19)
>>>> +#define IOMMU_DEV_IOCTL_MASK              (~3)
>>>> +#define IOMMU_DEV_IOCTL_RSHIFT            20
>>>> +#define   IOMMU_DEV_IOCTL_DENY            0
>>>> +#define   IOMMU_DEV_IOCTL_PASSTHROUGH     1
>>>> +#define   IOMMU_DEV_IOCTL_TRANSLATE       2
>>>> +#define IOMMU_DEV_CACHE                   (1ULL << 37)
>>>> +#define IOMMU_DEV_SNOOP_DISABLE           (1ULL << 38)
>>>> +#define IOMMU_DEV_EXCL                    (1ULL << 39)
>>>> +
>>>> +/* Event codes and flags, as stored in the info field */
>>>> +#define IOMMU_EVENT_ILLEGAL_DEVTAB_ENTRY  (0x1U << 12)
>>>> +#define IOMMU_EVENT_IOPF                  (0x2U << 12)
>>>> +#define   IOMMU_EVENT_IOPF_I              (1U << 3)
>>>> +#define   IOMMU_EVENT_IOPF_PR             (1U << 4)
>>>> +#define   IOMMU_EVENT_IOPF_RW             (1U << 5)
>>>> +#define   IOMMU_EVENT_IOPF_PE             (1U << 6)
>>>> +#define   IOMMU_EVENT_IOPF_RZ             (1U << 7)
>>>> +#define   IOMMU_EVENT_IOPF_TR             (1U << 8)
>>>> +#define IOMMU_EVENT_DEV_TAB_HW_ERROR      (0x3U << 12)
>>>> +#define IOMMU_EVENT_PAGE_TAB_HW_ERROR     (0x4U << 12)
>>>> +#define IOMMU_EVENT_ILLEGAL_COMMAND_ERROR (0x5U << 12)
>>>> +#define IOMMU_EVENT_COMMAND_HW_ERROR      (0x6U << 12)
>>>> +#define IOMMU_EVENT_IOTLB_INV_TIMEOUT     (0x7U << 12)
>>>> +#define IOMMU_EVENT_INVALID_DEV_REQUEST   (0x8U << 12)
>>>> +
>>>> +#define IOMMU_EVENT_LEN                   16
>>>> +#define IOMMU_PERM_READ             (1 << 0)
>>>> +#define IOMMU_PERM_WRITE            (1 << 1)
>>>> +#define IOMMU_PERM_RW               (IOMMU_PERM_READ | IOMMU_PERM_WRITE)
>>>> +
>>>> +/* AMD RD890 Chipset */
>>>> +#define PCI_DEVICE_ID_RD890_IOMMU   0x20
>
>
>>>
>>>
>>> We keep the pci ids in include/hw/pci/pci_ids.h
>
> This is a dummy device id I use for the IOMMU - the IOMMU doesn't have a
> specific device id. There is a device id in the Linux include files for
> a certain AMD IOMMU but it makes the IOMMU seem to be on a non-existent
> bus so I don't use it.
>
>>>
>>>> +
>>>> +#define IOMMU_FEATURE_PREFETCH            (1ULL << 0)
>>>> +#define IOMMU_FEATURE_PPR                 (1ULL << 1)
>>>> +#define IOMMU_FEATURE_NX                  (1ULL << 3)
>>>> +#define IOMMU_FEATURE_GT                  (1ULL << 4)
>>>> +#define IOMMU_FEATURE_IA                  (1ULL << 6)
>>>> +#define IOMMU_FEATURE_GA                  (1ULL << 7)
>>>> +#define IOMMU_FEATURE_HE                  (1ULL << 8)
>>>> +#define IOMMU_FEATURE_PC                  (1ULL << 9)
>>>> +
>>>> +/* reserved DTE bits */
>>>> +#define IOMMU_DTE_LOWER_QUAD_RESERVED  0x80300000000000fc
>>>> +#define IOMMU_DTE_MIDDLE_QUAD_RESERVED 0x0000000000000100
>>>> +#define IOMMU_DTE_UPPER_QUAD_RESERVED  0x08f0000000000000
>>>> +
>>>> +/* IOMMU paging mode */
>>>> +#define IOMMU_GATS_MODE                 (6ULL <<  12)
>>>> +#define IOMMU_HATS_MODE                 (6ULL <<  10)
>>>> +
>>>> +/* PCI SIG constants */
>>>> +#define PCI_BUS_MAX 256
>>>> +#define PCI_SLOT_MAX 32
>>>> +#define PCI_FUNC_MAX 8
>>>> +#define PCI_DEVFN_MAX 256
>>>
>>>
>>> Maybe we can move the PCI macros to include/hw/pci/pci.h; those are not
>>> IOMMU specific.
>
> Yeah, these are PCI macros, but they are not copied from Linux, while
> the macros in pci.h seem to have been copied from Linux.
>
>>>
>>>> +
>>>> +/* IOTLB */
>>>> +#define IOMMU_IOTLB_MAX_SIZE 1024
>>>> +#define IOMMU_DEVID_SHIFT    36
>>>> +
>>>> +/* extended feature support */
>>>> +#define IOMMU_EXT_FEATURES (IOMMU_FEATURE_PREFETCH | IOMMU_FEATURE_PPR |
>>>> \
>>>> +        IOMMU_FEATURE_NX | IOMMU_FEATURE_IA | IOMMU_FEATURE_GT | \
>>>> +        IOMMU_FEATURE_GA | IOMMU_FEATURE_HE | IOMMU_GATS_MODE | \
>>>> +        IOMMU_HATS_MODE)
>>>> +
>>>> +/* capabilities header */
>>>> +#define IOMMU_CAPAB_FEATURES (IOMMU_CAPAB_FLAT_EXT | \
>>>> +        IOMMU_CAPAB_FLAG_NPCACHE | IOMMU_CAPAB_FLAG_IOTLBSUP \
>>>> +        | IOMMU_CAPAB_ID_SEC | IOMMU_CAPAB_INIT_TYPE | \
>>>> +        IOMMU_CAPAB_FLAG_HTTUNNEL |  IOMMU_CAPAB_EFR_SUP)
>>>> +
>>>> +/* command constants */
>>>> +#define IOMMU_COM_STORE_ADDRESS_MASK 0xffffffffffff8
>>>> +#define IOMMU_COM_COMPLETION_STORE_MASK 0x1
>>>> +#define IOMMU_COM_COMPLETION_INTR 0x2
>>>> +#define IOMMU_COM_COMPLETION_DATA_OFF 0x8
>>>> +#define IOMMU_COMMAND_SIZE 0x10
>>>> +
>>>> +/* IOMMU default address */
>>>> +#define IOMMU_BASE_ADDR 0xfed80000
>>>> +
>>>> +/* page management constants */
>>>> +#define IOMMU_PAGE_SHIFT 12
>>>> +#define IOMMU_PAGE_SIZE  (1ULL << IOMMU_PAGE_SHIFT)
>>>> +
>>>> +#define IOMMU_PAGE_SHIFT_4K 12
>>>> +#define IOMMU_PAGE_MASK_4K  (~((1ULL << IOMMU_PAGE_SHIFT_4K) - 1))
>>>> +#define IOMMU_PAGE_SHIFT_2M 21
>>>> +#define IOMMU_PAGE_MASK_2M  (~((1ULL << IOMMU_PAGE_SHIFT_2M) - 1))
>>>> +#define IOMMU_PAGE_SHIFT_1G 30
>>>> +#define IOMMU_PAGE_MASK_1G (~((1ULL << IOMMU_PAGE_SHIFT_1G) - 1))
>>>> +
>>>> +#define IOMMU_MAX_VA_ADDR          (48UL << 5)
>>>> +#define IOMMU_MAX_PH_ADDR          (40UL << 8)
>>>> +#define IOMMU_MAX_GVA_ADDR         (48UL << 15)
>>>> +
>>>> +/* invalidation command device id */
>>>> +#define IOMMU_INVAL_DEV_ID_SHIFT  32
>>>> +#define IOMMU_INVAL_DEV_ID_MASK   (~((1UL << IOMMU_INVAL_DEV_ID_SHIFT) -
>>>> 1))
David Kiarie March 2, 2016, 7:11 p.m. UTC | #5
On 25/02/16 18:43, Marcel Apfelbaum wrote:
> On 02/21/2016 08:10 PM, David Kiarie wrote:
>> Add AMD IOMMU emulaton to Qemu in addition to Intel IOMMU
>> The IOMMU does basic translation, error checking and has a
>> mininal IOTLB implementation
>
> Hi,
>
>>
>> Signed-off-by: David Kiarie <davidkiarie4@gmail.com>
>> ---
>>   hw/i386/Makefile.objs |    1 +
>>   hw/i386/amd_iommu.c   | 1432 
>> +++++++++++++++++++++++++++++++++++++++++++++++++
>>   hw/i386/amd_iommu.h   |  395 ++++++++++++++
>>   include/hw/pci/pci.h  |    2 +
>>   4 files changed, 1830 insertions(+)
>>   create mode 100644 hw/i386/amd_iommu.c
>>   create mode 100644 hw/i386/amd_iommu.h
>>
>> diff --git a/hw/i386/Makefile.objs b/hw/i386/Makefile.objs
>> index b52d5b8..2f1a265 100644
>> --- a/hw/i386/Makefile.objs
>> +++ b/hw/i386/Makefile.objs
>> @@ -3,6 +3,7 @@ obj-y += multiboot.o
>>   obj-y += pc.o pc_piix.o pc_q35.o
>>   obj-y += pc_sysfw.o
>>   obj-y += intel_iommu.o
>> +obj-y += amd_iommu.o
>>   obj-$(CONFIG_XEN) += ../xenpv/ xen/
>>
>>   obj-y += kvmvapic.o
>> diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c
>> new file mode 100644
>> index 0000000..3dac043
>> --- /dev/null
>> +++ b/hw/i386/amd_iommu.c
>> @@ -0,0 +1,1432 @@
>> +/*
>> + * QEMU emulation of AMD IOMMU (AMD-Vi)
>> + *
>> + * Copyright (C) 2011 Eduard - Gabriel Munteanu
>> + * Copyright (C) 2015 David Kiarie, <davidkiarie4@gmail.com>
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License as published by
>> + * the Free Software Foundation; either version 2 of the License, or
>> + * (at your option) any later version.
>> +
>> + * This program is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
>> + * GNU General Public License for more details.
>> +
>> + * You should have received a copy of the GNU General Public License 
>> along
>> + * with this program; if not, see <http://www.gnu.org/licenses/>.
>> + *
>> + * Cache implementation inspired by hw/i386/intel_iommu.c
>> + *
>> + */
>> +#include "hw/i386/amd_iommu.h"
>> +
>> +/*#define DEBUG_AMD_IOMMU*/
>> +#ifdef DEBUG_AMD_IOMMU
>> +enum {
>> +    DEBUG_GENERAL, DEBUG_CAPAB, DEBUG_MMIO, DEBUG_ELOG,
>> +    DEBUG_CACHE, DEBUG_COMMAND, DEBUG_MMU
>> +};
>> +
>> +#define IOMMU_DBGBIT(x)   (1 << DEBUG_##x)
>> +static int iommu_dbgflags = IOMMU_DBGBIT(MMIO);
>> +
>> +#define IOMMU_DPRINTF(what, fmt, ...) do { \
>> +    if (iommu_dbgflags & IOMMU_DBGBIT(what)) { \
>> +        fprintf(stderr, "(amd-iommu)%s: " fmt "\n", __func__, \
>> +                ## __VA_ARGS__); } \
>> +    } while (0)
>> +#else
>> +#define IOMMU_DPRINTF(what, fmt, ...) do {} while (0)
>> +#endif
>> +
>> +typedef struct AMDIOMMUAddressSpace {
>> +    uint8_t bus_num;            /* bus 
>> number                           */
>> +    uint8_t devfn;              /* device 
>> function                      */
>> +    AMDIOMMUState *iommu_state; /* IOMMU - one per 
>> machine              */
>> +    MemoryRegion iommu;         /* Device's iommu 
>> region                */
>> +    AddressSpace as;            /* device's corresponding address 
>> space */
>> +} AMDIOMMUAddressSpace;
>> +
>> +/* IOMMU cache entry */
>> +typedef struct IOMMUIOTLBEntry {
>> +    uint64_t gfn;
>> +    uint16_t domid;
>> +    uint64_t devid;
>> +    uint64_t perms;
>> +    uint64_t translated_addr;
>> +} IOMMUIOTLBEntry;
>> +
>> +/* configure MMIO registers at startup/reset */
>> +static void amd_iommu_set_quad(AMDIOMMUState *s, hwaddr addr, 
>> uint64_t val,
>> +                               uint64_t romask, uint64_t w1cmask)
>> +{
>> +    stq_le_p(&s->mmior[addr], val);
>> +    stq_le_p(&s->romask[addr], romask);
>> +    stq_le_p(&s->w1cmask[addr], w1cmask);
>> +}
>> +
>> +static uint16_t amd_iommu_readw(AMDIOMMUState *s, hwaddr addr)
>> +{
>> +    return lduw_le_p(&s->mmior[addr]);
>> +}
>> +
>> +static uint32_t amd_iommu_readl(AMDIOMMUState *s, hwaddr addr)
>> +{
>> +    return ldl_le_p(&s->mmior[addr]);
>> +}
>> +
>> +static uint64_t amd_iommu_readq(AMDIOMMUState *s, hwaddr addr)
>> +{
>> +    return ldq_le_p(&s->mmior[addr]);
>> +}
>> +
>> +/* internal write */
>> +static void amd_iommu_writeq_raw(AMDIOMMUState *s, uint64_t val, 
>> hwaddr addr)
>> +{
>> +    stq_le_p(&s->mmior[addr], val);
>> +}
>> +
>> +/* external write */
>> +static void amd_iommu_writew(AMDIOMMUState *s, hwaddr addr, uint16_t 
>> val)
>> +{
>> +    uint16_t romask = lduw_le_p(&s->romask[addr]);
>> +    uint16_t w1cmask = lduw_le_p(&s->w1cmask[addr]);
>> +    uint16_t oldval = lduw_le_p(&s->mmior[addr]);
>> +    stw_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask & 
>> oldval));
>> +}
>> +
>> +static void amd_iommu_writel(AMDIOMMUState *s, hwaddr addr, uint32_t 
>> val)
>> +{
>> +    uint32_t romask = ldl_le_p(&s->romask[addr]);
>> +    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
>> +    uint32_t oldval = ldl_le_p(&s->mmior[addr]);
>> +    stl_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask & 
>> oldval));
>> +}
>> +
>> +static void amd_iommu_writeq(AMDIOMMUState *s, hwaddr addr, uint64_t 
>> val)
>> +{
>> +    uint64_t romask = ldq_le_p(&s->romask[addr]);
>> +    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
>> +    uint32_t oldval = ldq_le_p(&s->mmior[addr]);
>> +    stq_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask & 
>> oldval));
>> +}
>> +
>> +static void amd_iommu_log_event(AMDIOMMUState *s, uint16_t *evt)
>> +{
>> +    /* event logging not enabled */
>> +    if (!s->evtlog_enabled || *(uint64_t 
>> *)&s->mmior[IOMMU_MMIO_STATUS] |
>> +        IOMMU_MMIO_STATUS_EVT_OVF) {
>> +        return;
>> +    }
>> +
>> +    /* event log buffer full */
>> +    if (s->evtlog_tail >= s->evtlog_len) {
>> +        *(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS] |= 
>> IOMMU_MMIO_STATUS_EVT_OVF;
>> +        /* generate interrupt */
>> +        msi_notify(&s->dev, 0);
>> +    }
>> +
>> +    if (dma_memory_write(&address_space_memory, s->evtlog_len + 
>> s->evtlog_tail,
>> +       &evt, IOMMU_EVENT_LEN)) {
>> +        IOMMU_DPRINTF(ELOG, "error: fail to write at address 0x%"PRIx64
>> +                      " + offset 0x%"PRIx32, s->evtlog, 
>> s->evtlog_tail);
>> +    }
>> +
>> +     s->evtlog_tail += IOMMU_EVENT_LEN;
>> +     *(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS] |= 
>> IOMMU_MMIO_STATUS_COMP_INT;
>> +}
>> +
>> +/* log an error encountered page-walking
>> + *
>> + * @addr: virtual address in translation request
>> + */
>> +static void amd_iommu_page_fault(AMDIOMMUState *s, uint16_t devid,
>> +                                 dma_addr_t addr, uint16_t info)
>> +{
>> +    IOMMU_DPRINTF(ELOG, "");
>> +
>> +    uint16_t evt[8];
>> +
>> +    info |= IOMMU_EVENT_IOPF_I;
>> +
>> +    /* encode information */
>> +    *(uint16_t *)&evt[0] = devid;
>> +    *(uint16_t *)&evt[3] = info;
>> +    *(uint64_t *)&evt[4] = cpu_to_le64(addr);
>> +
>> +    /* log a page fault */
>> +    amd_iommu_log_event(s, evt);
>> +
>> +    /* Abort the translation */
>> +    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
>> +            PCI_STATUS_SIG_TARGET_ABORT);
>> +}
>> +/*
>> + * log a master abort accessing device table
>> + *  @devtab : address of device table entry
>> + *  @info : error flags
>> + */
>> +static void amd_iommu_log_devtab_error(AMDIOMMUState *s, uint16_t 
>> devid,
>> +                                       dma_addr_t devtab, uint16_t 
>> info)
>> +{
>> +
>> +    IOMMU_DPRINTF(ELOG, "");
>> +
>> +    uint16_t evt[8];
>> +
>> +    info |= IOMMU_EVENT_DEV_TAB_HW_ERROR;
>> +
>> +    /* encode information */
>> +    *(uint16_t *)&evt[0] = devid;
>> +    *(uint8_t *)&evt[3]  = info;
>> +    *(uint64_t *)&evt[4] = cpu_to_le64(devtab);
>> +
>> +    amd_iommu_log_event(s, evt);
>> +
>> +    /* Abort the translation */
>> +    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
>> +            PCI_STATUS_SIG_TARGET_ABORT);
>> +
>> +}
>> +
>> +/* log a master abort encountered during a page-walk
>> + *  @addr : address that couldn't be accessed
>> + */
>> +static void amd_iommu_log_pagetab_error(AMDIOMMUState *s, uint16_t 
>> devid,
>> +                                        dma_addr_t addr, uint16_t info)
>> +{
>> +    IOMMU_DPRINTF(ELOG, "");
>> +
>> +    uint16_t evt[8];
>> +
>> +    info |= IOMMU_EVENT_PAGE_TAB_HW_ERROR;
>> +
>> +    /* encode information */
>> +    *(uint16_t *)&evt[0] = devid;
>> +    *(uint8_t *)&evt[3]  = info;
>> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
>> +
>> +    amd_iommu_log_event(s, evt);
>> +
>> +    /* Abort the translation */
>> +    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
>> +            PCI_STATUS_SIG_TARGET_ABORT);
>> +
>> +}
>> +
>> +/* log an event trying to access command buffer
>> + *   @addr : address that couldn't be accessed
>> + */
>> +static void amd_iommu_log_command_error(AMDIOMMUState *s, dma_addr_t 
>> addr)
>> +{
>> +    IOMMU_DPRINTF(ELOG, "");
>> +
>> +    uint16_t evt[8];
>> +
>> +    /* encode information */
>> +    *(uint8_t *)&evt[3]  = (uint8_t)IOMMU_EVENT_COMMAND_HW_ERROR;
>> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
>> +
>> +    amd_iommu_log_event(s, evt);
>> +
>> +    /* Abort the translation */
>> +    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
>> +            PCI_STATUS_SIG_TARGET_ABORT);
>> +}
>> +
>> +/* log an illegal comand event
>> + *   @addr : address of illegal command
>> + */
>> +static void amd_iommu_log_illegalcom_error(AMDIOMMUState *s, 
>> uint16_t info,
>> +                                           dma_addr_t addr)
>> +{
>> +    IOMMU_DPRINTF(ELOG, "");
>> +
>> +    uint16_t evt[8];
>> +
>> +    /* encode information */
>> +    *(uint8_t *)&evt[3]  = (uint8_t)IOMMU_EVENT_ILLEGAL_COMMAND_ERROR;
>> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
>
> Can you please use a macro instead of the literal 3?
>
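For illustration, one way to name those constants (the macro names below are
invented for this sketch and are not part of the patch):

    /* hypothetical names for the event layout used by the log helpers */
    #define IOMMU_EVENT_FLAGS_INDEX  3   /* 16-bit word in evt[] holding the flags  */
    #define IOMMU_EVENT_ADDR_INDEX   4   /* first 16-bit word of the address field  */
    #define IOMMU_EVENT_ADDR_RSHIFT  3   /* the logged address is stored >> 3       */

    *(uint8_t *)&evt[IOMMU_EVENT_FLAGS_INDEX] = (uint8_t)IOMMU_EVENT_ILLEGAL_COMMAND_ERROR;
    *(uint64_t *)&evt[IOMMU_EVENT_ADDR_INDEX] = cpu_to_le64(addr) >> IOMMU_EVENT_ADDR_RSHIFT;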
>> +
>> +    amd_iommu_log_event(s, evt);
>> +}
>> +
>> +/* log an error accessing device table
>> + *
>> + *  @devid : device owning the table entry
>> + *  @devtab : address of device table entry
>> + *  @info : error flags
>> + */
>> +static void amd_iommu_log_illegaldevtab_error(AMDIOMMUState *s, 
>> uint16_t devid,
>> +                                              dma_addr_t addr, 
>> uint16_t info)
>> +{
>> +    IOMMU_DPRINTF(ELOG, "");
>> +
>> +    uint16_t evt[8];
>> +
>> +    info |= IOMMU_EVENT_ILLEGAL_DEVTAB_ENTRY;
>> +
>> +    *(uint16_t *)&evt[0] = devid;
>> +    *(uint8_t *)&evt[3]  = info;
>> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
>> +
>> +    amd_iommu_log_event(s, evt);
>> +}
>
> It seems that all the log functions do the same thing:
> create an event, log it and optionally set PCI_STATUS_SIG_TARGET_ABORT.
>
> I would consider uniting them in a single function. (not a must)
>
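For illustration only, a rough sketch of what such a shared helper could look
like (the function name and the target_abort parameter are invented here; the
per-event differences, e.g. whether the address is shifted or the info field is
one or two bytes, would still be handled by the callers):

    /* hypothetical shared helper: encode the common fields, log, optionally abort */
    static void amd_iommu_log_and_abort(AMDIOMMUState *s, uint16_t devid,
                                        uint16_t info, uint64_t addr,
                                        bool target_abort)
    {
        uint16_t evt[8] = { 0 };

        *(uint16_t *)&evt[0] = devid;
        *(uint8_t *)&evt[3]  = (uint8_t)info;
        *(uint64_t *)&evt[4] = cpu_to_le64(addr);

        amd_iommu_log_event(s, evt);

        if (target_abort) {
            pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
                                       PCI_STATUS_SIG_TARGET_ABORT);
        }
    }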
>> +
>> +static gboolean amd_iommu_uint64_equal(gconstpointer v1, 
>> gconstpointer v2)
>> +{
>> +    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
>> +}
>> +
>> +static guint amd_iommu_uint64_hash(gconstpointer v)
>> +{
>> +    return (guint)*(const uint64_t *)v;
>> +}
>> +
>> +static IOMMUIOTLBEntry *amd_iommu_iotlb_lookup(AMDIOMMUState *s, 
>> hwaddr addr,
>> +                                               uint64_t devid)
>> +{
>> +    uint64_t key = (addr >> IOMMU_PAGE_SHIFT_4K) |
>> +                   ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
>> +    return g_hash_table_lookup(s->iotlb, &key);
>> +}
>> +
>> +static void amd_iommu_iotlb_reset(AMDIOMMUState *s)
>> +{
>> +    assert(s->iotlb);
>> +    g_hash_table_remove_all(s->iotlb);
>> +}
>> +
>> +static gboolean amd_iommu_iotlb_remove_by_devid(gpointer key, 
>> gpointer value,
>> +                                                gpointer user_data)
>> +{
>> +    IOMMUIOTLBEntry *entry = (IOMMUIOTLBEntry *)value;
>> +    uint16_t devid = *(uint16_t *)user_data;
>> +    return entry->devid == devid;
>> +}
>> +
>> +static void amd_iommu_iotlb_remove_page(AMDIOMMUState *s, hwaddr addr,
>> +                                        uint64_t devid)
>> +{
>> +    uint64_t key = (addr >> IOMMU_PAGE_SHIFT_4K) |
>> +                   ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
>> +    g_hash_table_remove(s->iotlb, &key);
>> +}
>> +
>> +/* extract device id */
>> +static inline uint16_t devid_extract(uint8_t *cmd)
>> +{
>> +    return (uint16_t)cmd[2] & IOMMU_INVAL_DEV_ID_MASK;
>> +}
>> +
>> +static void amd_iommu_invalidate_iotlb(AMDIOMMUState *s, uint64_t *cmd)
>> +{
>> +    uint16_t devid = devid_extract((uint8_t *)cmd);
>> +    /* if invalidation of more than one page requested */
>> +    if (IOMMU_INVAL_ALL(cmd[0])) {
>> +        g_hash_table_foreach_remove(s->iotlb, 
>> amd_iommu_iotlb_remove_by_devid,
>> +                                    &devid);
>> +    } else {
>> +        hwaddr addr = (hwaddr)(cmd[1] & IOMMU_INVAL_ADDR_MASK);
>> +        amd_iommu_iotlb_remove_page(s, addr, devid);
>> +    }
>> +}
>> +
>> +static void amd_iommu_update_iotlb(AMDIOMMUState *s, uint16_t devid,
>> +                                   uint64_t gpa, uint64_t spa, 
>> uint64_t perms,
>> +                                   uint16_t domid)
>> +{
>> +    IOMMUIOTLBEntry *entry = g_malloc(sizeof(*entry));
>> +    uint64_t *key = g_malloc(sizeof(key));
>> +    uint64_t gfn = gpa >> IOMMU_PAGE_SHIFT_4K;
>> +
>> +    IOMMU_DPRINTF(CACHE, " update iotlb devid: %02x:%02x.%x gpa 
>> 0x%"PRIx64
>> +                  " hpa 0x%"PRIx64, PCI_BUS_NUM(devid), 
>> PCI_SLOT(devid),
>> +                  PCI_FUNC(devid), gpa, spa);
>> +
>> +    if (g_hash_table_size(s->iotlb) >= IOMMU_IOTLB_MAX_SIZE) {
>> +        IOMMU_DPRINTF(CACHE, "iotlb exceeds size limit - reset");
>> +        amd_iommu_iotlb_reset(s);
>> +    }
>> +
>> +    entry->gfn = gfn;
>> +    entry->domid = domid;
>> +    entry->perms = perms;
>> +    entry->translated_addr = spa;
>> +    *key = gfn | ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
>> +    g_hash_table_replace(s->iotlb, key, entry);
>> +}
>> +
>> +/* execute a completion wait command */
>> +static void amd_iommu_completion_wait(AMDIOMMUState *s, uint8_t *cmd)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +    unsigned int addr;
>> +
>> +    /* completion store */
>> +    if (cmd[0] & IOMMU_COM_COMPLETION_STORE_MASK) {
>> +        addr = le64_to_cpu(*(uint64_t *)cmd) & 
>> IOMMU_COM_STORE_ADDRESS_MASK;
>> +        if (dma_memory_write(&address_space_memory, addr, cmd + 8, 
>> 8)) {
>> +            IOMMU_DPRINTF(ELOG, "error: fail to write at address 
>> 0%x"PRIx64,
>> +                          addr);
>> +        }
>> +    }
>> +
>> +    /* set completion interrupt */
>> +    if (cmd[0] & IOMMU_COM_COMPLETION_INTR) {
>> +        s->mmior[IOMMU_MMIO_STATUS] |= IOMMU_MMIO_STATUS_COMP_INT;
>> +    }
>> +}
>> +
>> +/* get command type */
>> +static uint8_t opcode(uint8_t *cmd)
>> +{
>> +    return cmd[IOMMU_CMDBUF_ID_BYTE] >> IOMMU_CMDBUF_ID_RSHIFT;
>> +}
>> +
>> +/* linux seems to be using reserved bits so I just log without 
>> abortig bug */
>
> I couldn't quite understand the comment
>
>> +static void iommu_inval_devtab_entry(AMDIOMMUState *s, uint8_t *cmd,
>> +                                     uint8_t type)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    /* This command should invalidate internal caches of which there 
>> isn't */
>> +    if (*(uint64_t *)&cmd[0] & IOMMU_CMD_INVAL_DEV_RSVD ||
>> +            *(uint64_t *)&cmd[1]) {
>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + 
>> s->cmdbuf_head);
>> +    }
>> +#ifdef DEBUG_AMD_IOMMU
>> +    uint16_t devid = devid_extract(cmd);
>> +#endif
>> +    IOMMU_DPRINTF(COMMAND, "device table entry for devid: %02x:%02x.%x"
>> +                  "invalidated", PCI_BUS_NUM(devid), PCI_SLOT(devid),
>> +                  PCI_FUNC(devid));
>> +}
>> +
>> +static void iommu_completion_wait(AMDIOMMUState *s, uint8_t *cmd, 
>> uint8_t type)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    if (*(uint32_t *)&cmd[1] & IOMMU_COMPLETION_WAIT_RSVD) {
>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + 
>> s->cmdbuf_head);
>> +    }
>> +    /* pretend to wait for command execution to complete */
>> +    IOMMU_DPRINTF(COMMAND, "completion wait requested with store 
>> address 0x%"
>> +                  PRIx64 " and store data 0x%"PRIx64, (cmd[0] &
>> +                  IOMMU_COM_STORE_ADDRESS_MASK), *(uint64_t *)(cmd + 
>> 8));
>> +    amd_iommu_completion_wait(s, cmd);
>> +}
>> +
>> +static void iommu_complete_ppr(AMDIOMMUState *s, uint8_t *cmd, 
>> uint8_t type)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    if ((*(uint64_t *)&cmd[0] & IOMMU_COMPLETE_PPR_RQ_RSVD) ||
>> +       *(uint64_t *)&cmd[1] & 0xffff000000000000) {
>
>
> Can you please document this mask?
>
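E.g. something along these lines (the macro name is made up here, and whether
bits 63:48 of the second quadword are exactly the reserved ones would have to
be double-checked against the spec):

    /* hypothetical name for the literal above: reserved bits in the second
     * quadword of a COMPLETE_PPR_REQUEST command */
    #define IOMMU_COMPLETE_PPR_HIGH_RSVD  0xffff000000000000ULL

and then the check could use the name instead of the raw 0xffff000000000000.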
>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + 
>> s->cmdbuf_head);
>> +    }
>> +
>> +    IOMMU_DPRINTF(COMMAND, "Execution of PPR queue requested");
>> +}
>> +
>> +static void iommu_inval_all(AMDIOMMUState *s, uint8_t *cmd, uint8_t 
>> type)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    if ((*(uint64_t *)&cmd[0] & IOMMU_INVAL_IOMMU_ALL_RSVD) ||
>> +       *(uint64_t *)&cmd[1]) {
>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + 
>> s->cmdbuf_head);
>> +    }
>> +
>> +    amd_iommu_iotlb_reset(s);
>> +    IOMMU_DPRINTF(COMMAND, "Invalidation of all IOMMU cache 
>> requested");
>> +}
>> +
>> +static inline uint16_t domid_extract(uint64_t *cmd)
>> +{
>> +    return (uint16_t)cmd[0] & IOMMU_INVAL_PAGES_DOMID;
>> +}
>> +
>> +static gboolean amd_iommu_iotlb_remove_by_domid(gpointer key, 
>> gpointer value,
>> +                                                gpointer user_data)
>> +{
>> +    IOMMUIOTLBEntry *entry = (IOMMUIOTLBEntry *)value;
>> +    uint16_t domid = *(uint16_t *)user_data;
>> +    return entry->domid == domid;
>> +}
>> +
>> +/* we don't have devid - we can't remove pages by address */
>> +static void iommu_inval_pages(AMDIOMMUState *s, uint8_t *cmd, 
>> uint8_t type)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +    uint16_t domid = domid_extract((uint64_t *)cmd);
>> +
>> +    if (*(uint64_t *)&cmd[0] & IOMMU_INVAL_IOMMU_PAGES_RSVD ||
>> +       *(uint32_t *)&cmd[1] & 0x00000ff0) {
>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + 
>> s->cmdbuf_head);
>> +    }
>> +
>> +    g_hash_table_foreach_remove(s->iotlb, 
>> amd_iommu_iotlb_remove_by_domid,
>> +                                &domid);
>> +
>> +    IOMMU_DPRINTF(COMMAND, "IOMMU pages for domain 0x%"PRIx16 
>> "invalidated",
>> +                  domid);
>> +}
>> +
>> +static void iommu_prefetch_pages(AMDIOMMUState *s, uint8_t *cmd, 
>> uint8_t type)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    if ((*(uint64_t *)&cmd[0] & IOMMU_PRF_IOMMU_PAGES_RSVD) ||
>> +       (*(uint32_t *)&cmd[1] & 0x00000fd4)) {
>
> Same here: maybe you can name the mask, so we can follow
> the spec more easily.
>
>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + 
>> s->cmdbuf_head);
>> +    }
>> +
>> +    IOMMU_DPRINTF(COMMAND, "Pre-fetch of IOMMU pages requested");
>> +}
>> +
>> +static void iommu_inval_inttable(AMDIOMMUState *s, uint8_t *cmd, 
>> uint8_t type)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    if ((*(uint64_t *)&cmd[0] & IOMMU_INVAL_INTR_TABLE_RSVD) ||
>> +       *(uint64_t *)&cmd[1]) {
>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + 
>> s->cmdbuf_head);
>> +        return;
>> +    }
>> +
>> +    IOMMU_DPRINTF(COMMAND, "interrupt table invalidated");
>> +}
>> +
>> +static void iommu_inval_iotlb(AMDIOMMUState *s, uint8_t *cmd, 
>> uint8_t type)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    if (*(uint32_t *)&cmd[2] & IOMMU_INVAL_IOTLB_PAGES_RSVD) {
>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + 
>> s->cmdbuf_head);
>> +        return;
>> +    }
>> +
>> +    amd_iommu_invalidate_iotlb(s, (uint64_t *)cmd);
>> +    IOMMU_DPRINTF(COMMAND, "IOTLB pages invalidated");
>> +}
>> +
>> +/* not honouring reserved bits is regarded as an illegal command */
>> +static void amd_iommu_cmdbuf_exec(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    uint8_t type;
>> +    uint8_t cmd[IOMMU_COMMAND_SIZE];
>> +
>> +    memset(cmd, 0, IOMMU_COMMAND_SIZE);
>> +
>> +    if (dma_memory_read(&address_space_memory, s->cmdbuf + 
>> s->cmdbuf_head, cmd,
>> +       IOMMU_COMMAND_SIZE)) {
>> +        IOMMU_DPRINTF(COMMAND, "error: fail to access memory at 
>> 0x%"PRIx64
>> +                      " + 0x%"PRIu8, s->cmdbuf, s->cmdbuf_head);
>> +        amd_iommu_log_command_error(s, s->cmdbuf + s->cmdbuf_head);
>> +        return;
>> +    }
>> +
>> +    type = opcode(cmd);
>> +
>> +    switch (type) {
>> +    case IOMMU_CMD_COMPLETION_WAIT:
>> +        iommu_completion_wait(s, cmd, type);
>> +        break;
>> +
>> +    case IOMMU_CMD_INVAL_DEVTAB_ENTRY:
>> +        iommu_inval_devtab_entry(s, cmd, type);
>> +        break;
>> +
>> +    case IOMMU_CMD_INVAL_IOMMU_PAGES:
>> +        iommu_inval_pages(s, cmd, type);
>> +        break;
>> +
>> +    case IOMMU_CMD_INVAL_IOTLB_PAGES:
>> +        iommu_inval_iotlb(s, cmd, type);
>> +        break;
>> +
>> +    case IOMMU_CMD_INVAL_INTR_TABLE:
>> +        iommu_inval_inttable(s, cmd, type);
>> +        break;
>> +
>> +    case IOMMU_CMD_PREFETCH_IOMMU_PAGES:
>> +        iommu_prefetch_pages(s, cmd, type);
>> +        break;
>> +
>> +    case IOMMU_CMD_COMPLETE_PPR_REQUEST:
>> +        iommu_complete_ppr(s, cmd, type);
>> +        break;
>> +
>> +    case IOMMU_CMD_INVAL_IOMMU_ALL:
>> +        iommu_inval_all(s, cmd, type);
>> +        break;
>> +
>> +    default:
>> +        IOMMU_DPRINTF(COMMAND, "unhandled command %d", type);
>> +        /* log illegal command */
>> +        amd_iommu_log_illegalcom_error(s, type,
>> +                                       s->cmdbuf + s->cmdbuf_head);
>> +        break;
>> +    }
>> +
>> +}
>> +
>> +static void amd_iommu_cmdbuf_run(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    uint64_t *mmio_cmdbuf_head = (uint64_t *)(s->mmior +
>> +                                 IOMMU_MMIO_COMMAND_HEAD);
>> +
>> +    if (!s->cmdbuf_enabled) {
>> +        IOMMU_DPRINTF(COMMAND, "error: IOMMU trying to execute 
>> commands with "
>> +                      "command buffer disabled. IOMMU control value 
>> 0x%"PRIx64,
>> +                      amd_iommu_readq(s, IOMMU_MMIO_CONTROL));
>> +        return;
>> +    }
>> +
>> +    while (s->cmdbuf_head != s->cmdbuf_tail) {
>> +        /* check if there is work to do. */
>> +        IOMMU_DPRINTF(COMMAND, "command buffer head at 0x%"PRIx32 " 
>> command "
>> +                      "buffer tail at 0x%"PRIx32" command buffer 
>> base at 0x%"
>> +                      PRIx64, s->cmdbuf_head, s->cmdbuf_tail, 
>> s->cmdbuf);
>> +         amd_iommu_cmdbuf_exec(s);
>> +         s->cmdbuf_head += IOMMU_COMMAND_SIZE;
>> +         amd_iommu_writeq_raw(s, s->cmdbuf_head, 
>> IOMMU_MMIO_COMMAND_HEAD);
>> +
>> +        /* wrap head pointer */
>> +        if (s->cmdbuf_head >= s->cmdbuf_len * IOMMU_COMMAND_SIZE) {
>> +            s->cmdbuf_head = 0;
>> +        }
>> +    }
>> +
>> +    *mmio_cmdbuf_head = cpu_to_le64(s->cmdbuf_head);
>> +}
>> +
>> +/* System Software might never read from some of this fields but 
>> anyways */
>> +static uint64_t amd_iommu_mmio_read(void *opaque, hwaddr addr, 
>> unsigned size)
>> +{
>> +    AMDIOMMUState *s = opaque;
>> +
>> +    uint64_t val = -1;
>
> The above might work, but it looks a little weird
>
>> +    if (addr + size > IOMMU_MMIO_SIZE) {
>> +        IOMMU_DPRINTF(MMIO, "error: addr outside region: max 0x%"PRIX64
>> +                      ", got 0x%"PRIx64 " %d", 
>> (uint64_t)IOMMU_MMIO_SIZE, addr,
>> +                      size);
>> +        return (uint64_t)-1;
>> +    }
>> +
>> +    if (size == 2) {
>> +        val = amd_iommu_readw(s, addr);
>> +    } else if (size == 4) {
>> +        val = amd_iommu_readl(s, addr);
>> +    } else if (size == 8) {
>> +        val = amd_iommu_readq(s, addr);
>> +    }
>> +
>> +    switch (addr & ~0x07) {
>> +    case IOMMU_MMIO_DEVICE_TABLE:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_DEVICE_TABLE read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                       addr & ~0x07);
>> +        break;
>> +
>> +    case IOMMU_MMIO_COMMAND_BASE:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_BASE read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                      addr & ~0x07);
>> +        break;
>> +
>> +    case IOMMU_MMIO_EVENT_BASE:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_BASE read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                      addr & ~0x07);
>> +        break;
>> +
>> +    case IOMMU_MMIO_CONTROL:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_CONTROL read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                       addr & ~0x07);
>> +        break;
>> +
>> +    case IOMMU_MMIO_EXCL_BASE:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_EXCL_BASE read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                      addr & ~0x07);
>> +        break;
>> +
>> +    case IOMMU_MMIO_EXCL_LIMIT:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_EXCL_LIMIT read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                      addr & ~0x07);
>> +        break;
>> +
>> +    case IOMMU_MMIO_COMMAND_HEAD:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_HEAD read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                      addr & ~0x07);
>> +        break;
>> +
>> +    case IOMMU_MMIO_COMMAND_TAIL:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_TAIL read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                      addr & ~0x07);
>> +        break;
>> +
>> +    case IOMMU_MMIO_EVENT_HEAD:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_HEAD read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                      addr & ~0x07);
>> +        break;
>> +
>> +    case IOMMU_MMIO_EVENT_TAIL:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_TAIL read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                      addr & ~0x07);
>> +        break;
>> +
>> +    case IOMMU_MMIO_STATUS:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_STATUS read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                      addr & ~0x07);
>> +        break;
>> +
>> +    case IOMMU_MMIO_EXT_FEATURES:
>> +        IOMMU_DPRINTF(MMU, "MMIO_EXT_FEATURES read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64 "value 0x%"PRIx64,
>> +                      addr, size, addr & ~0x07, val);
>> +        break;
>> +
>> +    default:
>> +        IOMMU_DPRINTF(MMIO, "UNHANDLED MMIO read addr 0x%"PRIx64
>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>> +                       addr & ~0x07);
>> +    }
>> +    return val;
>> +}
>> +
>> +static void iommu_handle_control_write(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +    /*
>> +     * read whatever is already written in case
>> +     * software is writing in chucks less than 8 bytes
>> +     */
>> +    unsigned long control = amd_iommu_readq(s, IOMMU_MMIO_CONTROL);
>> +    s->enabled = !!(control & IOMMU_MMIO_CONTROL_IOMMUEN);
>> +
>> +    s->ats_enabled = !!(control & IOMMU_MMIO_CONTROL_HTTUNEN);
>> +    s->evtlog_enabled = s->enabled && !!(control &
>> +                        IOMMU_MMIO_CONTROL_EVENTLOGEN);
>> +
>> +    s->evtlog_intr = !!(control & IOMMU_MMIO_CONTROL_EVENTINTEN);
>> +    s->completion_wait_intr = !!(control & 
>> IOMMU_MMIO_CONTROL_COMWAITINTEN);
>> +    s->cmdbuf_enabled = s->enabled && !!(control &
>> +                        IOMMU_MMIO_CONTROL_CMDBUFLEN);
>> +
>> +    /* update the flags depending on the control register */
>> +    if (s->cmdbuf_enabled) {
>> +        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) |=
>> +            IOMMU_MMIO_STATUS_CMDBUF_RUN;
>> +    } else {
>> +        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) &=
>> +            ~IOMMU_MMIO_STATUS_CMDBUF_RUN;
>> +    }
>> +    if (s->evtlog_enabled) {
>> +        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) |=
>> +            IOMMU_MMIO_STATUS_EVT_RUN;
>> +    } else {
>> +        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) &=
>> +            ~IOMMU_MMIO_STATUS_EVT_RUN;
>> +    }
>> +
>> +    IOMMU_DPRINTF(COMMAND, "MMIO_STATUS state 0x%"PRIx64, control);
>> +
>> +    amd_iommu_cmdbuf_run(s);
>> +}
>> +
>> +static inline void iommu_handle_devtab_write(AMDIOMMUState *s)
>> +
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_DEVICE_TABLE);
>> +    s->devtab = (dma_addr_t)(val & IOMMU_MMIO_DEVTAB_BASE_MASK);
>> +
>> +    /* set device table length */
>> +    s->devtab_len = ((val & IOMMU_MMIO_DEVTAB_SIZE_MASK) + 1 *
>> +                    (IOMMU_MMIO_DEVTAB_SIZE_UNIT /
>> +                     IOMMU_MMIO_DEVTAB_ENTRY_SIZE));
>> +}
>> +
>> +static inline void iommu_handle_cmdhead_write(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    s->cmdbuf_head = (dma_addr_t)amd_iommu_readq(s, 
>> IOMMU_MMIO_COMMAND_HEAD)
>> +                     & IOMMU_MMIO_CMDBUF_HEAD_MASK;
>> +    amd_iommu_cmdbuf_run(s);
>> +}
>> +
>> +static inline void iommu_handle_cmdbase_write(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    s->cmdbuf = (dma_addr_t)amd_iommu_readq(s, IOMMU_MMIO_COMMAND_BASE)
>> +                & IOMMU_MMIO_CMDBUF_BASE_MASK;
>> +    s->cmdbuf_len = 1UL << (s->mmior[IOMMU_MMIO_CMDBUF_SIZE_BYTE]
>> +                    & IOMMU_MMIO_CMDBUF_SIZE_MASK);
>> +    s->cmdbuf_head = s->cmdbuf_tail = 0;
>> +
>> +}
>> +
>> +static inline void iommu_handle_cmdtail_write(AMDIOMMUState *s)
>> +{
>> +    s->cmdbuf_tail = amd_iommu_readq(s, IOMMU_MMIO_COMMAND_TAIL)
>> +                     & IOMMU_MMIO_CMDBUF_TAIL_MASK;
>> +    amd_iommu_cmdbuf_run(s);
>> +}
>> +
>> +static inline void iommu_handle_excllim_write(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EXCL_LIMIT);
>> +    s->excl_limit = (val & IOMMU_MMIO_EXCL_LIMIT_MASK) |
>> +                    IOMMU_MMIO_EXCL_LIMIT_LOW;
>> +}
>> +
>> +static inline void iommu_handle_evtbase_write(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EVENT_BASE);
>> +    s->evtlog = val & IOMMU_MMIO_EVTLOG_BASE_MASK;
>> +    s->evtlog_len = 1UL << (*(uint64_t 
>> *)&s->mmior[IOMMU_MMIO_EVTLOG_SIZE_BYTE]
>> +                    & IOMMU_MMIO_EVTLOG_SIZE_MASK);
>> +}
>> +
>> +static inline void iommu_handle_evttail_write(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EVENT_TAIL);
>> +    s->evtlog_tail = val & IOMMU_MMIO_EVTLOG_TAIL_MASK;
>> +}
>> +
>> +static inline void iommu_handle_evthead_write(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EVENT_HEAD);
>> +    s->evtlog_head = val & IOMMU_MMIO_EVTLOG_HEAD_MASK;
>> +}
>> +
>> +static inline void iommu_handle_pprbase_write(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_PPR_BASE);
>> +    s->ppr_log = val & IOMMU_MMIO_PPRLOG_BASE_MASK;
>> +    s->pprlog_len = 1UL << (*(uint64_t 
>> *)&s->mmior[IOMMU_MMIO_PPRLOG_SIZE_BYTE]
>> +                    & IOMMU_MMIO_PPRLOG_SIZE_MASK);
>> +}
>> +
>> +static inline void iommu_handle_pprhead_write(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_PPR_HEAD);
>> +    s->pprlog_head = val & IOMMU_MMIO_PPRLOG_HEAD_MASK;
>> +}
>> +
>> +static inline void iommu_handle_pprtail_write(AMDIOMMUState *s)
>> +{
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_PPR_TAIL);
>> +    s->pprlog_tail = val & IOMMU_MMIO_PPRLOG_TAIL_MASK;
>> +}
>> +
>> +/* FIXME: something might go wrong if System Software writes in chunks
>> + * of one byte but linux writes in chunks of 4 bytes so currently it
>> + * works correctly with linux but will definitely be busted if software
>> + * reads/writes 8 bytes
>> + */
>> +static void amd_iommu_mmio_write(void *opaque, hwaddr addr, uint64_t 
>> val,
>> +                                 unsigned size)
>> +{
>> +
>> +    IOMMU_DPRINTF(COMMAND, "");
>> +
>> +    AMDIOMMUState *s = opaque;
>> +    unsigned long offset = addr & 0x07;
>> +
>> +    if (addr + size > IOMMU_MMIO_SIZE) {
>> +        IOMMU_DPRINTF(MMIO, "error: addr outside region: max 0x%"PRIx64
>> +                      ", got 0x%"PRIx64 " %d", 
>> (uint64_t)IOMMU_MMIO_SIZE, addr,
>> +                      size);
>> +        return;
>> +    }
>> +
>> +    switch (addr & ~0x07) {
>> +    case IOMMU_MMIO_CONTROL:
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr,  val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +
>> +        IOMMU_DPRINTF(COMMAND, "MMIO_CONTROL write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +        iommu_handle_control_write(s);
>> +        break;
>> +
>> +    case IOMMU_MMIO_DEVICE_TABLE:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_DEVICE_TABLE write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr, val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +
>> +       /*  set device table address
>> +        *   This also suffers from inability to tell whether software
>> +        *   is done writing
>> +        */
>> +
>> +        if (offset || (size == 8)) {
>> +            iommu_handle_devtab_write(s);
>> +        }
>> +        break;
>> +
>> +    case IOMMU_MMIO_COMMAND_HEAD:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_HEAD write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr, val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +
>> +        iommu_handle_cmdhead_write(s);
>> +        break;
>> +
>> +    case IOMMU_MMIO_COMMAND_BASE:
>> +        IOMMU_DPRINTF(COMMAND, "MMIO_COMMAND_BASE write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr, val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +
>> +        /* FIXME - make sure System Software has finished writing 
>> incase
>> +         * it writes in chucks less than 8 bytes in a robust way.As for
>> +         * now, this hacks works for the linux driver
>> +         */
>> +        if (offset || (size == 8)) {
>> +            iommu_handle_cmdbase_write(s);
>> +        }
>> +        break;
>> +
>> +    case IOMMU_MMIO_COMMAND_TAIL:
>> +        IOMMU_DPRINTF(COMMAND, "MMIO_COMMAND_TAIL write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr, val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +        iommu_handle_cmdtail_write(s);
>> +        break;
>> +
>> +    case IOMMU_MMIO_EVENT_BASE:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_BASE write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr, val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +        iommu_handle_evtbase_write(s);
>> +        break;
>> +
>> +    case IOMMU_MMIO_EVENT_HEAD:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_HEAD write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr, val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +        iommu_handle_evthead_write(s);
>> +        break;
>> +
>> +    case IOMMU_MMIO_EVENT_TAIL:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_TAIL write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr, val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +        iommu_handle_evttail_write(s);
>> +        break;
>> +
>> +    case IOMMU_MMIO_EXCL_LIMIT:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_EXCL_LIMIT write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr, val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +        iommu_handle_excllim_write(s);
>> +        break;
>> +
>> +        /* PPR log base - unused for now */
>> +    case IOMMU_MMIO_PPR_BASE:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_PPR_BASE write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr, val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +        iommu_handle_pprbase_write(s);
>> +        break;
>> +        /* PPR log head - also unused for now */
>> +    case IOMMU_MMIO_PPR_HEAD:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_PPR_HEAD write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                       addr, size, val, offset);
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr, val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +        iommu_handle_pprhead_write(s);
>> +        break;
>> +        /* PPR log tail - unused for now */
>> +    case IOMMU_MMIO_PPR_TAIL:
>> +        IOMMU_DPRINTF(MMIO, "MMIO_PPR_TAIL write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +        if (size == 2) {
>> +            amd_iommu_writew(s, addr, val);
>> +        } else if (size == 4) {
>> +            amd_iommu_writel(s, addr, val);
>> +        } else if (size == 8) {
>> +            amd_iommu_writeq(s, addr, val);
>> +        }
>> +        iommu_handle_pprtail_write(s);
>> +        break;
>> +
>> +        /* ignore write to ext_features */
>> +    default:
>> +        IOMMU_DPRINTF(MMIO, "UNHANDLED MMIO write addr 0x%"PRIx64
>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>> +                      addr, size, val, offset);
>> +    }
>> +
>> +}
>> +
>> +static inline uint64_t amd_iommu_get_perms(uint64_t entry)
>> +{
>> +    return (entry & (IOMMU_DEV_PERM_READ | IOMMU_DEV_PERM_WRITE)) >>
>> +           IOMMU_DEV_PERM_SHIFT;
>> +}
>> +
>> +AddressSpace *bridge_host_amd_iommu(PCIBus *bus, void *opaque, int 
>> devfn)
>> +{
>> +    AMDIOMMUState *s = opaque;
>> +    AMDIOMMUAddressSpace **iommu_as;
>> +    int bus_num = pci_bus_num(bus);
>> +
>> +    /* just in case */
>
> This comment troubles me, do we need the assert?
>
>> +    assert(0 <= bus_num && bus_num <= PCI_BUS_MAX);
>
> bus_num < PCI_BUS_MAX, right?
>
>> +    assert(0 <= devfn && devfn <= PCI_DEVFN_MAX);
>
> same with devfn I suppose.
>
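i.e., if the asserts stay, presumably something like:

    /* bus_num/devfn are used as array indices, so they must stay strictly
     * below the MAX values */
    assert(0 <= bus_num && bus_num < PCI_BUS_MAX);
    assert(0 <= devfn && devfn < PCI_DEVFN_MAX);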
>> +
>> +    iommu_as = s->address_spaces[bus_num];
>> +
>> +    /* allocate memory during the first run */
>> +    if (!iommu_as) {
>
> Why lazy init? We can do that at AMDIOMMUState init, right?
>
>> +        iommu_as = g_malloc0(sizeof(AMDIOMMUAddressSpace *) * 
>> PCI_DEVFN_MAX);
>> +        s->address_spaces[bus_num] = iommu_as;
>> +    }
>> +
>> +    /* set up IOMMU region */
>> +    if (!iommu_as[devfn]) {
>> +        iommu_as[devfn] = g_malloc0(sizeof(AMDIOMMUAddressSpace));
>
> same here
>
>> +        iommu_as[devfn]->bus_num = (uint8_t)bus_num;
>> +        iommu_as[devfn]->devfn = (uint8_t)devfn;
>> +        iommu_as[devfn]->iommu_state = s;
>> +
>> + memory_region_init_iommu(&iommu_as[devfn]->iommu, OBJECT(s),
>> +                                 &s->iommu_ops, "amd-iommu", 
>> UINT64_MAX);
>> +        address_space_init(&iommu_as[devfn]->as, 
>> &iommu_as[devfn]->iommu,
>> +                           "amd-iommu");
>> +    }
>> +    return &iommu_as[devfn]->as;
>> +}
>> +
>> +/* validate a page table entry */
>> +static bool amd_iommu_validate_dte(AMDIOMMUState *s, uint16_t devid,
>> +                                   uint64_t *dte)
>> +{
>> +    if ((dte[0] & IOMMU_DTE_LOWER_QUAD_RESERVED)
>> +        || (dte[1] & IOMMU_DTE_MIDDLE_QUAD_RESERVED)
>> +        || (dte[2] & IOMMU_DTE_UPPER_QUAD_RESERVED) || dte[3]) {
>> +        amd_iommu_log_illegaldevtab_error(s, devid,
>> +                                s->devtab + devid * 
>> IOMMU_DEVTAB_ENTRY_SIZE, 0);
>> +        return false;
>> +    }
>> +
>> +    return dte[0] & IOMMU_DEV_VALID && (dte[0] & 
>> IOMMU_DEV_TRANSLATION_VALID)
>> +           && (dte[0] & IOMMU_DEV_PT_ROOT_MASK);
>> +}
>> +
>> +/* get a device table entry given the devid */
>> +static bool amd_iommu_get_dte(AMDIOMMUState *s, int devid, uint64_t 
>> *entry)
>> +{
>> +    uint32_t offset = devid * IOMMU_DEVTAB_ENTRY_SIZE;
>> +
>> +    IOMMU_DPRINTF(MMU, "Device Table at 0x%"PRIx64, s->devtab);
>> +
>> +    if (dma_memory_read(&address_space_memory, s->devtab + offset, 
>> entry,
>> +                        IOMMU_DEVTAB_ENTRY_SIZE)) {
>> +        IOMMU_DPRINTF(MMU, "error: fail to access Device Entry 
>> devtab 0x%"PRIx64
>> +                      "offset 0x%"PRIx32, s->devtab, offset);
>> +        /* log ever accessing dte */
>> +        amd_iommu_log_devtab_error(s, devid, s->devtab + offset, 0);
>> +        return false;
>> +    }
>> +
>> +    if (!amd_iommu_validate_dte(s, devid, entry)) {
>> +        IOMMU_DPRINTF(MMU,
>> +                      "Pte entry at 0x%"PRIx64" is invalid", entry[0]);
>> +        return false;
>> +    }
>> +
>> +    return true;
>> +}
>> +
>> +/* get pte translation mode */
>> +static inline uint8_t get_pte_translation_mode(uint64_t pte)
>> +{
>> +    return (pte >> IOMMU_DEV_MODE_RSHIFT) & IOMMU_DEV_MODE_MASK;
>> +}
>> +
>> +static int amd_iommu_page_walk(AMDIOMMUAddressSpace *as, uint64_t *dte,
>> +                               IOMMUTLBEntry *ret, unsigned perms,
>> +                               hwaddr addr)
>> +{
>> +    uint8_t level, oldlevel;
>> +    unsigned present;
>> +    uint64_t pte, pte_addr;
>> +    uint64_t pte_perms;
>> +    pte = dte[0];
>> +
>> +    level = get_pte_translation_mode(pte);
>> +
>> +    if (level >= 7 || level == 0) {
>> +        IOMMU_DPRINTF(MMU, "error: translation level 0x%"PRIu8 " 
>> detected"
>> +                      "while translating 0x%"PRIx64, level, addr);
>> +        return -1;
>> +    }
>> +
>> +    while (level > 0) {
>> +        pte_perms = amd_iommu_get_perms(pte);
>> +        present = pte & 1;
>> +        if (!present || perms != (perms & pte_perms)) {
>> +            amd_iommu_page_fault(as->iommu_state, as->devfn, addr, 
>> perms);
>> +            IOMMU_DPRINTF(MMU, "error: page fault accessing virtual 
>> addr 0x%"
>> +                          PRIx64, addr);
>> +            return -1;
>> +        }
>> +
>> +        /* go to the next lower level */
>> +        pte_addr = pte & IOMMU_DEV_PT_ROOT_MASK;
>> +        /* add offset and load pte */
>> +        pte_addr += ((addr >> (3 + 9 * level)) & 0x1FF) << 3;
>> +        pte = ldq_phys(&address_space_memory, pte_addr);
>> +        oldlevel = level;
>> +        level = get_pte_translation_mode(pte);
>> +
>> +        /* PT is corrupted or not there */
>> +        if (level != oldlevel - 1) {
>> +            return -1;
>> +        }
>> +    }
>> +
>> +    ret->iova = addr & IOMMU_PAGE_MASK_4K;
>> +    ret->translated_addr = (pte & IOMMU_DEV_PT_ROOT_MASK) & 
>> IOMMU_PAGE_MASK_4K;
>> +    ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
>> +    ret->perm = IOMMU_RW;
>> +    return 0;
>> +}
>> +
>> +/* TODO : Mark addresses as Accessed and Dirty */
>
> If you don't mark addresses as dirty, can't this cause the sporadic
> errors in arbitrary programs that Jan talked about?
>
>> +static void amd_iommu_do_translate(AMDIOMMUAddressSpace *as, hwaddr 
>> addr,
>> +                                   bool is_write, IOMMUTLBEntry *ret)
>> +{
>> +    AMDIOMMUState *s = as->iommu_state;
>> +    uint16_t devid = PCI_DEVID(as->bus_num, as->devfn);
>> +    IOMMUIOTLBEntry *iotlb_entry;
>> +    uint8_t err;
>> +    uint64_t entry[4];
>> +
>> +    /* try getting a cache entry first */
>> +    iotlb_entry = amd_iommu_iotlb_lookup(s, addr, as->devfn);
>> +
>> +    if (iotlb_entry) {
>> +        IOMMU_DPRINTF(CACHE, "hit  iotlb devid: %02x:%02x.%x gpa 
>> 0x%"PRIx64
>> +                      " hpa 0x%"PRIx64, PCI_BUS_NUM(devid), 
>> PCI_SLOT(devid),
>> +                      PCI_FUNC(devid), addr, 
>> iotlb_entry->translated_addr);
>> +        ret->iova = addr & IOMMU_PAGE_MASK_4K;
>> +        ret->translated_addr = iotlb_entry->translated_addr;
>> +        ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
>> +        ret->perm = iotlb_entry->perms;
>> +        return;
>> +    } else {
>
> you return from the if clause so you don't need the else
>
>> +        if (!amd_iommu_get_dte(s, devid, entry)) {
>
> Isn't it an error if you did not find the device id?
>
>> +            goto out;
>> +        }
>> +
>> +        err = amd_iommu_page_walk(as, entry, ret,
>> +                                  is_write ? IOMMU_PERM_WRITE : 
>> IOMMU_PERM_READ,
>> +                                  addr);
>> +        if (err) {
>> +            IOMMU_DPRINTF(MMU, "error: hardware error accessing page 
>> tables"
>> +                          " while translating addr 0x%"PRIx64, addr);
>> +            amd_iommu_log_pagetab_error(s, as->devfn, addr, 0);
>> +            goto out;
>> +        }
>> +
>> +        amd_iommu_update_iotlb(s, as->devfn, addr, 
>> ret->translated_addr,
>> +                               ret->perm, entry[1] & 
>> IOMMU_DEV_DOMID_ID_MASK);
>> +        return;
>> +    }
>> +
>> +out:
>> +    ret->iova = addr;
>> +    ret->translated_addr = addr & IOMMU_PAGE_MASK_4K;
>> +    ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
>> +    ret->perm = IOMMU_RW;
>> +    return;
>
> you don't need the above return
>
>> +}
>> +
>> +static IOMMUTLBEntry amd_iommu_translate(MemoryRegion *iommu, hwaddr 
>> addr,
>> +                                         bool is_write)
>> +{
>> +    IOMMU_DPRINTF(GENERAL, "");
>> +
>> +    AMDIOMMUAddressSpace *as = container_of(iommu, 
>> AMDIOMMUAddressSpace, iommu);
>> +    AMDIOMMUState *s = as->iommu_state;
>> +
>> +    IOMMUTLBEntry ret = {
>> +        .target_as = &address_space_memory,
>> +        .iova = addr,
>> +        .translated_addr = 0,
>> +        .addr_mask = ~(hwaddr)0,
>> +        .perm = IOMMU_NONE,
>> +    };
>> +
>> +    if (!s->enabled) {
>> +        /* IOMMU disabled - corresponds to iommu=off not
>> +         * failure to provide any parameter
>> +         */
>> +        ret.iova = addr & IOMMU_PAGE_MASK_4K;
>> +        ret.translated_addr = addr & IOMMU_PAGE_MASK_4K;
>> +        ret.addr_mask = ~IOMMU_PAGE_MASK_4K;
>> +        ret.perm = IOMMU_RW;
>> +        return ret;
>> +    }
>> +
>> +    amd_iommu_do_translate(as, addr, is_write, &ret);
>> +    IOMMU_DPRINTF(MMU, "devid: %02x:%02x.%x gpa 0x%"PRIx64 " hpa 
>> 0x%"PRIx64,
>> +                  as->bus_num, PCI_SLOT(as->devfn), 
>> PCI_FUNC(as->devfn), addr,
>> +                  ret.translated_addr);
>> +
>> +    return ret;
>> +}
>> +
>> +static const MemoryRegionOps mmio_mem_ops = {
>> +    .read = amd_iommu_mmio_read,
>> +    .write = amd_iommu_mmio_write,
>> +    .endianness = DEVICE_LITTLE_ENDIAN,
>> +    .impl = {
>> +        .min_access_size = 1,
>> +        .max_access_size = 8,
>> +        .unaligned = false,
>> +    },
>> +    .valid = {
>> +        .min_access_size = 1,
>> +        .max_access_size = 8,
>> +    }
>> +};
>> +
>> +static void amd_iommu_init(AMDIOMMUState *s)
>> +{
>> +    printf("amd_iommu_init");
>
> you should use the debug macro here
>
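i.e., presumably the existing debug macro rather than a bare printf, along the
lines of what the other functions already do:

    IOMMU_DPRINTF(GENERAL, "");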
>> +
>> +    amd_iommu_iotlb_reset(s);
>> +
>> +    s->iommu_ops.translate = amd_iommu_translate;
>> +
>> +    s->devtab_len = 0;
>> +    s->cmdbuf_len = 0;
>> +    s->cmdbuf_head = 0;
>> +    s->cmdbuf_tail = 0;
>> +    s->evtlog_head = 0;
>> +    s->evtlog_tail = 0;
>> +    s->excl_enabled = false;
>> +    s->excl_allow = false;
>> +    s->mmio_enabled = false;
>> +    s->enabled = false;
>> +    s->ats_enabled = false;
>> +    s->cmdbuf_enabled = false;
>> +
>> +    /* reset MMIO */
>> +    memset(s->mmior, 0, IOMMU_MMIO_SIZE);
>> +    amd_iommu_set_quad(s, IOMMU_MMIO_EXT_FEATURES, IOMMU_EXT_FEATURES,
>> +            0xffffffffffffffef, 0);
>> +    amd_iommu_set_quad(s, IOMMU_MMIO_STATUS, 0, 0x98, 0x67);
>> +    /* reset device ident */
>> +    pci_config_set_vendor_id(s->dev.config, PCI_VENDOR_ID_AMD);
>> +    pci_config_set_device_id(s->dev.config, PCI_DEVICE_ID_RD890_IOMMU);
>> +    pci_config_set_prog_interface(s->dev.config, 00);
>> +    pci_config_set_class(s->dev.config, 0x0806);
>> +
>> +    /* reset IOMMU specific capabilities  */
>> +    pci_set_long(s->dev.config + s->capab_offset, 
>> IOMMU_CAPAB_FEATURES);
>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_BAR_LOW,
>> +                 s->mmio.addr & ~(0xffff0000));
>> +    pci_set_long(s->dev.config + s->capab_offset + 
>> IOMMU_CAPAB_BAR_HIGH,
>> +                (s->mmio.addr & ~(0xffff)) >> 16);
>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_RANGE,
>> +                 0xff000000);
>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_MISC, 
>> 0);
>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_MISC,
>> +            IOMMU_MAX_PH_ADDR | IOMMU_MAX_GVA_ADDR | 
>> IOMMU_MAX_VA_ADDR);
>
> Are all the capabilities read-write? Otherwise you need to set the wmask
> to indicate which fields are writable.

Some bits are r/w, but I don't think they are relevant for now - we could
just leave this as r/o.
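If some of those bits did need to be guest-writable later, a sketch of the
usual pattern would be to open them up in the device's wmask after
pci_add_capability() (which register and bits to open is only an example here):

    /* example only: allow guest writes to the low dword of the MISC register */
    pci_set_long(s->dev.wmask + s->capab_offset + IOMMU_CAPAB_MISC, 0xffffffff);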
>
>> +}
>> +
>> +static void amd_iommu_reset(DeviceState *dev)
>> +{
>> +    AMDIOMMUState *s = AMD_IOMMU_DEVICE(dev);
>> +
>> +    amd_iommu_init(s);
>> +}
>> +
>> +static void amd_iommu_realize(PCIDevice *dev, Error **error)
>> +{
>> +    AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);
>> +
>> +    s->iotlb = g_hash_table_new_full(amd_iommu_uint64_hash,
>> +                                     amd_iommu_uint64_equal, g_free, 
>> g_free);
>> +
>> +    s->capab_offset = pci_add_capability(dev, IOMMU_CAPAB_ID_SEC, 0,
>> +                                         IOMMU_CAPAB_SIZE);
>> +
>> +    /* add msi and hypertransport capabilities */
>> +    pci_add_capability(&s->dev, PCI_CAP_ID_MSI, 0, 
>> IOMMU_CAPAB_REG_SIZE);
>> +    pci_add_capability(&s->dev, PCI_CAP_ID_HT, 0, 
>> IOMMU_CAPAB_REG_SIZE);
>> +
>> +    amd_iommu_init(s);
>> +
>> +    /* set up MMIO */
>> +    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, 
>> "mmio",
>> +                          IOMMU_MMIO_SIZE);
>> +
>> +    if (s->mmio.addr == IOMMU_BASE_ADDR) {
>
> I don't understand why this is needed here. realize is called only once
> in the init process and you set it a few lines below.
>
>> +        return;
>> +    }
>> +
>> +    s->mmio.addr = IOMMU_BASE_ADDR;
>> +    memory_region_add_subregion(get_system_memory(), 
>> IOMMU_BASE_ADDR, &s->mmio);
>> +}
>> +
>> +static const VMStateDescription vmstate_amd_iommu = {
>> +    .name = "amd-iommu",
>> +    .fields  = (VMStateField[]) {
>> +        VMSTATE_PCI_DEVICE(dev, AMDIOMMUState),
>> +        VMSTATE_END_OF_LIST()
>> +    }
>> +};
>> +
>> +static Property amd_iommu_properties[] = {
>> +    DEFINE_PROP_UINT32("version", AMDIOMMUState, version, 2),
>> +    DEFINE_PROP_END_OF_LIST(),
>> +};
>> +
>> +static void amd_iommu_uninit(PCIDevice *dev)
>> +{
>> +    AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);
>> +    amd_iommu_iotlb_reset(s);
>
> at this point you also need to clean up the memory regions you use.

What exactly do you mean by cleaning up the memory regions?

>
>> +}
>> +
>> +static void amd_iommu_class_init(ObjectClass *klass, void* data)
>> +{
>> +    DeviceClass *dc = DEVICE_CLASS(klass);
>> +    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
>> +
>> +    k->realize = amd_iommu_realize;
>> +    k->exit = amd_iommu_uninit;
>> +
>> +    dc->reset = amd_iommu_reset;
>> +    dc->vmsd = &vmstate_amd_iommu;
>> +    dc->props = amd_iommu_properties;
>> +}
>> +
>> +static const TypeInfo amd_iommu = {
>> +    .name = TYPE_AMD_IOMMU_DEVICE,
>> +    .parent = TYPE_PCI_DEVICE,
>> +    .instance_size = sizeof(AMDIOMMUState),
>> +    .class_init = amd_iommu_class_init
>> +};
>> +
>> +static void amd_iommu_register_types(void)
>> +{
>> +    type_register_static(&amd_iommu);
>> +}
>> +
>> +type_init(amd_iommu_register_types);
>> diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h
>> new file mode 100644
>> index 0000000..7d317e1
>> --- /dev/null
>> +++ b/hw/i386/amd_iommu.h
>> @@ -0,0 +1,395 @@
>> +/*
>> + * QEMU emulation of an AMD IOMMU (AMD-Vi)
>> + *
>> + * Copyright (C) 2011 Eduard - Gabriel Munteanu
>> + * Copyright (C) 2015 David Kiarie, <davidkiarie4@gmail.com>
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License as published by
>> + * the Free Software Foundation; either version 2 of the License, or
>> + * (at your option) any later version.
>> +
>> + * This program is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
>> + * GNU General Public License for more details.
>> +
>> + * You should have received a copy of the GNU General Public License 
>> along
>> + * with this program; if not, see <http://www.gnu.org/licenses/>.
>> + */
>> +
>> +#ifndef AMD_IOMMU_H_
>> +#define AMD_IOMMU_H_
>> +
>> +#include "hw/hw.h"
>> +#include "hw/pci/pci.h"
>> +#include "hw/pci/msi.h"
>> +#include "hw/sysbus.h"
>> +#include "sysemu/dma.h"
>> +
>> +/* Capability registers */
>> +#define IOMMU_CAPAB_HEADER            0x00
>> +#define   IOMMU_CAPAB_REV_TYPE        0x02
>> +#define   IOMMU_CAPAB_FLAGS           0x03
>> +#define IOMMU_CAPAB_BAR_LOW           0x04
>> +#define IOMMU_CAPAB_BAR_HIGH          0x08
>> +#define IOMMU_CAPAB_RANGE             0x0C
>> +#define IOMMU_CAPAB_MISC              0x10
>> +#define IOMMU_CAPAB_MISC1             0x14
>> +
>> +#define IOMMU_CAPAB_SIZE              0x18
>> +#define IOMMU_CAPAB_REG_SIZE          0x04
>> +
>> +/* Capability header data */
>> +#define IOMMU_CAPAB_ID_SEC            0xf
>> +#define IOMMU_CAPAB_FLAT_EXT          (1 << 28)
>> +#define IOMMU_CAPAB_EFR_SUP           (1 << 27)
>> +#define IOMMU_CAPAB_FLAG_NPCACHE      (1 << 26)
>> +#define IOMMU_CAPAB_FLAG_HTTUNNEL     (1 << 25)
>> +#define IOMMU_CAPAB_FLAG_IOTLBSUP     (1 << 24)
>> +#define IOMMU_CAPAB_INIT_REV          (1 << 19)
>> +#define IOMMU_CAPAB_INIT_TYPE         (3 << 16)
>> +#define IOMMU_CAPAB_INIT_REV_TYPE     (IOMMU_CAPAB_REV | 
>> IOMMU_CAPAB_TYPE)
>> +#define IOMMU_CAPAB_INIT_FLAGS        (IOMMU_CAPAB_FLAG_NPCACHE | \
>> + IOMMU_CAPAB_FLAG_HTTUNNEL)
>> +#define IOMMU_CAPAB_INIT_MISC         ((64 << 15) | (48 << 8))
>> +#define IOMMU_CAPAB_BAR_MASK          (~((1UL << 14) - 1))
>> +
>> +/* MMIO registers */
>> +#define IOMMU_MMIO_DEVICE_TABLE       0x0000
>> +#define IOMMU_MMIO_COMMAND_BASE       0x0008
>> +#define IOMMU_MMIO_EVENT_BASE         0x0010
>> +#define IOMMU_MMIO_CONTROL            0x0018
>> +#define IOMMU_MMIO_EXCL_BASE          0x0020
>> +#define IOMMU_MMIO_EXCL_LIMIT         0x0028
>> +#define IOMMU_MMIO_EXT_FEATURES       0x0030
>> +#define IOMMU_MMIO_COMMAND_HEAD       0x2000
>> +#define IOMMU_MMIO_COMMAND_TAIL       0x2008
>> +#define IOMMU_MMIO_EVENT_HEAD         0x2010
>> +#define IOMMU_MMIO_EVENT_TAIL         0x2018
>> +#define IOMMU_MMIO_STATUS             0x2020
>> +#define IOMMU_MMIO_PPR_BASE           0x0038
>> +#define IOMMU_MMIO_PPR_HEAD           0x2030
>> +#define IOMMU_MMIO_PPR_TAIL           0x2038
>> +
>> +#define IOMMU_MMIO_SIZE               0x4000
>> +
>> +#define IOMMU_MMIO_DEVTAB_SIZE_MASK   ((1ULL << 12) - 1)
>> +#define IOMMU_MMIO_DEVTAB_BASE_MASK   (((1ULL << 52) - 1) & ~ \
>> + IOMMU_MMIO_DEVTAB_SIZE_MASK)
>> +#define IOMMU_MMIO_DEVTAB_ENTRY_SIZE  32
>> +#define IOMMU_MMIO_DEVTAB_SIZE_UNIT   4096
>> +
>> +/* some of this are similar but just for readability */
>> +#define IOMMU_MMIO_CMDBUF_SIZE_BYTE (IOMMU_MMIO_COMMAND_BASE + 7)
>> +#define IOMMU_MMIO_CMDBUF_SIZE_MASK       0x0F
>> +#define IOMMU_MMIO_CMDBUF_BASE_MASK IOMMU_MMIO_DEVTAB_BASE_MASK
>> +#define IOMMU_MMIO_CMDBUF_DEFAULT_SIZE    8
>> +#define IOMMU_MMIO_CMDBUF_HEAD_MASK       (((1ULL << 19) - 1) & ~0x0F)
>> +#define IOMMU_MMIO_CMDBUF_TAIL_MASK IOMMU_MMIO_EVTLOG_HEAD_MASK
>> +
>> +#define IOMMU_MMIO_EVTLOG_SIZE_BYTE (IOMMU_MMIO_EVENT_BASE + 7)
>> +#define IOMMU_MMIO_EVTLOG_SIZE_MASK IOMMU_MMIO_CMDBUF_SIZE_MASK
>> +#define IOMMU_MMIO_EVTLOG_BASE_MASK IOMMU_MMIO_CMDBUF_BASE_MASK
>> +#define IOMMU_MMIO_EVTLOG_DEFAULT_SIZE IOMMU_MMIO_CMDBUF_DEFAULT_SIZE
>> +#define IOMMU_MMIO_EVTLOG_HEAD_MASK       (((1ULL << 19) - 1) & ~0x0F)
>> +#define IOMMU_MMIO_EVTLOG_TAIL_MASK IOMMU_MMIO_EVTLOG_HEAD_MASK
>> +
>> +#define IOMMU_MMIO_PPRLOG_SIZE_BYTE (IOMMU_MMIO_EVENT_BASE + 7)
>> +#define IOMMU_MMIO_PPRLOG_HEAD_MASK IOMMU_MMIO_EVTLOG_HEAD_MASK
>> +#define IOMMU_MMIO_PPRLOG_TAIL_MASK IOMMU_MMIO_EVTLOG_HEAD_MASK
>> +#define IOMMU_MMIO_PPRLOG_BASE_MASK IOMMU_MMIO_EVTLOG_BASE_MASK
>> +#define IOMMU_MMIO_PPRLOG_SIZE_MASK IOMMU_MMIO_EVTLOG_SIZE_MASK
>> +
>> +#define IOMMU_MMIO_EXCL_BASE_MASK IOMMU_MMIO_DEVTAB_BASE_MASK
>> +#define IOMMU_MMIO_EXCL_ENABLED_MASK      (1ULL << 0)
>> +#define IOMMU_MMIO_EXCL_ALLOW_MASK        (1ULL << 1)
>> +#define IOMMU_MMIO_EXCL_LIMIT_MASK IOMMU_MMIO_DEVTAB_BASE_MASK
>> +#define IOMMU_MMIO_EXCL_LIMIT_LOW         0xFFF
>> +
>> +/* mmio control register flags */
>> +#define IOMMU_MMIO_CONTROL_IOMMUEN        (1ULL << 0)
>> +#define IOMMU_MMIO_CONTROL_HTTUNEN        (1ULL << 1)
>> +#define IOMMU_MMIO_CONTROL_EVENTLOGEN     (1ULL << 2)
>> +#define IOMMU_MMIO_CONTROL_EVENTINTEN     (1ULL << 3)
>> +#define IOMMU_MMIO_CONTROL_COMWAITINTEN   (1ULL << 4)
>> +#define IOMMU_MMIO_CONTROL_PASSPW         (1ULL << 7)
>> +#define IOMMU_MMIO_CONTROL_REPASSPW       (1ULL << 9)
>> +#define IOMMU_MMIO_CONTROL_COHERENT       (1ULL << 10)
>> +#define IOMMU_MMIO_CONTROL_ISOC           (1ULL << 11)
>> +#define IOMMU_MMIO_CONTROL_CMDBUFLEN      (1ULL << 12)
>> +#define IOMMU_MMIO_CONTROL_PPRLOGEN       (1ULL << 13)
>> +#define IOMMU_MMIO_CONTROL_PPRINTEN       (1ULL << 14)
>> +#define IOMMU_MMIO_CONTROL_PPREN          (1ULL << 15)
>> +#define IOMMU_MMIO_CONTROL_GAEN           (1ULL << 16)
>> +#define IOMMU_MMIO_CONTROL_GTEN           (1ULL << 17)
>> +
>> +/* MMIO status register bits */
>> +#define IOMMU_MMIO_STATUS_PPR_OVFE    (1 << 18)
>> +#define IOMMU_MMIO_STATUS_PPR_OVFEB   (1 << 17)
>> +#define IOMMU_MMIO_STATUS_EVT_ACTIVE  (1 << 16)
>> +#define IOMMU_MMIO_STATUS_EVT_OVFB    (1 << 15)
>> +#define IOMMU_MMIO_STATUS_PPR_ACTIVE  (1 << 12)
>> +#define IOMMU_MMIO_STATUS_PPR_OVFB    (1 << 11)
>> +#define IOMMU_MMIO_STATUS_GA_INT      (1 << 10)
>> +#define IOMMU_MMIO_STATUS_GA_RUN      (1 << 9)
>> +#define IOMMU_MMIO_STATUS_GA_OVF      (1 << 8)
>> +#define IOMMU_MMIO_STATUS_PPR_RUN     (1 << 7)
>> +#define IOMMU_MMIO_STATUS_PPR_INT     (1 << 6)
>> +#define IOMMU_MMIO_STATUS_PPR_OVF     (1 << 5)
>> +#define IOMMU_MMIO_STATUS_CMDBUF_RUN  (1 << 4)
>> +#define IOMMU_MMIO_STATUS_EVT_RUN     (1 << 3)
>> +#define IOMMU_MMIO_STATUS_COMP_INT    (1 << 2)
>> +#define IOMMU_MMIO_STATUS_EVT_INT     (1 << 1)
>> +#define IOMMU_MMIO_STATUS_EVT_OVF     (1 << 0)
>> +
>> +#define IOMMU_CMDBUF_ID_BYTE              0x07
>> +#define IOMMU_CMDBUF_ID_RSHIFT            4
>> +
>> +#define IOMMU_CMD_COMPLETION_WAIT         0x01
>> +#define IOMMU_CMD_INVAL_DEVTAB_ENTRY      0x02
>> +#define IOMMU_CMD_INVAL_IOMMU_PAGES       0x03
>> +#define IOMMU_CMD_INVAL_IOTLB_PAGES       0x04
>> +#define IOMMU_CMD_INVAL_INTR_TABLE        0x05
>> +#define IOMMU_CMD_PREFETCH_IOMMU_PAGES    0x06
>> +#define IOMMU_CMD_COMPLETE_PPR_REQUEST    0x07
>> +#define IOMMU_CMD_INVAL_IOMMU_ALL         0x08
>> +
>> +#define IOMMU_DEVTAB_ENTRY_SIZE           32
>> +
>> +/* Device table entry bits 0:63 */
>> +#define IOMMU_DEV_VALID                   (1ULL << 0)
>> +#define IOMMU_DEV_TRANSLATION_VALID       (1ULL << 1)
>> +#define IOMMU_DEV_MODE_MASK               0x7
>> +#define IOMMU_DEV_MODE_RSHIFT             9
>> +#define IOMMU_DEV_PT_ROOT_MASK            0xFFFFFFFFFF000
>> +#define IOMMU_DEV_PT_ROOT_RSHIFT          12
>> +#define IOMMU_DEV_PERM_SHIFT              61
>> +#define IOMMU_DEV_PERM_READ               (1ULL << 61)
>> +#define IOMMU_DEV_PERM_WRITE              (1ULL << 62)
>> +
>> +/* Device table entry bits 64:127 */
>> +#define IOMMU_DEV_DOMID_ID_MASK          ((1ULL << 16) - 1)
>> +#define IOMMU_DEV_IOTLB_SUPPORT           (1ULL << 17)
>> +#define IOMMU_DEV_SUPPRESS_PF             (1ULL << 18)
>> +#define IOMMU_DEV_SUPPRESS_ALL_PF         (1ULL << 19)
>> +#define IOMMU_DEV_IOCTL_MASK              (~3)
>> +#define IOMMU_DEV_IOCTL_RSHIFT            20
>> +#define   IOMMU_DEV_IOCTL_DENY            0
>> +#define   IOMMU_DEV_IOCTL_PASSTHROUGH     1
>> +#define   IOMMU_DEV_IOCTL_TRANSLATE       2
>> +#define IOMMU_DEV_CACHE                   (1ULL << 37)
>> +#define IOMMU_DEV_SNOOP_DISABLE           (1ULL << 38)
>> +#define IOMMU_DEV_EXCL                    (1ULL << 39)
>> +
>> +/* Event codes and flags, as stored in the info field */
>> +#define IOMMU_EVENT_ILLEGAL_DEVTAB_ENTRY  (0x1U << 12)
>> +#define IOMMU_EVENT_IOPF                  (0x2U << 12)
>> +#define   IOMMU_EVENT_IOPF_I              (1U << 3)
>> +#define   IOMMU_EVENT_IOPF_PR             (1U << 4)
>> +#define   IOMMU_EVENT_IOPF_RW             (1U << 5)
>> +#define   IOMMU_EVENT_IOPF_PE             (1U << 6)
>> +#define   IOMMU_EVENT_IOPF_RZ             (1U << 7)
>> +#define   IOMMU_EVENT_IOPF_TR             (1U << 8)
>> +#define IOMMU_EVENT_DEV_TAB_HW_ERROR      (0x3U << 12)
>> +#define IOMMU_EVENT_PAGE_TAB_HW_ERROR     (0x4U << 12)
>> +#define IOMMU_EVENT_ILLEGAL_COMMAND_ERROR (0x5U << 12)
>> +#define IOMMU_EVENT_COMMAND_HW_ERROR      (0x6U << 12)
>> +#define IOMMU_EVENT_IOTLB_INV_TIMEOUT     (0x7U << 12)
>> +#define IOMMU_EVENT_INVALID_DEV_REQUEST   (0x8U << 12)
>> +
>> +#define IOMMU_EVENT_LEN                   16
>> +#define IOMMU_PERM_READ             (1 << 0)
>> +#define IOMMU_PERM_WRITE            (1 << 1)
>> +#define IOMMU_PERM_RW               (IOMMU_PERM_READ | IOMMU_PERM_WRITE)
>> +
>> +/* AMD RD890 Chipset */
>> +#define PCI_DEVICE_ID_RD890_IOMMU   0x20
>
> We keep the pci ids in include/hw/pci/pci_ids.h
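>
> If the dummy id is kept, moving it would just be a one-line addition to
> that file; the name and placement below are only illustrative:
>
>     /* include/hw/pci/pci_ids.h */
>     #define PCI_DEVICE_ID_AMD_IOMMU_RD890    0x0020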
>
>> +
>> +#define IOMMU_FEATURE_PREFETCH            (1ULL << 0)
>> +#define IOMMU_FEATURE_PPR                 (1ULL << 1)
>> +#define IOMMU_FEATURE_NX                  (1ULL << 3)
>> +#define IOMMU_FEATURE_GT                  (1ULL << 4)
>> +#define IOMMU_FEATURE_IA                  (1ULL << 6)
>> +#define IOMMU_FEATURE_GA                  (1ULL << 7)
>> +#define IOMMU_FEATURE_HE                  (1ULL << 8)
>> +#define IOMMU_FEATURE_PC                  (1ULL << 9)
>> +
>> +/* reserved DTE bits */
>> +#define IOMMU_DTE_LOWER_QUAD_RESERVED  0x80300000000000fc
>> +#define IOMMU_DTE_MIDDLE_QUAD_RESERVED 0x0000000000000100
>> +#define IOMMU_DTE_UPPER_QUAD_RESERVED  0x08f0000000000000
>> +
>> +/* IOMMU paging mode */
>> +#define IOMMU_GATS_MODE                 (6ULL <<  12)
>> +#define IOMMU_HATS_MODE                 (6ULL <<  10)
>> +
>> +/* PCI SIG constants */
>> +#define PCI_BUS_MAX 256
>> +#define PCI_SLOT_MAX 32
>> +#define PCI_FUNC_MAX 8
>> +#define PCI_DEVFN_MAX 256
>
> Maybe we can move the PCI macros to include/hw/pci/pci.h, those are 
> not IOMMU specific.
>
>> +
>> +/* IOTLB */
>> +#define IOMMU_IOTLB_MAX_SIZE 1024
>> +#define IOMMU_DEVID_SHIFT    36
>> +
>> +/* extended feature support */
>> +#define IOMMU_EXT_FEATURES (IOMMU_FEATURE_PREFETCH | IOMMU_FEATURE_PPR | \
>> +        IOMMU_FEATURE_NX | IOMMU_FEATURE_IA | IOMMU_FEATURE_GT | \
>> +        IOMMU_FEATURE_GA | IOMMU_FEATURE_HE | IOMMU_GATS_MODE | \
>> +        IOMMU_HATS_MODE)
>> +
>> +/* capabilities header */
>> +#define IOMMU_CAPAB_FEATURES (IOMMU_CAPAB_FLAT_EXT | \
>> +        IOMMU_CAPAB_FLAG_NPCACHE | IOMMU_CAPAB_FLAG_IOTLBSUP \
>> +        | IOMMU_CAPAB_ID_SEC | IOMMU_CAPAB_INIT_TYPE | \
>> +        IOMMU_CAPAB_FLAG_HTTUNNEL |  IOMMU_CAPAB_EFR_SUP)
>> +
>> +/* command constants */
>> +#define IOMMU_COM_STORE_ADDRESS_MASK 0xffffffffffff8
>> +#define IOMMU_COM_COMPLETION_STORE_MASK 0x1
>> +#define IOMMU_COM_COMPLETION_INTR 0x2
>> +#define IOMMU_COM_COMPLETION_DATA_OFF 0x8
>> +#define IOMMU_COMMAND_SIZE 0x10
>> +
>> +/* IOMMU default address */
>> +#define IOMMU_BASE_ADDR 0xfed80000
>> +
>> +/* page management constants */
>> +#define IOMMU_PAGE_SHIFT 12
>> +#define IOMMU_PAGE_SIZE  (1ULL << IOMMU_PAGE_SHIFT)
>> +
>> +#define IOMMU_PAGE_SHIFT_4K 12
>> +#define IOMMU_PAGE_MASK_4K  (~((1ULL << IOMMU_PAGE_SHIFT_4K) - 1))
>> +#define IOMMU_PAGE_SHIFT_2M 21
>> +#define IOMMU_PAGE_MASK_2M  (~((1ULL << IOMMU_PAGE_SHIFT_2M) - 1))
>> +#define IOMMU_PAGE_SHIFT_1G 30
>> +#define IOMMU_PAGE_MASK_1G (~((1ULL << IOMMU_PAGE_SHIFT_1G) - 1))
>> +
>> +#define IOMMU_MAX_VA_ADDR          (48UL << 5)
>> +#define IOMMU_MAX_PH_ADDR          (40UL << 8)
>> +#define IOMMU_MAX_GVA_ADDR         (48UL << 15)
>> +
>> +/* invalidation command device id */
>> +#define IOMMU_INVAL_DEV_ID_SHIFT  32
>> +#define IOMMU_INVAL_DEV_ID_MASK   (~((1UL << IOMMU_INVAL_DEV_ID_SHIFT) - 1))
>> +
>> +/* invalidation address */
>> +#define IOMMU_INVAL_ADDR_MASK_SHIFT 12
>> +#define IOMMU_INVAL_ADDR_MASK     (~((1UL << IOMMU_INVAL_ADDR_MASK_SHIFT) - 1))
>> +
>> +/* invalidation S bit mask */
>> +#define IOMMU_INVAL_ALL(val) ((val) & (0x1))
>> +
>> +/* reserved bits */
>> +#define IOMMU_COMPLETION_WAIT_RSVD    0x0ff000000
>> +#define IOMMU_CMD_INVAL_DEV_RSVD      0xffff00000fffffff
>> +#define IOMMU_INVAL_IOMMU_PAGES_RSVD  0xfff000000fff0000
>> +#define IOMMU_INVAL_IOTLB_PAGES_RSVD  0x00000ff4
>> +#define IOMMU_INVAL_INTR_TABLE_RSVD   0xffff00000fffffff
>> +#define IOMMU_PRF_IOMMU_PAGES_RSVD    0x00ff00000ff00000
>> +#define IOMMU_COMPLETE_PPR_RQ_RSVD    0xffff00000ff00000
>> +#define IOMMU_INVAL_IOMMU_ALL_RSVD    0x0fffffff00000000
>> +
>> +/* command masks - inval iommu pages */
>> +#define IOMMU_INVAL_PAGES_PASID       (~((1UL << 20) - 1))
>> +#define IOMMU_INVAL_PAGES_DOMID       (((1UL << 16) - 1) << 32)
>> +#define IOMMU_INVAL_PAGES_ADDRESS     (~((1UL << 12) - 1))
>> +#define IOMMU_INVAL_PAGES_SBIT        (1UL << 0)
>> +#define IOMMU_INVAL_PAGES_PDE         (1UL << 1)
>> +#define IOMMU_INVAL_PAGES_GN          (1UL << 2)
>> +
>> +/* masks - inval iotlb pages */
>> +#define IOMMU_INVAL_IOTLB_DEVID       (~((1UL << 16) - 1))
>> +#define IOMMU_INVAL_IOTLB_PASID_LOW   (0xff << 15)
>> +#define IOMMU_INVAL_IOTLB_MAXPEND     (0xff << 23)
>> +#define IOMMU_INVAL_IOTLB_QUEUEID     (~((1UL << 16) - 1))
>> +#define IOMMU_INVAL_IOTLB_PASID_HIGH  (0xff << 46)
>> +#define IOMMU_INVAL_IOTLB_GN          IOMMU_INVAL_PAGES_GN
>> +#define IOMMU_INVAL_IOTLB_S           IOMMU_INVAL_PAGES_S
>> +#define IOMMU_INVAL_IOTLB_ADDRESS     IOMMU_INVAL_PAGES_ADDRESS
>> +#define IOMMU_INVAL_IOTLB_MAKEPASID(low, high)
>> +
>> +/* masks - prefetch pages   */
>> +#define IOMMU_PREFETCH_PAGES_DEVID     IOMMU_INVAL_IOTLB_DEVID
>> +#define IOMMU_PREFETCH_PAGES_PFCOUNT IOMMU_INVAL_IOTLB_MAXPEND
>> +
>> +#define TYPE_AMD_IOMMU_DEVICE "amd-iommu"
>> +#define AMD_IOMMU_DEVICE(obj)\
>> +    OBJECT_CHECK(AMDIOMMUState, (obj), TYPE_AMD_IOMMU_DEVICE)
>> +
>> +#define AMD_IOMMU_STR "amd"
>> +
>> +typedef struct AMDIOMMUAddressSpace AMDIOMMUAddressSpace;
>> +
>> +typedef struct AMDIOMMUState {
>> +    PCIDevice dev;               /* The PCI device itself        */
>> +
>> +    uint32_t version;
>> +
>> +    uint32_t capab_offset;       /* capability offset pointer    */
>> +    uint64_t mmio_addr;
>> +    uint8_t *capab;              /* capabilities registers       */
>> +
>> +    bool enabled;                /* IOMMU enabled                */
>> +    bool ats_enabled;            /* address translation enabled  */
>> +    bool cmdbuf_enabled;         /* command buffer enabled       */
>> +    bool evtlog_enabled;         /* event log enabled            */
>> +    bool excl_enabled;
>> +
>> +    dma_addr_t devtab;           /* base address device table    */
>> +    size_t devtab_len;           /* device table length          */
>> +
>> +    dma_addr_t cmdbuf;           /* command buffer base address  */
>> +    uint64_t cmdbuf_len;         /* command buffer length        */
>> +    uint32_t cmdbuf_head;        /* current IOMMU read position  */
>> +    uint32_t cmdbuf_tail;        /* next Software write position */
>> +    bool completion_wait_intr;
>> +
>> +    dma_addr_t evtlog;           /* base address event log       */
>> +    bool evtlog_intr;
>> +    uint32_t evtlog_len;         /* event log length             */
>> +    uint32_t evtlog_head;        /* current IOMMU write position */
>> +    uint32_t evtlog_tail;        /* current Software read position */
>> +
>> +    /* unused for now */
>
> I suggest removing what is not used for now
>
>> +    dma_addr_t excl_base;        /* base DVA - IOMMU exclusion range */
>> +    dma_addr_t excl_limit;       /* limit of IOMMU exclusion range   */
>> +    bool excl_allow;             /* translate accesses to the exclusion range */
>> +    bool excl_enable;            /* exclusion range enabled          */
>> +
>> +    dma_addr_t ppr_log;          /* base address ppr log */
>> +    uint32_t pprlog_len;         /* ppr log len  */
>> +    uint32_t pprlog_head;        /* ppr log head */
>> +    uint32_t pprlog_tail;        /* ppr log tail */
>> +
>> +    MemoryRegion mmio;           /* MMIO region                  */
>> +    uint8_t mmior[IOMMU_MMIO_SIZE];    /* read/write MMIO              */
>> +    uint8_t w1cmask[IOMMU_MMIO_SIZE];  /* read/write 1 clear mask      */
>> +    uint8_t romask[IOMMU_MMIO_SIZE];   /* MMIO read/only mask          */
>> +    bool mmio_enabled;
>> +
>> +    /* IOMMU function */
>> +    MemoryRegionIOMMUOps iommu_ops;
>> +
>> +    /* for each served device */
>> +    AMDIOMMUAddressSpace **address_spaces[PCI_BUS_MAX];
>> +
>> +    /* IOTLB */
>> +    GHashTable *iotlb;
>> +} AMDIOMMUState;
>> +
>> +/*
>> + * bridge_host_amd_iommu: setup an IOMMU function on a bus
>> + *
>> + * called for all PCI devices
>> + *
>> + * @bus: PCI bus to host the IOMMU
>> + * @opaque: opaque pointer to AMDIOMMUState struct
>> + * @devfn: PCI function of the device for which to set up the IOMMU region
>> + *
>> + */
>> +AddressSpace *bridge_host_amd_iommu(PCIBus *bus, void *opaque, int devfn);
>> +
>> +#endif
>> diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
>> index dedf277..61deace 100644
>> --- a/include/hw/pci/pci.h
>> +++ b/include/hw/pci/pci.h
>> @@ -15,6 +15,8 @@
>>
>>   /* PCI bus */
>>
>> +#define PCI_BUS_NUM(x)          (((x) >> 8) & 0xff)
>> +#define PCI_DEVID(bus, devfn)   ((((uint16_t)(bus)) << 8) | (devfn))
>>   #define PCI_DEVFN(slot, func)   ((((slot) & 0x1f) << 3) | ((func) & 0x07))
>>   #define PCI_SLOT(devfn)         (((devfn) >> 3) & 0x1f)
>>   #define PCI_FUNC(devfn)         ((devfn) & 0x07)
>>
>
>
> Thanks,
> Marcel
>
>
Marcel Apfelbaum March 3, 2016, 9:34 a.m. UTC | #6
On 03/02/2016 06:00 AM, David Kiarie wrote:
> On Fri, Feb 26, 2016 at 9:23 AM, David Kiarie <davidkiarie4@gmail.com> wrote:
>> On Thu, Feb 25, 2016 at 6:43 PM, Marcel Apfelbaum <marcel@redhat.com> wrote:
>>> On 02/21/2016 08:10 PM, David Kiarie wrote:
>>>>
>>>> Add AMD IOMMU emulation to Qemu in addition to Intel IOMMU
>>>> The IOMMU does basic translation, error checking and has a
>>>> minimal IOTLB implementation
>>>
>>>
>>> Hi,
>>>
>>>>
>>>> Signed-off-by: David Kiarie <davidkiarie4@gmail.com>
>>>> ---
>>>>    hw/i386/Makefile.objs |    1 +
>>>>    hw/i386/amd_iommu.c   | 1432
>>>> +++++++++++++++++++++++++++++++++++++++++++++++++
>>>>    hw/i386/amd_iommu.h   |  395 ++++++++++++++
>>>>    include/hw/pci/pci.h  |    2 +
>>>>    4 files changed, 1830 insertions(+)
>>>>    create mode 100644 hw/i386/amd_iommu.c
>>>>    create mode 100644 hw/i386/amd_iommu.h
>>>>

[...]

>>>> +
>>>> +AddressSpace *bridge_host_amd_iommu(PCIBus *bus, void *opaque, int devfn)
>>>> +{
>>>> +    AMDIOMMUState *s = opaque;
>>>> +    AMDIOMMUAddressSpace **iommu_as;
>>>> +    int bus_num = pci_bus_num(bus);
>>>> +
>>>> +    /* just in case */
>>>
>>>
>>> This comment troubles me, do we need the assert?
>
> In case the bus_num or devfn is invalid. Anyway, I could get rid of
> this assert.
>
>>>
>>>> +    assert(0 <= bus_num && bus_num <= PCI_BUS_MAX);
>>>
>>>
>>> bus_num < PCI_BUS_MAX, right?
>>>
>>>> +    assert(0 <= devfn && devfn <= PCI_DEVFN_MAX);
>>>
>>>
>>> same with devfn I suppose.
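
If the asserts stay, the upper bounds would then be exclusive, i.e. something
like this (sketch only):

    assert(bus_num >= 0 && bus_num < PCI_BUS_MAX);
    assert(devfn >= 0 && devfn < PCI_DEVFN_MAX);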
>>>
>>>> +
>>>> +    iommu_as = s->address_spaces[bus_num];
>>>> +
>>>> +    /* allocate memory during the first run */
>>>> +    if (!iommu_as) {
>>>
>>>
>>> Why lazy init? We can do that at AMDIOMMUState init, right?
>
> This code has to be called for all emulated devices when the bus is
> initialized. If you have it in AMDIOMMUState init, it will only be
> called for the one or two devices that are already initialized.

I was talking about the allocation, not the method.
You can make the allocations on init/realize so you don't need the

    if (!iommu_as) {
      iommu_as = g_malloc0(...


Thanks,
Marcel
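
As a sketch of that suggestion, the per-devfn pointer arrays could be set up
once in realize so the callback never allocates. This trades roughly
PCI_BUS_MAX * PCI_DEVFN_MAX pointers of memory for simpler lookup code -
whether that is acceptable is a separate question, and the snippet below is
untested:

    static void amd_iommu_realize(PCIDevice *dev, Error **error)
    {
        AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);
        int bus;

        /* allocate every per-bus array of per-devfn pointers up front */
        for (bus = 0; bus < PCI_BUS_MAX; bus++) {
            s->address_spaces[bus] = g_new0(AMDIOMMUAddressSpace *, PCI_DEVFN_MAX);
        }
        /* ... rest of realize unchanged ... */
    }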

>
>>>
>>>> +        iommu_as = g_malloc0(sizeof(AMDIOMMUAddressSpace *) *
>>>> PCI_DEVFN_MAX);
>>>> +        s->address_spaces[bus_num] = iommu_as;
>>>> +    }
>>>> +
>>>> +    /* set up IOMMU region */
>>>> +    if (!iommu_as[devfn]) {
>>>> +        iommu_as[devfn] = g_malloc0(sizeof(AMDIOMMUAddressSpace));
>>>
>>>
>>> same here
>>>
>>>> +        iommu_as[devfn]->bus_num = (uint8_t)bus_num;
>>>> +        iommu_as[devfn]->devfn = (uint8_t)devfn;
>>>> +        iommu_as[devfn]->iommu_state = s;
>>>> +
>>>> +        memory_region_init_iommu(&iommu_as[devfn]->iommu, OBJECT(s),
>>>> +                                 &s->iommu_ops, "amd-iommu", UINT64_MAX);
>>>> +        address_space_init(&iommu_as[devfn]->as, &iommu_as[devfn]->iommu,
>>>> +                           "amd-iommu");
>>>> +    }
>>>> +    return &iommu_as[devfn]->as;
>>>> +}
>>>> +
>>>> +/* validate a page table entry */
>>>> +static bool amd_iommu_validate_dte(AMDIOMMUState *s, uint16_t devid,
>>>> +                                   uint64_t *dte)
>>>> +{
>>>> +    if ((dte[0] & IOMMU_DTE_LOWER_QUAD_RESERVED)
>>>> +        || (dte[1] & IOMMU_DTE_MIDDLE_QUAD_RESERVED)
>>>> +        || (dte[2] & IOMMU_DTE_UPPER_QUAD_RESERVED) || dte[3]) {
>>>> +        amd_iommu_log_illegaldevtab_error(s, devid,
>>>> +                                s->devtab + devid *
>>>> IOMMU_DEVTAB_ENTRY_SIZE, 0);
>>>> +        return false;
>>>> +    }
>>>> +
>>>> +    return dte[0] & IOMMU_DEV_VALID && (dte[0] &
>>>> IOMMU_DEV_TRANSLATION_VALID)
>>>> +           && (dte[0] & IOMMU_DEV_PT_ROOT_MASK);
>>>> +}
>>>> +
>>>> +/* get a device table entry given the devid */
>>>> +static bool amd_iommu_get_dte(AMDIOMMUState *s, int devid, uint64_t
>>>> *entry)
>>>> +{
>>>> +    uint32_t offset = devid * IOMMU_DEVTAB_ENTRY_SIZE;
>>>> +
>>>> +    IOMMU_DPRINTF(MMU, "Device Table at 0x%"PRIx64, s->devtab);
>>>> +
>>>> +    if (dma_memory_read(&address_space_memory, s->devtab + offset, entry,
>>>> +                        IOMMU_DEVTAB_ENTRY_SIZE)) {
>>>> +        IOMMU_DPRINTF(MMU, "error: fail to access Device Entry devtab
>>>> 0x%"PRIx64
>>>> +                      "offset 0x%"PRIx32, s->devtab, offset);
>>>> +        /* log ever accessing dte */
>>>> +        amd_iommu_log_devtab_error(s, devid, s->devtab + offset, 0);
>>>> +        return false;
>>>> +    }
>>>> +
>>>> +    if (!amd_iommu_validate_dte(s, devid, entry)) {
>>>> +        IOMMU_DPRINTF(MMU,
>>>> +                      "Pte entry at 0x%"PRIx64" is invalid", entry[0]);
>>>> +        return false;
>>>> +    }
>>>> +
>>>> +    return true;
>>>> +}
>>>> +
>>>> +/* get pte translation mode */
>>>> +static inline uint8_t get_pte_translation_mode(uint64_t pte)
>>>> +{
>>>> +    return (pte >> IOMMU_DEV_MODE_RSHIFT) & IOMMU_DEV_MODE_MASK;
>>>> +}
>>>> +
>>>> +static int amd_iommu_page_walk(AMDIOMMUAddressSpace *as, uint64_t *dte,
>>>> +                               IOMMUTLBEntry *ret, unsigned perms,
>>>> +                               hwaddr addr)
>>>> +{
>>>> +    uint8_t level, oldlevel;
>>>> +    unsigned present;
>>>> +    uint64_t pte, pte_addr;
>>>> +    uint64_t pte_perms;
>>>> +    pte = dte[0];
>>>> +
>>>> +    level = get_pte_translation_mode(pte);
>>>> +
>>>> +    if (level >= 7 || level == 0) {
>>>> +        IOMMU_DPRINTF(MMU, "error: translation level 0x%"PRIu8 "
>>>> detected"
>>>> +                      "while translating 0x%"PRIx64, level, addr);
>>>> +        return -1;
>>>> +    }
>>>> +
>>>> +    while (level > 0) {
>>>> +        pte_perms = amd_iommu_get_perms(pte);
>>>> +        present = pte & 1;
>>>> +        if (!present || perms != (perms & pte_perms)) {
>>>> +            amd_iommu_page_fault(as->iommu_state, as->devfn, addr,
>>>> perms);
>>>> +            IOMMU_DPRINTF(MMU, "error: page fault accessing virtual addr
>>>> 0x%"
>>>> +                          PRIx64, addr);
>>>> +            return -1;
>>>> +        }
>>>> +
>>>> +        /* go to the next lower level */
>>>> +        pte_addr = pte & IOMMU_DEV_PT_ROOT_MASK;
>>>> +        /* add offset and load pte */
>>>> +        pte_addr += ((addr >> (3 + 9 * level)) & 0x1FF) << 3;
>>>> +        pte = ldq_phys(&address_space_memory, pte_addr);
>>>> +        oldlevel = level;
>>>> +        level = get_pte_translation_mode(pte);
>>>> +
>>>> +        /* PT is corrupted or not there */
>>>> +        if (level != oldlevel - 1) {
>>>> +            return -1;
>>>> +        }
>>>> +    }
>>>> +
>>>> +    ret->iova = addr & IOMMU_PAGE_MASK_4K;
>>>> +    ret->translated_addr = (pte & IOMMU_DEV_PT_ROOT_MASK) &
>>>> IOMMU_PAGE_MASK_4K;
>>>> +    ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
>>>> +    ret->perm = IOMMU_RW;
>>>> +    return 0;
>>>> +}
>>>> +
>>>> +/* TODO : Mark addresses as Accessed and Dirty */
>>>
>>>
>>> If you don't mark addresses as dirty, can't this cause the sporadic errors
>>> of arbitrary programs Jan talked about?
>>
>> I don't think this is the issue; I seem to be receiving wrong 'host
>> physical addresses' in the last few kernel versions. This issue is not
>> present in older kernels.
>>
>>>
>>>> +static void amd_iommu_do_translate(AMDIOMMUAddressSpace *as, hwaddr addr,
>>>> +                                   bool is_write, IOMMUTLBEntry *ret)
>>>> +{
>>>> +    AMDIOMMUState *s = as->iommu_state;
>>>> +    uint16_t devid = PCI_DEVID(as->bus_num, as->devfn);
>>>> +    IOMMUIOTLBEntry *iotlb_entry;
>>>> +    uint8_t err;
>>>> +    uint64_t entry[4];
>>>> +
>>>> +    /* try getting a cache entry first */
>>>> +    iotlb_entry = amd_iommu_iotlb_lookup(s, addr, as->devfn);
>>>> +
>>>> +    if (iotlb_entry) {
>>>> +        IOMMU_DPRINTF(CACHE, "hit  iotlb devid: %02x:%02x.%x gpa
>>>> 0x%"PRIx64
>>>> +                      " hpa 0x%"PRIx64, PCI_BUS_NUM(devid),
>>>> PCI_SLOT(devid),
>>>> +                      PCI_FUNC(devid), addr,
>>>> iotlb_entry->translated_addr);
>>>> +        ret->iova = addr & IOMMU_PAGE_MASK_4K;
>>>> +        ret->translated_addr = iotlb_entry->translated_addr;
>>>> +        ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
>>>> +        ret->perm = iotlb_entry->perms;
>>>> +        return;
>>>> +    } else {
>>>
>>>
>>> you return from the if clause so you don't need the else
>>>
>>>> +        if (!amd_iommu_get_dte(s, devid, entry)) {
>>>
>>>
>>> Is it not an error if you did not find the device id?
>>>
>>>> +            goto out;
>>>> +        }
>>>> +
>>>> +        err = amd_iommu_page_walk(as, entry, ret,
>>>> +                                  is_write ? IOMMU_PERM_WRITE :
>>>> IOMMU_PERM_READ,
>>>> +                                  addr);
>>>> +        if (err) {
>>>> +            IOMMU_DPRINTF(MMU, "error: hardware error accessing page
>>>> tables"
>>>> +                          " while translating addr 0x%"PRIx64, addr);
>>>> +            amd_iommu_log_pagetab_error(s, as->devfn, addr, 0);
>>>> +            goto out;
>>>> +        }
>>>> +
>>>> +        amd_iommu_update_iotlb(s, as->devfn, addr, ret->translated_addr,
>>>> +                               ret->perm, entry[1] &
>>>> IOMMU_DEV_DOMID_ID_MASK);
>>>> +        return;
>>>> +    }
>>>> +
>>>> +out:
>>>> +    ret->iova = addr;
>>>> +    ret->translated_addr = addr & IOMMU_PAGE_MASK_4K;
>>>> +    ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
>>>> +    ret->perm = IOMMU_RW;
>>>> +    return;
>>>
>>>
>>> you don't need the above return
>>>
>>>> +}
>>>> +
>>>> +static IOMMUTLBEntry amd_iommu_translate(MemoryRegion *iommu, hwaddr
>>>> addr,
>>>> +                                         bool is_write)
>>>> +{
>>>> +    IOMMU_DPRINTF(GENERAL, "");
>>>> +
>>>> +    AMDIOMMUAddressSpace *as = container_of(iommu, AMDIOMMUAddressSpace,
>>>> iommu);
>>>> +    AMDIOMMUState *s = as->iommu_state;
>>>> +
>>>> +    IOMMUTLBEntry ret = {
>>>> +        .target_as = &address_space_memory,
>>>> +        .iova = addr,
>>>> +        .translated_addr = 0,
>>>> +        .addr_mask = ~(hwaddr)0,
>>>> +        .perm = IOMMU_NONE,
>>>> +    };
>>>> +
>>>> +    if (!s->enabled) {
>>>> +        /* IOMMU disabled - corresponds to iommu=off not
>>>> +         * failure to provide any parameter
>>>> +         */
>>>> +        ret.iova = addr & IOMMU_PAGE_MASK_4K;
>>>> +        ret.translated_addr = addr & IOMMU_PAGE_MASK_4K;
>>>> +        ret.addr_mask = ~IOMMU_PAGE_MASK_4K;
>>>> +        ret.perm = IOMMU_RW;
>>>> +        return ret;
>>>> +    }
>>>> +
>>>> +    amd_iommu_do_translate(as, addr, is_write, &ret);
>>>> +    IOMMU_DPRINTF(MMU, "devid: %02x:%02x.%x gpa 0x%"PRIx64 " hpa
>>>> 0x%"PRIx64,
>>>> +                  as->bus_num, PCI_SLOT(as->devfn), PCI_FUNC(as->devfn),
>>>> addr,
>>>> +                  ret.translated_addr);
>>>> +
>>>> +    return ret;
>>>> +}
>>>> +
>>>> +static const MemoryRegionOps mmio_mem_ops = {
>>>> +    .read = amd_iommu_mmio_read,
>>>> +    .write = amd_iommu_mmio_write,
>>>> +    .endianness = DEVICE_LITTLE_ENDIAN,
>>>> +    .impl = {
>>>> +        .min_access_size = 1,
>>>> +        .max_access_size = 8,
>>>> +        .unaligned = false,
>>>> +    },
>>>> +    .valid = {
>>>> +        .min_access_size = 1,
>>>> +        .max_access_size = 8,
>>>> +    }
>>>> +};
>>>> +
>>>> +static void amd_iommu_init(AMDIOMMUState *s)
>>>> +{
>>>> +    printf("amd_iommu_init");
>>>
>>>
>>> you should use the debug macro here
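>>>
>>> i.e. since IOMMU_DPRINTF() already prints __func__, something as simple
>>> as the following would do (sketch):
>>>
>>>     IOMMU_DPRINTF(GENERAL, "");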
>>>
>>>> +
>>>> +    amd_iommu_iotlb_reset(s);
>>>> +
>>>> +    s->iommu_ops.translate = amd_iommu_translate;
>>>> +
>>>> +    s->devtab_len = 0;
>>>> +    s->cmdbuf_len = 0;
>>>> +    s->cmdbuf_head = 0;
>>>> +    s->cmdbuf_tail = 0;
>>>> +    s->evtlog_head = 0;
>>>> +    s->evtlog_tail = 0;
>>>> +    s->excl_enabled = false;
>>>> +    s->excl_allow = false;
>>>> +    s->mmio_enabled = false;
>>>> +    s->enabled = false;
>>>> +    s->ats_enabled = false;
>>>> +    s->cmdbuf_enabled = false;
>>>> +
>>>> +    /* reset MMIO */
>>>> +    memset(s->mmior, 0, IOMMU_MMIO_SIZE);
>>>> +    amd_iommu_set_quad(s, IOMMU_MMIO_EXT_FEATURES, IOMMU_EXT_FEATURES,
>>>> +            0xffffffffffffffef, 0);
>>>> +    amd_iommu_set_quad(s, IOMMU_MMIO_STATUS, 0, 0x98, 0x67);
>>>> +    /* reset device ident */
>>>> +    pci_config_set_vendor_id(s->dev.config, PCI_VENDOR_ID_AMD);
>>>> +    pci_config_set_device_id(s->dev.config, PCI_DEVICE_ID_RD890_IOMMU);
>>>> +    pci_config_set_prog_interface(s->dev.config, 00);
>>>> +    pci_config_set_class(s->dev.config, 0x0806);
>>>> +
>>>> +    /* reset IOMMU specific capabilities  */
>>>> +    pci_set_long(s->dev.config + s->capab_offset, IOMMU_CAPAB_FEATURES);
>>>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_BAR_LOW,
>>>> +                 s->mmio.addr & ~(0xffff0000));
>>>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_BAR_HIGH,
>>>> +                (s->mmio.addr & ~(0xffff)) >> 16);
>>>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_RANGE,
>>>> +                 0xff000000);
>>>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_MISC, 0);
>>>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_MISC,
>>>> +            IOMMU_MAX_PH_ADDR | IOMMU_MAX_GVA_ADDR | IOMMU_MAX_VA_ADDR);
>>>
>>>
>>> All the capabilities are read-write? Otherwise you need to set the wmask
>>> to indicate what fields are writable.
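>>>
>>> A sketch of what that could look like, assuming the whole capability
>>> block should be read-only for the guest (untested, and done after
>>> pci_add_capability()):
>>>
>>>     /* capability registers are not guest-writable */
>>>     memset(s->dev.wmask + s->capab_offset, 0, IOMMU_CAPAB_SIZE);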
>>>
>>>> +}
>>>> +
>>>> +static void amd_iommu_reset(DeviceState *dev)
>>>> +{
>>>> +    AMDIOMMUState *s = AMD_IOMMU_DEVICE(dev);
>>>> +
>>>> +    amd_iommu_init(s);
>>>> +}
>>>> +
>>>> +static void amd_iommu_realize(PCIDevice *dev, Error **error)
>>>> +{
>>>> +    AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);
>>>> +
>>>> +    s->iotlb = g_hash_table_new_full(amd_iommu_uint64_hash,
>>>> +                                     amd_iommu_uint64_equal, g_free,
>>>> g_free);
>>>> +
>>>> +    s->capab_offset = pci_add_capability(dev, IOMMU_CAPAB_ID_SEC, 0,
>>>> +                                         IOMMU_CAPAB_SIZE);
>>>> +
>>>> +    /* add msi and hypertransport capabilities */
>>>> +    pci_add_capability(&s->dev, PCI_CAP_ID_MSI, 0, IOMMU_CAPAB_REG_SIZE);
>>>> +    pci_add_capability(&s->dev, PCI_CAP_ID_HT, 0, IOMMU_CAPAB_REG_SIZE);
>>>> +
>>>> +    amd_iommu_init(s);
>>>> +
>>>> +    /* set up MMIO */
>>>> +    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "mmio",
>>>> +                          IOMMU_MMIO_SIZE);
>>>> +
>>>> +    if (s->mmio.addr == IOMMU_BASE_ADDR) {
>>>
>>>
>>>> I don't understand why this check is needed here. realize is called only
>>>> once in the init process, and you set the address a few lines below.
>>>
>>>> +        return;
>>>> +    }
>>>> +
>>>> +    s->mmio.addr = IOMMU_BASE_ADDR;
>>>> +    memory_region_add_subregion(get_system_memory(), IOMMU_BASE_ADDR,
>>>> &s->mmio);
>>>> +}
>>>> +
>>>> +static const VMStateDescription vmstate_amd_iommu = {
>>>> +    .name = "amd-iommu",
>>>> +    .fields  = (VMStateField[]) {
>>>> +        VMSTATE_PCI_DEVICE(dev, AMDIOMMUState),
>>>> +        VMSTATE_END_OF_LIST()
>>>> +    }
>>>> +};
>>>> +
>>>> +static Property amd_iommu_properties[] = {
>>>> +    DEFINE_PROP_UINT32("version", AMDIOMMUState, version, 2),
>>>> +    DEFINE_PROP_END_OF_LIST(),
>>>> +};
>>>> +
>>>> +static void amd_iommu_uninit(PCIDevice *dev)
>>>> +{
>>>> +    AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);
>>>> +    amd_iommu_iotlb_reset(s);
>>>
>>>
>>> at this point you also need to clean up the memory regions you use.
>>>
>>>> +}
>>>> +
>>>> +static void amd_iommu_class_init(ObjectClass *klass, void* data)
>>>> +{
>>>> +    DeviceClass *dc = DEVICE_CLASS(klass);
>>>> +    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
>>>> +
>>>> +    k->realize = amd_iommu_realize;
>>>> +    k->exit = amd_iommu_uninit;
>>>> +
>>>> +    dc->reset = amd_iommu_reset;
>>>> +    dc->vmsd = &vmstate_amd_iommu;
>>>> +    dc->props = amd_iommu_properties;
>>>> +}
>>>> +
>>>> +static const TypeInfo amd_iommu = {
>>>> +    .name = TYPE_AMD_IOMMU_DEVICE,
>>>> +    .parent = TYPE_PCI_DEVICE,
>>>> +    .instance_size = sizeof(AMDIOMMUState),
>>>> +    .class_init = amd_iommu_class_init
>>>> +};
>>>> +
>>>> +static void amd_iommu_register_types(void)
>>>> +{
>>>> +    type_register_static(&amd_iommu);
>>>> +}
>>>> +
>>>> +type_init(amd_iommu_register_types);
>>>> diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h
>>>> new file mode 100644
>>>> index 0000000..7d317e1
>>>> --- /dev/null
>>>> +++ b/hw/i386/amd_iommu.h
>>>> @@ -0,0 +1,395 @@
>>>> +/*
>>>> + * QEMU emulation of an AMD IOMMU (AMD-Vi)
>>>> + *
>>>> + * Copyright (C) 2011 Eduard - Gabriel Munteanu
>>>> + * Copyright (C) 2015 David Kiarie, <davidkiarie4@gmail.com>
>>>> + *
>>>> + * This program is free software; you can redistribute it and/or modify
>>>> + * it under the terms of the GNU General Public License as published by
>>>> + * the Free Software Foundation; either version 2 of the License, or
>>>> + * (at your option) any later version.
>>>> +
>>>> + * This program is distributed in the hope that it will be useful,
>>>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>>>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
>>>> + * GNU General Public License for more details.
>>>> +
>>>> + * You should have received a copy of the GNU General Public License
>>>> along
>>>> + * with this program; if not, see <http://www.gnu.org/licenses/>.
>>>> + */
>>>> +
>>>> +#ifndef AMD_IOMMU_H_
>>>> +#define AMD_IOMMU_H_
>>>> +
>>>> +#include "hw/hw.h"
>>>> +#include "hw/pci/pci.h"
>>>> +#include "hw/pci/msi.h"
>>>> +#include "hw/sysbus.h"
>>>> +#include "sysemu/dma.h"
>>>> +
>>>> +/* Capability registers */
>>>> +#define IOMMU_CAPAB_HEADER            0x00
>>>> +#define   IOMMU_CAPAB_REV_TYPE        0x02
>>>> +#define   IOMMU_CAPAB_FLAGS           0x03
>>>> +#define IOMMU_CAPAB_BAR_LOW           0x04
>>>> +#define IOMMU_CAPAB_BAR_HIGH          0x08
>>>> +#define IOMMU_CAPAB_RANGE             0x0C
>>>> +#define IOMMU_CAPAB_MISC              0x10
>>>> +#define IOMMU_CAPAB_MISC1             0x14
>>>> +
>>>> +#define IOMMU_CAPAB_SIZE              0x18
>>>> +#define IOMMU_CAPAB_REG_SIZE          0x04
>>>> +
>>>> +/* Capability header data */
>>>> +#define IOMMU_CAPAB_ID_SEC            0xf
>>>> +#define IOMMU_CAPAB_FLAT_EXT          (1 << 28)
>>>> +#define IOMMU_CAPAB_EFR_SUP           (1 << 27)
>>>> +#define IOMMU_CAPAB_FLAG_NPCACHE      (1 << 26)
>>>> +#define IOMMU_CAPAB_FLAG_HTTUNNEL     (1 << 25)
>>>> +#define IOMMU_CAPAB_FLAG_IOTLBSUP     (1 << 24)
>>>> +#define IOMMU_CAPAB_INIT_REV          (1 << 19)
>>>> +#define IOMMU_CAPAB_INIT_TYPE         (3 << 16)
>>>> +#define IOMMU_CAPAB_INIT_REV_TYPE     (IOMMU_CAPAB_REV |
>>>> IOMMU_CAPAB_TYPE)
>>>> +#define IOMMU_CAPAB_INIT_FLAGS        (IOMMU_CAPAB_FLAG_NPCACHE | \
>>>> +                                       IOMMU_CAPAB_FLAG_HTTUNNEL)
>>>> +#define IOMMU_CAPAB_INIT_MISC         ((64 << 15) | (48 << 8))
>>>> +#define IOMMU_CAPAB_BAR_MASK          (~((1UL << 14) - 1))
>>>> +
>>>> +/* MMIO registers */
>>>> +#define IOMMU_MMIO_DEVICE_TABLE       0x0000
>>>> +#define IOMMU_MMIO_COMMAND_BASE       0x0008
>>>> +#define IOMMU_MMIO_EVENT_BASE         0x0010
>>>> +#define IOMMU_MMIO_CONTROL            0x0018
>>>> +#define IOMMU_MMIO_EXCL_BASE          0x0020
>>>> +#define IOMMU_MMIO_EXCL_LIMIT         0x0028
>>>> +#define IOMMU_MMIO_EXT_FEATURES       0x0030
>>>> +#define IOMMU_MMIO_COMMAND_HEAD       0x2000
>>>> +#define IOMMU_MMIO_COMMAND_TAIL       0x2008
>>>> +#define IOMMU_MMIO_EVENT_HEAD         0x2010
>>>> +#define IOMMU_MMIO_EVENT_TAIL         0x2018
>>>> +#define IOMMU_MMIO_STATUS             0x2020
>>>> +#define IOMMU_MMIO_PPR_BASE           0x0038
>>>> +#define IOMMU_MMIO_PPR_HEAD           0x2030
>>>> +#define IOMMU_MMIO_PPR_TAIL           0x2038
>>>> +
>>>> +#define IOMMU_MMIO_SIZE               0x4000
>>>> +
>>>> +#define IOMMU_MMIO_DEVTAB_SIZE_MASK   ((1ULL << 12) - 1)
>>>> +#define IOMMU_MMIO_DEVTAB_BASE_MASK   (((1ULL << 52) - 1) & ~ \
>>>> +                                       IOMMU_MMIO_DEVTAB_SIZE_MASK)
>>>> +#define IOMMU_MMIO_DEVTAB_ENTRY_SIZE  32
>>>> +#define IOMMU_MMIO_DEVTAB_SIZE_UNIT   4096
>>>> +
>>>> +/* some of this are similar but just for readability */
>>>> +#define IOMMU_MMIO_CMDBUF_SIZE_BYTE       (IOMMU_MMIO_COMMAND_BASE + 7)
>>>> +#define IOMMU_MMIO_CMDBUF_SIZE_MASK       0x0F
>>>> +#define IOMMU_MMIO_CMDBUF_BASE_MASK       IOMMU_MMIO_DEVTAB_BASE_MASK
>>>> +#define IOMMU_MMIO_CMDBUF_DEFAULT_SIZE    8
>>>> +#define IOMMU_MMIO_CMDBUF_HEAD_MASK       (((1ULL << 19) - 1) & ~0x0F)
>>>> +#define IOMMU_MMIO_CMDBUF_TAIL_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
>>>> +
>>>> +#define IOMMU_MMIO_EVTLOG_SIZE_BYTE       (IOMMU_MMIO_EVENT_BASE + 7)
>>>> +#define IOMMU_MMIO_EVTLOG_SIZE_MASK       IOMMU_MMIO_CMDBUF_SIZE_MASK
>>>> +#define IOMMU_MMIO_EVTLOG_BASE_MASK       IOMMU_MMIO_CMDBUF_BASE_MASK
>>>> +#define IOMMU_MMIO_EVTLOG_DEFAULT_SIZE    IOMMU_MMIO_CMDBUF_DEFAULT_SIZE
>>>> +#define IOMMU_MMIO_EVTLOG_HEAD_MASK       (((1ULL << 19) - 1) & ~0x0F)
>>>> +#define IOMMU_MMIO_EVTLOG_TAIL_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
>>>> +
>>>> +#define IOMMU_MMIO_PPRLOG_SIZE_BYTE       (IOMMU_MMIO_EVENT_BASE + 7)
>>>> +#define IOMMU_MMIO_PPRLOG_HEAD_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
>>>> +#define IOMMU_MMIO_PPRLOG_TAIL_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
>>>> +#define IOMMU_MMIO_PPRLOG_BASE_MASK       IOMMU_MMIO_EVTLOG_BASE_MASK
>>>> +#define IOMMU_MMIO_PPRLOG_SIZE_MASK       IOMMU_MMIO_EVTLOG_SIZE_MASK
>>>> +
>>>> +#define IOMMU_MMIO_EXCL_BASE_MASK         IOMMU_MMIO_DEVTAB_BASE_MASK
>>>> +#define IOMMU_MMIO_EXCL_ENABLED_MASK      (1ULL << 0)
>>>> +#define IOMMU_MMIO_EXCL_ALLOW_MASK        (1ULL << 1)
>>>> +#define IOMMU_MMIO_EXCL_LIMIT_MASK        IOMMU_MMIO_DEVTAB_BASE_MASK
>>>> +#define IOMMU_MMIO_EXCL_LIMIT_LOW         0xFFF
>>>> +
>>>> +/* mmio control register flags */
>>>> +#define IOMMU_MMIO_CONTROL_IOMMUEN        (1ULL << 0)
>>>> +#define IOMMU_MMIO_CONTROL_HTTUNEN        (1ULL << 1)
>>>> +#define IOMMU_MMIO_CONTROL_EVENTLOGEN     (1ULL << 2)
>>>> +#define IOMMU_MMIO_CONTROL_EVENTINTEN     (1ULL << 3)
>>>> +#define IOMMU_MMIO_CONTROL_COMWAITINTEN   (1ULL << 4)
>>>> +#define IOMMU_MMIO_CONTROL_PASSPW         (1ULL << 7)
>>>> +#define IOMMU_MMIO_CONTROL_REPASSPW       (1ULL << 9)
>>>> +#define IOMMU_MMIO_CONTROL_COHERENT       (1ULL << 10)
>>>> +#define IOMMU_MMIO_CONTROL_ISOC           (1ULL << 11)
>>>> +#define IOMMU_MMIO_CONTROL_CMDBUFLEN      (1ULL << 12)
>>>> +#define IOMMU_MMIO_CONTROL_PPRLOGEN       (1ULL << 13)
>>>> +#define IOMMU_MMIO_CONTROL_PPRINTEN       (1ULL << 14)
>>>> +#define IOMMU_MMIO_CONTROL_PPREN          (1ULL << 15)
>>>> +#define IOMMU_MMIO_CONTROL_GAEN           (1ULL << 16)
>>>> +#define IOMMU_MMIO_CONTROL_GTEN           (1ULL << 17)
>>>> +
>>>> +/* MMIO status register bits */
>>>> +#define IOMMU_MMIO_STATUS_PPR_OVFE    (1 << 18)
>>>> +#define IOMMU_MMIO_STATUS_PPR_OVFEB   (1 << 17)
>>>> +#define IOMMU_MMIO_STATUS_EVT_ACTIVE  (1 << 16)
>>>> +#define IOMMU_MMIO_STATUS_EVT_OVFB    (1 << 15)
>>>> +#define IOMMU_MMIO_STATUS_PPR_ACTIVE  (1 << 12)
>>>> +#define IOMMU_MMIO_STATUS_PPR_OVFB    (1 << 11)
>>>> +#define IOMMU_MMIO_STATUS_GA_INT      (1 << 10)
>>>> +#define IOMMU_MMIO_STATUS_GA_RUN      (1 << 9)
>>>> +#define IOMMU_MMIO_STATUS_GA_OVF      (1 << 8)
>>>> +#define IOMMU_MMIO_STATUS_PPR_RUN     (1 << 7)
>>>> +#define IOMMU_MMIO_STATUS_PPR_INT     (1 << 6)
>>>> +#define IOMMU_MMIO_STATUS_PPR_OVF     (1 << 5)
>>>> +#define IOMMU_MMIO_STATUS_CMDBUF_RUN  (1 << 4)
>>>> +#define IOMMU_MMIO_STATUS_EVT_RUN     (1 << 3)
>>>> +#define IOMMU_MMIO_STATUS_COMP_INT    (1 << 2)
>>>> +#define IOMMU_MMIO_STATUS_EVT_INT     (1 << 1)
>>>> +#define IOMMU_MMIO_STATUS_EVT_OVF     (1 << 0)
>>>> +
>>>> +#define IOMMU_CMDBUF_ID_BYTE              0x07
>>>> +#define IOMMU_CMDBUF_ID_RSHIFT            4
>>>> +
>>>> +#define IOMMU_CMD_COMPLETION_WAIT         0x01
>>>> +#define IOMMU_CMD_INVAL_DEVTAB_ENTRY      0x02
>>>> +#define IOMMU_CMD_INVAL_IOMMU_PAGES       0x03
>>>> +#define IOMMU_CMD_INVAL_IOTLB_PAGES       0x04
>>>> +#define IOMMU_CMD_INVAL_INTR_TABLE        0x05
>>>> +#define IOMMU_CMD_PREFETCH_IOMMU_PAGES    0x06
>>>> +#define IOMMU_CMD_COMPLETE_PPR_REQUEST    0x07
>>>> +#define IOMMU_CMD_INVAL_IOMMU_ALL         0x08
>>>> +
>>>> +#define IOMMU_DEVTAB_ENTRY_SIZE           32
>>>> +
>>>> +/* Device table entry bits 0:63 */
>>>> +#define IOMMU_DEV_VALID                   (1ULL << 0)
>>>> +#define IOMMU_DEV_TRANSLATION_VALID       (1ULL << 1)
>>>> +#define IOMMU_DEV_MODE_MASK               0x7
>>>> +#define IOMMU_DEV_MODE_RSHIFT             9
>>>> +#define IOMMU_DEV_PT_ROOT_MASK            0xFFFFFFFFFF000
>>>> +#define IOMMU_DEV_PT_ROOT_RSHIFT          12
>>>> +#define IOMMU_DEV_PERM_SHIFT              61
>>>> +#define IOMMU_DEV_PERM_READ               (1ULL << 61)
>>>> +#define IOMMU_DEV_PERM_WRITE              (1ULL << 62)
>>>> +
>>>> +/* Device table entry bits 64:127 */
>>>> +#define IOMMU_DEV_DOMID_ID_MASK          ((1ULL << 16) - 1)
>>>> +#define IOMMU_DEV_IOTLB_SUPPORT           (1ULL << 17)
>>>> +#define IOMMU_DEV_SUPPRESS_PF             (1ULL << 18)
>>>> +#define IOMMU_DEV_SUPPRESS_ALL_PF         (1ULL << 19)
>>>> +#define IOMMU_DEV_IOCTL_MASK              (~3)
>>>> +#define IOMMU_DEV_IOCTL_RSHIFT            20
>>>> +#define   IOMMU_DEV_IOCTL_DENY            0
>>>> +#define   IOMMU_DEV_IOCTL_PASSTHROUGH     1
>>>> +#define   IOMMU_DEV_IOCTL_TRANSLATE       2
>>>> +#define IOMMU_DEV_CACHE                   (1ULL << 37)
>>>> +#define IOMMU_DEV_SNOOP_DISABLE           (1ULL << 38)
>>>> +#define IOMMU_DEV_EXCL                    (1ULL << 39)
>>>> +
>>>> +/* Event codes and flags, as stored in the info field */
>>>> +#define IOMMU_EVENT_ILLEGAL_DEVTAB_ENTRY  (0x1U << 12)
>>>> +#define IOMMU_EVENT_IOPF                  (0x2U << 12)
>>>> +#define   IOMMU_EVENT_IOPF_I              (1U << 3)
>>>> +#define   IOMMU_EVENT_IOPF_PR             (1U << 4)
>>>> +#define   IOMMU_EVENT_IOPF_RW             (1U << 5)
>>>> +#define   IOMMU_EVENT_IOPF_PE             (1U << 6)
>>>> +#define   IOMMU_EVENT_IOPF_RZ             (1U << 7)
>>>> +#define   IOMMU_EVENT_IOPF_TR             (1U << 8)
>>>> +#define IOMMU_EVENT_DEV_TAB_HW_ERROR      (0x3U << 12)
>>>> +#define IOMMU_EVENT_PAGE_TAB_HW_ERROR     (0x4U << 12)
>>>> +#define IOMMU_EVENT_ILLEGAL_COMMAND_ERROR (0x5U << 12)
>>>> +#define IOMMU_EVENT_COMMAND_HW_ERROR      (0x6U << 12)
>>>> +#define IOMMU_EVENT_IOTLB_INV_TIMEOUT     (0x7U << 12)
>>>> +#define IOMMU_EVENT_INVALID_DEV_REQUEST   (0x8U << 12)
>>>> +
>>>> +#define IOMMU_EVENT_LEN                   16
>>>> +#define IOMMU_PERM_READ             (1 << 0)
>>>> +#define IOMMU_PERM_WRITE            (1 << 1)
>>>> +#define IOMMU_PERM_RW               (IOMMU_PERM_READ | IOMMU_PERM_WRITE)
>>>> +
>>>> +/* AMD RD890 Chipset */
>>>> +#define PCI_DEVICE_ID_RD890_IOMMU   0x20
>
>
>>>
>>>
>>> We keep the pci ids in include/hw/pci/pci_ids.h
>
> This is a dummy device id I use for the IOMMU - the IOMMU doesn't have a
> specific device id. There is a device id in the Linux include files for a
> certain AMD IOMMU, but it makes the IOMMU seem to be on a non-existent bus,
> so I don't use it.
>
>>>
>>>> +
>>>> +#define IOMMU_FEATURE_PREFETCH            (1ULL << 0)
>>>> +#define IOMMU_FEATURE_PPR                 (1ULL << 1)
>>>> +#define IOMMU_FEATURE_NX                  (1ULL << 3)
>>>> +#define IOMMU_FEATURE_GT                  (1ULL << 4)
>>>> +#define IOMMU_FEATURE_IA                  (1ULL << 6)
>>>> +#define IOMMU_FEATURE_GA                  (1ULL << 7)
>>>> +#define IOMMU_FEATURE_HE                  (1ULL << 8)
>>>> +#define IOMMU_FEATURE_PC                  (1ULL << 9)
>>>> +
>>>> +/* reserved DTE bits */
>>>> +#define IOMMU_DTE_LOWER_QUAD_RESERVED  0x80300000000000fc
>>>> +#define IOMMU_DTE_MIDDLE_QUAD_RESERVED 0x0000000000000100
>>>> +#define IOMMU_DTE_UPPER_QUAD_RESERVED  0x08f0000000000000
>>>> +
>>>> +/* IOMMU paging mode */
>>>> +#define IOMMU_GATS_MODE                 (6ULL <<  12)
>>>> +#define IOMMU_HATS_MODE                 (6ULL <<  10)
>>>> +
>>>> +/* PCI SIG constants */
>>>> +#define PCI_BUS_MAX 256
>>>> +#define PCI_SLOT_MAX 32
>>>> +#define PCI_FUNC_MAX 8
>>>> +#define PCI_DEVFN_MAX 256
>>>
>>>
>>> Maybe we can move the PCI macros to include/hw/pci/pci.h, those are not
>>> IOMMU specific.
>
> Yeah, these are PCI macros, but they are not copied from Linux, while the
> macros in pci.h seem to have been copied from Linux.
>
>>>
>>>> +
>>>> +/* IOTLB */
>>>> +#define IOMMU_IOTLB_MAX_SIZE 1024
>>>> +#define IOMMU_DEVID_SHIFT    36
>>>> +
>>>> +/* extended feature support */
>>>> +#define IOMMU_EXT_FEATURES (IOMMU_FEATURE_PREFETCH | IOMMU_FEATURE_PPR |
>>>> \
>>>> +        IOMMU_FEATURE_NX | IOMMU_FEATURE_IA | IOMMU_FEATURE_GT | \
>>>> +        IOMMU_FEATURE_GA | IOMMU_FEATURE_HE | IOMMU_GATS_MODE | \
>>>> +        IOMMU_HATS_MODE)
>>>> +
>>>> +/* capabilities header */
>>>> +#define IOMMU_CAPAB_FEATURES (IOMMU_CAPAB_FLAT_EXT | \
>>>> +        IOMMU_CAPAB_FLAG_NPCACHE | IOMMU_CAPAB_FLAG_IOTLBSUP \
>>>> +        | IOMMU_CAPAB_ID_SEC | IOMMU_CAPAB_INIT_TYPE | \
>>>> +        IOMMU_CAPAB_FLAG_HTTUNNEL |  IOMMU_CAPAB_EFR_SUP)
>>>> +
>>>> +/* command constants */
>>>> +#define IOMMU_COM_STORE_ADDRESS_MASK 0xffffffffffff8
>>>> +#define IOMMU_COM_COMPLETION_STORE_MASK 0x1
>>>> +#define IOMMU_COM_COMPLETION_INTR 0x2
>>>> +#define IOMMU_COM_COMPLETION_DATA_OFF 0x8
>>>> +#define IOMMU_COMMAND_SIZE 0x10
>>>> +
>>>> +/* IOMMU default address */
>>>> +#define IOMMU_BASE_ADDR 0xfed80000
>>>> +
>>>> +/* page management constants */
>>>> +#define IOMMU_PAGE_SHIFT 12
>>>> +#define IOMMU_PAGE_SIZE  (1ULL << IOMMU_PAGE_SHIFT)
>>>> +
>>>> +#define IOMMU_PAGE_SHIFT_4K 12
>>>> +#define IOMMU_PAGE_MASK_4K  (~((1ULL << IOMMU_PAGE_SHIFT_4K) - 1))
>>>> +#define IOMMU_PAGE_SHIFT_2M 21
>>>> +#define IOMMU_PAGE_MASK_2M  (~((1ULL << IOMMU_PAGE_SHIFT_2M) - 1))
>>>> +#define IOMMU_PAGE_SHIFT_1G 30
>>>> +#define IOMMU_PAGE_MASK_1G (~((1ULL << IOMMU_PAGE_SHIFT_1G) - 1))
>>>> +
>>>> +#define IOMMU_MAX_VA_ADDR          (48UL << 5)
>>>> +#define IOMMU_MAX_PH_ADDR          (40UL << 8)
>>>> +#define IOMMU_MAX_GVA_ADDR         (48UL << 15)
>>>> +
>>>> +/* invalidation command device id */
>>>> +#define IOMMU_INVAL_DEV_ID_SHIFT  32
>>>> +#define IOMMU_INVAL_DEV_ID_MASK   (~((1UL << IOMMU_INVAL_DEV_ID_SHIFT) -
>>>> 1))
Marcel Apfelbaum March 3, 2016, 9:40 a.m. UTC | #7
On 03/02/2016 06:08 AM, David Kiarie wrote:
> On Wed, Mar 2, 2016 at 7:00 AM, David Kiarie <davidkiarie4@gmail.com> wrote:
>> On Fri, Feb 26, 2016 at 9:23 AM, David Kiarie <davidkiarie4@gmail.com> wrote:
>>> On Thu, Feb 25, 2016 at 6:43 PM, Marcel Apfelbaum <marcel@redhat.com> wrote:
>>>> On 02/21/2016 08:10 PM, David Kiarie wrote:
>>>>>
>>>>> Add AMD IOMMU emulation to Qemu in addition to Intel IOMMU
>>>>> The IOMMU does basic translation, error checking and has a
>>>>> minimal IOTLB implementation
>>>>
>>>>
>>>> Hi,
>>>>
>>>>>
>>>>> Signed-off-by: David Kiarie <davidkiarie4@gmail.com>
>>>>> ---
>>>>>    hw/i386/Makefile.objs |    1 +
>>>>>    hw/i386/amd_iommu.c   | 1432
>>>>> +++++++++++++++++++++++++++++++++++++++++++++++++
>>>>>    hw/i386/amd_iommu.h   |  395 ++++++++++++++
>>>>>    include/hw/pci/pci.h  |    2 +
>>>>>    4 files changed, 1830 insertions(+)
>>>>>    create mode 100644 hw/i386/amd_iommu.c
>>>>>    create mode 100644 hw/i386/amd_iommu.h
>>>>>

[...]
>>>>
>>>>
>>>> you return from the if clause so you don't need the else
>>>>
>>>>> +        if (!amd_iommu_get_dte(s, devid, entry)) {
>>>>
>>>>
>>>> Is it not an error if you did not find the device id?
>
> I get the device id from Qemu, so I assume it's correct.

I don't get it: if you assume it is correct, why do you use the "if" clause?
If you think the id should be OK, maybe you want to assert here?

Currently it looks like a silent failure.

Thanks,
Marcel
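
A sketch of the alternative, if a device table lookup failure should be made
visible rather than silently falling back to identity mapping (whether to deny
the DMA as below or to assert outright is the open policy question, and the
snippet is untested):

    if (!amd_iommu_get_dte(s, devid, entry)) {
        /* deny the access instead of silently handing back identity RW */
        ret->iova = addr;
        ret->translated_addr = 0;
        ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
        ret->perm = IOMMU_NONE;
        return;
    }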

>
>>>>
>>>>> +            goto out;
>>>>> +        }
>>>>> +
>>>>> +        err = amd_iommu_page_walk(as, entry, ret,
>>>>> +                                  is_write ? IOMMU_PERM_WRITE :
>>>>> IOMMU_PERM_READ,
>>>>> +                                  addr);
>>>>> +        if (err) {
>>>>> +            IOMMU_DPRINTF(MMU, "error: hardware error accessing page
>>>>> tables"
>>>>> +                          " while translating addr 0x%"PRIx64, addr);
>>>>> +            amd_iommu_log_pagetab_error(s, as->devfn, addr, 0);
>>>>> +            goto out;
>>>>> +        }
>>>>> +
>>>>> +        amd_iommu_update_iotlb(s, as->devfn, addr, ret->translated_addr,
>>>>> +                               ret->perm, entry[1] &
>>>>> IOMMU_DEV_DOMID_ID_MASK);
>>>>> +        return;
>>>>> +    }
>>>>> +
>>>>> +out:
>>>>> +    ret->iova = addr;
>>>>> +    ret->translated_addr = addr & IOMMU_PAGE_MASK_4K;
>>>>> +    ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
>>>>> +    ret->perm = IOMMU_RW;
>>>>> +    return;
>>>>
>>>>
>>>> you don't need the above return
>>>>
>>>>> +}
>>>>> +
>>>>> +static IOMMUTLBEntry amd_iommu_translate(MemoryRegion *iommu, hwaddr
>>>>> addr,
>>>>> +                                         bool is_write)
>>>>> +{
>>>>> +    IOMMU_DPRINTF(GENERAL, "");
>>>>> +
>>>>> +    AMDIOMMUAddressSpace *as = container_of(iommu, AMDIOMMUAddressSpace,
>>>>> iommu);
>>>>> +    AMDIOMMUState *s = as->iommu_state;
>>>>> +
>>>>> +    IOMMUTLBEntry ret = {
>>>>> +        .target_as = &address_space_memory,
>>>>> +        .iova = addr,
>>>>> +        .translated_addr = 0,
>>>>> +        .addr_mask = ~(hwaddr)0,
>>>>> +        .perm = IOMMU_NONE,
>>>>> +    };
>>>>> +
>>>>> +    if (!s->enabled) {
>>>>> +        /* IOMMU disabled - corresponds to iommu=off not
>>>>> +         * failure to provide any parameter
>>>>> +         */
>>>>> +        ret.iova = addr & IOMMU_PAGE_MASK_4K;
>>>>> +        ret.translated_addr = addr & IOMMU_PAGE_MASK_4K;
>>>>> +        ret.addr_mask = ~IOMMU_PAGE_MASK_4K;
>>>>> +        ret.perm = IOMMU_RW;
>>>>> +        return ret;
>>>>> +    }
>>>>> +
>>>>> +    amd_iommu_do_translate(as, addr, is_write, &ret);
>>>>> +    IOMMU_DPRINTF(MMU, "devid: %02x:%02x.%x gpa 0x%"PRIx64 " hpa
>>>>> 0x%"PRIx64,
>>>>> +                  as->bus_num, PCI_SLOT(as->devfn), PCI_FUNC(as->devfn),
>>>>> addr,
>>>>> +                  ret.translated_addr);
>>>>> +
>>>>> +    return ret;
>>>>> +}
>>>>> +
>>>>> +static const MemoryRegionOps mmio_mem_ops = {
>>>>> +    .read = amd_iommu_mmio_read,
>>>>> +    .write = amd_iommu_mmio_write,
>>>>> +    .endianness = DEVICE_LITTLE_ENDIAN,
>>>>> +    .impl = {
>>>>> +        .min_access_size = 1,
>>>>> +        .max_access_size = 8,
>>>>> +        .unaligned = false,
>>>>> +    },
>>>>> +    .valid = {
>>>>> +        .min_access_size = 1,
>>>>> +        .max_access_size = 8,
>>>>> +    }
>>>>> +};
>>>>> +
>>>>> +static void amd_iommu_init(AMDIOMMUState *s)
>>>>> +{
>>>>> +    printf("amd_iommu_init");
>>>>
>>>>
>>>> you should use the debug macro here
>>>>
>>>>> +
>>>>> +    amd_iommu_iotlb_reset(s);
>>>>> +
>>>>> +    s->iommu_ops.translate = amd_iommu_translate;
>>>>> +
>>>>> +    s->devtab_len = 0;
>>>>> +    s->cmdbuf_len = 0;
>>>>> +    s->cmdbuf_head = 0;
>>>>> +    s->cmdbuf_tail = 0;
>>>>> +    s->evtlog_head = 0;
>>>>> +    s->evtlog_tail = 0;
>>>>> +    s->excl_enabled = false;
>>>>> +    s->excl_allow = false;
>>>>> +    s->mmio_enabled = false;
>>>>> +    s->enabled = false;
>>>>> +    s->ats_enabled = false;
>>>>> +    s->cmdbuf_enabled = false;
>>>>> +
>>>>> +    /* reset MMIO */
>>>>> +    memset(s->mmior, 0, IOMMU_MMIO_SIZE);
>>>>> +    amd_iommu_set_quad(s, IOMMU_MMIO_EXT_FEATURES, IOMMU_EXT_FEATURES,
>>>>> +            0xffffffffffffffef, 0);
>>>>> +    amd_iommu_set_quad(s, IOMMU_MMIO_STATUS, 0, 0x98, 0x67);
>>>>> +    /* reset device ident */
>>>>> +    pci_config_set_vendor_id(s->dev.config, PCI_VENDOR_ID_AMD);
>>>>> +    pci_config_set_device_id(s->dev.config, PCI_DEVICE_ID_RD890_IOMMU);
>>>>> +    pci_config_set_prog_interface(s->dev.config, 00);
>>>>> +    pci_config_set_class(s->dev.config, 0x0806);
>>>>> +
>>>>> +    /* reset IOMMU specific capabilities  */
>>>>> +    pci_set_long(s->dev.config + s->capab_offset, IOMMU_CAPAB_FEATURES);
>>>>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_BAR_LOW,
>>>>> +                 s->mmio.addr & ~(0xffff0000));
>>>>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_BAR_HIGH,
>>>>> +                (s->mmio.addr & ~(0xffff)) >> 16);
>>>>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_RANGE,
>>>>> +                 0xff000000);
>>>>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_MISC, 0);
>>>>> +    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_MISC,
>>>>> +            IOMMU_MAX_PH_ADDR | IOMMU_MAX_GVA_ADDR | IOMMU_MAX_VA_ADDR);
>>>>
>>>>
>>>> All the capabilities are read-write? Otherwise you need to set the wmask
>>>> to indicate what fields are writable.
>>>>
>>>>> +}
>>>>> +
>>>>> +static void amd_iommu_reset(DeviceState *dev)
>>>>> +{
>>>>> +    AMDIOMMUState *s = AMD_IOMMU_DEVICE(dev);
>>>>> +
>>>>> +    amd_iommu_init(s);
>>>>> +}
>>>>> +
>>>>> +static void amd_iommu_realize(PCIDevice *dev, Error **error)
>>>>> +{
>>>>> +    AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);
>>>>> +
>>>>> +    s->iotlb = g_hash_table_new_full(amd_iommu_uint64_hash,
>>>>> +                                     amd_iommu_uint64_equal, g_free,
>>>>> g_free);
>>>>> +
>>>>> +    s->capab_offset = pci_add_capability(dev, IOMMU_CAPAB_ID_SEC, 0,
>>>>> +                                         IOMMU_CAPAB_SIZE);
>>>>> +
>>>>> +    /* add msi and hypertransport capabilities */
>>>>> +    pci_add_capability(&s->dev, PCI_CAP_ID_MSI, 0, IOMMU_CAPAB_REG_SIZE);
>>>>> +    pci_add_capability(&s->dev, PCI_CAP_ID_HT, 0, IOMMU_CAPAB_REG_SIZE);
>>>>> +
>>>>> +    amd_iommu_init(s);
>>>>> +
>>>>> +    /* set up MMIO */
>>>>> +    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "mmio",
>>>>> +                          IOMMU_MMIO_SIZE);
>>>>> +
>>>>> +    if (s->mmio.addr == IOMMU_BASE_ADDR) {
>>>>
>>>>
>>>> I don't understand why this check is needed here. realize is called only
>>>> once in the init process, and you set the address a few lines below.
>>>>
>>>>> +        return;
>>>>> +    }
>>>>> +
>>>>> +    s->mmio.addr = IOMMU_BASE_ADDR;
>>>>> +    memory_region_add_subregion(get_system_memory(), IOMMU_BASE_ADDR,
>>>>> &s->mmio);
>>>>> +}
>>>>> +
>>>>> +static const VMStateDescription vmstate_amd_iommu = {
>>>>> +    .name = "amd-iommu",
>>>>> +    .fields  = (VMStateField[]) {
>>>>> +        VMSTATE_PCI_DEVICE(dev, AMDIOMMUState),
>>>>> +        VMSTATE_END_OF_LIST()
>>>>> +    }
>>>>> +};
>>>>> +
>>>>> +static Property amd_iommu_properties[] = {
>>>>> +    DEFINE_PROP_UINT32("version", AMDIOMMUState, version, 2),
>>>>> +    DEFINE_PROP_END_OF_LIST(),
>>>>> +};
>>>>> +
>>>>> +static void amd_iommu_uninit(PCIDevice *dev)
>>>>> +{
>>>>> +    AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);
>>>>> +    amd_iommu_iotlb_reset(s);
>>>>
>>>>
>>>> at this point you also need to clean up the memory regions you use.
>>>>
>>>>> +}
>>>>> +
>>>>> +static void amd_iommu_class_init(ObjectClass *klass, void* data)
>>>>> +{
>>>>> +    DeviceClass *dc = DEVICE_CLASS(klass);
>>>>> +    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
>>>>> +
>>>>> +    k->realize = amd_iommu_realize;
>>>>> +    k->exit = amd_iommu_uninit;
>>>>> +
>>>>> +    dc->reset = amd_iommu_reset;
>>>>> +    dc->vmsd = &vmstate_amd_iommu;
>>>>> +    dc->props = amd_iommu_properties;
>>>>> +}
>>>>> +
>>>>> +static const TypeInfo amd_iommu = {
>>>>> +    .name = TYPE_AMD_IOMMU_DEVICE,
>>>>> +    .parent = TYPE_PCI_DEVICE,
>>>>> +    .instance_size = sizeof(AMDIOMMUState),
>>>>> +    .class_init = amd_iommu_class_init
>>>>> +};
>>>>> +
>>>>> +static void amd_iommu_register_types(void)
>>>>> +{
>>>>> +    type_register_static(&amd_iommu);
>>>>> +}
>>>>> +
>>>>> +type_init(amd_iommu_register_types);
>>>>> diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h
>>>>> new file mode 100644
>>>>> index 0000000..7d317e1
>>>>> --- /dev/null
>>>>> +++ b/hw/i386/amd_iommu.h
>>>>> @@ -0,0 +1,395 @@
>>>>> +/*
>>>>> + * QEMU emulation of an AMD IOMMU (AMD-Vi)
>>>>> + *
>>>>> + * Copyright (C) 2011 Eduard - Gabriel Munteanu
>>>>> + * Copyright (C) 2015 David Kiarie, <davidkiarie4@gmail.com>
>>>>> + *
>>>>> + * This program is free software; you can redistribute it and/or modify
>>>>> + * it under the terms of the GNU General Public License as published by
>>>>> + * the Free Software Foundation; either version 2 of the License, or
>>>>> + * (at your option) any later version.
>>>>> +
>>>>> + * This program is distributed in the hope that it will be useful,
>>>>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>>>>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
>>>>> + * GNU General Public License for more details.
>>>>> +
>>>>> + * You should have received a copy of the GNU General Public License
>>>>> along
>>>>> + * with this program; if not, see <http://www.gnu.org/licenses/>.
>>>>> + */
>>>>> +
>>>>> +#ifndef AMD_IOMMU_H_
>>>>> +#define AMD_IOMMU_H_
>>>>> +
>>>>> +#include "hw/hw.h"
>>>>> +#include "hw/pci/pci.h"
>>>>> +#include "hw/pci/msi.h"
>>>>> +#include "hw/sysbus.h"
>>>>> +#include "sysemu/dma.h"
>>>>> +
>>>>> +/* Capability registers */
>>>>> +#define IOMMU_CAPAB_HEADER            0x00
>>>>> +#define   IOMMU_CAPAB_REV_TYPE        0x02
>>>>> +#define   IOMMU_CAPAB_FLAGS           0x03
>>>>> +#define IOMMU_CAPAB_BAR_LOW           0x04
>>>>> +#define IOMMU_CAPAB_BAR_HIGH          0x08
>>>>> +#define IOMMU_CAPAB_RANGE             0x0C
>>>>> +#define IOMMU_CAPAB_MISC              0x10
>>>>> +#define IOMMU_CAPAB_MISC1             0x14
>>>>> +
>>>>> +#define IOMMU_CAPAB_SIZE              0x18
>>>>> +#define IOMMU_CAPAB_REG_SIZE          0x04
>>>>> +
>>>>> +/* Capability header data */
>>>>> +#define IOMMU_CAPAB_ID_SEC            0xf
>>>>> +#define IOMMU_CAPAB_FLAT_EXT          (1 << 28)
>>>>> +#define IOMMU_CAPAB_EFR_SUP           (1 << 27)
>>>>> +#define IOMMU_CAPAB_FLAG_NPCACHE      (1 << 26)
>>>>> +#define IOMMU_CAPAB_FLAG_HTTUNNEL     (1 << 25)
>>>>> +#define IOMMU_CAPAB_FLAG_IOTLBSUP     (1 << 24)
>>>>> +#define IOMMU_CAPAB_INIT_REV          (1 << 19)
>>>>> +#define IOMMU_CAPAB_INIT_TYPE         (3 << 16)
>>>>> +#define IOMMU_CAPAB_INIT_REV_TYPE     (IOMMU_CAPAB_INIT_REV | IOMMU_CAPAB_INIT_TYPE)
>>>>> +#define IOMMU_CAPAB_INIT_FLAGS        (IOMMU_CAPAB_FLAG_NPCACHE | \
>>>>> +                                       IOMMU_CAPAB_FLAG_HTTUNNEL)
>>>>> +#define IOMMU_CAPAB_INIT_MISC         ((64 << 15) | (48 << 8))
>>>>> +#define IOMMU_CAPAB_BAR_MASK          (~((1UL << 14) - 1))
>>>>> +
>>>>> +/* MMIO registers */
>>>>> +#define IOMMU_MMIO_DEVICE_TABLE       0x0000
>>>>> +#define IOMMU_MMIO_COMMAND_BASE       0x0008
>>>>> +#define IOMMU_MMIO_EVENT_BASE         0x0010
>>>>> +#define IOMMU_MMIO_CONTROL            0x0018
>>>>> +#define IOMMU_MMIO_EXCL_BASE          0x0020
>>>>> +#define IOMMU_MMIO_EXCL_LIMIT         0x0028
>>>>> +#define IOMMU_MMIO_EXT_FEATURES       0x0030
>>>>> +#define IOMMU_MMIO_COMMAND_HEAD       0x2000
>>>>> +#define IOMMU_MMIO_COMMAND_TAIL       0x2008
>>>>> +#define IOMMU_MMIO_EVENT_HEAD         0x2010
>>>>> +#define IOMMU_MMIO_EVENT_TAIL         0x2018
>>>>> +#define IOMMU_MMIO_STATUS             0x2020
>>>>> +#define IOMMU_MMIO_PPR_BASE           0x0038
>>>>> +#define IOMMU_MMIO_PPR_HEAD           0x2030
>>>>> +#define IOMMU_MMIO_PPR_TAIL           0x2038
>>>>> +
>>>>> +#define IOMMU_MMIO_SIZE               0x4000
>>>>> +
>>>>> +#define IOMMU_MMIO_DEVTAB_SIZE_MASK   ((1ULL << 12) - 1)
>>>>> +#define IOMMU_MMIO_DEVTAB_BASE_MASK   (((1ULL << 52) - 1) & ~ \
>>>>> +                                       IOMMU_MMIO_DEVTAB_SIZE_MASK)
>>>>> +#define IOMMU_MMIO_DEVTAB_ENTRY_SIZE  32
>>>>> +#define IOMMU_MMIO_DEVTAB_SIZE_UNIT   4096
>>>>> +
>>>>> +/* some of these are similar but kept separate for readability */
>>>>> +#define IOMMU_MMIO_CMDBUF_SIZE_BYTE       (IOMMU_MMIO_COMMAND_BASE + 7)
>>>>> +#define IOMMU_MMIO_CMDBUF_SIZE_MASK       0x0F
>>>>> +#define IOMMU_MMIO_CMDBUF_BASE_MASK       IOMMU_MMIO_DEVTAB_BASE_MASK
>>>>> +#define IOMMU_MMIO_CMDBUF_DEFAULT_SIZE    8
>>>>> +#define IOMMU_MMIO_CMDBUF_HEAD_MASK       (((1ULL << 19) - 1) & ~0x0F)
>>>>> +#define IOMMU_MMIO_CMDBUF_TAIL_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
>>>>> +
>>>>> +#define IOMMU_MMIO_EVTLOG_SIZE_BYTE       (IOMMU_MMIO_EVENT_BASE + 7)
>>>>> +#define IOMMU_MMIO_EVTLOG_SIZE_MASK       IOMMU_MMIO_CMDBUF_SIZE_MASK
>>>>> +#define IOMMU_MMIO_EVTLOG_BASE_MASK       IOMMU_MMIO_CMDBUF_BASE_MASK
>>>>> +#define IOMMU_MMIO_EVTLOG_DEFAULT_SIZE    IOMMU_MMIO_CMDBUF_DEFAULT_SIZE
>>>>> +#define IOMMU_MMIO_EVTLOG_HEAD_MASK       (((1ULL << 19) - 1) & ~0x0F)
>>>>> +#define IOMMU_MMIO_EVTLOG_TAIL_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
>>>>> +
>>>>> +#define IOMMU_MMIO_PPRLOG_SIZE_BYTE       (IOMMU_MMIO_EVENT_BASE + 7)
>>>>> +#define IOMMU_MMIO_PPRLOG_HEAD_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
>>>>> +#define IOMMU_MMIO_PPRLOG_TAIL_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
>>>>> +#define IOMMU_MMIO_PPRLOG_BASE_MASK       IOMMU_MMIO_EVTLOG_BASE_MASK
>>>>> +#define IOMMU_MMIO_PPRLOG_SIZE_MASK       IOMMU_MMIO_EVTLOG_SIZE_MASK
>>>>> +
>>>>> +#define IOMMU_MMIO_EXCL_BASE_MASK         IOMMU_MMIO_DEVTAB_BASE_MASK
>>>>> +#define IOMMU_MMIO_EXCL_ENABLED_MASK      (1ULL << 0)
>>>>> +#define IOMMU_MMIO_EXCL_ALLOW_MASK        (1ULL << 1)
>>>>> +#define IOMMU_MMIO_EXCL_LIMIT_MASK        IOMMU_MMIO_DEVTAB_BASE_MASK
>>>>> +#define IOMMU_MMIO_EXCL_LIMIT_LOW         0xFFF
>>>>> +
>>>>> +/* mmio control register flags */
>>>>> +#define IOMMU_MMIO_CONTROL_IOMMUEN        (1ULL << 0)
>>>>> +#define IOMMU_MMIO_CONTROL_HTTUNEN        (1ULL << 1)
>>>>> +#define IOMMU_MMIO_CONTROL_EVENTLOGEN     (1ULL << 2)
>>>>> +#define IOMMU_MMIO_CONTROL_EVENTINTEN     (1ULL << 3)
>>>>> +#define IOMMU_MMIO_CONTROL_COMWAITINTEN   (1ULL << 4)
>>>>> +#define IOMMU_MMIO_CONTROL_PASSPW         (1ULL << 7)
>>>>> +#define IOMMU_MMIO_CONTROL_REPASSPW       (1ULL << 9)
>>>>> +#define IOMMU_MMIO_CONTROL_COHERENT       (1ULL << 10)
>>>>> +#define IOMMU_MMIO_CONTROL_ISOC           (1ULL << 11)
>>>>> +#define IOMMU_MMIO_CONTROL_CMDBUFLEN      (1ULL << 12)
>>>>> +#define IOMMU_MMIO_CONTROL_PPRLOGEN       (1ULL << 13)
>>>>> +#define IOMMU_MMIO_CONTROL_PPRINTEN       (1ULL << 14)
>>>>> +#define IOMMU_MMIO_CONTROL_PPREN          (1ULL << 15)
>>>>> +#define IOMMU_MMIO_CONTROL_GAEN           (1ULL << 16)
>>>>> +#define IOMMU_MMIO_CONTROL_GTEN           (1ULL << 17)
>>>>> +
>>>>> +/* MMIO status register bits */
>>>>> +#define IOMMU_MMIO_STATUS_PPR_OVFE    (1 << 18)
>>>>> +#define IOMMU_MMIO_STATUS_PPR_OVFEB   (1 << 17)
>>>>> +#define IOMMU_MMIO_STATUS_EVT_ACTIVE  (1 << 16)
>>>>> +#define IOMMU_MMIO_STATUS_EVT_OVFB    (1 << 15)
>>>>> +#define IOMMU_MMIO_STATUS_PPR_ACTIVE  (1 << 12)
>>>>> +#define IOMMU_MMIO_STATUS_PPR_OVFB    (1 << 11)
>>>>> +#define IOMMU_MMIO_STATUS_GA_INT      (1 << 10)
>>>>> +#define IOMMU_MMIO_STATUS_GA_RUN      (1 << 9)
>>>>> +#define IOMMU_MMIO_STATUS_GA_OVF      (1 << 8)
>>>>> +#define IOMMU_MMIO_STATUS_PPR_RUN     (1 << 7)
>>>>> +#define IOMMU_MMIO_STATUS_PPR_INT     (1 << 6)
>>>>> +#define IOMMU_MMIO_STATUS_PPR_OVF     (1 << 5)
>>>>> +#define IOMMU_MMIO_STATUS_CMDBUF_RUN  (1 << 4)
>>>>> +#define IOMMU_MMIO_STATUS_EVT_RUN     (1 << 3)
>>>>> +#define IOMMU_MMIO_STATUS_COMP_INT    (1 << 2)
>>>>> +#define IOMMU_MMIO_STATUS_EVT_INT     (1 << 1)
>>>>> +#define IOMMU_MMIO_STATUS_EVT_OVF     (1 << 0)
>>>>> +
>>>>> +#define IOMMU_CMDBUF_ID_BYTE              0x07
>>>>> +#define IOMMU_CMDBUF_ID_RSHIFT            4
>>>>> +
>>>>> +#define IOMMU_CMD_COMPLETION_WAIT         0x01
>>>>> +#define IOMMU_CMD_INVAL_DEVTAB_ENTRY      0x02
>>>>> +#define IOMMU_CMD_INVAL_IOMMU_PAGES       0x03
>>>>> +#define IOMMU_CMD_INVAL_IOTLB_PAGES       0x04
>>>>> +#define IOMMU_CMD_INVAL_INTR_TABLE        0x05
>>>>> +#define IOMMU_CMD_PREFETCH_IOMMU_PAGES    0x06
>>>>> +#define IOMMU_CMD_COMPLETE_PPR_REQUEST    0x07
>>>>> +#define IOMMU_CMD_INVAL_IOMMU_ALL         0x08
>>>>> +
>>>>> +#define IOMMU_DEVTAB_ENTRY_SIZE           32
>>>>> +
>>>>> +/* Device table entry bits 0:63 */
>>>>> +#define IOMMU_DEV_VALID                   (1ULL << 0)
>>>>> +#define IOMMU_DEV_TRANSLATION_VALID       (1ULL << 1)
>>>>> +#define IOMMU_DEV_MODE_MASK               0x7
>>>>> +#define IOMMU_DEV_MODE_RSHIFT             9
>>>>> +#define IOMMU_DEV_PT_ROOT_MASK            0xFFFFFFFFFF000
>>>>> +#define IOMMU_DEV_PT_ROOT_RSHIFT          12
>>>>> +#define IOMMU_DEV_PERM_SHIFT              61
>>>>> +#define IOMMU_DEV_PERM_READ               (1ULL << 61)
>>>>> +#define IOMMU_DEV_PERM_WRITE              (1ULL << 62)
>>>>> +
>>>>> +/* Device table entry bits 64:127 */
>>>>> +#define IOMMU_DEV_DOMID_ID_MASK          ((1ULL << 16) - 1)
>>>>> +#define IOMMU_DEV_IOTLB_SUPPORT           (1ULL << 17)
>>>>> +#define IOMMU_DEV_SUPPRESS_PF             (1ULL << 18)
>>>>> +#define IOMMU_DEV_SUPPRESS_ALL_PF         (1ULL << 19)
>>>>> +#define IOMMU_DEV_IOCTL_MASK              (~3)
>>>>> +#define IOMMU_DEV_IOCTL_RSHIFT            20
>>>>> +#define   IOMMU_DEV_IOCTL_DENY            0
>>>>> +#define   IOMMU_DEV_IOCTL_PASSTHROUGH     1
>>>>> +#define   IOMMU_DEV_IOCTL_TRANSLATE       2
>>>>> +#define IOMMU_DEV_CACHE                   (1ULL << 37)
>>>>> +#define IOMMU_DEV_SNOOP_DISABLE           (1ULL << 38)
>>>>> +#define IOMMU_DEV_EXCL                    (1ULL << 39)
>>>>> +
>>>>> +/* Event codes and flags, as stored in the info field */
>>>>> +#define IOMMU_EVENT_ILLEGAL_DEVTAB_ENTRY  (0x1U << 12)
>>>>> +#define IOMMU_EVENT_IOPF                  (0x2U << 12)
>>>>> +#define   IOMMU_EVENT_IOPF_I              (1U << 3)
>>>>> +#define   IOMMU_EVENT_IOPF_PR             (1U << 4)
>>>>> +#define   IOMMU_EVENT_IOPF_RW             (1U << 5)
>>>>> +#define   IOMMU_EVENT_IOPF_PE             (1U << 6)
>>>>> +#define   IOMMU_EVENT_IOPF_RZ             (1U << 7)
>>>>> +#define   IOMMU_EVENT_IOPF_TR             (1U << 8)
>>>>> +#define IOMMU_EVENT_DEV_TAB_HW_ERROR      (0x3U << 12)
>>>>> +#define IOMMU_EVENT_PAGE_TAB_HW_ERROR     (0x4U << 12)
>>>>> +#define IOMMU_EVENT_ILLEGAL_COMMAND_ERROR (0x5U << 12)
>>>>> +#define IOMMU_EVENT_COMMAND_HW_ERROR      (0x6U << 12)
>>>>> +#define IOMMU_EVENT_IOTLB_INV_TIMEOUT     (0x7U << 12)
>>>>> +#define IOMMU_EVENT_INVALID_DEV_REQUEST   (0x8U << 12)
>>>>> +
>>>>> +#define IOMMU_EVENT_LEN                   16
>>>>> +#define IOMMU_PERM_READ             (1 << 0)
>>>>> +#define IOMMU_PERM_WRITE            (1 << 1)
>>>>> +#define IOMMU_PERM_RW               (IOMMU_PERM_READ | IOMMU_PERM_WRITE)
>>>>> +
>>>>> +/* AMD RD890 Chipset */
>>>>> +#define PCI_DEVICE_ID_RD890_IOMMU   0x20
>>
>>
>>>>
>>>>
>>>> We keep the pci ids in include/hw/pci/pci_ids.h
>>
>> This is a dummy device id I use for the IOMMU - the IOMMU doesn't have a
>> specific device id. There's a device id in the Linux include files for a
>> certain AMD IOMMU but it makes the IOMMU seem to be on a non-existent bus
>> so I don't use it.
>>
>>>>
>>>>> +
>>>>> +#define IOMMU_FEATURE_PREFETCH            (1ULL << 0)
>>>>> +#define IOMMU_FEATURE_PPR                 (1ULL << 1)
>>>>> +#define IOMMU_FEATURE_NX                  (1ULL << 3)
>>>>> +#define IOMMU_FEATURE_GT                  (1ULL << 4)
>>>>> +#define IOMMU_FEATURE_IA                  (1ULL << 6)
>>>>> +#define IOMMU_FEATURE_GA                  (1ULL << 7)
>>>>> +#define IOMMU_FEATURE_HE                  (1ULL << 8)
>>>>> +#define IOMMU_FEATURE_PC                  (1ULL << 9)
>>>>> +
>>>>> +/* reserved DTE bits */
>>>>> +#define IOMMU_DTE_LOWER_QUAD_RESERVED  0x80300000000000fc
>>>>> +#define IOMMU_DTE_MIDDLE_QUAD_RESERVED 0x0000000000000100
>>>>> +#define IOMMU_DTE_UPPER_QUAD_RESERVED  0x08f0000000000000
>>>>> +
>>>>> +/* IOMMU paging mode */
>>>>> +#define IOMMU_GATS_MODE                 (6ULL <<  12)
>>>>> +#define IOMMU_HATS_MODE                 (6ULL <<  10)
>>>>> +
>>>>> +/* PCI SIG constants */
>>>>> +#define PCI_BUS_MAX 256
>>>>> +#define PCI_SLOT_MAX 32
>>>>> +#define PCI_FUNC_MAX 8
>>>>> +#define PCI_DEVFN_MAX 256
>>>>
>>>>
>>>> Maybe we can move the PCI macros to include/hw/pci/pci.h, those are not
>>>> IOMMU specific.
>>
>> Yeah, these are PCI macros, but they are not copied from Linux, while
>> the macros in pci.h seem to have been copied from Linux.
>>
>>>>
>>>>> +
>>>>> +/* IOTLB */
>>>>> +#define IOMMU_IOTLB_MAX_SIZE 1024
>>>>> +#define IOMMU_DEVID_SHIFT    36
>>>>> +
>>>>> +/* extended feature support */
>>>>> +#define IOMMU_EXT_FEATURES (IOMMU_FEATURE_PREFETCH | IOMMU_FEATURE_PPR | \
>>>>> +        IOMMU_FEATURE_NX | IOMMU_FEATURE_IA | IOMMU_FEATURE_GT | \
>>>>> +        IOMMU_FEATURE_GA | IOMMU_FEATURE_HE | IOMMU_GATS_MODE | \
>>>>> +        IOMMU_HATS_MODE)
>>>>> +
>>>>> +/* capabilities header */
>>>>> +#define IOMMU_CAPAB_FEATURES (IOMMU_CAPAB_FLAT_EXT | \
>>>>> +        IOMMU_CAPAB_FLAG_NPCACHE | IOMMU_CAPAB_FLAG_IOTLBSUP \
>>>>> +        | IOMMU_CAPAB_ID_SEC | IOMMU_CAPAB_INIT_TYPE | \
>>>>> +        IOMMU_CAPAB_FLAG_HTTUNNEL |  IOMMU_CAPAB_EFR_SUP)
>>>>> +
>>>>> +/* command constants */
>>>>> +#define IOMMU_COM_STORE_ADDRESS_MASK 0xffffffffffff8
>>>>> +#define IOMMU_COM_COMPLETION_STORE_MASK 0x1
>>>>> +#define IOMMU_COM_COMPLETION_INTR 0x2
>>>>> +#define IOMMU_COM_COMPLETION_DATA_OFF 0x8
>>>>> +#define IOMMU_COMMAND_SIZE 0x10
>>>>> +
>>>>> +/* IOMMU default address */
>>>>> +#define IOMMU_BASE_ADDR 0xfed80000
>>>>> +
>>>>> +/* page management constants */
>>>>> +#define IOMMU_PAGE_SHIFT 12
>>>>> +#define IOMMU_PAGE_SIZE  (1ULL << IOMMU_PAGE_SHIFT)
>>>>> +
>>>>> +#define IOMMU_PAGE_SHIFT_4K 12
>>>>> +#define IOMMU_PAGE_MASK_4K  (~((1ULL << IOMMU_PAGE_SHIFT_4K) - 1))
>>>>> +#define IOMMU_PAGE_SHIFT_2M 21
>>>>> +#define IOMMU_PAGE_MASK_2M  (~((1ULL << IOMMU_PAGE_SHIFT_2M) - 1))
>>>>> +#define IOMMU_PAGE_SHIFT_1G 30
>>>>> +#define IOMMU_PAGE_MASK_1G (~((1ULL << IOMMU_PAGE_SHIFT_1G) - 1))
>>>>> +
>>>>> +#define IOMMU_MAX_VA_ADDR          (48UL << 5)
>>>>> +#define IOMMU_MAX_PH_ADDR          (40UL << 8)
>>>>> +#define IOMMU_MAX_GVA_ADDR         (48UL << 15)
>>>>> +
>>>>> +/* invalidation command device id */
>>>>> +#define IOMMU_INVAL_DEV_ID_SHIFT  32
>>>>> +#define IOMMU_INVAL_DEV_ID_MASK   (~((1UL << IOMMU_INVAL_DEV_ID_SHIFT) - 1))
Marcel Apfelbaum March 3, 2016, 12:16 p.m. UTC | #8
On 03/02/2016 09:11 PM, David Kiarie wrote:
>
>
> On 25/02/16 18:43, Marcel Apfelbaum wrote:
>> On 02/21/2016 08:10 PM, David Kiarie wrote:
>>> Add AMD IOMMU emulaton to Qemu in addition to Intel IOMMU
>>> The IOMMU does basic translation, error checking and has a
>>> mininal IOTLB implementation
>>
>> Hi,
>>
>>>
>>> Signed-off-by: David Kiarie <davidkiarie4@gmail.com>
>>> ---
>

[...]

>>> +
>>> +static void amd_iommu_realize(PCIDevice *dev, Error **error)
>>> +{
>>> +    AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);
>>> +
>>> +    s->iotlb = g_hash_table_new_full(amd_iommu_uint64_hash,
>>> +                                     amd_iommu_uint64_equal, g_free, g_free);
>>> +
>>> +    s->capab_offset = pci_add_capability(dev, IOMMU_CAPAB_ID_SEC, 0,
>>> +                                         IOMMU_CAPAB_SIZE);
>>> +
>>> +    /* add msi and hypertransport capabilities */
>>> +    pci_add_capability(&s->dev, PCI_CAP_ID_MSI, 0, IOMMU_CAPAB_REG_SIZE);
>>> +    pci_add_capability(&s->dev, PCI_CAP_ID_HT, 0, IOMMU_CAPAB_REG_SIZE);
>>> +
>>> +    amd_iommu_init(s);
>>> +
>>> +    /* set up MMIO */
>>> +    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "mmio",
>>> +                          IOMMU_MMIO_SIZE);
>>> +
>>> +    if (s->mmio.addr == IOMMU_BASE_ADDR) {
>>
>> I don't understand why it is needed here. realize is called only once in the init process
>> and you set it a few lines below.
>>
>>> +        return;
>>> +    }
>>> +
>>> +    s->mmio.addr = IOMMU_BASE_ADDR;
>>> +    memory_region_add_subregion(get_system_memory(), IOMMU_BASE_ADDR, &s->mmio);
>>> +}
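
In other words, since realize runs only once, the tail above could presumably
just map the region unconditionally - a sketch only, reusing the names from the
patch:

    /* set up MMIO - realize is called once, no need to guard on mmio.addr */
    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "mmio",
                          IOMMU_MMIO_SIZE);
    memory_region_add_subregion(get_system_memory(), IOMMU_BASE_ADDR, &s->mmio);
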
>>> +
>>> +static const VMStateDescription vmstate_amd_iommu = {
>>> +    .name = "amd-iommu",
>>> +    .fields  = (VMStateField[]) {
>>> +        VMSTATE_PCI_DEVICE(dev, AMDIOMMUState),
>>> +        VMSTATE_END_OF_LIST()
>>> +    }
>>> +};
>>> +
>>> +static Property amd_iommu_properties[] = {
>>> +    DEFINE_PROP_UINT32("version", AMDIOMMUState, version, 2),
>>> +    DEFINE_PROP_END_OF_LIST(),
>>> +};
>>> +
>>> +static void amd_iommu_uninit(PCIDevice *dev)
>>> +{
>>> +    AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);
>>> +    amd_iommu_iotlb_reset(s);
>>
>> at this point you also need to clean up the memory regions you use.
>
> What exactly do you mean by clean up the memory regions?
>

You have a memory_region_add_subregion on realize, so you need
a matching memory_region_del_subregion on exit.
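
For instance, a minimal sketch of the exit path, reusing the fields from the
patch (an illustration only, not tested):

static void amd_iommu_uninit(PCIDevice *dev)
{
    AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);

    amd_iommu_iotlb_reset(s);
    /* undo the memory_region_add_subregion() done in realize */
    memory_region_del_subregion(get_system_memory(), &s->mmio);
}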

Thanks,
Marcel

>>
>>> +}
>>> +
>>> +static void amd_iommu_class_init(ObjectClass *klass, void* data)
>>> +{
>>> +    DeviceClass *dc = DEVICE_CLASS(klass);
>>> +    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
>>> +
>>> +    k->realize = amd_iommu_realize;
>>> +    k->exit = amd_iommu_uninit;
>>> +
>>> +    dc->reset = amd_iommu_reset;
>>> +    dc->vmsd = &vmstate_amd_iommu;
>>> +    dc->props = amd_iommu_properties;
>>> +}
>>> +
>>> +static const TypeInfo amd_iommu = {
>>> +    .name = TYPE_AMD_IOMMU_DEVICE,
>>> +    .parent = TYPE_PCI_DEVICE,
>>> +    .instance_size = sizeof(AMDIOMMUState),
>>> +    .class_init = amd_iommu_class_init
>>> +};
>>> +
>>> +static void amd_iommu_register_types(void)
>>> +{
>>> +    type_register_static(&amd_iommu);
>>> +}
>>> +
>>> +type_init(amd_iommu_register_types);

[...]
diff mbox

Patch

diff --git a/hw/i386/Makefile.objs b/hw/i386/Makefile.objs
index b52d5b8..2f1a265 100644
--- a/hw/i386/Makefile.objs
+++ b/hw/i386/Makefile.objs
@@ -3,6 +3,7 @@  obj-y += multiboot.o
 obj-y += pc.o pc_piix.o pc_q35.o
 obj-y += pc_sysfw.o
 obj-y += intel_iommu.o
+obj-y += amd_iommu.o
 obj-$(CONFIG_XEN) += ../xenpv/ xen/
 
 obj-y += kvmvapic.o
diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c
new file mode 100644
index 0000000..3dac043
--- /dev/null
+++ b/hw/i386/amd_iommu.c
@@ -0,0 +1,1432 @@ 
+/*
+ * QEMU emulation of AMD IOMMU (AMD-Vi)
+ *
+ * Copyright (C) 2011 Eduard - Gabriel Munteanu
+ * Copyright (C) 2015 David Kiarie, <davidkiarie4@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Cache implementation inspired by hw/i386/intel_iommu.c
+ *
+ */
+#include "hw/i386/amd_iommu.h"
+
+/*#define DEBUG_AMD_IOMMU*/
+#ifdef DEBUG_AMD_IOMMU
+enum {
+    DEBUG_GENERAL, DEBUG_CAPAB, DEBUG_MMIO, DEBUG_ELOG,
+    DEBUG_CACHE, DEBUG_COMMAND, DEBUG_MMU
+};
+
+#define IOMMU_DBGBIT(x)   (1 << DEBUG_##x)
+static int iommu_dbgflags = IOMMU_DBGBIT(MMIO);
+
+#define IOMMU_DPRINTF(what, fmt, ...) do { \
+    if (iommu_dbgflags & IOMMU_DBGBIT(what)) { \
+        fprintf(stderr, "(amd-iommu)%s: " fmt "\n", __func__, \
+                ## __VA_ARGS__); } \
+    } while (0)
+#else
+#define IOMMU_DPRINTF(what, fmt, ...) do {} while (0)
+#endif
+
+typedef struct AMDIOMMUAddressSpace {
+    uint8_t bus_num;            /* bus number                           */
+    uint8_t devfn;              /* device function                      */
+    AMDIOMMUState *iommu_state; /* IOMMU - one per machine              */
+    MemoryRegion iommu;         /* Device's iommu region                */
+    AddressSpace as;            /* device's corresponding address space */
+} AMDIOMMUAddressSpace;
+
+/* IOMMU cache entry */
+typedef struct IOMMUIOTLBEntry {
+    uint64_t gfn;
+    uint16_t domid;
+    uint64_t devid;
+    uint64_t perms;
+    uint64_t translated_addr;
+} IOMMUIOTLBEntry;
+
+/* configure MMIO registers at startup/reset */
+static void amd_iommu_set_quad(AMDIOMMUState *s, hwaddr addr, uint64_t val,
+                               uint64_t romask, uint64_t w1cmask)
+{
+    stq_le_p(&s->mmior[addr], val);
+    stq_le_p(&s->romask[addr], romask);
+    stq_le_p(&s->w1cmask[addr], w1cmask);
+}
+
+static uint16_t amd_iommu_readw(AMDIOMMUState *s, hwaddr addr)
+{
+    return lduw_le_p(&s->mmior[addr]);
+}
+
+static uint32_t amd_iommu_readl(AMDIOMMUState *s, hwaddr addr)
+{
+    return ldl_le_p(&s->mmior[addr]);
+}
+
+static uint64_t amd_iommu_readq(AMDIOMMUState *s, hwaddr addr)
+{
+    return ldq_le_p(&s->mmior[addr]);
+}
+
+/* internal write */
+static void amd_iommu_writeq_raw(AMDIOMMUState *s, uint64_t val, hwaddr addr)
+{
+    stq_le_p(&s->mmior[addr], val);
+}
+
+/* external write */
+static void amd_iommu_writew(AMDIOMMUState *s, hwaddr addr, uint16_t val)
+{
+    uint16_t romask = lduw_le_p(&s->romask[addr]);
+    uint16_t w1cmask = lduw_le_p(&s->w1cmask[addr]);
+    uint16_t oldval = lduw_le_p(&s->mmior[addr]);
+    stw_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask & oldval));
+}
+
+static void amd_iommu_writel(AMDIOMMUState *s, hwaddr addr, uint32_t val)
+{
+    uint32_t romask = ldl_le_p(&s->romask[addr]);
+    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
+    uint32_t oldval = ldl_le_p(&s->mmior[addr]);
+    stl_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask & oldval));
+}
+
+static void amd_iommu_writeq(AMDIOMMUState *s, hwaddr addr, uint64_t val)
+{
+    uint64_t romask = ldq_le_p(&s->romask[addr]);
+    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
+    uint64_t oldval = ldq_le_p(&s->mmior[addr]);
+    stq_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask & oldval));
+}
+
+static void amd_iommu_log_event(AMDIOMMUState *s, uint16_t *evt)
+{
+    /* event logging not enabled or event log already overflowed */
+    if (!s->evtlog_enabled || (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS] &
+        IOMMU_MMIO_STATUS_EVT_OVF)) {
+        return;
+    }
+
+    /* event log buffer full */
+    if (s->evtlog_tail >= s->evtlog_len) {
+        *(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS] |= IOMMU_MMIO_STATUS_EVT_OVF;
+        /* generate interrupt */
+        msi_notify(&s->dev, 0);
+    }
+
+    if (dma_memory_write(&address_space_memory, s->evtlog + s->evtlog_tail,
+       evt, IOMMU_EVENT_LEN)) {
+        IOMMU_DPRINTF(ELOG, "error: fail to write at address 0x%"PRIx64
+                      " + offset 0x%"PRIx32, s->evtlog, s->evtlog_tail);
+    }
+
+     s->evtlog_tail += IOMMU_EVENT_LEN;
+     *(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS] |= IOMMU_MMIO_STATUS_COMP_INT;
+}
+
+/* log an error encountered page-walking
+ *
+ * @addr: virtual address in translation request
+ */
+static void amd_iommu_page_fault(AMDIOMMUState *s, uint16_t devid,
+                                 dma_addr_t addr, uint16_t info)
+{
+    IOMMU_DPRINTF(ELOG, "");
+
+    uint16_t evt[8];
+
+    info |= IOMMU_EVENT_IOPF_I;
+
+    /* encode information */
+    *(uint16_t *)&evt[0] = devid;
+    *(uint16_t *)&evt[3] = info;
+    *(uint64_t *)&evt[4] = cpu_to_le64(addr);
+
+    /* log a page fault */
+    amd_iommu_log_event(s, evt);
+
+    /* Abort the translation */
+    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
+            PCI_STATUS_SIG_TARGET_ABORT);
+}
+/*
+ * log a master abort accessing device table
+ *  @devtab : address of device table entry
+ *  @info : error flags
+ */
+static void amd_iommu_log_devtab_error(AMDIOMMUState *s, uint16_t devid,
+                                       dma_addr_t devtab, uint16_t info)
+{
+
+    IOMMU_DPRINTF(ELOG, "");
+
+    uint16_t evt[8];
+
+    info |= IOMMU_EVENT_DEV_TAB_HW_ERROR;
+
+    /* encode information */
+    *(uint16_t *)&evt[0] = devid;
+    *(uint8_t *)&evt[3]  = info;
+    *(uint64_t *)&evt[4] = cpu_to_le64(devtab);
+
+    amd_iommu_log_event(s, evt);
+
+    /* Abort the translation */
+    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
+            PCI_STATUS_SIG_TARGET_ABORT);
+
+}
+
+/* log a master abort encountered during a page-walk
+ *  @addr : address that couldn't be accessed
+ */
+static void amd_iommu_log_pagetab_error(AMDIOMMUState *s, uint16_t devid,
+                                        dma_addr_t addr, uint16_t info)
+{
+    IOMMU_DPRINTF(ELOG, "");
+
+    uint16_t evt[8];
+
+    info |= IOMMU_EVENT_PAGE_TAB_HW_ERROR;
+
+    /* encode information */
+    *(uint16_t *)&evt[0] = devid;
+    *(uint8_t *)&evt[3]  = info;
+    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
+
+    amd_iommu_log_event(s, evt);
+
+    /* Abort the translation */
+    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
+            PCI_STATUS_SIG_TARGET_ABORT);
+
+}
+
+/* log an event trying to access command buffer
+ *   @addr : address that couldn't be accessed
+ */
+static void amd_iommu_log_command_error(AMDIOMMUState *s, dma_addr_t addr)
+{
+    IOMMU_DPRINTF(ELOG, "");
+
+    uint16_t evt[8];
+
+    /* encode information */
+    *(uint8_t *)&evt[3]  = (uint8_t)IOMMU_EVENT_COMMAND_HW_ERROR;
+    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
+
+    amd_iommu_log_event(s, evt);
+
+    /* Abort the translation */
+    pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
+            PCI_STATUS_SIG_TARGET_ABORT);
+}
+
+/* log an illegal command event
+ *   @addr : address of illegal command
+ */
+static void amd_iommu_log_illegalcom_error(AMDIOMMUState *s, uint16_t info,
+                                           dma_addr_t addr)
+{
+    IOMMU_DPRINTF(ELOG, "");
+
+    uint16_t evt[8];
+
+    /* encode information */
+    *(uint8_t *)&evt[3]  = (uint8_t)IOMMU_EVENT_ILLEGAL_COMMAND_ERROR;
+    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
+
+    amd_iommu_log_event(s, evt);
+}
+
+/* log an error accessing device table
+ *
+ *  @devid : device owning the table entry
+ *  @devtab : address of device table entry
+ *  @info : error flags
+ */
+static void amd_iommu_log_illegaldevtab_error(AMDIOMMUState *s, uint16_t devid,
+                                              dma_addr_t addr, uint16_t info)
+{
+    IOMMU_DPRINTF(ELOG, "");
+
+    uint16_t evt[8];
+
+    info |= IOMMU_EVENT_ILLEGAL_DEVTAB_ENTRY;
+
+    *(uint16_t *)&evt[0] = devid;
+    *(uint8_t *)&evt[3]  = info;
+    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
+
+    amd_iommu_log_event(s, evt);
+}
+
+static gboolean amd_iommu_uint64_equal(gconstpointer v1, gconstpointer v2)
+{
+    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
+}
+
+static guint amd_iommu_uint64_hash(gconstpointer v)
+{
+    return (guint)*(const uint64_t *)v;
+}
+
+static IOMMUIOTLBEntry *amd_iommu_iotlb_lookup(AMDIOMMUState *s, hwaddr addr,
+                                               uint64_t devid)
+{
+    uint64_t key = (addr >> IOMMU_PAGE_SHIFT_4K) |
+                   ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
+    return g_hash_table_lookup(s->iotlb, &key);
+}
+
+static void amd_iommu_iotlb_reset(AMDIOMMUState *s)
+{
+    assert(s->iotlb);
+    g_hash_table_remove_all(s->iotlb);
+}
+
+static gboolean amd_iommu_iotlb_remove_by_devid(gpointer key, gpointer value,
+                                                gpointer user_data)
+{
+    IOMMUIOTLBEntry *entry = (IOMMUIOTLBEntry *)value;
+    uint16_t devid = *(uint16_t *)user_data;
+    return entry->devid == devid;
+}
+
+static void amd_iommu_iotlb_remove_page(AMDIOMMUState *s, hwaddr addr,
+                                        uint64_t devid)
+{
+    uint64_t key = (addr >> IOMMU_PAGE_SHIFT_4K) |
+                   ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
+    g_hash_table_remove(s->iotlb, &key);
+}
+
+/* extract device id */
+static inline uint16_t devid_extract(uint8_t *cmd)
+{
+    return (uint16_t)cmd[2] & IOMMU_INVAL_DEV_ID_MASK;
+}
+
+static void amd_iommu_invalidate_iotlb(AMDIOMMUState *s, uint64_t *cmd)
+{
+    uint16_t devid = devid_extract((uint8_t *)cmd);
+    /* if invalidation of more than one page requested */
+    if (IOMMU_INVAL_ALL(cmd[0])) {
+        g_hash_table_foreach_remove(s->iotlb, amd_iommu_iotlb_remove_by_devid,
+                                    &devid);
+    } else {
+        hwaddr addr = (hwaddr)(cmd[1] & IOMMU_INVAL_ADDR_MASK);
+        amd_iommu_iotlb_remove_page(s, addr, devid);
+    }
+}
+
+static void amd_iommu_update_iotlb(AMDIOMMUState *s, uint16_t devid,
+                                   uint64_t gpa, uint64_t spa, uint64_t perms,
+                                   uint16_t domid)
+{
+    IOMMUIOTLBEntry *entry = g_malloc(sizeof(*entry));
+    uint64_t *key = g_malloc(sizeof(*key));
+    uint64_t gfn = gpa >> IOMMU_PAGE_SHIFT_4K;
+
+    IOMMU_DPRINTF(CACHE, " update iotlb devid: %02x:%02x.%x gpa 0x%"PRIx64
+                  " hpa 0x%"PRIx64, PCI_BUS_NUM(devid), PCI_SLOT(devid),
+                  PCI_FUNC(devid), gpa, spa);
+
+    if (g_hash_table_size(s->iotlb) >= IOMMU_IOTLB_MAX_SIZE) {
+        IOMMU_DPRINTF(CACHE, "iotlb exceeds size limit - reset");
+        amd_iommu_iotlb_reset(s);
+    }
+
+    entry->gfn = gfn;
+    entry->domid = domid;
+    entry->perms = perms;
+    entry->translated_addr = spa;
+    *key = gfn | ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
+    g_hash_table_replace(s->iotlb, key, entry);
+}
+
+/* execute a completion wait command */
+static void amd_iommu_completion_wait(AMDIOMMUState *s, uint8_t *cmd)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+    uint64_t addr;
+
+    /* completion store */
+    if (cmd[0] & IOMMU_COM_COMPLETION_STORE_MASK) {
+        addr = le64_to_cpu(*(uint64_t *)cmd) & IOMMU_COM_STORE_ADDRESS_MASK;
+        if (dma_memory_write(&address_space_memory, addr, cmd + 8, 8)) {
+            IOMMU_DPRINTF(ELOG, "error: fail to write at address 0%x"PRIx64,
+                          addr);
+        }
+    }
+
+    /* set completion interrupt */
+    if (cmd[0] & IOMMU_COM_COMPLETION_INTR) {
+        s->mmior[IOMMU_MMIO_STATUS] |= IOMMU_MMIO_STATUS_COMP_INT;
+    }
+}
+
+/* get command type */
+static uint8_t opcode(uint8_t *cmd)
+{
+    return cmd[IOMMU_CMDBUF_ID_BYTE] >> IOMMU_CMDBUF_ID_RSHIFT;
+}
+
+/* Linux seems to be using reserved bits so I just log without aborting */
+static void iommu_inval_devtab_entry(AMDIOMMUState *s, uint8_t *cmd,
+                                     uint8_t type)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+
+    /* This command should invalidate internal caches, of which there are none here */
+    if (*(uint64_t *)&cmd[0] & IOMMU_CMD_INVAL_DEV_RSVD ||
+            *(uint64_t *)&cmd[1]) {
+        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
+    }
+#ifdef DEBUG_AMD_IOMMU
+    uint16_t devid = devid_extract(cmd);
+#endif
+    IOMMU_DPRINTF(COMMAND, "device table entry for devid: %02x:%02x.%x"
+                  "invalidated", PCI_BUS_NUM(devid), PCI_SLOT(devid),
+                  PCI_FUNC(devid));
+}
+
+static void iommu_completion_wait(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+
+    if (*(uint32_t *)&cmd[1] & IOMMU_COMPLETION_WAIT_RSVD) {
+        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
+    }
+    /* pretend to wait for command execution to complete */
+    IOMMU_DPRINTF(COMMAND, "completion wait requested with store address 0x%"
+                  PRIx64 " and store data 0x%"PRIx64, (cmd[0] &
+                  IOMMU_COM_STORE_ADDRESS_MASK), *(uint64_t *)(cmd + 8));
+    amd_iommu_completion_wait(s, cmd);
+}
+
+static void iommu_complete_ppr(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+
+    if ((*(uint64_t *)&cmd[0] & IOMMU_COMPLETE_PPR_RQ_RSVD) ||
+       *(uint64_t *)&cmd[1] & 0xffff000000000000) {
+        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
+    }
+
+    IOMMU_DPRINTF(COMMAND, "Execution of PPR queue requested");
+}
+
+static void iommu_inval_all(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+
+    if ((*(uint64_t *)&cmd[0] & IOMMU_INVAL_IOMMU_ALL_RSVD) ||
+       *(uint64_t *)&cmd[1]) {
+        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
+    }
+
+    amd_iommu_iotlb_reset(s);
+    IOMMU_DPRINTF(COMMAND, "Invalidation of all IOMMU cache requested");
+}
+
+static inline uint16_t domid_extract(uint64_t *cmd)
+{
+    return (uint16_t)cmd[0] & IOMMU_INVAL_PAGES_DOMID;
+}
+
+static gboolean amd_iommu_iotlb_remove_by_domid(gpointer key, gpointer value,
+                                                gpointer user_data)
+{
+    IOMMUIOTLBEntry *entry = (IOMMUIOTLBEntry *)value;
+    uint16_t domid = *(uint16_t *)user_data;
+    return entry->domid == domid;
+}
+
+/* we don't have devid - we can't remove pages by address */
+static void iommu_inval_pages(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+    uint16_t domid = domid_extract((uint64_t *)cmd);
+
+    if (*(uint64_t *)&cmd[0] & IOMMU_INVAL_IOMMU_PAGES_RSVD ||
+       *(uint32_t *)&cmd[1] & 0x00000ff0) {
+        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
+    }
+
+    g_hash_table_foreach_remove(s->iotlb, amd_iommu_iotlb_remove_by_domid,
+                                &domid);
+
+    IOMMU_DPRINTF(COMMAND, "IOMMU pages for domain 0x%"PRIx16 "invalidated",
+                  domid);
+}
+
+static void iommu_prefetch_pages(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+
+    if ((*(uint64_t *)&cmd[0] & IOMMU_PRF_IOMMU_PAGES_RSVD) ||
+       (*(uint32_t *)&cmd[1] & 0x00000fd4)) {
+        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
+    }
+
+    IOMMU_DPRINTF(COMMAND, "Pre-fetch of IOMMU pages requested");
+}
+
+static void iommu_inval_inttable(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+
+    if ((*(uint64_t *)&cmd[0] & IOMMU_INVAL_INTR_TABLE_RSVD) ||
+       *(uint64_t *)&cmd[1]) {
+        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
+        return;
+    }
+
+    IOMMU_DPRINTF(COMMAND, "interrupt table invalidated");
+}
+
+static void iommu_inval_iotlb(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+
+    if (*(uint32_t *)&cmd[2] & IOMMU_INVAL_IOTLB_PAGES_RSVD) {
+        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
+        return;
+    }
+
+    amd_iommu_invalidate_iotlb(s, (uint64_t *)cmd);
+    IOMMU_DPRINTF(COMMAND, "IOTLB pages invalidated");
+}
+
+/* not honouring reserved bits is regarded as an illegal command */
+static void amd_iommu_cmdbuf_exec(AMDIOMMUState *s)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+
+    uint8_t type;
+    uint8_t cmd[IOMMU_COMMAND_SIZE];
+
+    memset(cmd, 0, IOMMU_COMMAND_SIZE);
+
+    if (dma_memory_read(&address_space_memory, s->cmdbuf + s->cmdbuf_head, cmd,
+       IOMMU_COMMAND_SIZE)) {
+        IOMMU_DPRINTF(COMMAND, "error: fail to access memory at 0x%"PRIx64
+                      " + 0x%"PRIu8, s->cmdbuf, s->cmdbuf_head);
+        amd_iommu_log_command_error(s, s->cmdbuf + s->cmdbuf_head);
+        return;
+    }
+
+    type = opcode(cmd);
+
+    switch (type) {
+    case IOMMU_CMD_COMPLETION_WAIT:
+        iommu_completion_wait(s, cmd, type);
+        break;
+
+    case IOMMU_CMD_INVAL_DEVTAB_ENTRY:
+        iommu_inval_devtab_entry(s, cmd, type);
+        break;
+
+    case IOMMU_CMD_INVAL_IOMMU_PAGES:
+        iommu_inval_pages(s, cmd, type);
+        break;
+
+    case IOMMU_CMD_INVAL_IOTLB_PAGES:
+        iommu_inval_iotlb(s, cmd, type);
+        break;
+
+    case IOMMU_CMD_INVAL_INTR_TABLE:
+        iommu_inval_inttable(s, cmd, type);
+        break;
+
+    case IOMMU_CMD_PREFETCH_IOMMU_PAGES:
+        iommu_prefetch_pages(s, cmd, type);
+        break;
+
+    case IOMMU_CMD_COMPLETE_PPR_REQUEST:
+        iommu_complete_ppr(s, cmd, type);
+        break;
+
+    case IOMMU_CMD_INVAL_IOMMU_ALL:
+        iommu_inval_all(s, cmd, type);
+        break;
+
+    default:
+        IOMMU_DPRINTF(COMMAND, "unhandled command %d", type);
+        /* log illegal command */
+        amd_iommu_log_illegalcom_error(s, type,
+                                       s->cmdbuf + s->cmdbuf_head);
+        break;
+    }
+
+}
+
+static void amd_iommu_cmdbuf_run(AMDIOMMUState *s)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+
+    uint64_t *mmio_cmdbuf_head = (uint64_t *)(s->mmior +
+                                 IOMMU_MMIO_COMMAND_HEAD);
+
+    if (!s->cmdbuf_enabled) {
+        IOMMU_DPRINTF(COMMAND, "error: IOMMU trying to execute commands with "
+                      "command buffer disabled. IOMMU control value 0x%"PRIx64,
+                      amd_iommu_readq(s, IOMMU_MMIO_CONTROL));
+        return;
+    }
+
+    while (s->cmdbuf_head != s->cmdbuf_tail) {
+        /* check if there is work to do. */
+        IOMMU_DPRINTF(COMMAND, "command buffer head at 0x%"PRIx32 " command "
+                      "buffer tail at 0x%"PRIx32" command buffer base at 0x%"
+                      PRIx64, s->cmdbuf_head, s->cmdbuf_tail, s->cmdbuf);
+         amd_iommu_cmdbuf_exec(s);
+         s->cmdbuf_head += IOMMU_COMMAND_SIZE;
+         amd_iommu_writeq_raw(s, s->cmdbuf_head, IOMMU_MMIO_COMMAND_HEAD);
+
+        /* wrap head pointer */
+        if (s->cmdbuf_head >= s->cmdbuf_len * IOMMU_COMMAND_SIZE) {
+            s->cmdbuf_head = 0;
+        }
+    }
+
+    *mmio_cmdbuf_head = cpu_to_le64(s->cmdbuf_head);
+}
+
+/* System Software might never read some of these fields, but implement reads anyway */
+static uint64_t amd_iommu_mmio_read(void *opaque, hwaddr addr, unsigned size)
+{
+    AMDIOMMUState *s = opaque;
+
+    uint64_t val = -1;
+    if (addr + size > IOMMU_MMIO_SIZE) {
+        IOMMU_DPRINTF(MMIO, "error: addr outside region: max 0x%"PRIX64
+                      ", got 0x%"PRIx64 " %d", (uint64_t)IOMMU_MMIO_SIZE, addr,
+                      size);
+        return (uint64_t)-1;
+    }
+
+    if (size == 2) {
+        val = amd_iommu_readw(s, addr);
+    } else if (size == 4) {
+        val = amd_iommu_readl(s, addr);
+    } else if (size == 8) {
+        val = amd_iommu_readq(s, addr);
+    }
+
+    switch (addr & ~0x07) {
+    case IOMMU_MMIO_DEVICE_TABLE:
+        IOMMU_DPRINTF(MMIO, "MMIO_DEVICE_TABLE read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                       addr & ~0x07);
+        break;
+
+    case IOMMU_MMIO_COMMAND_BASE:
+        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_BASE read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                      addr & ~0x07);
+        break;
+
+    case IOMMU_MMIO_EVENT_BASE:
+        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_BASE read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                      addr & ~0x07);
+        break;
+
+    case IOMMU_MMIO_CONTROL:
+        IOMMU_DPRINTF(MMIO, "MMIO_CONTROL read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                       addr & ~0x07);
+        break;
+
+    case IOMMU_MMIO_EXCL_BASE:
+        IOMMU_DPRINTF(MMIO, "MMIO_EXCL_BASE read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                      addr & ~0x07);
+        break;
+
+    case IOMMU_MMIO_EXCL_LIMIT:
+        IOMMU_DPRINTF(MMIO, "MMIO_EXCL_LIMIT read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                      addr & ~0x07);
+        break;
+
+    case IOMMU_MMIO_COMMAND_HEAD:
+        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_HEAD read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                      addr & ~0x07);
+        break;
+
+    case IOMMU_MMIO_COMMAND_TAIL:
+        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_TAIL read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                      addr & ~0x07);
+        break;
+
+    case IOMMU_MMIO_EVENT_HEAD:
+        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_HEAD read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                      addr & ~0x07);
+        break;
+
+    case IOMMU_MMIO_EVENT_TAIL:
+        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_TAIL read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                      addr & ~0x07);
+        break;
+
+    case IOMMU_MMIO_STATUS:
+        IOMMU_DPRINTF(MMIO, "MMIO_STATUS read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                      addr & ~0x07);
+        break;
+
+    case IOMMU_MMIO_EXT_FEATURES:
+        IOMMU_DPRINTF(MMU, "MMIO_EXT_FEATURES read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64 "value 0x%"PRIx64,
+                      addr, size, addr & ~0x07, val);
+        break;
+
+    default:
+        IOMMU_DPRINTF(MMIO, "UNHANDLED MMIO read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                       addr & ~0x07);
+    }
+    return val;
+}
+
+static void iommu_handle_control_write(AMDIOMMUState *s)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+    /*
+     * read whatever is already written in case
+     * software is writing in chunks of less than 8 bytes
+     */
+    unsigned long control = amd_iommu_readq(s, IOMMU_MMIO_CONTROL);
+    s->enabled = !!(control & IOMMU_MMIO_CONTROL_IOMMUEN);
+
+    s->ats_enabled = !!(control & IOMMU_MMIO_CONTROL_HTTUNEN);
+    s->evtlog_enabled = s->enabled && !!(control &
+                        IOMMU_MMIO_CONTROL_EVENTLOGEN);
+
+    s->evtlog_intr = !!(control & IOMMU_MMIO_CONTROL_EVENTINTEN);
+    s->completion_wait_intr = !!(control & IOMMU_MMIO_CONTROL_COMWAITINTEN);
+    s->cmdbuf_enabled = s->enabled && !!(control &
+                        IOMMU_MMIO_CONTROL_CMDBUFLEN);
+
+    /* update the flags depending on the control register */
+    if (s->cmdbuf_enabled) {
+        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) |=
+            IOMMU_MMIO_STATUS_CMDBUF_RUN;
+    } else {
+        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) &=
+            ~IOMMU_MMIO_STATUS_CMDBUF_RUN;
+    }
+    if (s->evtlog_enabled) {
+        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) |=
+            IOMMU_MMIO_STATUS_EVT_RUN;
+    } else {
+        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) &=
+            ~IOMMU_MMIO_STATUS_EVT_RUN;
+    }
+
+    IOMMU_DPRINTF(COMMAND, "MMIO_STATUS state 0x%"PRIx64, control);
+
+    amd_iommu_cmdbuf_run(s);
+}
+
+static inline void iommu_handle_devtab_write(AMDIOMMUState *s)
+
+{
+    IOMMU_DPRINTF(COMMAND, "");
+
+    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_DEVICE_TABLE);
+    s->devtab = (dma_addr_t)(val & IOMMU_MMIO_DEVTAB_BASE_MASK);
+
+    /* set device table length */
+    s->devtab_len = ((val & IOMMU_MMIO_DEVTAB_SIZE_MASK) + 1 *
+                    (IOMMU_MMIO_DEVTAB_SIZE_UNIT /
+                     IOMMU_MMIO_DEVTAB_ENTRY_SIZE));
+}
+
+static inline void iommu_handle_cmdhead_write(AMDIOMMUState *s)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+
+    s->cmdbuf_head = (dma_addr_t)amd_iommu_readq(s, IOMMU_MMIO_COMMAND_HEAD)
+                     & IOMMU_MMIO_CMDBUF_HEAD_MASK;
+    amd_iommu_cmdbuf_run(s);
+}
+
+static inline void iommu_handle_cmdbase_write(AMDIOMMUState *s)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+
+    s->cmdbuf = (dma_addr_t)amd_iommu_readq(s, IOMMU_MMIO_COMMAND_BASE)
+                & IOMMU_MMIO_CMDBUF_BASE_MASK;
+    s->cmdbuf_len = 1UL << (s->mmior[IOMMU_MMIO_CMDBUF_SIZE_BYTE]
+                    & IOMMU_MMIO_CMDBUF_SIZE_MASK);
+    s->cmdbuf_head = s->cmdbuf_tail = 0;
+
+}
+
+static inline void iommu_handle_cmdtail_write(AMDIOMMUState *s)
+{
+    s->cmdbuf_tail = amd_iommu_readq(s, IOMMU_MMIO_COMMAND_TAIL)
+                     & IOMMU_MMIO_CMDBUF_TAIL_MASK;
+    amd_iommu_cmdbuf_run(s);
+}
+
+static inline void iommu_handle_excllim_write(AMDIOMMUState *s)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+
+    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EXCL_LIMIT);
+    s->excl_limit = (val & IOMMU_MMIO_EXCL_LIMIT_MASK) |
+                    IOMMU_MMIO_EXCL_LIMIT_LOW;
+}
+
+static inline void iommu_handle_evtbase_write(AMDIOMMUState *s)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+
+    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EVENT_BASE);
+    s->evtlog = val & IOMMU_MMIO_EVTLOG_BASE_MASK;
+    s->evtlog_len = 1UL << (*(uint64_t *)&s->mmior[IOMMU_MMIO_EVTLOG_SIZE_BYTE]
+                    & IOMMU_MMIO_EVTLOG_SIZE_MASK);
+}
+
+static inline void iommu_handle_evttail_write(AMDIOMMUState *s)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+
+    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EVENT_TAIL);
+    s->evtlog_tail = val & IOMMU_MMIO_EVTLOG_TAIL_MASK;
+}
+
+static inline void iommu_handle_evthead_write(AMDIOMMUState *s)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+
+    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EVENT_HEAD);
+    s->evtlog_head = val & IOMMU_MMIO_EVTLOG_HEAD_MASK;
+}
+
+static inline void iommu_handle_pprbase_write(AMDIOMMUState *s)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+
+    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_PPR_BASE);
+    s->ppr_log = val & IOMMU_MMIO_PPRLOG_BASE_MASK;
+    s->pprlog_len = 1UL << (*(uint64_t *)&s->mmior[IOMMU_MMIO_PPRLOG_SIZE_BYTE]
+                    & IOMMU_MMIO_PPRLOG_SIZE_MASK);
+}
+
+static inline void iommu_handle_pprhead_write(AMDIOMMUState *s)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+
+    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_PPR_HEAD);
+    s->pprlog_head = val & IOMMU_MMIO_PPRLOG_HEAD_MASK;
+}
+
+static inline void iommu_handle_pprtail_write(AMDIOMMUState *s)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+
+    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_PPR_TAIL);
+    s->pprlog_tail = val & IOMMU_MMIO_PPRLOG_TAIL_MASK;
+}
+
+/* FIXME: something might go wrong if System Software writes in chunks
+ * of one byte. Linux writes in chunks of 4 bytes, so currently this
+ * works correctly with Linux but will definitely break if software
+ * reads/writes 8 bytes
+ */
+static void amd_iommu_mmio_write(void *opaque, hwaddr addr, uint64_t val,
+                                 unsigned size)
+{
+
+    IOMMU_DPRINTF(COMMAND, "");
+
+    AMDIOMMUState *s = opaque;
+    unsigned long offset = addr & 0x07;
+
+    if (addr + size > IOMMU_MMIO_SIZE) {
+        IOMMU_DPRINTF(MMIO, "error: addr outside region: max 0x%"PRIx64
+                      ", got 0x%"PRIx64 " %d", (uint64_t)IOMMU_MMIO_SIZE, addr,
+                      size);
+        return;
+    }
+
+    switch (addr & ~0x07) {
+    case IOMMU_MMIO_CONTROL:
+        if (size == 2) {
+            amd_iommu_writew(s, addr, val);
+        } else if (size == 4) {
+            amd_iommu_writel(s, addr,  val);
+        } else if (size == 8) {
+            amd_iommu_writeq(s, addr, val);
+        }
+
+        IOMMU_DPRINTF(COMMAND, "MMIO_CONTROL write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+        iommu_handle_control_write(s);
+        break;
+
+    case IOMMU_MMIO_DEVICE_TABLE:
+        IOMMU_DPRINTF(MMIO, "MMIO_DEVICE_TABLE write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+        if (size == 2) {
+            amd_iommu_writew(s, addr, val);
+        } else if (size == 4) {
+            amd_iommu_writel(s, addr, val);
+        } else if (size == 8) {
+            amd_iommu_writeq(s, addr, val);
+        }
+
+       /*  set device table address
+        *   This also suffers from inability to tell whether software
+        *   is done writing
+        */
+
+        if (offset || (size == 8)) {
+            iommu_handle_devtab_write(s);
+        }
+        break;
+
+    case IOMMU_MMIO_COMMAND_HEAD:
+        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_HEAD write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+        if (size == 2) {
+            amd_iommu_writew(s, addr, val);
+        } else if (size == 4) {
+            amd_iommu_writel(s, addr, val);
+        } else if (size == 8) {
+            amd_iommu_writeq(s, addr, val);
+        }
+
+        iommu_handle_cmdhead_write(s);
+        break;
+
+    case IOMMU_MMIO_COMMAND_BASE:
+        IOMMU_DPRINTF(COMMAND, "MMIO_COMMAND_BASE write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+        if (size == 2) {
+            amd_iommu_writew(s, addr, val);
+        } else if (size == 4) {
+            amd_iommu_writel(s, addr, val);
+        } else if (size == 8) {
+            amd_iommu_writeq(s, addr, val);
+        }
+
+        /* FIXME - make sure System Software has finished writing, in case
+         * it writes in chunks of less than 8 bytes, in a robust way. For
+         * now, this hack works for the Linux driver
+         */
+        if (offset || (size == 8)) {
+            iommu_handle_cmdbase_write(s);
+        }
+        break;
+
+    case IOMMU_MMIO_COMMAND_TAIL:
+        IOMMU_DPRINTF(COMMAND, "MMIO_COMMAND_TAIL write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+        if (size == 2) {
+            amd_iommu_writew(s, addr, val);
+        } else if (size == 4) {
+            amd_iommu_writel(s, addr, val);
+        } else if (size == 8) {
+            amd_iommu_writeq(s, addr, val);
+        }
+        iommu_handle_cmdtail_write(s);
+        break;
+
+    case IOMMU_MMIO_EVENT_BASE:
+        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_BASE write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+        if (size == 2) {
+            amd_iommu_writew(s, addr, val);
+        } else if (size == 4) {
+            amd_iommu_writel(s, addr, val);
+        } else if (size == 8) {
+            amd_iommu_writeq(s, addr, val);
+        }
+        iommu_handle_evtbase_write(s);
+        break;
+
+    case IOMMU_MMIO_EVENT_HEAD:
+        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_HEAD write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+        if (size == 2) {
+            amd_iommu_writew(s, addr, val);
+        } else if (size == 4) {
+            amd_iommu_writel(s, addr, val);
+        } else if (size == 8) {
+            amd_iommu_writeq(s, addr, val);
+        }
+        iommu_handle_evthead_write(s);
+        break;
+
+    case IOMMU_MMIO_EVENT_TAIL:
+        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_TAIL write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+        if (size == 2) {
+            amd_iommu_writew(s, addr, val);
+        } else if (size == 4) {
+            amd_iommu_writel(s, addr, val);
+        } else if (size == 8) {
+            amd_iommu_writeq(s, addr, val);
+        }
+        iommu_handle_evttail_write(s);
+        break;
+
+    case IOMMU_MMIO_EXCL_LIMIT:
+        IOMMU_DPRINTF(MMIO, "MMIO_EXCL_LIMIT write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+        if (size == 2) {
+            amd_iommu_writew(s, addr, val);
+        } else if (size == 4) {
+            amd_iommu_writel(s, addr, val);
+        } else if (size == 8) {
+            amd_iommu_writeq(s, addr, val);
+        }
+        iommu_handle_excllim_write(s);
+        break;
+
+        /* PPR log base - unused for now */
+    case IOMMU_MMIO_PPR_BASE:
+        IOMMU_DPRINTF(MMIO, "MMIO_PPR_BASE write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+        if (size == 2) {
+            amd_iommu_writew(s, addr, val);
+        } else if (size == 4) {
+            amd_iommu_writel(s, addr, val);
+        } else if (size == 8) {
+            amd_iommu_writeq(s, addr, val);
+        }
+        iommu_handle_pprbase_write(s);
+        break;
+        /* PPR log head - also unused for now */
+    case IOMMU_MMIO_PPR_HEAD:
+        IOMMU_DPRINTF(MMIO, "MMIO_PPR_HEAD write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                       addr, size, val, offset);
+        if (size == 2) {
+            amd_iommu_writew(s, addr, val);
+        } else if (size == 4) {
+            amd_iommu_writel(s, addr, val);
+        } else if (size == 8) {
+            amd_iommu_writeq(s, addr, val);
+        }
+        iommu_handle_pprhead_write(s);
+        break;
+        /* PPR log tail - unused for now */
+    case IOMMU_MMIO_PPR_TAIL:
+        IOMMU_DPRINTF(MMIO, "MMIO_PPR_TAIL write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+        if (size == 2) {
+            amd_iommu_writew(s, addr, val);
+        } else if (size == 4) {
+            amd_iommu_writel(s, addr, val);
+        } else if (size == 8) {
+            amd_iommu_writeq(s, addr, val);
+        }
+        iommu_handle_pprtail_write(s);
+        break;
+
+        /* ignore write to ext_features */
+    default:
+        IOMMU_DPRINTF(MMIO, "UNHANDLED MMIO write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+    }
+
+}
+
+static inline uint64_t amd_iommu_get_perms(uint64_t entry)
+{
+    return (entry & (IOMMU_DEV_PERM_READ | IOMMU_DEV_PERM_WRITE)) >>
+           IOMMU_DEV_PERM_SHIFT;
+}
+
+AddressSpace *bridge_host_amd_iommu(PCIBus *bus, void *opaque, int devfn)
+{
+    AMDIOMMUState *s = opaque;
+    AMDIOMMUAddressSpace **iommu_as;
+    int bus_num = pci_bus_num(bus);
+
+    /* just in case */
+    assert(0 <= bus_num && bus_num <= PCI_BUS_MAX);
+    assert(0 <= devfn && devfn <= PCI_DEVFN_MAX);
+
+    iommu_as = s->address_spaces[bus_num];
+
+    /* allocate memory during the first run */
+    if (!iommu_as) {
+        iommu_as = g_malloc0(sizeof(AMDIOMMUAddressSpace *) * PCI_DEVFN_MAX);
+        s->address_spaces[bus_num] = iommu_as;
+    }
+
+    /* set up IOMMU region */
+    if (!iommu_as[devfn]) {
+        iommu_as[devfn] = g_malloc0(sizeof(AMDIOMMUAddressSpace));
+        iommu_as[devfn]->bus_num = (uint8_t)bus_num;
+        iommu_as[devfn]->devfn = (uint8_t)devfn;
+        iommu_as[devfn]->iommu_state = s;
+
+        memory_region_init_iommu(&iommu_as[devfn]->iommu, OBJECT(s),
+                                 &s->iommu_ops, "amd-iommu", UINT64_MAX);
+        address_space_init(&iommu_as[devfn]->as, &iommu_as[devfn]->iommu,
+                           "amd-iommu");
+    }
+    return &iommu_as[devfn]->as;
+}
+
+/* validate a page table entry */
+static bool amd_iommu_validate_dte(AMDIOMMUState *s, uint16_t devid,
+                                   uint64_t *dte)
+{
+    if ((dte[0] & IOMMU_DTE_LOWER_QUAD_RESERVED)
+        || (dte[1] & IOMMU_DTE_MIDDLE_QUAD_RESERVED)
+        || (dte[2] & IOMMU_DTE_UPPER_QUAD_RESERVED) || dte[3]) {
+        amd_iommu_log_illegaldevtab_error(s, devid,
+                                s->devtab + devid * IOMMU_DEVTAB_ENTRY_SIZE, 0);
+        return false;
+    }
+
+    return dte[0] & IOMMU_DEV_VALID && (dte[0] & IOMMU_DEV_TRANSLATION_VALID)
+           && (dte[0] & IOMMU_DEV_PT_ROOT_MASK);
+}
+
+/* get a device table entry given the devid */
+static bool amd_iommu_get_dte(AMDIOMMUState *s, int devid, uint64_t *entry)
+{
+    uint32_t offset = devid * IOMMU_DEVTAB_ENTRY_SIZE;
+
+    IOMMU_DPRINTF(MMU, "Device Table at 0x%"PRIx64, s->devtab);
+
+    if (dma_memory_read(&address_space_memory, s->devtab + offset, entry,
+                        IOMMU_DEVTAB_ENTRY_SIZE)) {
+        IOMMU_DPRINTF(MMU, "error: fail to access Device Entry devtab 0x%"PRIx64
+                      "offset 0x%"PRIx32, s->devtab, offset);
+        /* log error accessing dte */
+        amd_iommu_log_devtab_error(s, devid, s->devtab + offset, 0);
+        return false;
+    }
+
+    if (!amd_iommu_validate_dte(s, devid, entry)) {
+        IOMMU_DPRINTF(MMU,
+                      "Pte entry at 0x%"PRIx64" is invalid", entry[0]);
+        return false;
+    }
+
+    return true;
+}
+
+/* get pte translation mode */
+static inline uint8_t get_pte_translation_mode(uint64_t pte)
+{
+    return (pte >> IOMMU_DEV_MODE_RSHIFT) & IOMMU_DEV_MODE_MASK;
+}
+
+static int amd_iommu_page_walk(AMDIOMMUAddressSpace *as, uint64_t *dte,
+                               IOMMUTLBEntry *ret, unsigned perms,
+                               hwaddr addr)
+{
+    uint8_t level, oldlevel;
+    unsigned present;
+    uint64_t pte, pte_addr;
+    uint64_t pte_perms;
+    pte = dte[0];
+
+    level = get_pte_translation_mode(pte);
+
+    if (level >= 7 || level == 0) {
+        IOMMU_DPRINTF(MMU, "error: translation level 0x%"PRIu8 " detected"
+                      "while translating 0x%"PRIx64, level, addr);
+        return -1;
+    }
+
+    while (level > 0) {
+        pte_perms = amd_iommu_get_perms(pte);
+        present = pte & 1;
+        if (!present || perms != (perms & pte_perms)) {
+            amd_iommu_page_fault(as->iommu_state, as->devfn, addr, perms);
+            IOMMU_DPRINTF(MMU, "error: page fault accessing virtual addr 0x%"
+                          PRIx64, addr);
+            return -1;
+        }
+
+        /* go to the next lower level */
+        pte_addr = pte & IOMMU_DEV_PT_ROOT_MASK;
+        /* add offset and load pte */
+        pte_addr += ((addr >> (3 + 9 * level)) & 0x1FF) << 3;
+        pte = ldq_phys(&address_space_memory, pte_addr);
+        oldlevel = level;
+        level = get_pte_translation_mode(pte);
+
+        /* PT is corrupted or not there */
+        if (level != oldlevel - 1) {
+            return -1;
+        }
+    }
+
+    ret->iova = addr & IOMMU_PAGE_MASK_4K;
+    ret->translated_addr = (pte & IOMMU_DEV_PT_ROOT_MASK) & IOMMU_PAGE_MASK_4K;
+    ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
+    ret->perm = IOMMU_RW;
+    return 0;
+}
+
+/* TODO : Mark addresses as Accessed and Dirty */
+static void amd_iommu_do_translate(AMDIOMMUAddressSpace *as, hwaddr addr,
+                                   bool is_write, IOMMUTLBEntry *ret)
+{
+    AMDIOMMUState *s = as->iommu_state;
+    uint16_t devid = PCI_DEVID(as->bus_num, as->devfn);
+    IOMMUIOTLBEntry *iotlb_entry;
+    int err;
+    uint64_t entry[4];
+
+    /* try getting a cache entry first */
+    iotlb_entry = amd_iommu_iotlb_lookup(s, addr, as->devfn);
+
+    if (iotlb_entry) {
+        IOMMU_DPRINTF(CACHE, "hit  iotlb devid: %02x:%02x.%x gpa 0x%"PRIx64
+                      " hpa 0x%"PRIx64, PCI_BUS_NUM(devid), PCI_SLOT(devid),
+                      PCI_FUNC(devid), addr, iotlb_entry->translated_addr);
+        ret->iova = addr & IOMMU_PAGE_MASK_4K;
+        ret->translated_addr = iotlb_entry->translated_addr;
+        ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
+        ret->perm = iotlb_entry->perms;
+        return;
+    } else {
+        if (!amd_iommu_get_dte(s, devid, entry)) {
+            goto out;
+        }
+
+        err = amd_iommu_page_walk(as, entry, ret,
+                                  is_write ? IOMMU_PERM_WRITE : IOMMU_PERM_READ,
+                                  addr);
+        if (err) {
+            IOMMU_DPRINTF(MMU, "error: hardware error accessing page tables"
+                          " while translating addr 0x%"PRIx64, addr);
+            amd_iommu_log_pagetab_error(s, as->devfn, addr, 0);
+            goto out;
+        }
+
+        amd_iommu_update_iotlb(s, as->devfn, addr, ret->translated_addr,
+                               ret->perm, entry[1] & IOMMU_DEV_DOMID_ID_MASK);
+        return;
+    }
+
+out:
+    ret->iova = addr;
+    ret->translated_addr = addr & IOMMU_PAGE_MASK_4K;
+    ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
+    ret->perm = IOMMU_RW;
+    return;
+}
+
+static IOMMUTLBEntry amd_iommu_translate(MemoryRegion *iommu, hwaddr addr,
+                                         bool is_write)
+{
+    IOMMU_DPRINTF(GENERAL, "");
+
+    AMDIOMMUAddressSpace *as = container_of(iommu, AMDIOMMUAddressSpace, iommu);
+    AMDIOMMUState *s = as->iommu_state;
+
+    IOMMUTLBEntry ret = {
+        .target_as = &address_space_memory,
+        .iova = addr,
+        .translated_addr = 0,
+        .addr_mask = ~(hwaddr)0,
+        .perm = IOMMU_NONE,
+    };
+
+    if (!s->enabled) {
+        /* IOMMU disabled - corresponds to iommu=off not
+         * failure to provide any parameter
+         */
+        ret.iova = addr & IOMMU_PAGE_MASK_4K;
+        ret.translated_addr = addr & IOMMU_PAGE_MASK_4K;
+        ret.addr_mask = ~IOMMU_PAGE_MASK_4K;
+        ret.perm = IOMMU_RW;
+        return ret;
+    }
+
+    amd_iommu_do_translate(as, addr, is_write, &ret);
+    IOMMU_DPRINTF(MMU, "devid: %02x:%02x.%x gpa 0x%"PRIx64 " hpa 0x%"PRIx64,
+                  as->bus_num, PCI_SLOT(as->devfn), PCI_FUNC(as->devfn), addr,
+                  ret.translated_addr);
+
+    return ret;
+}
+
+static const MemoryRegionOps mmio_mem_ops = {
+    .read = amd_iommu_mmio_read,
+    .write = amd_iommu_mmio_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .impl = {
+        .min_access_size = 1,
+        .max_access_size = 8,
+        .unaligned = false,
+    },
+    .valid = {
+        .min_access_size = 1,
+        .max_access_size = 8,
+    }
+};
+
+static void amd_iommu_init(AMDIOMMUState *s)
+{
+    IOMMU_DPRINTF(GENERAL, "");
+
+    amd_iommu_iotlb_reset(s);
+
+    s->iommu_ops.translate = amd_iommu_translate;
+
+    s->devtab_len = 0;
+    s->cmdbuf_len = 0;
+    s->cmdbuf_head = 0;
+    s->cmdbuf_tail = 0;
+    s->evtlog_head = 0;
+    s->evtlog_tail = 0;
+    s->excl_enabled = false;
+    s->excl_allow = false;
+    s->mmio_enabled = false;
+    s->enabled = false;
+    s->ats_enabled = false;
+    s->cmdbuf_enabled = false;
+
+    /* reset MMIO */
+    memset(s->mmior, 0, IOMMU_MMIO_SIZE);
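+    /* the two trailing amd_iommu_set_quad() arguments are presumably the
+     * read-only and write-1-to-clear masks kept in romask/w1cmask
+     */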
+    amd_iommu_set_quad(s, IOMMU_MMIO_EXT_FEATURES, IOMMU_EXT_FEATURES,
+            0xffffffffffffffef, 0);
+    amd_iommu_set_quad(s, IOMMU_MMIO_STATUS, 0, 0x98, 0x67);
+    /* reset device ident */
+    pci_config_set_vendor_id(s->dev.config, PCI_VENDOR_ID_AMD);
+    pci_config_set_device_id(s->dev.config, PCI_DEVICE_ID_RD890_IOMMU);
+    pci_config_set_prog_interface(s->dev.config, 0);
+    pci_config_set_class(s->dev.config, 0x0806);
+
+    /* reset IOMMU specific capabilities  */
+    pci_set_long(s->dev.config + s->capab_offset, IOMMU_CAPAB_FEATURES);
+    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_BAR_LOW,
+                 s->mmio.addr & ~(0xffff0000));
+    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_BAR_HIGH,
+                (s->mmio.addr & ~(0xffff)) >> 16);
+    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_RANGE,
+                 0xff000000);
+    pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_MISC,
+            IOMMU_MAX_PH_ADDR | IOMMU_MAX_GVA_ADDR | IOMMU_MAX_VA_ADDR);
+}
+
+static void amd_iommu_reset(DeviceState *dev)
+{
+    AMDIOMMUState *s = AMD_IOMMU_DEVICE(dev);
+
+    amd_iommu_init(s);
+}
+
+static void amd_iommu_realize(PCIDevice *dev, Error **error)
+{
+    AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);
+
+    s->iotlb = g_hash_table_new_full(amd_iommu_uint64_hash,
+                                     amd_iommu_uint64_equal, g_free, g_free);
+
+    s->capab_offset = pci_add_capability(dev, IOMMU_CAPAB_ID_SEC, 0,
+                                         IOMMU_CAPAB_SIZE);
+
+    /* add msi and hypertransport capabilities */
+    pci_add_capability(&s->dev, PCI_CAP_ID_MSI, 0, IOMMU_CAPAB_REG_SIZE);
+    pci_add_capability(&s->dev, PCI_CAP_ID_HT, 0, IOMMU_CAPAB_REG_SIZE);
+
+    amd_iommu_init(s);
+
+    /* set up MMIO */
+    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "mmio",
+                          IOMMU_MMIO_SIZE);
+
+    if (s->mmio.addr == IOMMU_BASE_ADDR) {
+        return;
+    }
+
+    s->mmio.addr = IOMMU_BASE_ADDR;
+    memory_region_add_subregion(get_system_memory(), IOMMU_BASE_ADDR, &s->mmio);
+}
+
+static const VMStateDescription vmstate_amd_iommu = {
+    .name = "amd-iommu",
+    .fields  = (VMStateField[]) {
+        VMSTATE_PCI_DEVICE(dev, AMDIOMMUState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static Property amd_iommu_properties[] = {
+    DEFINE_PROP_UINT32("version", AMDIOMMUState, version, 2),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void amd_iommu_uninit(PCIDevice *dev)
+{
+    AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);
+    amd_iommu_iotlb_reset(s);
+}
+
+static void amd_iommu_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+    k->realize = amd_iommu_realize;
+    k->exit = amd_iommu_uninit;
+
+    dc->reset = amd_iommu_reset;
+    dc->vmsd = &vmstate_amd_iommu;
+    dc->props = amd_iommu_properties;
+}
+
+static const TypeInfo amd_iommu = {
+    .name = TYPE_AMD_IOMMU_DEVICE,
+    .parent = TYPE_PCI_DEVICE,
+    .instance_size = sizeof(AMDIOMMUState),
+    .class_init = amd_iommu_class_init
+};
+
+static void amd_iommu_register_types(void)
+{
+    type_register_static(&amd_iommu);
+}
+
+type_init(amd_iommu_register_types);
diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h
new file mode 100644
index 0000000..7d317e1
--- /dev/null
+++ b/hw/i386/amd_iommu.h
@@ -0,0 +1,395 @@ 
+/*
+ * QEMU emulation of an AMD IOMMU (AMD-Vi)
+ *
+ * Copyright (C) 2011 Eduard - Gabriel Munteanu
+ * Copyright (C) 2015 David Kiarie, <davidkiarie4@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef AMD_IOMMU_H_
+#define AMD_IOMMU_H_
+
+#include "hw/hw.h"
+#include "hw/pci/pci.h"
+#include "hw/pci/msi.h"
+#include "hw/sysbus.h"
+#include "sysemu/dma.h"
+
+/* Capability registers */
+#define IOMMU_CAPAB_HEADER            0x00
+#define   IOMMU_CAPAB_REV_TYPE        0x02
+#define   IOMMU_CAPAB_FLAGS           0x03
+#define IOMMU_CAPAB_BAR_LOW           0x04
+#define IOMMU_CAPAB_BAR_HIGH          0x08
+#define IOMMU_CAPAB_RANGE             0x0C
+#define IOMMU_CAPAB_MISC              0x10
+#define IOMMU_CAPAB_MISC1             0x14
+
+#define IOMMU_CAPAB_SIZE              0x18
+#define IOMMU_CAPAB_REG_SIZE          0x04
+
+/* Capability header data */
+#define IOMMU_CAPAB_ID_SEC            0xf
+#define IOMMU_CAPAB_FLAT_EXT          (1 << 28)
+#define IOMMU_CAPAB_EFR_SUP           (1 << 27)
+#define IOMMU_CAPAB_FLAG_NPCACHE      (1 << 26)
+#define IOMMU_CAPAB_FLAG_HTTUNNEL     (1 << 25)
+#define IOMMU_CAPAB_FLAG_IOTLBSUP     (1 << 24)
+#define IOMMU_CAPAB_INIT_REV          (1 << 19)
+#define IOMMU_CAPAB_INIT_TYPE         (3 << 16)
+#define IOMMU_CAPAB_INIT_REV_TYPE     (IOMMU_CAPAB_INIT_REV | IOMMU_CAPAB_INIT_TYPE)
+#define IOMMU_CAPAB_INIT_FLAGS        (IOMMU_CAPAB_FLAG_NPCACHE | \
+                                       IOMMU_CAPAB_FLAG_HTTUNNEL)
+#define IOMMU_CAPAB_INIT_MISC         ((64 << 15) | (48 << 8))
+#define IOMMU_CAPAB_BAR_MASK          (~((1UL << 14) - 1))
+
+/* MMIO registers */
+#define IOMMU_MMIO_DEVICE_TABLE       0x0000
+#define IOMMU_MMIO_COMMAND_BASE       0x0008
+#define IOMMU_MMIO_EVENT_BASE         0x0010
+#define IOMMU_MMIO_CONTROL            0x0018
+#define IOMMU_MMIO_EXCL_BASE          0x0020
+#define IOMMU_MMIO_EXCL_LIMIT         0x0028
+#define IOMMU_MMIO_EXT_FEATURES       0x0030
+#define IOMMU_MMIO_COMMAND_HEAD       0x2000
+#define IOMMU_MMIO_COMMAND_TAIL       0x2008
+#define IOMMU_MMIO_EVENT_HEAD         0x2010
+#define IOMMU_MMIO_EVENT_TAIL         0x2018
+#define IOMMU_MMIO_STATUS             0x2020
+#define IOMMU_MMIO_PPR_BASE           0x0038
+#define IOMMU_MMIO_PPR_HEAD           0x2030
+#define IOMMU_MMIO_PPR_TAIL           0x2038
+
+#define IOMMU_MMIO_SIZE               0x4000
+
+#define IOMMU_MMIO_DEVTAB_SIZE_MASK   ((1ULL << 12) - 1)
+#define IOMMU_MMIO_DEVTAB_BASE_MASK   (((1ULL << 52) - 1) & ~ \
+                                       IOMMU_MMIO_DEVTAB_SIZE_MASK)
+#define IOMMU_MMIO_DEVTAB_ENTRY_SIZE  32
+#define IOMMU_MMIO_DEVTAB_SIZE_UNIT   4096
+
+/* some of these masks are identical but kept separate for readability */
+#define IOMMU_MMIO_CMDBUF_SIZE_BYTE       (IOMMU_MMIO_COMMAND_BASE + 7)
+#define IOMMU_MMIO_CMDBUF_SIZE_MASK       0x0F
+#define IOMMU_MMIO_CMDBUF_BASE_MASK       IOMMU_MMIO_DEVTAB_BASE_MASK
+#define IOMMU_MMIO_CMDBUF_DEFAULT_SIZE    8
+#define IOMMU_MMIO_CMDBUF_HEAD_MASK       (((1ULL << 19) - 1) & ~0x0F)
+#define IOMMU_MMIO_CMDBUF_TAIL_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
+
+#define IOMMU_MMIO_EVTLOG_SIZE_BYTE       (IOMMU_MMIO_EVENT_BASE + 7)
+#define IOMMU_MMIO_EVTLOG_SIZE_MASK       IOMMU_MMIO_CMDBUF_SIZE_MASK
+#define IOMMU_MMIO_EVTLOG_BASE_MASK       IOMMU_MMIO_CMDBUF_BASE_MASK
+#define IOMMU_MMIO_EVTLOG_DEFAULT_SIZE    IOMMU_MMIO_CMDBUF_DEFAULT_SIZE
+#define IOMMU_MMIO_EVTLOG_HEAD_MASK       (((1ULL << 19) - 1) & ~0x0F)
+#define IOMMU_MMIO_EVTLOG_TAIL_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
+
+#define IOMMU_MMIO_PPRLOG_SIZE_BYTE       (IOMMU_MMIO_EVENT_BASE + 7)
+#define IOMMU_MMIO_PPRLOG_HEAD_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
+#define IOMMU_MMIO_PPRLOG_TAIL_MASK       IOMMU_MMIO_EVTLOG_HEAD_MASK
+#define IOMMU_MMIO_PPRLOG_BASE_MASK       IOMMU_MMIO_EVTLOG_BASE_MASK
+#define IOMMU_MMIO_PPRLOG_SIZE_MASK       IOMMU_MMIO_EVTLOG_SIZE_MASK
+
+#define IOMMU_MMIO_EXCL_BASE_MASK         IOMMU_MMIO_DEVTAB_BASE_MASK
+#define IOMMU_MMIO_EXCL_ENABLED_MASK      (1ULL << 0)
+#define IOMMU_MMIO_EXCL_ALLOW_MASK        (1ULL << 1)
+#define IOMMU_MMIO_EXCL_LIMIT_MASK        IOMMU_MMIO_DEVTAB_BASE_MASK
+#define IOMMU_MMIO_EXCL_LIMIT_LOW         0xFFF
+
+/* mmio control register flags */
+#define IOMMU_MMIO_CONTROL_IOMMUEN        (1ULL << 0)
+#define IOMMU_MMIO_CONTROL_HTTUNEN        (1ULL << 1)
+#define IOMMU_MMIO_CONTROL_EVENTLOGEN     (1ULL << 2)
+#define IOMMU_MMIO_CONTROL_EVENTINTEN     (1ULL << 3)
+#define IOMMU_MMIO_CONTROL_COMWAITINTEN   (1ULL << 4)
+#define IOMMU_MMIO_CONTROL_PASSPW         (1ULL << 7)
+#define IOMMU_MMIO_CONTROL_REPASSPW       (1ULL << 9)
+#define IOMMU_MMIO_CONTROL_COHERENT       (1ULL << 10)
+#define IOMMU_MMIO_CONTROL_ISOC           (1ULL << 11)
+#define IOMMU_MMIO_CONTROL_CMDBUFLEN      (1ULL << 12)
+#define IOMMU_MMIO_CONTROL_PPRLOGEN       (1ULL << 13)
+#define IOMMU_MMIO_CONTROL_PPRINTEN       (1ULL << 14)
+#define IOMMU_MMIO_CONTROL_PPREN          (1ULL << 15)
+#define IOMMU_MMIO_CONTROL_GAEN           (1ULL << 16)
+#define IOMMU_MMIO_CONTROL_GTEN           (1ULL << 17)
+
+/* MMIO status register bits */
+#define IOMMU_MMIO_STATUS_PPR_OVFE    (1 << 18)
+#define IOMMU_MMIO_STATUS_PPR_OVFEB   (1 << 17)
+#define IOMMU_MMIO_STATUS_EVT_ACTIVE  (1 << 16)
+#define IOMMU_MMIO_STATUS_EVT_OVFB    (1 << 15)
+#define IOMMU_MMIO_STATUS_PPR_ACTIVE  (1 << 12)
+#define IOMMU_MMIO_STATUS_PPR_OVFB    (1 << 11)
+#define IOMMU_MMIO_STATUS_GA_INT      (1 << 10)
+#define IOMMU_MMIO_STATUS_GA_RUN      (1 << 9)
+#define IOMMU_MMIO_STATUS_GA_OVF      (1 << 8)
+#define IOMMU_MMIO_STATUS_PPR_RUN     (1 << 7)
+#define IOMMU_MMIO_STATUS_PPR_INT     (1 << 6)
+#define IOMMU_MMIO_STATUS_PPR_OVF     (1 << 5)
+#define IOMMU_MMIO_STATUS_CMDBUF_RUN  (1 << 4)
+#define IOMMU_MMIO_STATUS_EVT_RUN     (1 << 3)
+#define IOMMU_MMIO_STATUS_COMP_INT    (1 << 2)
+#define IOMMU_MMIO_STATUS_EVT_INT     (1 << 1)
+#define IOMMU_MMIO_STATUS_EVT_OVF     (1 << 0)
+
+#define IOMMU_CMDBUF_ID_BYTE              0x07
+#define IOMMU_CMDBUF_ID_RSHIFT            4
+
+#define IOMMU_CMD_COMPLETION_WAIT         0x01
+#define IOMMU_CMD_INVAL_DEVTAB_ENTRY      0x02
+#define IOMMU_CMD_INVAL_IOMMU_PAGES       0x03
+#define IOMMU_CMD_INVAL_IOTLB_PAGES       0x04
+#define IOMMU_CMD_INVAL_INTR_TABLE        0x05
+#define IOMMU_CMD_PREFETCH_IOMMU_PAGES    0x06
+#define IOMMU_CMD_COMPLETE_PPR_REQUEST    0x07
+#define IOMMU_CMD_INVAL_IOMMU_ALL         0x08
+
+#define IOMMU_DEVTAB_ENTRY_SIZE           32
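+/* a DTE is 32 bytes: four 64-bit quadwords, read as entry[4] by
+ * amd_iommu_get_dte()
+ */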
+
+/* Device table entry bits 0:63 */
+#define IOMMU_DEV_VALID                   (1ULL << 0)
+#define IOMMU_DEV_TRANSLATION_VALID       (1ULL << 1)
+#define IOMMU_DEV_MODE_MASK               0x7
+#define IOMMU_DEV_MODE_RSHIFT             9
+#define IOMMU_DEV_PT_ROOT_MASK            0xFFFFFFFFFF000
+#define IOMMU_DEV_PT_ROOT_RSHIFT          12
+#define IOMMU_DEV_PERM_SHIFT              61
+#define IOMMU_DEV_PERM_READ               (1ULL << 61)
+#define IOMMU_DEV_PERM_WRITE              (1ULL << 62)
+
+/* Device table entry bits 64:127 */
+#define IOMMU_DEV_DOMID_ID_MASK          ((1ULL << 16) - 1)
+#define IOMMU_DEV_IOTLB_SUPPORT           (1ULL << 17)
+#define IOMMU_DEV_SUPPRESS_PF             (1ULL << 18)
+#define IOMMU_DEV_SUPPRESS_ALL_PF         (1ULL << 19)
+#define IOMMU_DEV_IOCTL_MASK              (~3)
+#define IOMMU_DEV_IOCTL_RSHIFT            20
+#define   IOMMU_DEV_IOCTL_DENY            0
+#define   IOMMU_DEV_IOCTL_PASSTHROUGH     1
+#define   IOMMU_DEV_IOCTL_TRANSLATE       2
+#define IOMMU_DEV_CACHE                   (1ULL << 37)
+#define IOMMU_DEV_SNOOP_DISABLE           (1ULL << 38)
+#define IOMMU_DEV_EXCL                    (1ULL << 39)
+
+/* Event codes and flags, as stored in the info field */
+#define IOMMU_EVENT_ILLEGAL_DEVTAB_ENTRY  (0x1U << 12)
+#define IOMMU_EVENT_IOPF                  (0x2U << 12)
+#define   IOMMU_EVENT_IOPF_I              (1U << 3)
+#define   IOMMU_EVENT_IOPF_PR             (1U << 4)
+#define   IOMMU_EVENT_IOPF_RW             (1U << 5)
+#define   IOMMU_EVENT_IOPF_PE             (1U << 6)
+#define   IOMMU_EVENT_IOPF_RZ             (1U << 7)
+#define   IOMMU_EVENT_IOPF_TR             (1U << 8)
+#define IOMMU_EVENT_DEV_TAB_HW_ERROR      (0x3U << 12)
+#define IOMMU_EVENT_PAGE_TAB_HW_ERROR     (0x4U << 12)
+#define IOMMU_EVENT_ILLEGAL_COMMAND_ERROR (0x5U << 12)
+#define IOMMU_EVENT_COMMAND_HW_ERROR      (0x6U << 12)
+#define IOMMU_EVENT_IOTLB_INV_TIMEOUT     (0x7U << 12)
+#define IOMMU_EVENT_INVALID_DEV_REQUEST   (0x8U << 12)
+
+#define IOMMU_EVENT_LEN                   16
+#define IOMMU_PERM_READ             (1 << 0)
+#define IOMMU_PERM_WRITE            (1 << 1)
+#define IOMMU_PERM_RW               (IOMMU_PERM_READ | IOMMU_PERM_WRITE)
+
+/* AMD RD890 Chipset */
+#define PCI_DEVICE_ID_RD890_IOMMU   0x20
+
+#define IOMMU_FEATURE_PREFETCH            (1ULL << 0)
+#define IOMMU_FEATURE_PPR                 (1ULL << 1)
+#define IOMMU_FEATURE_NX                  (1ULL << 3)
+#define IOMMU_FEATURE_GT                  (1ULL << 4)
+#define IOMMU_FEATURE_IA                  (1ULL << 6)
+#define IOMMU_FEATURE_GA                  (1ULL << 7)
+#define IOMMU_FEATURE_HE                  (1ULL << 8)
+#define IOMMU_FEATURE_PC                  (1ULL << 9)
+
+/* reserved DTE bits */
+#define IOMMU_DTE_LOWER_QUAD_RESERVED  0x80300000000000fc
+#define IOMMU_DTE_MIDDLE_QUAD_RESERVED 0x0000000000000100
+#define IOMMU_DTE_UPPER_QUAD_RESERVED  0x08f0000000000000
+
+/* IOMMU paging mode */
+#define IOMMU_GATS_MODE                 (6ULL <<  12)
+#define IOMMU_HATS_MODE                 (6ULL <<  10)
+
+/* PCI SIG constants */
+#define PCI_BUS_MAX 256
+#define PCI_SLOT_MAX 32
+#define PCI_FUNC_MAX 8
+#define PCI_DEVFN_MAX 256
+
+/* IOTLB */
+#define IOMMU_IOTLB_MAX_SIZE 1024
+#define IOMMU_DEVID_SHIFT    36
+
+/* extended feature support */
+#define IOMMU_EXT_FEATURES (IOMMU_FEATURE_PREFETCH | IOMMU_FEATURE_PPR | \
+        IOMMU_FEATURE_NX | IOMMU_FEATURE_IA | IOMMU_FEATURE_GT | \
+        IOMMU_FEATURE_GA | IOMMU_FEATURE_HE | IOMMU_GATS_MODE | \
+        IOMMU_HATS_MODE)
+
+/* capabilities header */
+#define IOMMU_CAPAB_FEATURES (IOMMU_CAPAB_FLAT_EXT | \
+        IOMMU_CAPAB_FLAG_NPCACHE | IOMMU_CAPAB_FLAG_IOTLBSUP \
+        | IOMMU_CAPAB_ID_SEC | IOMMU_CAPAB_INIT_TYPE | \
+        IOMMU_CAPAB_FLAG_HTTUNNEL |  IOMMU_CAPAB_EFR_SUP)
+
+/* command constants */
+#define IOMMU_COM_STORE_ADDRESS_MASK 0xffffffffffff8
+#define IOMMU_COM_COMPLETION_STORE_MASK 0x1
+#define IOMMU_COM_COMPLETION_INTR 0x2
+#define IOMMU_COM_COMPLETION_DATA_OFF 0x8
+#define IOMMU_COMMAND_SIZE 0x10
+
+/* IOMMU default address */
+#define IOMMU_BASE_ADDR 0xfed80000
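+/* 0xfed80000 matches the MMIO base commonly assigned to the first IOMMU
+ * on real AMD platforms
+ */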
+
+/* page management constants */
+#define IOMMU_PAGE_SHIFT 12
+#define IOMMU_PAGE_SIZE  (1ULL << IOMMU_PAGE_SHIFT)
+
+#define IOMMU_PAGE_SHIFT_4K 12
+#define IOMMU_PAGE_MASK_4K  (~((1ULL << IOMMU_PAGE_SHIFT_4K) - 1))
+#define IOMMU_PAGE_SHIFT_2M 21
+#define IOMMU_PAGE_MASK_2M  (~((1ULL << IOMMU_PAGE_SHIFT_2M) - 1))
+#define IOMMU_PAGE_SHIFT_1G 30
+#define IOMMU_PAGE_MASK_1G (~((1ULL << IOMMU_PAGE_SHIFT_1G) - 1))
+
+#define IOMMU_MAX_VA_ADDR          (48UL << 5)
+#define IOMMU_MAX_PH_ADDR          (40UL << 8)
+#define IOMMU_MAX_GVA_ADDR         (48UL << 15)
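+/* the MAX_*_ADDR values above are OR'd into the MISC capability register
+ * in amd_iommu_init()
+ */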
+
+/* invalidation command device id */
+#define IOMMU_INVAL_DEV_ID_SHIFT  32
+#define IOMMU_INVAL_DEV_ID_MASK   (~((1ULL << IOMMU_INVAL_DEV_ID_SHIFT) - 1))
+
+/* invalidation address */
+#define IOMMU_INVAL_ADDR_MASK_SHIFT 12
+#define IOMMU_INVAL_ADDR_MASK     (~((1UL << IOMMU_INVAL_ADDR_MASK_SHIFT) - 1))
+
+/* invalidation S bit mask */
+#define IOMMU_INVAL_ALL(val) ((val) & (0x1))
+
+/* reserved bits */
+#define IOMMU_COMPLETION_WAIT_RSVD    0x0ff000000
+#define IOMMU_CMD_INVAL_DEV_RSVD      0xffff00000fffffff
+#define IOMMU_INVAL_IOMMU_PAGES_RSVD  0xfff000000fff0000
+#define IOMMU_INVAL_IOTLB_PAGES_RSVD  0x00000ff4
+#define IOMMU_INVAL_INTR_TABLE_RSVD   0xffff00000fffffff
+#define IOMMU_PRF_IOMMU_PAGES_RSVD    0x00ff00000ff00000
+#define IOMMU_COMPLETE_PPR_RQ_RSVD    0xffff00000ff00000
+#define IOMMU_INVAL_IOMMU_ALL_RSVD    0x0fffffff00000000
+
+/* command masks - inval iommu pages */
+#define IOMMU_INVAL_PAGES_PASID       (~((1UL << 20) - 1))
+#define IOMMU_INVAL_PAGES_DOMID       (((1ULL << 16) - 1) << 32)
+#define IOMMU_INVAL_PAGES_ADDRESS     (~((1UL << 12) - 1))
+#define IOMMU_INVAL_PAGES_SBIT        (1UL << 0)
+#define IOMMU_INVAL_PAGES_PDE         (1UL << 1)
+#define IOMMU_INVAL_PAGES_GN          (1UL << 2)
+
+/* masks - inval iotlb pages */
+#define IOMMU_INVAL_IOTLB_DEVID       (~((1UL << 16) - 1))
+#define IOMMU_INVAL_IOTLB_PASID_LOW   (0xff << 15)
+#define IOMMU_INVAL_IOTLB_MAXPEND     (0xff << 23)
+#define IOMMU_INVAL_IOTLB_QUEUEID     (~((1UL << 16) - 1))
+#define IOMMU_INVAL_IOTLB_PASID_HIGH  (0xffULL << 46)
+#define IOMMU_INVAL_IOTLB_GN          IOMMU_INVAL_PAGES_GN
+#define IOMMU_INVAL_IOTLB_S           IOMMU_INVAL_PAGES_SBIT
+#define IOMMU_INVAL_IOTLB_ADDRESS     IOMMU_INVAL_PAGES_ADDRESS
+#define IOMMU_INVAL_IOTLB_MAKEPASID(low, high)
+
+/* masks - prefetch pages   */
+#define IOMMU_PREFETCH_PAGES_DEVID     IOMMU_INVAL_IOTLB_DEVID
+#define IOMMU_PREFETCH_PAGES_PFCOUNT   IOMMU_INVAL_IOTLB_MAXPEND
+
+#define TYPE_AMD_IOMMU_DEVICE "amd-iommu"
+#define AMD_IOMMU_DEVICE(obj)\
+    OBJECT_CHECK(AMDIOMMUState, (obj), TYPE_AMD_IOMMU_DEVICE)
+
+#define AMD_IOMMU_STR "amd"
+
+typedef struct AMDIOMMUAddressSpace AMDIOMMUAddressSpace;
+
+typedef struct AMDIOMMUState {
+    PCIDevice dev;               /* The PCI device itself        */
+
+    uint32_t version;
+
+    uint32_t capab_offset;       /* capability offset pointer    */
+    uint64_t mmio_addr;
+    uint8_t *capab;              /* capabilities registers       */
+
+    bool enabled;                /* IOMMU enabled                */
+    bool ats_enabled;            /* address translation enabled  */
+    bool cmdbuf_enabled;         /* command buffer enabled       */
+    bool evtlog_enabled;         /* event log enabled            */
+    bool excl_enabled;
+
+    dma_addr_t devtab;           /* base address device table    */
+    size_t devtab_len;           /* device table length          */
+
+    dma_addr_t cmdbuf;           /* command buffer base address  */
+    uint64_t cmdbuf_len;         /* command buffer length        */
+    uint32_t cmdbuf_head;        /* current IOMMU read position  */
+    uint32_t cmdbuf_tail;        /* next Software write position */
+    bool completion_wait_intr;
+
+    dma_addr_t evtlog;           /* base address event log       */
+    bool evtlog_intr;
+    uint32_t evtlog_len;         /* event log length             */
+    uint32_t evtlog_head;        /* current IOMMU write position */
+    uint32_t evtlog_tail;        /* current Software read position */
+
+    /* unused for now */
+    dma_addr_t excl_base;        /* base DVA - IOMMU exclusion range */
+    dma_addr_t excl_limit;       /* limit of IOMMU exclusion range   */
+    bool excl_allow;             /* translate accesses to the exclusion range */
+    bool excl_enable;            /* exclusion range enabled          */
+
+    dma_addr_t ppr_log;          /* base address ppr log */
+    uint32_t pprlog_len;         /* ppr log len  */
+    uint32_t pprlog_head;        /* ppr log head */
+    uint32_t pprlog_tail;        /* ppr log tail */
+
+    MemoryRegion mmio;           /* MMIO region                  */
+    uint8_t mmior[IOMMU_MMIO_SIZE];    /* read/write MMIO              */
+    uint8_t w1cmask[IOMMU_MMIO_SIZE];  /* read/write 1 clear mask      */
+    uint8_t romask[IOMMU_MMIO_SIZE];   /* MMIO read/only mask          */
+    bool mmio_enabled;
+
+    /* IOMMU function */
+    MemoryRegionIOMMUOps iommu_ops;
+
+    /* for each served device */
+    AMDIOMMUAddressSpace **address_spaces[PCI_BUS_MAX];
+
+    /* IOTLB */
+    GHashTable *iotlb;
+} AMDIOMMUState;
+
+/*
+ * bridge_host_amd_iommu: set up an IOMMU address space for a device
+ *
+ * called for every PCI device on the bus
+ *
+ * @bus: PCI bus hosting the IOMMU
+ * @opaque: opaque pointer to the AMDIOMMUState struct
+ * @devfn: PCI devfn of the device for which to set up the IOMMU region
+ *
+ */
+AddressSpace *bridge_host_amd_iommu(PCIBus *bus, void *opaque, int devfn);
+
+#endif
diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
index dedf277..61deace 100644
--- a/include/hw/pci/pci.h
+++ b/include/hw/pci/pci.h
@@ -15,6 +15,8 @@ 
 
 /* PCI bus */
 
+#define PCI_BUS_NUM(x)          (((x) >> 8) & 0xff)
+#define PCI_DEVID(bus, devfn)   ((((uint16_t)(bus)) << 8) | (devfn))
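+/* PCI_DEVID packs the bus number into bits 15:8 and devfn into bits 7:0;
+ * PCI_BUS_NUM recovers the bus number from such an id */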
 #define PCI_DEVFN(slot, func)   ((((slot) & 0x1f) << 3) | ((func) & 0x07))
 #define PCI_SLOT(devfn)         (((devfn) >> 3) & 0x1f)
 #define PCI_FUNC(devfn)         ((devfn) & 0x07)