@@ -227,7 +227,7 @@ static void build_extop_package(GArray *package, uint8_t op)
build_prepend_byte(package, 0x5B); /* ExtOpPrefix */
}
-static void build_append_int_noprefix(GArray *table, uint64_t value, int size)
+void build_append_int_noprefix(GArray *table, uint64_t value, int size)
{
int i;
new file mode 100644
@@ -0,0 +1,1471 @@
+/*
+ * QEMU emulation of AMD IOMMU (AMD-Vi)
+ *
+ * Copyright (C) 2011 Eduard - Gabriel Munteanu
+ * Copyright (C) 2015 David Kiarie, <davidkiarie4@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Cache implementation inspired by hw/i386/intel_iommu.c
+ *
+ */
+#include "qemu/osdep.h"
+#include "hw/i386/amd_iommu.h"
+#include "hw/pci/pci_bus.h"
+#include "qom/object.h"
+
+//#define DEBUG_AMD_AMDVI
+#ifdef DEBUG_AMD_AMDVI
+enum {
+ DEBUG_GENERAL, DEBUG_CAPAB, DEBUG_MMIO, DEBUG_ELOG,
+ DEBUG_CACHE, DEBUG_COMMAND, DEBUG_MMU, DEBUG_CUSTOM
+};
+
+#define AMDVI_DBGBIT(x) (1 << DEBUG_##x)
+static int iommu_dbgflags = AMDVI_DBGBIT(MMU);
+
+#define AMDVI_DPRINTF(what, fmt, ...) do { \
+ if (iommu_dbgflags & AMDVI_DBGBIT(what)) { \
+ fprintf(stderr, "(amd-iommu)%s: " fmt "\n", __func__, \
+ ## __VA_ARGS__); } \
+ } while (0)
+#else
+#define AMDVI_DPRINTF(what, fmt, ...) do {} while (0)
+#endif
+
+/*
+ * Encode an AMD-Vi event-log entry into the caller's local uint16_t evt[8]
+ * buffer (the macro expects the name 'evt' in scope):
+ *   bytes 0-1  : DeviceID
+ *   byte  3    : event info
+ *   bytes 8-15 : address, right-shifted by 'rshift', little-endian
+ *
+ * Fix: the ternary was inverted - the shift was applied only when rshift
+ * was 0 (a no-op) and skipped for a real shift count; the shift must also
+ * happen on the host-order value *before* the endian conversion.
+ * NOTE(review): 'info' is 16 bits wide at every call site but only one
+ * byte is stored - confirm the upper byte is meant to be dropped.
+ */
+#define ENCODE_EVENT(devid, info, addr, rshift) do {            \
+        *(uint16_t *)&evt[0] = devid;                           \
+        *(uint8_t *)&evt[3] = info;                             \
+        *(uint64_t *)&evt[4] = cpu_to_le64((addr) >> (rshift)); \
+    } while (0)
+
+/*
+ * Per-device IOMMU address space; one instance per (bus, devfn), created
+ * lazily on first use and cached in AMDVIState.address_spaces.
+ */
+typedef struct AMDVIAddressSpace {
+    uint8_t bus_num;            /* bus number */
+    uint8_t devfn;              /* device function */
+    AMDVIState *iommu_state;    /* AMDVI - one per machine */
+    MemoryRegion iommu;         /* Device's iommu region */
+    AddressSpace as;            /* device's corresponding address space */
+} AMDVIAddressSpace;
+
+/* AMDVI cache entry, stored in s->iotlb keyed by (devid << AMDVI_DEVID_SHIFT) | gfn */
+typedef struct AMDVIIOTLBEntry {
+    uint64_t gfn;               /* guest frame number */
+    uint16_t domid;             /* assigned domain id */
+    uint64_t devid;             /* device owning entry */
+    uint64_t perms;             /* access permissions (IOMMU_* flags) */
+    uint64_t translated_addr;   /* translated address */
+} AMDVIIOTLBEntry;
+
+/*
+ * Configure one 64-bit MMIO register at startup/reset: its initial value
+ * plus the read-only and write-1-to-clear masks used by later writes.
+ */
+static void amdvi_set_quad(AMDVIState *s, hwaddr addr, uint64_t val,
+                           uint64_t romask, uint64_t w1cmask)
+{
+    stq_le_p(&s->w1cmask[addr], w1cmask);
+    stq_le_p(&s->romask[addr], romask);
+    stq_le_p(&s->mmior[addr], val);
+}
+
+/* 16-bit little-endian read from the MMIO register file. */
+static uint16_t amdvi_readw(AMDVIState *s, hwaddr addr)
+{
+    uint8_t *reg = &s->mmior[addr];
+
+    return lduw_le_p(reg);
+}
+
+/* 32-bit little-endian read from the MMIO register file. */
+static uint32_t amdvi_readl(AMDVIState *s, hwaddr addr)
+{
+    uint8_t *reg = &s->mmior[addr];
+
+    return ldl_le_p(reg);
+}
+
+/* 64-bit little-endian read from the MMIO register file. */
+static uint64_t amdvi_readq(AMDVIState *s, hwaddr addr)
+{
+    uint8_t *reg = &s->mmior[addr];
+
+    return ldq_le_p(reg);
+}
+
+/* Internal 64-bit store that bypasses the RO/W1C mask processing. */
+static void amdvi_writeq_raw(AMDVIState *s, uint64_t val, hwaddr addr)
+{
+    uint8_t *reg = &s->mmior[addr];
+
+    stq_le_p(reg, val);
+}
+
+/* Guest-visible 16-bit write: honours read-only and write-1-to-clear bits. */
+static void amdvi_writew(AMDVIState *s, hwaddr addr, uint16_t val)
+{
+    uint16_t oldval = lduw_le_p(&s->mmior[addr]);
+    uint16_t romask = lduw_le_p(&s->romask[addr]);
+    uint16_t w1cmask = lduw_le_p(&s->w1cmask[addr]);
+    /* W1C bits written as 1 clear; RO bits keep their previous value */
+    uint16_t newval = (val & ~(val & w1cmask)) | (romask & oldval);
+
+    stw_le_p(&s->mmior[addr], newval);
+}
+
+/* Guest-visible 32-bit write: honours read-only and write-1-to-clear bits. */
+static void amdvi_writel(AMDVIState *s, hwaddr addr, uint32_t val)
+{
+    uint32_t oldval = ldl_le_p(&s->mmior[addr]);
+    uint32_t romask = ldl_le_p(&s->romask[addr]);
+    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
+    /* W1C bits written as 1 clear; RO bits keep their previous value */
+    uint32_t newval = (val & ~(val & w1cmask)) | (romask & oldval);
+
+    stl_le_p(&s->mmior[addr], newval);
+}
+
+/*
+ * Guest-visible 64-bit write: honours read-only and write-1-to-clear bits.
+ *
+ * Fix: 'oldval' was declared uint32_t, silently truncating the previous
+ * register value to its low half before the read-only bits were merged
+ * back in.
+ */
+static void amdvi_writeq(AMDVIState *s, hwaddr addr, uint64_t val)
+{
+    uint64_t romask = ldq_le_p(&s->romask[addr]);
+    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
+    uint64_t oldval = ldq_le_p(&s->mmior[addr]);
+
+    stq_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask & oldval));
+}
+
+/*
+ * Append one AMDVI_EVENT_LEN-byte event to the guest event log and raise
+ * an MSI.  Silently drops the event when logging is disabled or overflow
+ * has already been signalled.
+ *
+ * Fixes:
+ *  - the overflow test used '|' instead of '&', making the condition
+ *    always true, so no event was ever logged;
+ *  - on a full log the function now returns after flagging the overflow
+ *    instead of DMA-writing past the end of the guest buffer;
+ *  - the DMA destination used s->evtlog_len (the buffer size) as the base
+ *    address instead of s->evtlog;
+ *  - '&evt' passed the address of the pointer argument itself rather than
+ *    the event payload.
+ */
+static void amdvi_log_event(AMDVIState *s, uint16_t *evt)
+{
+    /* event logging not enabled, or overflow already signalled */
+    if (!s->evtlog_enabled ||
+        (*(uint64_t *)&s->mmior[AMDVI_MMIO_STATUS] &
+         AMDVI_MMIO_STATUS_EVT_OVF)) {
+        return;
+    }
+
+    /* event log buffer full - flag overflow and interrupt the guest */
+    if (s->evtlog_tail >= s->evtlog_len) {
+        *(uint64_t *)&s->mmior[AMDVI_MMIO_STATUS] |= AMDVI_MMIO_STATUS_EVT_OVF;
+        msi_notify(&s->dev->dev, 0);
+        return;
+    }
+
+    if (dma_memory_write(&address_space_memory, s->evtlog + s->evtlog_tail,
+        evt, AMDVI_EVENT_LEN)) {
+        AMDVI_DPRINTF(ELOG, "error: fail to write at address 0x%"PRIx64
+                      " + offset 0x%"PRIx32, s->evtlog, s->evtlog_tail);
+    }
+
+    s->evtlog_tail += AMDVI_EVENT_LEN;
+    /* TODO: should raise the event-log interrupt, not COMP_INT */
+    *(uint64_t *)&s->mmior[AMDVI_MMIO_STATUS] |= AMDVI_MMIO_STATUS_COMP_INT;
+}
+
+/* log an error encountered during page-walking
+ *
+ * @devid: device that issued the faulting request
+ * @addr: virtual address in translation request
+ * @info: event flags; IOPF bits are OR-ed in below
+ */
+static void amdvi_page_fault(AMDVIState *s, uint16_t devid,
+                             dma_addr_t addr, uint16_t info)
+{
+    AMDVI_DPRINTF(ELOG, "");
+
+    /* scratch buffer - ENCODE_EVENT expects the name 'evt' in scope */
+    uint16_t evt[8];
+
+    info |= AMDVI_EVENT_IOPF_I | AMDVI_EVENT_IOPF;
+
+    /* encode information */
+    ENCODE_EVENT(devid, info, addr, 0);
+
+    /* log a page fault */
+    amdvi_log_event(s, evt);
+
+    /* Abort the translation - raise target abort in config space */
+    pci_word_test_and_set_mask(s->dev->dev.config + PCI_STATUS,
+                               PCI_STATUS_SIG_TARGET_ABORT);
+}
+/*
+ * log a master abort accessing device table
+ * @devid : device whose table entry was being fetched
+ * @devtab : address of device table entry
+ * @info : error flags
+ */
+static void amdvi_log_devtab_error(AMDVIState *s, uint16_t devid,
+                                   dma_addr_t devtab, uint16_t info)
+{
+
+    AMDVI_DPRINTF(ELOG, "");
+
+    /* scratch buffer - ENCODE_EVENT expects the name 'evt' in scope */
+    uint16_t evt[8];
+
+    info |= AMDVI_EVENT_DEV_TAB_HW_ERROR;
+
+    /* encode information */
+    ENCODE_EVENT(devid, info, devtab, 0);
+
+    amdvi_log_event(s, evt);
+
+    /* Abort the translation - raise target abort in config space */
+    pci_word_test_and_set_mask(s->dev->dev.config + PCI_STATUS,
+                               PCI_STATUS_SIG_TARGET_ABORT);
+
+}
+
+/* log an event trying to access command buffer
+ * @addr : address that couldn't be accessed
+ */
+static void amdvi_log_command_error(AMDVIState *s, dma_addr_t addr)
+{
+    AMDVI_DPRINTF(ELOG, "");
+
+    /* scratch buffer - ENCODE_EVENT expects the name 'evt' in scope */
+    uint16_t evt[8], info = AMDVI_EVENT_COMMAND_HW_ERROR;
+
+    /* encode information; rshift=3 requests the address be stored >> 3 */
+    ENCODE_EVENT(0, info, addr, 3);
+
+    amdvi_log_event(s, evt);
+
+    /* Abort the translation - raise target abort in config space */
+    pci_word_test_and_set_mask(s->dev->dev.config + PCI_STATUS,
+                               PCI_STATUS_SIG_TARGET_ABORT);
+}
+
+/* log an illegal command event
+ * @info : error flags for the event entry
+ * @addr : address of illegal command
+ */
+static void amdvi_log_illegalcom_error(AMDVIState *s, uint16_t info,
+                                       dma_addr_t addr)
+{
+    AMDVI_DPRINTF(ELOG, "");
+
+    /* scratch buffer - ENCODE_EVENT expects the name 'evt' in scope */
+    uint16_t evt[8];
+    info |= AMDVI_EVENT_ILLEGAL_COMMAND_ERROR;
+
+    /* encode information; rshift=3 requests the address be stored >> 3 */
+    ENCODE_EVENT(0, info, addr, 3);
+
+    amdvi_log_event(s, evt);
+}
+
+/* log an illegal device table entry
+ *
+ * @devid : device owning the table entry
+ * @addr : address of device table entry
+ * @info : error flags
+ */
+static void amdvi_log_illegaldevtab_error(AMDVIState *s, uint16_t devid,
+                                          dma_addr_t addr, uint16_t info)
+{
+    AMDVI_DPRINTF(ELOG, "");
+
+    /* scratch buffer - ENCODE_EVENT expects the name 'evt' in scope */
+    uint16_t evt[8];
+
+    info |= AMDVI_EVENT_ILLEGAL_DEVTAB_ENTRY;
+
+    ENCODE_EVENT(devid, info, addr, 3);
+
+    amdvi_log_event(s, evt);
+}
+
+/* GHashTable equality callback for uint64_t IOTLB keys. */
+static gboolean amdvi_uint64_equal(gconstpointer v1, gconstpointer v2)
+{
+    const uint64_t *a = v1;
+    const uint64_t *b = v2;
+
+    return *a == *b;
+}
+
+/* GHashTable hash callback: fold the 64-bit key into a guint. */
+static guint amdvi_uint64_hash(gconstpointer v)
+{
+    const uint64_t *key = v;
+
+    return (guint)*key;
+}
+
+/* Look up a cached translation for (devid, addr); NULL when not cached. */
+static AMDVIIOTLBEntry *amdvi_iotlb_lookup(AMDVIState *s, hwaddr addr,
+                                           uint64_t devid)
+{
+    uint64_t key = ((uint64_t)(devid) << AMDVI_DEVID_SHIFT) |
+                   (addr >> AMDVI_PAGE_SHIFT_4K);
+
+    return g_hash_table_lookup(s->iotlb, &key);
+}
+
+/* Drop every cached translation; s->iotlb must already be allocated. */
+static void amdvi_iotlb_reset(AMDVIState *s)
+{
+    assert(s->iotlb);
+    g_hash_table_remove_all(s->iotlb);
+}
+
+/* g_hash_table_foreach_remove() predicate: match entries owned by a devid. */
+static gboolean amdvi_iotlb_remove_by_devid(gpointer key, gpointer value,
+                                            gpointer user_data)
+{
+    const AMDVIIOTLBEntry *entry = value;
+    uint16_t target = *(uint16_t *)user_data;
+
+    return entry->devid == target;
+}
+
+/* Evict the cached translation for a single (devid, addr) page. */
+static void amdvi_iotlb_remove_page(AMDVIState *s, hwaddr addr,
+                                    uint64_t devid)
+{
+    uint64_t key = ((uint64_t)(devid) << AMDVI_DEVID_SHIFT) |
+                   (addr >> AMDVI_PAGE_SHIFT_4K);
+
+    g_hash_table_remove(s->iotlb, &key);
+}
+
+/* extract device id
+ *
+ * NOTE(review): this reads a single byte (cmd[2]) and masks it, but a
+ * PCI device id is 16 bits wide and in AMD-Vi commands occupies bytes
+ * 0-1 of the first dword - confirm the intended field offset and width.
+ */
+static inline uint16_t devid_extract(uint8_t *cmd)
+{
+    return (uint16_t)cmd[2] & AMDVI_INVAL_DEV_ID_MASK;
+}
+
+/* Apply an INVALIDATE_IOTLB_PAGES command payload to the emulated cache. */
+static void amdvi_invalidate_iotlb(AMDVIState *s, uint64_t *cmd)
+{
+    uint16_t devid = devid_extract((uint8_t *)cmd);
+    /* if invalidation of more than one page requested, flush the device */
+    if (AMDVI_INVAL_ALL(cmd[0])) {
+        g_hash_table_foreach_remove(s->iotlb, amdvi_iotlb_remove_by_devid,
+                                    &devid);
+    } else {
+        /* single-page flush; address taken from the second quadword */
+        hwaddr addr = (hwaddr)(cmd[1] & AMDVI_INVAL_ADDR_MASK);
+        amdvi_iotlb_remove_page(s, addr, devid);
+    }
+}
+
+/*
+ * Insert or refresh a translation in the emulated IOTLB, keyed by
+ * (devid, gfn).  Entries without access permission are not cached.
+ *
+ * Fixes:
+ *  - the key was allocated with sizeof(key) (pointer size) instead of
+ *    sizeof(*key);
+ *  - entry/key were allocated before the perms check and leaked whenever
+ *    perms == IOMMU_NONE;
+ *  - entry->devid was never initialized even though
+ *    amdvi_iotlb_remove_by_devid() compares against it.
+ */
+static void amdvi_update_iotlb(AMDVIState *s, uint16_t devid,
+                               uint64_t gpa, uint64_t spa, uint64_t perms,
+                               uint16_t domid)
+{
+    uint64_t gfn = gpa >> AMDVI_PAGE_SHIFT_4K;
+
+    if (perms != IOMMU_NONE) {
+        AMDVIIOTLBEntry *entry = g_malloc(sizeof(*entry));
+        uint64_t *key = g_malloc(sizeof(*key));
+
+        AMDVI_DPRINTF(CACHE, " update iotlb domid 0x%"PRIx16" devid: "
+                      "%02x:%02x.%xgpa 0x%"PRIx64 " hpa 0x%"PRIx64, domid,
+                      PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+                      gpa, spa);
+
+        /* bound the cache; wholesale reset stands in for LRU eviction */
+        if (g_hash_table_size(s->iotlb) >= AMDVI_IOTLB_MAX_SIZE) {
+            AMDVI_DPRINTF(CACHE, "iotlb exceeds size limit - reset");
+            amdvi_iotlb_reset(s);
+        }
+
+        entry->gfn = gfn;
+        entry->domid = domid;
+        entry->perms = perms;
+        entry->translated_addr = spa;
+        entry->devid = devid;
+        *key = gfn | ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
+        /* replace (not insert): frees any previous key/entry pair */
+        g_hash_table_replace(s->iotlb, key, entry);
+    }
+}
+
+/* execute a completion wait command
+ *
+ * Fixes:
+ *  - 'addr' was 'unsigned int', truncating the 64-bit completion-store
+ *    address to 32 bits;
+ *  - the DPRINTF format string was malformed ("0%x"PRIx64);
+ *  - the COMP_INT status update OR-ed a 64-bit flag into a single
+ *    mmior[] byte; use the same 64-bit access as the rest of the file.
+ */
+static void amdvi_completion_wait_exec(AMDVIState *s, uint8_t *cmd)
+{
+    AMDVI_DPRINTF(COMMAND, "");
+    hwaddr addr;
+
+    /* completion store: write the second qword of the command to memory */
+    if (cmd[0] & AMDVI_COM_COMPLETION_STORE_MASK) {
+        addr = le64_to_cpu(*(uint64_t *)cmd) & AMDVI_COM_STORE_ADDRESS_MASK;
+        if (dma_memory_write(&address_space_memory, addr, cmd + 8, 8)) {
+            AMDVI_DPRINTF(ELOG, "error: fail to write at address 0x%"PRIx64,
+                          addr);
+        }
+    }
+
+    /* set completion interrupt */
+    if (cmd[0] & AMDVI_COM_COMPLETION_INTR) {
+        *(uint64_t *)&s->mmior[AMDVI_MMIO_STATUS] |=
+            AMDVI_MMIO_STATUS_COMP_INT;
+    }
+    /* NOTE(review): the MSI is raised even when the interrupt bit is
+     * clear - confirm whether it belongs inside the if above */
+    msi_notify(&s->dev->dev, 0);
+}
+
+/* get command type from the opcode field of the command's id byte */
+static uint8_t opcode(uint8_t *cmd)
+{
+    uint8_t id_byte = cmd[AMDVI_CMDBUF_ID_BYTE];
+
+    return id_byte >> AMDVI_CMDBUF_ID_RSHIFT;
+}
+
+/* log error without aborting since linux seems to be using reserved bits */
+static void amdvi_inval_devtab_entry(AMDVIState *s, uint8_t *cmd,
+                                     uint8_t type)
+{
+    AMDVI_DPRINTF(COMMAND, "");
+
+    /*
+     * Reserved-bit check over the two command quadwords.
+     * Fix: the high quadword was read via *(uint64_t *)&cmd[1], i.e.
+     * from byte offset 1 - misaligned, overlapping the low qword, and
+     * aliasing UB.  Read the quadwords at offsets 0 and 8 with the
+     * little-endian accessors instead.
+     */
+    if ((ldq_le_p(&cmd[0]) & AMDVI_CMD_INVAL_DEV_RSVD) ||
+        ldq_le_p(&cmd[8])) {
+        amdvi_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
+    }
+
+    /* This command should invalidate internal caches of which there isn't */
+#ifdef DEBUG_AMD_AMDVI
+    uint16_t devid = devid_extract(cmd);
+#endif
+    AMDVI_DPRINTF(COMMAND, "device table entry for devid: %02x:%02x.%x"
+                  " invalidated", PCI_BUS_NUM(devid), PCI_SLOT(devid),
+                  PCI_FUNC(devid));
+}
+
+/* Handle a COMPLETION_WAIT command: validate reserved bits, then execute. */
+static void amdvi_completion_wait(AMDVIState *s, uint8_t *cmd, uint8_t type)
+{
+    AMDVI_DPRINTF(COMMAND, "");
+
+    /* NOTE(review): &cmd[1] is byte offset 1 of a uint8_t buffer - a
+     * misaligned dword straddling the first word; the reserved field
+     * presumably lives at an aligned offset.  Confirm against spec. */
+    if (*(uint32_t *)&cmd[1] & AMDVI_COMPLETION_WAIT_RSVD) {
+        amdvi_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
+    }
+    /* pretend to wait for command execution to complete */
+    /* NOTE(review): cmd[0] is a single byte, so masking it with the
+     * 64-bit store-address mask truncates - the printed address is
+     * unreliable (display only; the real store uses the full qword). */
+    AMDVI_DPRINTF(COMMAND, "completion wait requested with store address 0x%"
+                  PRIx64 " and store data 0x%"PRIx64, (cmd[0] &
+                  AMDVI_COM_STORE_ADDRESS_MASK), *(uint64_t *)(cmd + 8));
+    amdvi_completion_wait_exec(s, cmd);
+}
+
+/* Handle COMPLETE_PPR_REQUEST - PPR is not emulated, so only validate. */
+static void amdvi_complete_ppr(AMDVIState *s, uint8_t *cmd, uint8_t type)
+{
+    AMDVI_DPRINTF(COMMAND, "");
+
+    /*
+     * Fix: the second quadword was read via *(uint64_t *)&cmd[1]
+     * (byte offset 1 - misaligned and overlapping the first qword)
+     * instead of offset 8; ldq_le_p gives a defined, endian-correct
+     * access.
+     */
+    if ((ldq_le_p(&cmd[0]) & AMDVI_COMPLETE_PPR_RQ_RSVD) ||
+        (ldq_le_p(&cmd[8]) & AMDVI_COMPLETE_PPR_HIGH_RSVD)) {
+        amdvi_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
+    }
+
+    AMDVI_DPRINTF(COMMAND, "Execution of PPR queue requested");
+}
+
+/* Handle INVALIDATE_IOMMU_ALL: flush the whole emulated IOTLB. */
+static void amdvi_inval_all(AMDVIState *s, uint8_t *cmd, uint8_t type)
+{
+    AMDVI_DPRINTF(COMMAND, "");
+
+    /*
+     * Fix: the must-be-zero high quadword was read via
+     * *(uint64_t *)&cmd[1] (byte offset 1 - misaligned and overlapping
+     * the first qword) instead of offset 8.
+     */
+    if ((ldq_le_p(&cmd[0]) & AMDVI_INVAL_AMDVI_ALL_RSVD) ||
+        ldq_le_p(&cmd[8])) {
+        amdvi_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
+    }
+
+    amdvi_iotlb_reset(s);
+    AMDVI_DPRINTF(COMMAND, "Invalidation of all AMDVI cache requested");
+}
+
+/* Pull the 16-bit domain id out of the first command quadword. */
+static inline uint16_t domid_extract(uint64_t *cmd)
+{
+    uint64_t low_qword = cmd[0];
+
+    return (uint16_t)(low_qword >> 32) & AMDVI_INVAL_PAGES_DOMID;
+}
+
+/* g_hash_table_foreach_remove() predicate: match entries in a domain. */
+static gboolean amdvi_iotlb_remove_by_domid(gpointer key, gpointer value,
+                                            gpointer user_data)
+{
+    const AMDVIIOTLBEntry *entry = value;
+    uint16_t target = *(uint16_t *)user_data;
+
+    return entry->domid == target;
+}
+
+/* we don't have devid - we can't remove pages by address */
+static void amdvi_inval_pages(AMDVIState *s, uint8_t *cmd, uint8_t type)
+{
+    AMDVI_DPRINTF(CUSTOM, "");
+    uint16_t domid = domid_extract((uint64_t *)cmd);
+
+    /* NOTE(review): &cmd[1] reads a dword from byte offset 1 - a
+     * misaligned access overlapping the first qword; the high reserved
+     * field presumably sits at an aligned offset.  Confirm vs. spec. */
+    if (*(uint64_t *)&cmd[0] & AMDVI_INVAL_AMDVI_PAGES_RSVD ||
+        *(uint32_t *)&cmd[1] & AMDVI_INVAL_PAGES_HIGH_RSVD) {
+        amdvi_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
+    }
+
+    /* drop every cached translation belonging to this domain */
+    g_hash_table_foreach_remove(s->iotlb, amdvi_iotlb_remove_by_domid,
+                                &domid);
+
+    AMDVI_DPRINTF(CUSTOM, "AMDVI pages for domain 0x%"PRIx16 "invalidated",
+                  domid);
+}
+
+/* Handle PREFETCH_IOMMU_PAGES - prefetch is a no-op here, so only validate. */
+static void amdvi_prefetch_pages(AMDVIState *s, uint8_t *cmd, uint8_t type)
+{
+    AMDVI_DPRINTF(COMMAND, "");
+
+    /* NOTE(review): &cmd[1] reads a dword from byte offset 1 - a
+     * misaligned access overlapping the first qword; confirm the
+     * intended offset of the high reserved field. */
+    if ((*(uint64_t *)&cmd[0] & AMDVI_PRF_AMDVI_PAGES_RSVD) ||
+        (*(uint32_t *)&cmd[1] & AMDVI_PREF_HIGH_RSVD)) {
+        amdvi_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
+    }
+
+    AMDVI_DPRINTF(COMMAND, "Pre-fetch of AMDVI pages requested");
+}
+
+/* Handle INVALIDATE_INTERRUPT_TABLE - nothing is cached, only validate. */
+static void amdvi_inval_inttable(AMDVIState *s, uint8_t *cmd, uint8_t type)
+{
+    AMDVI_DPRINTF(COMMAND, "");
+
+    /*
+     * Fix: the must-be-zero high quadword was read via
+     * *(uint64_t *)&cmd[1] (byte offset 1 - misaligned and overlapping
+     * the first qword) instead of offset 8.
+     */
+    if ((ldq_le_p(&cmd[0]) & AMDVI_INVAL_INTR_TABLE_RSVD) ||
+        ldq_le_p(&cmd[8])) {
+        amdvi_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
+        return;
+    }
+
+    AMDVI_DPRINTF(COMMAND, "interrupt table invalidated");
+}
+
+/* Handle INVALIDATE_IOTLB_PAGES: validate, then flush the emulated cache. */
+static void iommu_inval_iotlb(AMDVIState *s, uint8_t *cmd, uint8_t type)
+{
+    AMDVI_DPRINTF(COMMAND, "");
+
+    /* NOTE(review): &cmd[2] reads a dword from byte offset 2 - a
+     * misaligned access; confirm the intended reserved-field offset. */
+    if (*(uint32_t *)&cmd[2] & AMDVI_INVAL_IOTLB_PAGES_RSVD) {
+        amdvi_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
+        return;
+    }
+
+    amdvi_invalidate_iotlb(s, (uint64_t *)cmd);
+    AMDVI_DPRINTF(COMMAND, "IOTLB pages invalidated");
+}
+
+/* not honouring reserved bits is regarded as an illegal command */
+static void amdvi_cmdbuf_exec(AMDVIState *s)
+{
+    AMDVI_DPRINTF(COMMAND, "");
+
+    uint8_t type;
+    uint8_t cmd[AMDVI_COMMAND_SIZE];
+
+    memset(cmd, 0, AMDVI_COMMAND_SIZE);
+
+    /* fetch the 16-byte command at the current head from guest memory */
+    if (dma_memory_read(&address_space_memory, s->cmdbuf + s->cmdbuf_head, cmd,
+        AMDVI_COMMAND_SIZE)) {
+        AMDVI_DPRINTF(COMMAND, "error: fail to access memory at 0x%"PRIx64
+                      " + 0x%"PRIu8, s->cmdbuf, s->cmdbuf_head);
+        amdvi_log_command_error(s, s->cmdbuf + s->cmdbuf_head);
+        return;
+    }
+
+    /*
+     * Fix: the old '*cmd = le64_to_cpu(*(uint64_t *)cmd)' stored a
+     * 64-bit swap result into a single byte, corrupting cmd[0] on
+     * big-endian hosts (and doing nothing on little-endian).  The
+     * handlers read multi-byte fields with explicit accessors, so no
+     * in-place conversion is needed here.
+     */
+    type = opcode(cmd);
+
+    switch (type) {
+    case AMDVI_CMD_COMPLETION_WAIT:
+        amdvi_completion_wait(s, cmd, type);
+        break;
+
+    case AMDVI_CMD_INVAL_DEVTAB_ENTRY:
+        amdvi_inval_devtab_entry(s, cmd, type);
+        break;
+
+    case AMDVI_CMD_INVAL_AMDVI_PAGES:
+        amdvi_inval_pages(s, cmd, type);
+        break;
+
+    case AMDVI_CMD_INVAL_IOTLB_PAGES:
+        iommu_inval_iotlb(s, cmd, type);
+        break;
+
+    case AMDVI_CMD_INVAL_INTR_TABLE:
+        amdvi_inval_inttable(s, cmd, type);
+        break;
+
+    case AMDVI_CMD_PREFETCH_AMDVI_PAGES:
+        amdvi_prefetch_pages(s, cmd, type);
+        break;
+
+    case AMDVI_CMD_COMPLETE_PPR_REQUEST:
+        amdvi_complete_ppr(s, cmd, type);
+        break;
+
+    case AMDVI_CMD_INVAL_AMDVI_ALL:
+        amdvi_inval_all(s, cmd, type);
+        break;
+
+    default:
+        AMDVI_DPRINTF(COMMAND, "unhandled command %d", type);
+        /* log illegal command */
+        amdvi_log_illegalcom_error(s, type,
+                                   s->cmdbuf + s->cmdbuf_head);
+        break;
+    }
+}
+
+/* Drain the guest command buffer: execute commands from head up to tail. */
+static void amdvi_cmdbuf_run(AMDVIState *s)
+{
+    AMDVI_DPRINTF(COMMAND, "");
+
+    uint64_t *mmio_cmdbuf_head = (uint64_t *)(s->mmior +
+                                 AMDVI_MMIO_COMMAND_HEAD);
+
+    if (!s->cmdbuf_enabled) {
+        AMDVI_DPRINTF(COMMAND, "error: AMDVI trying to execute commands with "
+                      "command buffer disabled. AMDVI control value 0x%"PRIx64,
+                      amdvi_readq(s, AMDVI_MMIO_CONTROL));
+        return;
+    }
+
+    /* NOTE(review): if the guest ever sets a tail that is not a multiple
+     * of AMDVI_COMMAND_SIZE this loop cannot terminate - consider
+     * validating/aligning the tail value. */
+    while (s->cmdbuf_head != s->cmdbuf_tail) {
+        /* check if there is work to do. */
+        AMDVI_DPRINTF(COMMAND, "command buffer head at 0x%"PRIx32 " command "
+                      "buffer tail at 0x%"PRIx32" command buffer base at 0x%"
+                      PRIx64, s->cmdbuf_head, s->cmdbuf_tail, s->cmdbuf);
+        amdvi_cmdbuf_exec(s);
+        s->cmdbuf_head += AMDVI_COMMAND_SIZE;
+        /* raw write: mirror the head without triggering RO/W1C handling */
+        amdvi_writeq_raw(s, s->cmdbuf_head, AMDVI_MMIO_COMMAND_HEAD);
+
+        /* wrap head pointer */
+        if (s->cmdbuf_head >= s->cmdbuf_len * AMDVI_COMMAND_SIZE) {
+            s->cmdbuf_head = 0;
+        }
+    }
+
+    *mmio_cmdbuf_head = cpu_to_le64(s->cmdbuf_head);
+}
+
+/* System Software might never read from some of this fields but anyways */
+/* MMIO read dispatcher: registers have no read side effects, so the
+ * switch below is debug tracing only.  Access sizes other than 2/4/8
+ * leave val at its initial -1. */
+static uint64_t amdvi_mmio_read(void *opaque, hwaddr addr, unsigned size)
+{
+    AMDVIState *s = opaque;
+
+    uint64_t val = -1;
+    /* reject accesses that run past the register file */
+    if (addr + size > AMDVI_MMIO_SIZE) {
+        AMDVI_DPRINTF(MMIO, "error: addr outside region: max 0x%"PRIX64
+                      ", got 0x%"PRIx64 " %d", (uint64_t)AMDVI_MMIO_SIZE, addr,
+                      size);
+        return (uint64_t)-1;
+    }
+
+    if (size == 2) {
+        val = amdvi_readw(s, addr);
+    } else if (size == 4) {
+        val = amdvi_readl(s, addr);
+    } else if (size == 8) {
+        val = amdvi_readq(s, addr);
+    }
+
+    /* tracing only from here on */
+    switch (addr & ~0x07) {
+    case AMDVI_MMIO_DEVICE_TABLE:
+        AMDVI_DPRINTF(MMIO, "MMIO_DEVICE_TABLE read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                      addr & ~0x07);
+        break;
+
+    case AMDVI_MMIO_COMMAND_BASE:
+        AMDVI_DPRINTF(MMIO, "MMIO_COMMAND_BASE read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                      addr & ~0x07);
+        break;
+
+    case AMDVI_MMIO_EVENT_BASE:
+        AMDVI_DPRINTF(MMIO, "MMIO_EVENT_BASE read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                      addr & ~0x07);
+        break;
+
+    case AMDVI_MMIO_CONTROL:
+        AMDVI_DPRINTF(MMIO, "MMIO_CONTROL read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                      addr & ~0x07);
+        break;
+
+    case AMDVI_MMIO_EXCL_BASE:
+        AMDVI_DPRINTF(MMIO, "MMIO_EXCL_BASE read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                      addr & ~0x07);
+        break;
+
+    case AMDVI_MMIO_EXCL_LIMIT:
+        AMDVI_DPRINTF(MMIO, "MMIO_EXCL_LIMIT read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                      addr & ~0x07);
+        break;
+
+    case AMDVI_MMIO_COMMAND_HEAD:
+        AMDVI_DPRINTF(MMIO, "MMIO_COMMAND_HEAD read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                      addr & ~0x07);
+        break;
+
+    case AMDVI_MMIO_COMMAND_TAIL:
+        AMDVI_DPRINTF(MMIO, "MMIO_COMMAND_TAIL read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                      addr & ~0x07);
+        break;
+
+    case AMDVI_MMIO_EVENT_HEAD:
+        AMDVI_DPRINTF(MMIO, "MMIO_EVENT_HEAD read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                      addr & ~0x07);
+        break;
+
+    case AMDVI_MMIO_EVENT_TAIL:
+        AMDVI_DPRINTF(MMIO, "MMIO_EVENT_TAIL read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                      addr & ~0x07);
+        break;
+
+    case AMDVI_MMIO_STATUS:
+        AMDVI_DPRINTF(MMIO, "MMIO_STATUS read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                      addr & ~0x07);
+        break;
+
+    case AMDVI_MMIO_EXT_FEATURES:
+        AMDVI_DPRINTF(MMIO, "MMIO_EXT_FEATURES read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                      addr & ~0x07);
+        break;
+
+    default:
+        AMDVI_DPRINTF(MMIO, "UNHANDLED MMIO read addr 0x%"PRIx64
+                      ", size %d offset 0x%"PRIx64, addr, size,
+                      addr & ~0x07);
+    }
+    return val;
+}
+
+/* React to a guest write to MMIO_CONTROL: recompute the enable flags. */
+static void amdvi_handle_control_write(AMDVIState *s)
+{
+    AMDVI_DPRINTF(COMMAND, "");
+    /*
+     * read whatever is already written in case software is writing in
+     * chunks less than 8 bytes.
+     * Fix: 'control' was 'unsigned long', which is 32 bits on ILP32
+     * hosts and would silently drop the upper control bits.
+     */
+    uint64_t control = amdvi_readq(s, AMDVI_MMIO_CONTROL);
+    s->enabled = !!(control & AMDVI_MMIO_CONTROL_AMDVIEN);
+
+    s->ats_enabled = !!(control & AMDVI_MMIO_CONTROL_HTTUNEN);
+    s->evtlog_enabled = s->enabled && !!(control &
+                        AMDVI_MMIO_CONTROL_EVENTLOGEN);
+
+    s->evtlog_intr = !!(control & AMDVI_MMIO_CONTROL_EVENTINTEN);
+    s->completion_wait_intr = !!(control & AMDVI_MMIO_CONTROL_COMWAITINTEN);
+    s->cmdbuf_enabled = s->enabled && !!(control &
+                        AMDVI_MMIO_CONTROL_CMDBUFLEN);
+
+    /* update the flags depending on the control register */
+    if (s->cmdbuf_enabled) {
+        (*(uint64_t *)&s->mmior[AMDVI_MMIO_STATUS]) |=
+            AMDVI_MMIO_STATUS_CMDBUF_RUN;
+    } else {
+        (*(uint64_t *)&s->mmior[AMDVI_MMIO_STATUS]) &=
+            ~AMDVI_MMIO_STATUS_CMDBUF_RUN;
+    }
+    if (s->evtlog_enabled) {
+        (*(uint64_t *)&s->mmior[AMDVI_MMIO_STATUS]) |=
+            AMDVI_MMIO_STATUS_EVT_RUN;
+    } else {
+        (*(uint64_t *)&s->mmior[AMDVI_MMIO_STATUS]) &=
+            ~AMDVI_MMIO_STATUS_EVT_RUN;
+    }
+
+    AMDVI_DPRINTF(COMMAND, "MMIO_STATUS state 0x%"PRIx64, control);
+
+    /* commands may have become runnable */
+    amdvi_cmdbuf_run(s);
+}
+
+/* Latch the device-table base address and length from MMIO_DEVICE_TABLE. */
+static inline void amdvi_handle_devtab_write(AMDVIState *s)
+{
+    AMDVI_DPRINTF(MMIO, "");
+
+    uint64_t val = amdvi_readq(s, AMDVI_MMIO_DEVICE_TABLE);
+    s->devtab = (dma_addr_t)(val & AMDVI_MMIO_DEVTAB_BASE_MASK);
+
+    /*
+     * Set device table length: the size field encodes (#units - 1).
+     * Fix: '(val & MASK) + 1 * (UNIT / ENTRY)' multiplied 1 by the unit
+     * factor before adding (operator precedence); '+ 1' must apply to
+     * the size field first, then scale to entries.
+     */
+    s->devtab_len = ((val & AMDVI_MMIO_DEVTAB_SIZE_MASK) + 1) *
+                    (AMDVI_MMIO_DEVTAB_SIZE_UNIT /
+                     AMDVI_MMIO_DEVTAB_ENTRY_SIZE);
+}
+
+/* Guest moved the command-buffer head: re-latch it and resume execution. */
+static inline void amdvi_handle_cmdhead_write(AMDVIState *s)
+{
+    AMDVI_DPRINTF(MMIO, "");
+
+    uint64_t head = amdvi_readq(s, AMDVI_MMIO_COMMAND_HEAD);
+
+    s->cmdbuf_head = (dma_addr_t)(head & AMDVI_MMIO_CMDBUF_HEAD_MASK);
+    amdvi_cmdbuf_run(s);
+}
+
+/* Guest programmed the command-buffer base: latch base/size, reset ptrs. */
+static inline void amdvi_handle_cmdbase_write(AMDVIState *s)
+{
+    AMDVI_DPRINTF(MMIO, "");
+
+    uint64_t base = amdvi_readq(s, AMDVI_MMIO_COMMAND_BASE);
+
+    s->cmdbuf = (dma_addr_t)(base & AMDVI_MMIO_CMDBUF_BASE_MASK);
+    /* buffer length is encoded as a power of two */
+    s->cmdbuf_len = 1UL << (s->mmior[AMDVI_MMIO_CMDBUF_SIZE_BYTE]
+                            & AMDVI_MMIO_CMDBUF_SIZE_MASK);
+    s->cmdbuf_head = 0;
+    s->cmdbuf_tail = 0;
+}
+
+/* Guest advanced the command-buffer tail: latch it and run pending cmds. */
+static inline void amdvi_handle_cmdtail_write(AMDVIState *s)
+{
+    uint64_t tail = amdvi_readq(s, AMDVI_MMIO_COMMAND_TAIL);
+
+    s->cmdbuf_tail = tail & AMDVI_MMIO_CMDBUF_TAIL_MASK;
+    amdvi_cmdbuf_run(s);
+}
+
+/* Latch the exclusion-range limit; the low bits are forced to all-ones. */
+static inline void amdvi_handle_excllim_write(AMDVIState *s)
+{
+    AMDVI_DPRINTF(MMIO, "");
+
+    uint64_t limit = amdvi_readq(s, AMDVI_MMIO_EXCL_LIMIT);
+
+    s->excl_limit = (limit & AMDVI_MMIO_EXCL_LIMIT_MASK) |
+                    AMDVI_MMIO_EXCL_LIMIT_LOW;
+}
+
+/* Latch the event-log base address and (power-of-two) length. */
+static inline void amdvi_handle_evtbase_write(AMDVIState *s)
+{
+    AMDVI_DPRINTF(MMIO, "");
+
+    uint64_t base = amdvi_readq(s, AMDVI_MMIO_EVENT_BASE);
+
+    s->evtlog = base & AMDVI_MMIO_EVTLOG_BASE_MASK;
+    s->evtlog_len = 1UL << (*(uint64_t *)&s->mmior[AMDVI_MMIO_EVTLOG_SIZE_BYTE]
+                            & AMDVI_MMIO_EVTLOG_SIZE_MASK);
+}
+
+/* Latch the guest-written event-log tail pointer. */
+static inline void amdvi_handle_evttail_write(AMDVIState *s)
+{
+    AMDVI_DPRINTF(MMIO, "");
+
+    s->evtlog_tail = amdvi_readq(s, AMDVI_MMIO_EVENT_TAIL)
+                     & AMDVI_MMIO_EVTLOG_TAIL_MASK;
+}
+
+/* Latch the guest-written event-log head pointer. */
+static inline void amdvi_handle_evthead_write(AMDVIState *s)
+{
+    AMDVI_DPRINTF(MMIO, "");
+
+    s->evtlog_head = amdvi_readq(s, AMDVI_MMIO_EVENT_HEAD)
+                     & AMDVI_MMIO_EVTLOG_HEAD_MASK;
+}
+
+/* Latch the PPR-log base address and (power-of-two) length. */
+static inline void amdvi_handle_pprbase_write(AMDVIState *s)
+{
+    AMDVI_DPRINTF(MMIO, "");
+
+    uint64_t base = amdvi_readq(s, AMDVI_MMIO_PPR_BASE);
+
+    s->ppr_log = base & AMDVI_MMIO_PPRLOG_BASE_MASK;
+    s->pprlog_len = 1UL << (*(uint64_t *)&s->mmior[AMDVI_MMIO_PPRLOG_SIZE_BYTE]
+                            & AMDVI_MMIO_PPRLOG_SIZE_MASK);
+}
+
+/* Latch the guest-written PPR-log head pointer. */
+static inline void amdvi_handle_pprhead_write(AMDVIState *s)
+{
+    AMDVI_DPRINTF(MMIO, "");
+
+    s->pprlog_head = amdvi_readq(s, AMDVI_MMIO_PPR_HEAD)
+                     & AMDVI_MMIO_PPRLOG_HEAD_MASK;
+}
+
+/* Latch the guest-written PPR-log tail pointer. */
+static inline void amdvi_handle_pprtail_write(AMDVIState *s)
+{
+    AMDVI_DPRINTF(MMIO, "");
+
+    s->pprlog_tail = amdvi_readq(s, AMDVI_MMIO_PPR_TAIL)
+                     & AMDVI_MMIO_PPRLOG_TAIL_MASK;
+}
+
+/* FIXME: something might go wrong if System Software writes in chunks
+ * of one byte but linux writes in chunks of 4 bytes so currently it
+ * works correctly with linux but will definitely be busted if software
+ * reads/writes 8 bytes
+ */
+/* MMIO write dispatcher: store the bytes through the masked writers,
+ * then run the per-register side-effect handler.  Sizes other than
+ * 2/4/8 are silently ignored. */
+static void amdvi_mmio_write(void *opaque, hwaddr addr, uint64_t val,
+                             unsigned size)
+{
+    AMDVI_DPRINTF(MMIO, "");
+
+    AMDVIState *s = opaque;
+    unsigned long offset = addr & 0x07;    /* byte offset within register */
+
+    /* reject accesses that run past the register file */
+    if (addr + size > AMDVI_MMIO_SIZE) {
+        AMDVI_DPRINTF(MMIO, "error: addr outside region: max 0x%"PRIx64
+                      ", got 0x%"PRIx64 " %d", (uint64_t)AMDVI_MMIO_SIZE,
+                      addr, size);
+        return;
+    }
+
+    switch (addr & ~0x07) {
+    case AMDVI_MMIO_CONTROL:
+        if (size == 2) {
+            amdvi_writew(s, addr, val);
+        } else if (size == 4) {
+            amdvi_writel(s, addr, val);
+        } else if (size == 8) {
+            amdvi_writeq(s, addr, val);
+        }
+
+        AMDVI_DPRINTF(MMIO, "MMIO_CONTROL write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+        amdvi_handle_control_write(s);
+        break;
+
+    case AMDVI_MMIO_DEVICE_TABLE:
+        AMDVI_DPRINTF(MMIO, "MMIO_DEVICE_TABLE write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+        if (size == 2) {
+            amdvi_writew(s, addr, val);
+        } else if (size == 4) {
+            amdvi_writel(s, addr, val);
+        } else if (size == 8) {
+            amdvi_writeq(s, addr, val);
+        }
+
+        /* set device table address
+         * This also suffers from inability to tell whether software
+         * is done writing; only react once the upper half (or a full
+         * qword) has been written
+         */
+        if (offset || (size == 8)) {
+            amdvi_handle_devtab_write(s);
+        }
+        break;
+
+    case AMDVI_MMIO_COMMAND_HEAD:
+        AMDVI_DPRINTF(MMIO, "MMIO_COMMAND_HEAD write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+        if (size == 2) {
+            amdvi_writew(s, addr, val);
+        } else if (size == 4) {
+            amdvi_writel(s, addr, val);
+        } else if (size == 8) {
+            amdvi_writeq(s, addr, val);
+        }
+
+        amdvi_handle_cmdhead_write(s);
+        break;
+
+    case AMDVI_MMIO_COMMAND_BASE:
+        AMDVI_DPRINTF(MMIO, "MMIO_COMMAND_BASE write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+        if (size == 2) {
+            amdvi_writew(s, addr, val);
+        } else if (size == 4) {
+            amdvi_writel(s, addr, val);
+        } else if (size == 8) {
+            amdvi_writeq(s, addr, val);
+        }
+
+        /* FIXME - make sure System Software has finished writing in case
+         * it writes in chunks less than 8 bytes in a robust way. As for
+         * now, this hack works for the linux driver
+         */
+        if (offset || (size == 8)) {
+            amdvi_handle_cmdbase_write(s);
+        }
+        break;
+
+    case AMDVI_MMIO_COMMAND_TAIL:
+        AMDVI_DPRINTF(MMIO, "MMIO_COMMAND_TAIL write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+        if (size == 2) {
+            amdvi_writew(s, addr, val);
+        } else if (size == 4) {
+            amdvi_writel(s, addr, val);
+        } else if (size == 8) {
+            amdvi_writeq(s, addr, val);
+        }
+        amdvi_handle_cmdtail_write(s);
+        break;
+
+    case AMDVI_MMIO_EVENT_BASE:
+        AMDVI_DPRINTF(MMIO, "MMIO_EVENT_BASE write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+        if (size == 2) {
+            amdvi_writew(s, addr, val);
+        } else if (size == 4) {
+            amdvi_writel(s, addr, val);
+        } else if (size == 8) {
+            amdvi_writeq(s, addr, val);
+        }
+        amdvi_handle_evtbase_write(s);
+        break;
+
+    case AMDVI_MMIO_EVENT_HEAD:
+        AMDVI_DPRINTF(MMIO, "MMIO_EVENT_HEAD write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+        if (size == 2) {
+            amdvi_writew(s, addr, val);
+        } else if (size == 4) {
+            amdvi_writel(s, addr, val);
+        } else if (size == 8) {
+            amdvi_writeq(s, addr, val);
+        }
+        amdvi_handle_evthead_write(s);
+        break;
+
+    case AMDVI_MMIO_EVENT_TAIL:
+        AMDVI_DPRINTF(MMIO, "MMIO_EVENT_TAIL write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+        if (size == 2) {
+            amdvi_writew(s, addr, val);
+        } else if (size == 4) {
+            amdvi_writel(s, addr, val);
+        } else if (size == 8) {
+            amdvi_writeq(s, addr, val);
+        }
+        amdvi_handle_evttail_write(s);
+        break;
+
+    case AMDVI_MMIO_EXCL_LIMIT:
+        AMDVI_DPRINTF(MMIO, "MMIO_EXCL_LIMIT write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+        if (size == 2) {
+            amdvi_writew(s, addr, val);
+        } else if (size == 4) {
+            amdvi_writel(s, addr, val);
+        } else if (size == 8) {
+            amdvi_writeq(s, addr, val);
+        }
+        amdvi_handle_excllim_write(s);
+        break;
+
+    /* PPR log base - unused for now */
+    case AMDVI_MMIO_PPR_BASE:
+        AMDVI_DPRINTF(MMIO, "MMIO_PPR_BASE write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+        if (size == 2) {
+            amdvi_writew(s, addr, val);
+        } else if (size == 4) {
+            amdvi_writel(s, addr, val);
+        } else if (size == 8) {
+            amdvi_writeq(s, addr, val);
+        }
+        amdvi_handle_pprbase_write(s);
+        break;
+    /* PPR log head - also unused for now */
+    case AMDVI_MMIO_PPR_HEAD:
+        AMDVI_DPRINTF(MMIO, "MMIO_PPR_HEAD write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+        if (size == 2) {
+            amdvi_writew(s, addr, val);
+        } else if (size == 4) {
+            amdvi_writel(s, addr, val);
+        } else if (size == 8) {
+            amdvi_writeq(s, addr, val);
+        }
+        amdvi_handle_pprhead_write(s);
+        break;
+    /* PPR log tail - unused for now */
+    case AMDVI_MMIO_PPR_TAIL:
+        AMDVI_DPRINTF(MMIO, "MMIO_PPR_TAIL write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+        if (size == 2) {
+            amdvi_writew(s, addr, val);
+        } else if (size == 4) {
+            amdvi_writel(s, addr, val);
+        } else if (size == 8) {
+            amdvi_writeq(s, addr, val);
+        }
+        amdvi_handle_pprtail_write(s);
+        break;
+
+    /* ignore write to ext_features */
+    default:
+        AMDVI_DPRINTF(MMIO, "UNHANDLED MMIO write addr 0x%"PRIx64
+                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                      addr, size, val, offset);
+    }
+
+}
+
+/* Map DTE/PTE read+write permission bits down to IOMMU_* access flags. */
+static inline uint64_t amdvi_get_perms(uint64_t entry)
+{
+    uint64_t rw_bits = entry & (AMDVI_DEV_PERM_READ | AMDVI_DEV_PERM_WRITE);
+
+    return rw_bits >> AMDVI_DEV_PERM_SHIFT;
+}
+
+/*
+ * Return (creating lazily on first use) the IOMMU address space for the
+ * device at (bus, devfn).  The per-bus array and per-device state live
+ * for the lifetime of the IOMMU and are never freed.
+ */
+AddressSpace *bridge_host_amdvi(PCIBus *bus, void *opaque, int devfn)
+{
+    AMDVIState *s = opaque;
+    AMDVIAddressSpace **iommu_as;
+    int bus_num = pci_bus_num(bus);
+
+    iommu_as = s->address_spaces[bus_num];
+
+    /* allocate memory during the first run */
+    if (!iommu_as) {
+        iommu_as = g_malloc0(sizeof(AMDVIAddressSpace *) * PCI_DEVFN_MAX);
+        s->address_spaces[bus_num] = iommu_as;
+    }
+
+    /* set up AMDVI region */
+    if (!iommu_as[devfn]) {
+        iommu_as[devfn] = g_malloc0(sizeof(AMDVIAddressSpace));
+        iommu_as[devfn]->bus_num = (uint8_t)bus_num;
+        iommu_as[devfn]->devfn = (uint8_t)devfn;
+        iommu_as[devfn]->iommu_state = s;
+
+        memory_region_init_iommu(&iommu_as[devfn]->iommu, OBJECT(s),
+                                 &s->iommu_ops, "amd-iommu", UINT64_MAX);
+        address_space_init(&iommu_as[devfn]->as, &iommu_as[devfn]->iommu,
+                           "amd-iommu");
+    }
+    return &iommu_as[devfn]->as;
+}
+
+/* a valid entry should have V = 1 and reserved bits honoured
+ *
+ * Logs an illegal-DTE event (without aborting the caller) when any
+ * reserved bit is set, and returns false in that case; otherwise returns
+ * the entry's V bit.
+ */
+static bool amdvi_validate_dte(AMDVIState *s, uint16_t devid,
+                               uint64_t *dte)
+{
+    if ((dte[0] & AMDVI_DTE_LOWER_QUAD_RESERVED)
+        || (dte[1] & AMDVI_DTE_MIDDLE_QUAD_RESERVED)
+        || (dte[2] & AMDVI_DTE_UPPER_QUAD_RESERVED) || dte[3]) {
+        amdvi_log_illegaldevtab_error(s, devid,
+                                      s->devtab +
+                                      devid * AMDVI_DEVTAB_ENTRY_SIZE, 0);
+        return false;
+    }
+
+    return dte[0] & AMDVI_DEV_VALID;
+}
+
+/* get a device table entry given the devid
+ *
+ * Reads the 4-quadword entry into 'entry' and validates it; returns
+ * false (after logging) on DMA failure or an invalid entry.
+ */
+static bool amdvi_get_dte(AMDVIState *s, int devid, uint64_t *entry)
+{
+    uint32_t offset = devid * AMDVI_DEVTAB_ENTRY_SIZE;
+
+    AMDVI_DPRINTF(MMU, "Device Table at 0x%"PRIx64, s->devtab);
+
+    if (dma_memory_read(&address_space_memory, s->devtab + offset, entry,
+        AMDVI_DEVTAB_ENTRY_SIZE)) {
+        AMDVI_DPRINTF(MMU, "error: fail to access Device Entry devtab 0x%"PRIx64
+                      "offset 0x%"PRIx32, s->devtab, offset);
+        /* log error accessing dte */
+        amdvi_log_devtab_error(s, devid, s->devtab + offset, 0);
+        return false;
+    }
+
+    /* NOTE(review): only entry[0] is byte-swapped here although the
+     * entry spans four quadwords - harmless on little-endian hosts but
+     * entry[1..3] would be wrong on big-endian.  Confirm. */
+    *entry = le64_to_cpu(*entry);
+    if (!amdvi_validate_dte(s, devid, entry)) {
+        AMDVI_DPRINTF(MMU,
+                      "Pte entry at 0x%"PRIx64" is invalid", entry[0]);
+        return false;
+    }
+
+    return true;
+}
+
+/* get pte translation mode (the next-level/mode field of a PTE or DTE) */
+static inline uint8_t get_pte_translation_mode(uint64_t pte)
+{
+    uint64_t mode = pte >> AMDVI_DEV_MODE_RSHIFT;
+
+    return mode & AMDVI_DEV_MODE_MASK;
+}
+
+/*
+ * Walk the I/O page table described by device table entry @dte and fill
+ * @ret with the 4K translation for @addr.
+ *
+ * @dte:   4-quadword device table entry of the requesting device
+ * @ret:   out-parameter; on a fault it is left untouched (the caller
+ *         pre-initialises it to IOMMU_NONE)
+ * @perms: requested access, AMDVI_PERM_READ and/or AMDVI_PERM_WRITE
+ *
+ * With TV = 0 or mode 0 the request passes through untranslated
+ * (identity map) with permissions taken from the DTE.
+ */
+static void amdvi_page_walk(AMDVIAddressSpace *as, uint64_t *dte,
+                            IOMMUTLBEntry *ret, unsigned perms,
+                            hwaddr addr)
+{
+    unsigned level, present, pte_perms;
+    uint64_t pte = dte[0], pte_addr;
+
+    /* make sure the DTE has TV = 1 */
+    if (pte & AMDVI_DEV_TRANSLATION_VALID) {
+        level = get_pte_translation_mode(pte);
+        /* mode 7 is reserved - refuse to walk, leaving ret as IOMMU_NONE */
+        if (level >= 7) {
+            AMDVI_DPRINTF(MMU, "error: translation level 0x%"PRIu8 " detected"
+                          " while translating 0x%"PRIx64, level, addr);
+            return;
+        }
+        if (level == 0) {
+            goto no_remap;
+        }
+
+        /*
+         * NOTE(review): the level is re-read from each fetched PTE but is
+         * not re-checked against 7, and a malformed table could keep the
+         * loop from reaching level 0 - confirm termination guarantees.
+         */
+        while (level > 0) {
+            pte_perms = amdvi_get_perms(pte);
+            present = pte & 1;
+            /* fault when not present or when requested access exceeds PTE */
+            if (!present || perms != (perms & pte_perms)) {
+                amdvi_page_fault(as->iommu_state, as->devfn, addr, perms);
+                AMDVI_DPRINTF(MMU, "error: page fault accessing virtual addr "
+                              "0x%"PRIx64, addr);
+                return;
+            }
+
+            /* go to the next lower level */
+            pte_addr = pte & AMDVI_DEV_PT_ROOT_MASK;
+            /* add offset and load pte */
+            pte_addr += ((addr >> (3 + 9 * level)) & 0x1FF) << 3;
+            pte = ldq_phys(&address_space_memory, pte_addr);
+            level = get_pte_translation_mode(pte);
+        }
+        /* get access permissions from pte */
+        ret->iova = addr & AMDVI_PAGE_MASK_4K;
+        ret->translated_addr = (pte & AMDVI_DEV_PT_ROOT_MASK) &
+                               AMDVI_PAGE_MASK_4K;
+        ret->addr_mask = ~AMDVI_PAGE_MASK_4K;
+        ret->perm = amdvi_get_perms(pte);
+        return;
+    }
+
+no_remap:
+    /* identity map; pte still holds dte[0] here, so perms come from the DTE */
+    ret->iova = addr & AMDVI_PAGE_MASK_4K;
+    ret->translated_addr = addr & AMDVI_PAGE_MASK_4K;
+    ret->addr_mask = ~AMDVI_PAGE_MASK_4K;
+    ret->perm = amdvi_get_perms(pte);
+}
+
+/*
+ * Translate @addr for the device behind @as and store the result in @ret.
+ *
+ * Order of resolution: IOTLB hit -> device table entry fetch -> page
+ * walk.  A device whose DTE has V = 0 (or cannot be read) is passed
+ * through untranslated with full RW permissions.
+ */
+static void amdvi_do_translate(AMDVIAddressSpace *as, hwaddr addr,
+                               bool is_write, IOMMUTLBEntry *ret)
+{
+    AMDVIState *s = as->iommu_state;
+    uint16_t devid = PCI_DEVID(as->bus_num, as->devfn);
+    AMDVIIOTLBEntry *iotlb_entry = amdvi_iotlb_lookup(s, addr, as->devfn);
+    uint64_t entry[4];
+
+    if (iotlb_entry) {
+        AMDVI_DPRINTF(CACHE, "hit iotlb devid: %02x:%02x.%x gpa 0x%"PRIx64
+                      " hpa 0x%"PRIx64, PCI_BUS_NUM(devid), PCI_SLOT(devid),
+                      PCI_FUNC(devid), addr, iotlb_entry->translated_addr);
+        ret->iova = addr & AMDVI_PAGE_MASK_4K;
+        ret->translated_addr = iotlb_entry->translated_addr;
+        ret->addr_mask = ~AMDVI_PAGE_MASK_4K;
+        ret->perm = iotlb_entry->perms;
+        return;
+    }
+
+    /* devices with V = 0 are not translated */
+    if (!amdvi_get_dte(s, devid, entry)) {
+        goto out;
+    }
+
+    amdvi_page_walk(as, entry, ret,
+                    is_write ? AMDVI_PERM_WRITE : AMDVI_PERM_READ, addr);
+
+    /*
+     * NOTE(review): a faulting walk (ret->perm == IOMMU_NONE) is cached
+     * here as well - confirm that negative caching is intended.
+     */
+    amdvi_update_iotlb(s, as->devfn, addr, ret->translated_addr,
+                       ret->perm, entry[1] & AMDVI_DEV_DOMID_ID_MASK);
+    return;
+
+out:
+    /* untranslated pass-through: identity map with full access */
+    ret->iova = addr & AMDVI_PAGE_MASK_4K;
+    ret->translated_addr = addr & AMDVI_PAGE_MASK_4K;
+    ret->addr_mask = ~AMDVI_PAGE_MASK_4K;
+    ret->perm = IOMMU_RW;
+}
+
+/* true when @addr lies inside the interrupt window (0xfee00000-0xfeefffff) */
+static inline bool amdvi_is_interrupt_addr(hwaddr addr)
+{
+    return !(addr < AMDVI_INT_ADDR_FIRST || addr > AMDVI_INT_ADDR_LAST);
+}
+
+/*
+ * MemoryRegionIOMMUOps translate callback: resolve @addr for the device
+ * owning this IOMMU memory region.
+ */
+static IOMMUTLBEntry amdvi_translate(MemoryRegion *iommu, hwaddr addr,
+                                     bool is_write)
+{
+    AMDVI_DPRINTF(GENERAL, "");
+
+    AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu);
+    AMDVIState *s = as->iommu_state;
+    IOMMUTLBEntry ret = {
+        .target_as = &address_space_memory,
+        .iova = addr,
+        .translated_addr = 0,
+        .addr_mask = ~(hwaddr)0,
+        .perm = IOMMU_NONE
+    };
+
+    if (!s->enabled || amdvi_is_interrupt_addr(addr)) {
+        /*
+         * Identity map in two cases sharing the same shape:
+         *  - AMDVI disabled (corresponds to iommu=off, not a failure to
+         *    provide any parameter): full RW pass-through;
+         *  - interrupt window while enabled: write-only pass-through.
+         */
+        ret.iova = addr & AMDVI_PAGE_MASK_4K;
+        ret.translated_addr = addr & AMDVI_PAGE_MASK_4K;
+        ret.addr_mask = ~AMDVI_PAGE_MASK_4K;
+        ret.perm = s->enabled ? IOMMU_WO : IOMMU_RW;
+        return ret;
+    }
+
+    amdvi_do_translate(as, addr, is_write, &ret);
+    AMDVI_DPRINTF(MMU, "devid: %02x:%02x.%x gpa 0x%"PRIx64 " hpa 0x%"PRIx64,
+                  as->bus_num, PCI_SLOT(as->devfn), PCI_FUNC(as->devfn), addr,
+                  ret.translated_addr);
+
+    return ret;
+}
+
+/*
+ * Interrupt remapping hook: currently a stub that reports success
+ * without rewriting the MSI message (no remapping is performed).
+ */
+static int amdvi_int_remap(X86IOMMUState *iommu, MSIMessage *src, MSIMessage *dst, uint16_t sid)
+{
+    return 0;
+}
+
+/*
+ * X86IOMMUClass::find_add_as hook: placeholder returning the system
+ * address space for every device, i.e. no DMA remapping via this path.
+ * NOTE(review): bridge_host_amdvi() builds real per-devfn address
+ * spaces - confirm whether this hook should delegate to it.
+ */
+static AddressSpace *amdvi_find_add_as(X86IOMMUState *x86_iommu, PCIBus *bus, int devfn)
+{
+    return &address_space_memory;
+}
+
+/*
+ * MMIO register window: little-endian, handlers accept 1-8 byte
+ * accesses (amdvi_mmio_read/amdvi_mmio_write do the dispatch).
+ */
+static const MemoryRegionOps mmio_mem_ops = {
+    .read = amdvi_mmio_read,
+    .write = amdvi_mmio_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .impl = {
+        .min_access_size = 1,
+        .max_access_size = 8,
+        .unaligned = false,
+    },
+    .valid = {
+        .min_access_size = 1,
+        .max_access_size = 8,
+    }
+};
+
+/*
+ * Bring the IOMMU to its power-on state: clear all soft state, reset
+ * the MMIO register file and reset the companion PCI function.
+ */
+static void amdvi_init(AMDVIState *s)
+{
+    AMDVIPCIState *dev = s->dev;
+    /*
+     * DEVICE_CLASS() expects a class pointer; for an object instance the
+     * class is obtained with DEVICE_GET_CLASS().
+     */
+    DeviceClass *klass = DEVICE_GET_CLASS(DEVICE(dev));
+
+    AMDVI_DPRINTF(GENERAL, "");
+
+    amdvi_iotlb_reset(s);
+
+    s->iommu_ops.translate = amdvi_translate;
+
+    s->devtab_len = 0;
+    s->cmdbuf_len = 0;
+    s->cmdbuf_head = 0;
+    s->cmdbuf_tail = 0;
+    s->evtlog_head = 0;
+    s->evtlog_tail = 0;
+    s->excl_enabled = false;
+    s->excl_allow = false;
+    s->mmio_enabled = false;
+    s->enabled = false;
+    s->ats_enabled = false;
+    s->cmdbuf_enabled = false;
+
+    /* reset MMIO */
+    memset(s->mmior, 0, AMDVI_MMIO_SIZE);
+    amdvi_set_quad(s, AMDVI_MMIO_EXT_FEATURES, AMDVI_EXT_FEATURES,
+            0xffffffffffffffef, 0);
+    amdvi_set_quad(s, AMDVI_MMIO_STATUS, 0, 0x98, 0x67);
+
+    /* reset PCI device - DeviceClass::reset takes the device instance */
+    if (klass->reset) {
+        klass->reset(DEVICE(dev));
+    }
+}
+
+/* DeviceClass::reset hook - simply re-run the full init sequence */
+static void amdvi_reset(DeviceState *dev)
+{
+    amdvi_init(AMD_IOMMU_DEVICE(dev));
+}
+
+/*
+ * Realize: create the IOTLB hash table, instantiate the companion
+ * "AMDVI-PCI" function, expose the MMIO region, and install the IOMMU
+ * address-space callback on the root PCI bus.
+ */
+static void amdvi_realize(DeviceState *dev, Error **errp)
+{
+    AMDVIState *s = AMD_IOMMU_DEVICE(dev);
+    PCIBus *bus = PC_MACHINE(qdev_get_machine())->bus;
+
+    /* keys and values are heap-allocated and owned by the table */
+    s->iotlb = g_hash_table_new_full(amdvi_uint64_hash,
+                                     amdvi_uint64_equal, g_free, g_free);
+
+    /* This device should take care of IOMMU PCI properties */
+    /* NOTE(review): errp is never used - creation failures go unreported */
+    PCIDevice *createddev = pci_create_simple(bus, -1, "AMDVI-PCI");
+    AMDVIPCIState *amdpcidevice = container_of(createddev, AMDVIPCIState, dev);
+    s->dev = amdpcidevice;
+
+    /* set up MMIO */
+    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "mmio",
+                          AMDVI_MMIO_SIZE);
+    sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->mmio);
+
+    amdvi_init(s);
+
+    /* all DMA on this root bus is now routed through bridge_host_amdvi() */
+    bus->iommu_fn = bridge_host_amdvi;
+    bus->iommu_opaque = s;
+}
+
+/* migration: no device state is versioned/migrated yet */
+static const VMStateDescription vmstate_amdvi = {
+    .name = "iommu-amd",
+    .fields = (VMStateField[]) {
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+/* qdev properties; "version" defaults to 2 */
+static Property amdvi_properties[] = {
+    DEFINE_PROP_UINT32("version", AMDVIState, version, 2),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+/* wire up the DeviceClass and X86IOMMUClass hooks for the AMD IOMMU */
+static void amdvi_class_init(ObjectClass *klass, void* data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    X86IOMMUClass *dc_class = X86_IOMMU_CLASS(klass);
+
+    dc->reset = amdvi_reset;
+    dc->vmsd = &vmstate_amdvi;
+    dc->props = amdvi_properties;
+
+    /* realization is driven through the x86-iommu base class */
+    dc_class->realize = amdvi_realize;
+    dc_class->find_add_as = amdvi_find_add_as;
+    dc_class->int_remap = amdvi_int_remap;
+}
+
+/* QOM type for the IOMMU device proper (child of x86-iommu) */
+static const TypeInfo amdvi = {
+    .name = TYPE_AMD_IOMMU_DEVICE,
+    .parent = TYPE_X86_IOMMU_DEVICE,
+    .instance_size = sizeof(AMDVIState),
+    .class_init = amdvi_class_init
+};
+
+
+/* migration for the PCI shim: nothing migrated yet */
+static const VMStateDescription vmstate_amdviPCI = {
+    .name = "amdvi-pci",
+    .fields = (VMStateField[]) {
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static Property amdviPCI_properties[] = {
+    /*
+     * The previous "version" property computed its field offset within
+     * AMDVIState even though the device's instance is an AMDVIPCIState,
+     * so qdev would have written past the instance.  AMDVIPCIState has
+     * no such field - the property is dropped.
+     */
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+/*
+ * Realize the PCI shim: add the secure-device, MSI and HyperTransport
+ * capabilities.  The secure-device capability offset is remembered for
+ * amdviPCI_reset().
+ */
+static void amdviPCI_realize(PCIDevice *dev, Error **errp)
+{
+    AMDVIPCIState *s = container_of(dev, AMDVIPCIState, dev);
+
+    /*
+     * @dev already is the PCIDevice; the old "&s->dev->dev" applied "->"
+     * to the embedded struct member and did not compile.
+     * NOTE(review): pci_add_capability() returns < 0 on failure - the
+     * results are currently unchecked.
+     */
+    s->capab_offset = pci_add_capability(dev, AMDVI_CAPAB_ID_SEC, 0,
+                                         AMDVI_CAPAB_SIZE);
+    pci_add_capability(dev, PCI_CAP_ID_MSI, 0, AMDVI_CAPAB_REG_SIZE);
+    pci_add_capability(dev, PCI_CAP_ID_HT, 0, AMDVI_CAPAB_REG_SIZE);
+}
+
+/* reset the PCI shim: restore device ident and the r/o capability block */
+static void amdviPCI_reset(PCIDevice *dev)
+{
+    AMDVIPCIState *s = container_of(dev, AMDVIPCIState, dev);
+
+    /* reset device ident */
+    pci_config_set_vendor_id(dev->config, PCI_VENDOR_ID_AMD);
+    pci_config_set_device_id(dev->config, PCI_DEVICE_ID_RD890_IOMMU);
+    pci_config_set_prog_interface(dev->config, 00);
+    pci_config_set_class(dev->config, 0x0806);
+
+    /* reset AMDVI specific capabilities, all r/o */
+    pci_set_long(dev->config + s->capab_offset, AMDVI_CAPAB_FEATURES);
+    /*
+     * NOTE(review): AMDVIPCIState has no "mmio" member - the MMIO region
+     * lives in AMDVIState.  This needs a link to the IOMMU device (or
+     * the fixed AMDVI_BASE_ADDR); confirm the intended BAR source.
+     */
+    pci_set_long(dev->config + s->capab_offset + AMDVI_CAPAB_BAR_LOW,
+                 s->mmio.addr & ~(0xffff0000));
+    pci_set_long(dev->config + s->capab_offset + AMDVI_CAPAB_BAR_HIGH,
+                 (s->mmio.addr & ~(0xffff)) >> 16);
+    pci_set_long(dev->config + s->capab_offset + AMDVI_CAPAB_RANGE,
+                 0xff000000);
+    /* single write - the earlier "MISC = 0" store was immediately
+     * overwritten and has been removed */
+    pci_set_long(dev->config + s->capab_offset + AMDVI_CAPAB_MISC,
+                 AMDVI_MAX_PH_ADDR | AMDVI_MAX_GVA_ADDR | AMDVI_MAX_VA_ADDR);
+}
+
+/* class init for the PCI shim device */
+static void amdviPCI_class_init(ObjectClass *klass, void* data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+    k->realize = amdviPCI_realize;
+
+    dc->reset = amdviPCI_reset;
+    dc->vmsd = &vmstate_amdviPCI;
+    dc->props = amdviPCI_properties;
+}
+
+/* QOM type for the PCI function that carries the IOMMU's config space */
+static const TypeInfo amdviPCI = {
+    .name = "AMDVI-PCI",
+    .parent = TYPE_PCI_DEVICE,
+    .instance_size = sizeof(AMDVIPCIState),
+    .class_init = amdviPCI_class_init
+};
+
+/* register both QOM types used by the AMD IOMMU model */
+static void amdviPCI_register_types(void)
+{
+    type_register_static(&amdviPCI);
+    type_register_static(&amdvi);
+}
+
+/*
+ * Both types are registered by amdviPCI_register_types(); the former
+ * second type_init() referenced an undefined amdvi_register_types symbol.
+ */
+type_init(amdviPCI_register_types);
new file mode 100644
@@ -0,0 +1,348 @@
+/*
+ * QEMU emulation of an AMD IOMMU (AMD-Vi)
+ *
+ * Copyright (C) 2011 Eduard - Gabriel Munteanu
+ * Copyright (C) 2015 David Kiarie, <davidkiarie4@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef AMD_IOMMU_H_
+#define AMD_IOMMU_H_
+
+#include "hw/hw.h"
+#include "hw/pci/pci.h"
+#include "hw/pci/msi.h"
+#include "hw/i386/pc.h"
+#include "hw/sysbus.h"
+#include "sysemu/dma.h"
+#include "hw/i386/x86-iommu.h"
+
+/* Capability registers */
+#define AMDVI_CAPAB_BAR_LOW 0x04
+#define AMDVI_CAPAB_BAR_HIGH 0x08
+#define AMDVI_CAPAB_RANGE 0x0C
+#define AMDVI_CAPAB_MISC 0x10
+
+#define AMDVI_CAPAB_SIZE 0x18
+#define AMDVI_CAPAB_REG_SIZE 0x04
+
+/* Capability header data */
+#define AMDVI_CAPAB_ID_SEC 0xf
+#define AMDVI_CAPAB_FLAT_EXT (1 << 28)
+#define AMDVI_CAPAB_EFR_SUP (1 << 27)
+#define AMDVI_CAPAB_FLAG_NPCACHE (1 << 26)
+#define AMDVI_CAPAB_FLAG_HTTUNNEL (1 << 25)
+#define AMDVI_CAPAB_FLAG_IOTLBSUP (1 << 24)
+#define AMDVI_CAPAB_INIT_TYPE (3 << 16)
+
+/* MMIO registers */
+#define AMDVI_MMIO_DEVICE_TABLE 0x0000
+#define AMDVI_MMIO_COMMAND_BASE 0x0008
+#define AMDVI_MMIO_EVENT_BASE 0x0010
+#define AMDVI_MMIO_CONTROL 0x0018
+#define AMDVI_MMIO_EXCL_BASE 0x0020
+#define AMDVI_MMIO_EXCL_LIMIT 0x0028
+#define AMDVI_MMIO_EXT_FEATURES 0x0030
+#define AMDVI_MMIO_COMMAND_HEAD 0x2000
+#define AMDVI_MMIO_COMMAND_TAIL 0x2008
+#define AMDVI_MMIO_EVENT_HEAD 0x2010
+#define AMDVI_MMIO_EVENT_TAIL 0x2018
+#define AMDVI_MMIO_STATUS 0x2020
+#define AMDVI_MMIO_PPR_BASE 0x0038
+#define AMDVI_MMIO_PPR_HEAD 0x2030
+#define AMDVI_MMIO_PPR_TAIL 0x2038
+
+#define AMDVI_MMIO_SIZE 0x4000
+
+#define AMDVI_MMIO_DEVTAB_SIZE_MASK ((1ULL << 12) - 1)
+#define AMDVI_MMIO_DEVTAB_BASE_MASK (((1ULL << 52) - 1) & ~ \
+ AMDVI_MMIO_DEVTAB_SIZE_MASK)
+#define AMDVI_MMIO_DEVTAB_ENTRY_SIZE 32
+#define AMDVI_MMIO_DEVTAB_SIZE_UNIT 4096
+
+/* some of this are similar but just for readability */
+#define AMDVI_MMIO_CMDBUF_SIZE_BYTE (AMDVI_MMIO_COMMAND_BASE + 7)
+#define AMDVI_MMIO_CMDBUF_SIZE_MASK 0x0F
+#define AMDVI_MMIO_CMDBUF_BASE_MASK AMDVI_MMIO_DEVTAB_BASE_MASK
+#define AMDVI_MMIO_CMDBUF_HEAD_MASK (((1ULL << 19) - 1) & ~0x0F)
+#define AMDVI_MMIO_CMDBUF_TAIL_MASK AMDVI_MMIO_EVTLOG_HEAD_MASK
+
+#define AMDVI_MMIO_EVTLOG_SIZE_BYTE (AMDVI_MMIO_EVENT_BASE + 7)
+#define AMDVI_MMIO_EVTLOG_SIZE_MASK AMDVI_MMIO_CMDBUF_SIZE_MASK
+#define AMDVI_MMIO_EVTLOG_BASE_MASK AMDVI_MMIO_CMDBUF_BASE_MASK
+#define AMDVI_MMIO_EVTLOG_HEAD_MASK (((1ULL << 19) - 1) & ~0x0F)
+#define AMDVI_MMIO_EVTLOG_TAIL_MASK AMDVI_MMIO_EVTLOG_HEAD_MASK
+
+/* NOTE(review): copied from the EVTLOG macros - the PPR log length byte
+ * presumably lives at AMDVI_MMIO_PPR_BASE + 7, not EVENT_BASE + 7; confirm
+ */
+#define AMDVI_MMIO_PPRLOG_SIZE_BYTE (AMDVI_MMIO_EVENT_BASE + 7)
+#define AMDVI_MMIO_PPRLOG_HEAD_MASK AMDVI_MMIO_EVTLOG_HEAD_MASK
+#define AMDVI_MMIO_PPRLOG_TAIL_MASK AMDVI_MMIO_EVTLOG_HEAD_MASK
+#define AMDVI_MMIO_PPRLOG_BASE_MASK AMDVI_MMIO_EVTLOG_BASE_MASK
+#define AMDVI_MMIO_PPRLOG_SIZE_MASK AMDVI_MMIO_EVTLOG_SIZE_MASK
+
+#define AMDVI_MMIO_EXCL_ENABLED_MASK (1ULL << 0)
+#define AMDVI_MMIO_EXCL_ALLOW_MASK (1ULL << 1)
+#define AMDVI_MMIO_EXCL_LIMIT_MASK AMDVI_MMIO_DEVTAB_BASE_MASK
+#define AMDVI_MMIO_EXCL_LIMIT_LOW 0xFFF
+
+/* mmio control register flags */
+#define AMDVI_MMIO_CONTROL_AMDVIEN (1ULL << 0)
+#define AMDVI_MMIO_CONTROL_HTTUNEN (1ULL << 1)
+#define AMDVI_MMIO_CONTROL_EVENTLOGEN (1ULL << 2)
+#define AMDVI_MMIO_CONTROL_EVENTINTEN (1ULL << 3)
+#define AMDVI_MMIO_CONTROL_COMWAITINTEN (1ULL << 4)
+#define AMDVI_MMIO_CONTROL_CMDBUFLEN (1ULL << 12)
+
+/* MMIO status register bits */
+#define AMDVI_MMIO_STATUS_CMDBUF_RUN (1 << 4)
+#define AMDVI_MMIO_STATUS_EVT_RUN (1 << 3)
+#define AMDVI_MMIO_STATUS_COMP_INT (1 << 2)
+#define AMDVI_MMIO_STATUS_EVT_OVF (1 << 0)
+
+#define AMDVI_CMDBUF_ID_BYTE 0x07
+#define AMDVI_CMDBUF_ID_RSHIFT 4
+
+#define AMDVI_CMD_COMPLETION_WAIT 0x01
+#define AMDVI_CMD_INVAL_DEVTAB_ENTRY 0x02
+#define AMDVI_CMD_INVAL_AMDVI_PAGES 0x03
+#define AMDVI_CMD_INVAL_IOTLB_PAGES 0x04
+#define AMDVI_CMD_INVAL_INTR_TABLE 0x05
+#define AMDVI_CMD_PREFETCH_AMDVI_PAGES 0x06
+#define AMDVI_CMD_COMPLETE_PPR_REQUEST 0x07
+#define AMDVI_CMD_INVAL_AMDVI_ALL 0x08
+
+#define AMDVI_DEVTAB_ENTRY_SIZE 32
+
+/* Device table entry bits 0:63 */
+#define AMDVI_DEV_VALID (1ULL << 0)
+#define AMDVI_DEV_TRANSLATION_VALID (1ULL << 1)
+#define AMDVI_DEV_MODE_MASK 0x7
+#define AMDVI_DEV_MODE_RSHIFT 9
+#define AMDVI_DEV_PT_ROOT_MASK 0xFFFFFFFFFF000
+#define AMDVI_DEV_PT_ROOT_RSHIFT 12
+#define AMDVI_DEV_PERM_SHIFT 61
+#define AMDVI_DEV_PERM_READ (1ULL << 61)
+#define AMDVI_DEV_PERM_WRITE (1ULL << 62)
+
+/* Device table entry bits 64:127 */
+#define AMDVI_DEV_DOMID_ID_MASK ((1ULL << 16) - 1)
+
+/* Event codes and flags, as stored in the info field */
+#define AMDVI_EVENT_ILLEGAL_DEVTAB_ENTRY (0x1U << 12)
+#define AMDVI_EVENT_IOPF (0x2U << 12)
+#define AMDVI_EVENT_IOPF_I (1U << 3)
+#define AMDVI_EVENT_DEV_TAB_HW_ERROR (0x3U << 12)
+#define AMDVI_EVENT_PAGE_TAB_HW_ERROR (0x4U << 12)
+#define AMDVI_EVENT_ILLEGAL_COMMAND_ERROR (0x5U << 12)
+#define AMDVI_EVENT_COMMAND_HW_ERROR (0x6U << 12)
+
+#define AMDVI_EVENT_LEN 16
+#define AMDVI_PERM_READ (1 << 0)
+#define AMDVI_PERM_WRITE (1 << 1)
+
+/* AMD RD890 Chipset */
+#define PCI_DEVICE_ID_RD890_IOMMU 0x20
+
+#define AMDVI_FEATURE_PREFETCH (1ULL << 0) /* page prefetch Support */
+#define AMDVI_FEATURE_PPR (1ULL << 1) /* PPR Support */
+#define AMDVI_FEATURE_GT (1ULL << 4) /* Guest Translation */
+#define AMDVI_FEATURE_IA (1ULL << 6) /* inval all support */
+#define AMDVI_FEATURE_GA (1ULL << 7) /* guest VAPIC support */
+#define AMDVI_FEATURE_HE (1ULL << 8) /* hardware error regs */
+#define AMDVI_FEATURE_PC (1ULL << 9) /* Performance counters */
+
+/* reserved DTE bits */
+#define AMDVI_DTE_LOWER_QUAD_RESERVED 0x80300000000000fc
+#define AMDVI_DTE_MIDDLE_QUAD_RESERVED 0x0000000000000100
+#define AMDVI_DTE_UPPER_QUAD_RESERVED 0x08f0000000000000
+
+/* AMDVI paging mode */
+#define AMDVI_GATS_MODE (6ULL << 12)
+#define AMDVI_HATS_MODE (6ULL << 10)
+
+/* PCI SIG constants */
+#define PCI_BUS_MAX 256
+#define PCI_SLOT_MAX 32
+#define PCI_FUNC_MAX 8
+#define PCI_DEVFN_MAX 256
+
+/* IOTLB */
+#define AMDVI_IOTLB_MAX_SIZE 1024
+#define AMDVI_DEVID_SHIFT 36
+
+/* extended feature support */
+#define AMDVI_EXT_FEATURES (AMDVI_FEATURE_PREFETCH | AMDVI_FEATURE_PPR | \
+ AMDVI_FEATURE_IA | AMDVI_FEATURE_GT | AMDVI_FEATURE_GA | \
+ AMDVI_FEATURE_HE | AMDVI_GATS_MODE | AMDVI_HATS_MODE )
+
+/* capabilities header */
+#define AMDVI_CAPAB_FEATURES (AMDVI_CAPAB_FLAT_EXT | \
+ AMDVI_CAPAB_FLAG_NPCACHE | AMDVI_CAPAB_FLAG_IOTLBSUP \
+ | AMDVI_CAPAB_ID_SEC | AMDVI_CAPAB_INIT_TYPE | \
+ AMDVI_CAPAB_FLAG_HTTUNNEL | AMDVI_CAPAB_EFR_SUP)
+
+/* command constants */
+#define AMDVI_COM_STORE_ADDRESS_MASK 0xffffffffffff8
+#define AMDVI_COM_COMPLETION_STORE_MASK 0x1
+#define AMDVI_COM_COMPLETION_INTR 0x2
+#define AMDVI_COM_COMPLETION_DATA_OFF 0x8
+#define AMDVI_COMMAND_SIZE 0x10
+
+/* AMDVI default address */
+#define AMDVI_BASE_ADDR 0xfed80000
+
+/* page management constants */
+#define AMDVI_PAGE_SHIFT 12
+#define AMDVI_PAGE_SIZE (1ULL << AMDVI_PAGE_SHIFT)
+
+#define AMDVI_PAGE_SHIFT_4K 12
+#define AMDVI_PAGE_MASK_4K (~((1ULL << AMDVI_PAGE_SHIFT_4K) - 1))
+#define AMDVI_PAGE_SHIFT_2M 21
+#define AMDVI_PAGE_MASK_2M (~((1ULL << AMDVI_PAGE_SHIFT_2M) - 1))
+#define AMDVI_PAGE_SHIFT_1G 30
+#define AMDVI_PAGE_MASK_1G (~((1ULL << AMDVI_PAGE_SHIFT_1G) - 1))
+
+#define AMDVI_MAX_VA_ADDR (48UL << 5)
+#define AMDVI_MAX_PH_ADDR (40UL << 8)
+#define AMDVI_MAX_GVA_ADDR (48UL << 15)
+
+/* invalidation command device id */
+#define AMDVI_INVAL_DEV_ID_SHIFT 32
+/* 1ULL: shifting a 32-bit "unsigned long" by 32 is UB on ILP32 hosts */
+#define AMDVI_INVAL_DEV_ID_MASK (~((1ULL << AMDVI_INVAL_DEV_ID_SHIFT) - 1))
+
+/* invalidation address */
+#define AMDVI_INVAL_ADDR_MASK_SHIFT 12
+#define AMDVI_INVAL_ADDR_MASK (~((1UL << AMDVI_INVAL_ADDR_MASK_SHIFT) - 1))
+
+/* invalidation S bit mask */
+#define AMDVI_INVAL_ALL(val) ((val) & (0x1))
+
+/* reserved bits */
+#define AMDVI_COMPLETION_WAIT_RSVD 0x0ff000000
+#define AMDVI_CMD_INVAL_DEV_RSVD 0xffff00000fffffff
+#define AMDVI_INVAL_AMDVI_PAGES_RSVD 0xfff000000fff0000
+#define AMDVI_INVAL_IOTLB_PAGES_RSVD 0x00000ff4
+#define AMDVI_INVAL_INTR_TABLE_RSVD 0xffff00000fffffff
+#define AMDVI_PRF_AMDVI_PAGES_RSVD 0x00ff00000ff00000
+#define AMDVI_COMPLETE_PPR_RQ_RSVD 0xffff00000ff00000
+#define AMDVI_INVAL_AMDVI_ALL_RSVD 0x0fffffff00000000
+#define AMDVI_COMPLETE_PPR_HIGH_RSVD 0xffff000000000000
+#define AMDVI_INVAL_PAGES_HIGH_RSVD 0xff0
+#define AMDVI_PREF_HIGH_RSVD 0xfd4
+
+/* command masks - inval iommu pages */
+#define AMDVI_INVAL_PAGES_PASID (~((1UL << 20) - 1))
+#define AMDVI_INVAL_PAGES_DOMID ((1UL << 16) - 1)
+#define AMDVI_INVAL_PAGES_ADDRESS (~((1UL << 12) - 1))
+#define AMDVI_INVAL_PAGES_SBIT (1UL << 0)
+#define AMDVI_INVAL_PAGES_PDE (1UL << 1)
+#define AMDVI_INVAL_PAGES_GN (1UL << 2)
+
+/* masks - inval iotlb pages */
+#define AMDVI_INVAL_IOTLB_DEVID (~((1UL << 16) - 1))
+#define AMDVI_INVAL_IOTLB_PASID_LOW (0xff << 15)
+#define AMDVI_INVAL_IOTLB_MAXPEND (0xff << 23)
+#define AMDVI_INVAL_IOTLB_QUEUEID (~((1UL << 16) - 1))
+/* 0xffULL: an int constant shifted by 46 exceeds its width (UB) */
+#define AMDVI_INVAL_IOTLB_PASID_HIGH (0xffULL << 46)
+#define AMDVI_INVAL_IOTLB_GN AMDVI_INVAL_PAGES_GN
+/* the S-bit macro is named AMDVI_INVAL_PAGES_SBIT, not ..._PAGES_S */
+#define AMDVI_INVAL_IOTLB_S AMDVI_INVAL_PAGES_SBIT
+#define AMDVI_INVAL_IOTLB_ADDRESS AMDVI_INVAL_PAGES_ADDRESS
+
+#define AMDVI_INT_ADDR_FIRST 0xfee00000
+#define AMDVI_INT_ADDR_LAST 0xfeefffff
+
+#define TYPE_AMD_IOMMU_DEVICE "amd-iommu"
+#define AMD_IOMMU_DEVICE(obj)\
+ OBJECT_CHECK(AMDVIState, (obj), TYPE_AMD_IOMMU_DEVICE)
+
+#define AMD_IOMMU_STR "amd"
+
+typedef struct AMDVIAddressSpace AMDVIAddressSpace;
+
+/* functions to steal PCI config space */
+typedef struct AMDVIPCIState {
+    PCIDevice dev;               /* The PCI device itself        */
+    uint8_t *capab;              /* capabilities registers       */
+    uint32_t capab_offset;       /* capability offset pointer    */
+} AMDVIPCIState;
+
+typedef struct AMDVIState {
+    X86IOMMUState iommu;        /* IOMMU bus device             */
+    AMDVIPCIState *dev;         /* IOMMU PCI device             */
+
+    uint32_t version;           /* qdev "version" property */
+
+    uint64_t mmio_addr;         /* guest-physical base of the MMIO window */
+
+    bool enabled;                /* IOMMU enabled                */
+    bool ats_enabled;            /* address translation enabled  */
+    bool cmdbuf_enabled;         /* command buffer enabled       */
+    bool evtlog_enabled;         /* event log enabled            */
+    bool excl_enabled;
+
+    dma_addr_t devtab;           /* base address device table    */
+    size_t devtab_len;           /* device table length          */
+
+    dma_addr_t cmdbuf;           /* command buffer base address  */
+    uint64_t cmdbuf_len;         /* command buffer length        */
+    uint32_t cmdbuf_head;        /* current IOMMU read position  */
+    uint32_t cmdbuf_tail;        /* next Software write position */
+    bool completion_wait_intr;
+
+    dma_addr_t evtlog;           /* base address event log       */
+    bool evtlog_intr;
+    uint32_t evtlog_len;         /* event log length             */
+    uint32_t evtlog_head;        /* current IOMMU write position */
+    uint32_t evtlog_tail;        /* current Software read position */
+
+    /* unused for now */
+    dma_addr_t excl_base;    /* base DVA - IOMMU exclusion range */
+    dma_addr_t excl_limit;   /* limit of IOMMU exclusion range   */
+    bool excl_allow;         /* translate accesses to the exclusion range */
+    /* NOTE(review): duplicates excl_enabled above - one of them is unused */
+    bool excl_enable;        /* exclusion range enabled          */
+
+    dma_addr_t ppr_log;          /* base address ppr log */
+    uint32_t pprlog_len;         /* ppr log len  */
+    uint32_t pprlog_head;        /* ppr log head */
+    uint32_t pprlog_tail;        /* ppr log tail */
+
+    MemoryRegion mmio;                 /* MMIO region                  */
+    uint8_t mmior[AMDVI_MMIO_SIZE];    /* read/write MMIO              */
+    uint8_t w1cmask[AMDVI_MMIO_SIZE];  /* read/write 1 clear mask      */
+    uint8_t romask[AMDVI_MMIO_SIZE];   /* MMIO read/only mask          */
+    bool mmio_enabled;
+
+    /* IOMMU function */
+    MemoryRegionIOMMUOps iommu_ops;
+
+    /* for each served device: lazily-allocated per-bus devfn arrays */
+    AMDVIAddressSpace **address_spaces[PCI_BUS_MAX];
+
+    /* IOTLB */
+    GHashTable *iotlb;
+} AMDVIState;
+
+/*
+ * bridge_host_amdvi: set up / fetch the IOMMU address space for a device
+ *
+ * called for all PCI devices
+ *
+ * @bus: PCI bus to host the IOMMU
+ * @opaque: opaque pointer to AMDVIState struct
+ * @devfn: PCI function of device for which to setup IOMMU region for
+ *
+ */
+AddressSpace *bridge_host_amdvi(PCIBus *bus, void *opaque, int devfn);
+
+#endif
@@ -1479,7 +1479,7 @@ static int assigned_device_pci_cap_init(PCIDevice *pci_dev, Error **errp)
* error bits, leave the rest. */
status = pci_get_long(pci_dev->config + pos + PCI_X_STATUS);
status &= ~(PCI_X_STATUS_BUS | PCI_X_STATUS_DEVFN);
- status |= pci_requester_id(pci_dev);
+ status |= pci_get_bdf(pci_dev);
status &= ~(PCI_X_STATUS_SPL_DISC | PCI_X_STATUS_UNX_SPL |
PCI_X_STATUS_SPL_ERR);
pci_set_long(pci_dev->config + pos + PCI_X_STATUS, status);
@@ -281,6 +281,7 @@ static void pc_q35_machine_options(MachineClass *m)
m->default_machine_opts = "firmware=bios-256k.bin";
m->default_display = "std";
m->no_floppy = 1;
+ m->has_dynamic_sysbus = true;
}
static void pc_q35_2_6_machine_options(MachineClass *m)
@@ -598,4 +598,17 @@ typedef struct AcpiDmarHardwareUnit AcpiDmarHardwareUnit;
/* Masks for Flags field above */
#define ACPI_DMAR_INCLUDE_PCI_ALL 1
+/* IVRS constants */
+#define AMD_IOMMU_HOST_ADDRESS_WIDTH 40UL
+
+/* flags in the IVHD headers */
+#define IVHD_HT_TUNEN (1UL << 0) /* recommended setting for HtTunEn */
+#define IVHD_IOTLBSUP (1UL << 4) /* remote IOTLB support */
+#define IVHD_PREFSUP (1UL << 6) /* page prefetch support */
+#define IVHD_PPRSUP (1UL << 7) /* peripheral page service support */
+
+#define IVHD_EFR_HATS 48 /* host address translation size */
+#define IVHD_EFR_GATS 48 /* guest address translation size */
+#define IVHD_EFR_GTSUP (1UL << 2) /* guest translation support */
+
#endif
@@ -362,6 +362,7 @@ Aml *aml_derefof(Aml *arg);
Aml *aml_sizeof(Aml *arg);
Aml *aml_concatenate(Aml *source1, Aml *source2, Aml *target);
+void build_append_int_noprefix(GArray *table, uint64_t value, int size);
void
build_header(GArray *linker, GArray *table_data,
AcpiTableHeader *h, const char *sig, int len, uint8_t rev,
@@ -11,10 +11,11 @@
#include "hw/pci/pcie.h"
/* PCI bus */
-
+#define PCI_DEVID(bus, devfn) ((((uint16_t)(bus)) << 8) | (devfn))
#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn) ((devfn) & 0x07)
+#define PCI_BUILD_BDF(bus, devfn) ((bus << 8) | (devfn))
#define PCI_SLOT_MAX 32
#define PCI_FUNC_MAX 8
@@ -328,7 +329,6 @@ int pci_add_capability(PCIDevice *pdev, uint8_t cap_id,
int pci_add_capability2(PCIDevice *pdev, uint8_t cap_id,
uint8_t offset, uint8_t size,
Error **errp);
-
void pci_del_capability(PCIDevice *pci_dev, uint8_t cap_id, uint8_t cap_size);
uint8_t pci_find_capability(PCIDevice *pci_dev, uint8_t cap_id);
@@ -692,11 +692,13 @@ static inline uint32_t pci_config_size(const PCIDevice *d)
return pci_is_express(d) ? PCIE_CONFIG_SPACE_SIZE : PCI_CONFIG_SPACE_SIZE;
}
-static inline uint16_t pci_requester_id(PCIDevice *dev)
+static inline uint16_t pci_get_bdf(PCIDevice *dev)
{
- return (pci_bus_num(dev->bus) << 8) | dev->devfn;
+ return PCI_BUILD_BDF(pci_bus_num(dev->bus), dev->devfn);
}
+uint16_t pci_requester_id(PCIDevice *dev);
+
/* DMA access functions */
static inline AddressSpace *pci_get_address_space(PCIDevice *dev)
{
@@ -38,7 +38,8 @@ DEF("machine", HAS_ARG, QEMU_OPTION_machine, \
" kvm_shadow_mem=size of KVM shadow MMU\n"
" dump-guest-core=on|off include guest memory in a core dump (default=on)\n"
" mem-merge=on|off controls memory merge support (default: on)\n"
- " iommu=on|off controls emulated Intel IOMMU (VT-d) support (default=off)\n"
+ " iommu=on|off controls emulated IOMMU support(default: off)\n"
+ " x-iommu-type=amd|intel overrides emulated IOMMU to AMD IOMMU (default: intel)\n"
" igd-passthru=on|off controls IGD GFX passthrough support (default=off)\n"
" aes-key-wrap=on|off controls support for AES key wrapping (default=on)\n"
" dea-key-wrap=on|off controls support for DEA key wrapping (default=on)\n"
@@ -74,7 +75,9 @@ Enables or disables memory merge support. This feature, when supported by
the host, de-duplicates identical memory pages among VMs instances
(enabled by default).
@item iommu=on|off
-Enables or disables emulated Intel IOMMU (VT-d) support. The default is off.
+Enables or disables IOMMU emulation. The default is off.
+@item x-iommu-type=amd|intel
+Overrides the emulated IOMMU to be the AMD IOMMU. By default the Intel IOMMU is emulated.
@item aes-key-wrap=on|off
Enables or disables AES key wrapping support on s390-ccw hosts. This feature
controls whether AES wrapping keys will be created to allow
@@ -213,8 +213,12 @@ static QemuOptsList machine_opts = {
.help = "firmware image",
},{
.name = "iommu",
- .type = QEMU_OPT_BOOL,
- .help = "Set on/off to enable/disable Intel IOMMU (VT-d)",
+ .type = QEMU_OPT_BOOL,
+ .help = "Set on/off to enable iommu",
+ },{
+ .name = "x-iommu-type",
+ .type = QEMU_OPT_STRING,
+ .help = "Overrides emulated IOMMU from Intel to AMD",
},{
.name = "suppress-vmdesc",
.type = QEMU_OPT_BOOL,
Signed-off-by: David Kiarie <davidkiarie4@gmail.com> --- hw/acpi/aml-build.c | 2 +- hw/i386/amd_iommu.c | 1471 +++++++++++++++++++++++++++++++++++++++++++ hw/i386/amd_iommu.h | 348 ++++++++++ hw/i386/kvm/pci-assign.c | 2 +- hw/i386/pc_q35.c | 1 + include/hw/acpi/acpi-defs.h | 13 + include/hw/acpi/aml-build.h | 1 + include/hw/pci/pci.h | 10 +- qemu-options.hx | 7 +- util/qemu-config.c | 8 +- 10 files changed, 1853 insertions(+), 10 deletions(-) create mode 100644 hw/i386/amd_iommu.c create mode 100644 hw/i386/amd_iommu.h