
[13/18] amd_iommu: Invalidate address translations on INVALIDATE_IOMMU_ALL

Message ID 20250414020253.443831-14-alejandro.j.jimenez@oracle.com
State New
Series AMD vIOMMU: DMA remapping support for VFIO devices

Commit Message

Alejandro Jimenez April 14, 2025, 2:02 a.m. UTC
When the guest kernel IOMMU driver issues an INVALIDATE_IOMMU_ALL command, the
address translation and interrupt remapping information must be cleared for
all Device IDs and all domains. Introduce a helper that syncs the shadow page
table for every address space with registered notifiers by replaying both MAP
and UNMAP events.

Signed-off-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
---
 hw/i386/amd_iommu.c | 49 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 49 insertions(+)
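
For context on who consumes the replayed events: only address spaces that have
IOMMU notifiers registered end up on the amdvi_as_with_notifiers list walked by
the new helper. The sketch below is illustrative only and not part of this
patch; it shows how a consumer such as VFIO typically registers such a
notifier. The example_* names and the iommu_mr parameter are hypothetical, and
header paths can vary between QEMU versions.

/*
 * Illustrative sketch (not part of this patch): register an IOMMU notifier
 * on a vIOMMU memory region so that MAP/UNMAP replay events reach it.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "exec/memory.h"

static void example_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    if (iotlb->perm == IOMMU_NONE) {
        /* UNMAP event: tear down host mappings for [iova, iova + addr_mask] */
    } else {
        /* MAP event: (re)establish the host mapping for this range */
    }
}

static void example_register(MemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    /* Ask for both MAP and UNMAP events across the whole address range */
    iommu_notifier_init(n, example_notify, IOMMU_NOTIFIER_IOTLB_EVENTS,
                        0, HWADDR_MAX, 0);
    memory_region_register_iommu_notifier(iommu_mr, n, &error_fatal);
}

Registering with IOMMU_NOTIFIER_IOTLB_EVENTS (MAP and UNMAP) is what makes the
full replay in amdvi_address_space_sync() take the shadow page table walk path
rather than the UNMAP-only path.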

Patch

diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c
index e24eab34c9e0..3bfa08419ffe 100644
--- a/hw/i386/amd_iommu.c
+++ b/hw/i386/amd_iommu.c
@@ -100,6 +100,7 @@  static int amdvi_as_to_dte(AMDVIAddressSpace *as, uint64_t *dte);
 static void amdvi_sync_shadow_page_table_range(AMDVIAddressSpace *as,
                                                uint64_t *dte, hwaddr addr,
                                                uint64_t size, bool send_unmap);
+static void amdvi_address_space_unmap(AMDVIAddressSpace *as, IOMMUNotifier *n);
 
 uint64_t amdvi_extended_feature_register(AMDVIState *s)
 {
@@ -462,6 +463,47 @@  static void amdvi_intremap_inval_notify_all(AMDVIState *s, bool global,
     x86_iommu_iec_notify_all(X86_IOMMU_DEVICE(s), global, index, mask);
 }
 
+static void amdvi_address_space_sync(AMDVIAddressSpace *as)
+{
+    IOMMUNotifier *n;
+    uint64_t dte[4] = { 0 };
+
+    /* If only UNMAP notifiers are registered, drop all existing mappings */
+    if (!(as->notifier_flags & IOMMU_NOTIFIER_MAP)) {
+        IOMMU_NOTIFIER_FOREACH(n, &as->iommu) {
+            /*
+             * Directly calling memory_region_unmap_iommu_notifier_range() does
+             * not guarantee that the addr_mask eventually passed as parameter
+             * to the notifier is valid. Use amdvi_address_space_unmap() which
+             * ensures the notifier range is divided into properly aligned
+             * regions, and issues notifications for each one.
+             */
+            amdvi_address_space_unmap(as, n);
+        }
+        return;
+    }
+
+    if (amdvi_as_to_dte(as, dte)) {
+        return;
+    }
+
+    amdvi_sync_shadow_page_table_range(as, &dte[0], 0, UINT64_MAX, true);
+}
+
+/*
+ * This differs from the replay() method in that it issues both MAP and UNMAP
+ * notifications since it is called after global invalidation events in order to
+ * re-sync all address spaces.
+ */
+static void amdvi_iommu_address_space_sync_all(AMDVIState *s)
+{
+    AMDVIAddressSpace *as;
+
+    QLIST_FOREACH(as, &s->amdvi_as_with_notifiers, next) {
+        amdvi_address_space_sync(as);
+    }
+}
+
 static void amdvi_inval_all(AMDVIState *s, uint64_t *cmd)
 {
     if (extract64(cmd[0], 0, 60) || cmd[1]) {
@@ -473,6 +515,13 @@  static void amdvi_inval_all(AMDVIState *s, uint64_t *cmd)
     amdvi_intremap_inval_notify_all(s, true, 0, 0);
 
     amdvi_iotlb_reset(s);
+
+    /*
+     * Fully replay the address space, i.e. send both UNMAP and MAP events, to
+     * synchronize the guest and host IO page tables.
+     */
+    amdvi_iommu_address_space_sync_all(s);
+
     trace_amdvi_all_inval();
 }
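
A note on the comment in amdvi_address_space_sync() above: notifier events
carry an addr_mask, which must describe a naturally aligned, power-of-two
sized region. The standalone sketch below (plain C, not QEMU code; both
function names are made up) illustrates the kind of range splitting that
amdvi_address_space_unmap() is relied on to do before issuing notifications.

/*
 * Standalone illustration: split an arbitrary range into naturally aligned,
 * power-of-two sized chunks so each notification can carry a valid
 * addr_mask (chunk_size - 1).
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void notify_unmap_chunk(uint64_t start, uint64_t addr_mask)
{
    /* Stands in for emitting an UNMAP event for [start, start + addr_mask] */
    printf("unmap: start=0x%" PRIx64 " addr_mask=0x%" PRIx64 "\n",
           start, addr_mask);
}

static void unmap_range_aligned(uint64_t start, uint64_t size)
{
    while (size) {
        /* Largest power of two permitted by the current alignment of start */
        uint64_t chunk = start ? (start & -start) : 1ULL << 63;

        /* ...further limited so the chunk does not overshoot the range */
        while (chunk > size) {
            chunk >>= 1;
        }
        notify_unmap_chunk(start, chunk - 1);
        start += chunk;
        size -= chunk;
    }
}

int main(void)
{
    /* An unaligned, non-power-of-two range gets split into aligned chunks */
    unmap_range_aligned(0x1200, 0x5000);
    return 0;
}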