@@ -1974,7 +1974,8 @@ static IOMMUTLBEntry vtd_iommu_translate(MemoryRegion *iommu, hwaddr addr,
return ret;
}
-static void vtd_iommu_notify_started(MemoryRegion *iommu)
+static void vtd_iommu_notify_started(MemoryRegion *iommu,
+ IOMMUAccessFlags flag)
{
VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
@@ -156,7 +156,8 @@ static uint64_t spapr_tce_get_min_page_size(MemoryRegion *iommu)
return 1ULL << tcet->page_shift;
}
-static void spapr_tce_notify_started(MemoryRegion *iommu)
+static void spapr_tce_notify_started(MemoryRegion *iommu,
+ IOMMUAccessFlags flag)
{
spapr_tce_set_need_vfio(container_of(iommu, sPAPRTCETable, iommu), true);
}
@@ -456,7 +456,8 @@ static void vfio_listener_region_add(MemoryListener *listener,
giommu->n.notify = vfio_iommu_map_notify;
QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
- memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);
+ memory_region_register_iommu_notifier(giommu->iommu, &giommu->n,
+ IOMMU_RW);
memory_region_iommu_replay(giommu->iommu, &giommu->n, false);
return;
@@ -154,7 +154,7 @@ struct MemoryRegionIOMMUOps {
/* Returns minimum supported page size */
uint64_t (*get_min_page_size)(MemoryRegion *iommu);
/* Called when the first notifier is set */
- void (*notify_started)(MemoryRegion *iommu);
+ void (*notify_started)(MemoryRegion *iommu, IOMMUAccessFlags flag);
/* Called when the last notifier is removed */
void (*notify_stopped)(MemoryRegion *iommu);
};
@@ -623,8 +623,12 @@ void memory_region_notify_iommu(MemoryRegion *mr,
* @n: the notifier to be added; the notifier receives a pointer to an
* #IOMMUTLBEntry as the opaque value; the pointer ceases to be
* valid on exit from the notifier.
+ * @flag: kind of notifier to request. IOMMU_RW for notifying all
+ * events (including additions), and IOMMU_NONE for notifying
+ * cache invalidations only.
*/
-void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);
+void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n,
+ IOMMUAccessFlags flag);
/**
* memory_region_iommu_replay: replay existing IOMMU translations to
@@ -1513,11 +1513,13 @@ bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
return memory_region_get_dirty_log_mask(mr) & (1 << client);
}
-void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n)
+void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n,
+ IOMMUAccessFlags flag)
{
+ assert(flag == IOMMU_NONE || flag == IOMMU_RW);
if (mr->iommu_ops->notify_started &&
QLIST_EMPTY(&mr->iommu_notify.notifiers)) {
- mr->iommu_ops->notify_started(mr);
+ mr->iommu_ops->notify_started(mr, flag);
}
notifier_list_add(&mr->iommu_notify, n);
}
With the new flag, we now allow registering two kinds of IOMMU notifiers: - IOMMU_RW: All DMA mapping changes will be notified. - IOMMU_NONE: will only be notified when there are cache invalidations. Here IOMMU_RW is the original semantics of the existing IOMMU notifiers. VFIO is the only user that registers an IOMMU notifier, and it does so with type IOMMU_RW. Suggested-by: Alex Williamson <alex.williamson@redhat.com> Signed-off-by: Peter Xu <peterx@redhat.com> --- hw/i386/intel_iommu.c | 3 ++- hw/ppc/spapr_iommu.c | 3 ++- hw/vfio/common.c | 3 ++- include/exec/memory.h | 8 ++++++-- memory.c | 6 ++++-- 5 files changed, 16 insertions(+), 7 deletions(-)