@@ -606,9 +606,19 @@ static void vfio_listener_region_add(MemoryListener *listener,
if (memory_region_is_iommu(section->mr)) {
VFIOGuestIOMMU *giommu;
IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
+ bool nested;
int iommu_idx;
trace_vfio_listener_region_add_iommu(iova, end);
+
+ if (!memory_region_iommu_get_attr(iommu_mr,
+ IOMMU_ATTR_HW_NESTED_PAGING,
+ (void *)&nested) && nested) {
+ error_report("VFIO/vIOMMU integration based on HW nested paging "
+ "is not yet supported");
+ ret = -EINVAL;
+ goto fail;
+ }
/*
* FIXME: For VFIO iommu types which have KVM acceleration to
* avoid bouncing all map/unmaps through qemu this way, this
As of today, VFIO only works along with vIOMMU supporting caching mode. The SMMUv3 does not support this mode and requires HW nested paging to work properly with VFIO. So any attempt to run a VFIO device protected by such an IOMMU would prevent the assigned device from working and at the moment the guest does not even boot as the default memory_region_iommu_replay() implementation attempts to translate the whole address space and completely stalls the guest. So let's fail in that case. Signed-off-by: Eric Auger <eric.auger@redhat.com> --- v3 -> v4: - use IOMMU_ATTR_HW_NESTED_PAGING - do not abort anymore but jump to fail --- hw/vfio/common.c | 10 ++++++++++ 1 file changed, 10 insertions(+)