@@ -835,6 +835,31 @@ static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
memory_region_notify_iommu_one(n, &event);
}
+/**
+ * smmuv3_notify_asid - call the notifier @n for a given asid
+ *
+ * @mr: IOMMU mr region handle
+ * @n: notifier to be called
+ * @asid: address space ID or negative value if we don't care
+ */
+static void smmuv3_notify_asid(IOMMUMemoryRegion *mr,
+ IOMMUNotifier *n, int asid)
+{
+ IOMMUTLBEvent event = {};
+
+ event.type = IOMMU_NOTIFIER_UNMAP;
+ event.entry.target_as = &address_space_memory;
+ event.entry.perm = IOMMU_NONE;
+ event.entry.granularity = IOMMU_INV_GRAN_PASID;
+ event.entry.flags = IOMMU_INV_FLAGS_ARCHID;
+ event.entry.arch_id = asid;
+ event.entry.iova = n->start;
+ event.entry.addr_mask = n->end - n->start;
+
+ memory_region_notify_iommu_one(n, &event);
+}
+
+
/* invalidate an asid/iova range tuple in all mr's */
static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova,
uint8_t tg, uint64_t num_pages)
@@ -910,6 +935,22 @@ smmuv3_invalidate_ste(gpointer key, gpointer value, gpointer user_data)
return true;
}
+static void smmuv3_s1_asid_inval(SMMUState *s, uint16_t asid)
+{
+ SMMUDevice *sdev;
+
+ trace_smmuv3_s1_asid_inval(asid);
+ QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
+ IOMMUMemoryRegion *mr = &sdev->iommu;
+ IOMMUNotifier *n;
+
+ IOMMU_NOTIFIER_FOREACH(n, mr) {
+ smmuv3_notify_asid(mr, n, asid);
+ }
+ }
+ smmu_iotlb_inv_asid(s, asid);
+}
+
static int smmuv3_cmdq_consume(SMMUv3State *s)
{
SMMUState *bs = ARM_SMMU(s);
@@ -1020,8 +1061,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
uint16_t asid = CMD_ASID(&cmd);
trace_smmuv3_cmdq_tlbi_nh_asid(asid);
- smmu_inv_notifiers_all(&s->smmu_state);
- smmu_iotlb_inv_asid(bs, asid);
+ smmuv3_s1_asid_inval(bs, asid);
break;
}
case SMMU_CMD_TLBI_NH_ALL:
@@ -46,6 +46,7 @@ smmuv3_cmdq_cfgi_cd(uint32_t sid) "sid=0x%x"
smmuv3_config_cache_hit(uint32_t sid, uint32_t hits, uint32_t misses, uint32_t perc) "Config cache HIT for sid=0x%x (hits=%d, misses=%d, hit rate=%d)"
smmuv3_config_cache_miss(uint32_t sid, uint32_t hits, uint32_t misses, uint32_t perc) "Config cache MISS for sid=0x%x (hits=%d, misses=%d, hit rate=%d)"
smmuv3_s1_range_inval(int vmid, int asid, uint64_t addr, uint8_t tg, uint64_t num_pages, uint8_t ttl, bool leaf) "vmid=%d asid=%d addr=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64" ttl=%d leaf=%d"
+smmuv3_s1_asid_inval(int asid) "asid=%d"
smmuv3_cmdq_tlbi_nh(void) ""
smmuv3_cmdq_tlbi_nh_asid(uint16_t asid) "asid=%d"
smmuv3_config_cache_inv(uint32_t sid) "Config cache INV for sid=0x%x"
At the moment the ASID invalidation command (CMD_TLBI_NH_ASID) is propagated as a domain invalidation (the whole notifier range is invalidated independently of any ASID information). The new granularity field now allows us to be more precise and restrict the invalidation to a particular ASID. Set the corresponding fields and flag. We still keep the iova and addr_mask settings for consumers that do not support the new fields, like VHOST. Signed-off-by: Eric Auger <eric.auger@redhat.com> --- v8 -> v9: - restore the iova and addr_mask settings for consumers that do not support the new fields like VHOST --- hw/arm/smmuv3.c | 44 ++++++++++++++++++++++++++++++++++++++++++-- hw/arm/trace-events | 1 + 2 files changed, 43 insertions(+), 2 deletions(-)