| Message ID | 20240408140818.3799590-8-smostafa@google.com (mailing list archive) |
| --- | --- |
| State | New, archived |
| Series | SMMUv3 nested translation support |
Hi Mostafa,

On 4/8/24 16:08, Mostafa Saleh wrote:
> Some commands need rework for nesting, as they used to assume S1
> and S2 are mutually exclusive:
>
> - CMD_TLBI_NH_ASID: Consider VMID if stage-2 is supported
> - CMD_TLBI_NH_ALL: Consider VMID if stage-2 is supported, otherwise
>   invalidate everything, this required a new vmid invalidation
>   function for stage-1 only (ASID >= 0)
>
> Also, rework trace events to reflect the new implementation.

This does not apply for me. Could you share a branch or respin?

Thank you in advance

Eric

> Signed-off-by: Mostafa Saleh <smostafa@google.com>
> ---
>  hw/arm/smmu-common.c         | 36 +++++++++++++++++++++++++++++-------
>  hw/arm/smmuv3.c              | 31 +++++++++++++++++++++++++++++--
>  hw/arm/trace-events          |  6 ++++--
>  include/hw/arm/smmu-common.h |  3 ++-
>  4 files changed, 64 insertions(+), 12 deletions(-)
>
> [full patch quoted; see the diff below]
Hi Eric,

On Thu, Apr 18, 2024 at 04:48:39PM +0200, Eric Auger wrote:
> Hi Mostafa,
>
> On 4/8/24 16:08, Mostafa Saleh wrote:
> > Some commands need rework for nesting, as they used to assume S1
> > and S2 are mutually exclusive:
> >
> > - CMD_TLBI_NH_ASID: Consider VMID if stage-2 is supported
> > - CMD_TLBI_NH_ALL: Consider VMID if stage-2 is supported, otherwise
> >   invalidate everything, this required a new vmid invalidation
> >   function for stage-1 only (ASID >= 0)
> >
> > Also, rework trace events to reflect the new implementation.
>
> This does not apply for me. Could you share a branch or respin?

Oh, Sorry about that. I will address the previous comments and respin.

Thanks,
Mostafa

> Thank you in advance
>
> Eric
>
> > [full patch quoted; see the diff below]
Some commands need rework for nesting, as they used to assume S1
and S2 are mutually exclusive:

- CMD_TLBI_NH_ASID: Consider VMID if stage-2 is supported
- CMD_TLBI_NH_ALL: Consider VMID if stage-2 is supported, otherwise
  invalidate everything, this required a new vmid invalidation
  function for stage-1 only (ASID >= 0)

Also, rework trace events to reflect the new implementation.

Signed-off-by: Mostafa Saleh <smostafa@google.com>
---
 hw/arm/smmu-common.c         | 36 +++++++++++++++++++++++++++++-------
 hw/arm/smmuv3.c              | 31 +++++++++++++++++++++++++++++--
 hw/arm/trace-events          |  6 ++++--
 include/hw/arm/smmu-common.h |  3 ++-
 4 files changed, 64 insertions(+), 12 deletions(-)

diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
index 8b9e59b24b..b1cf1303c6 100644
--- a/hw/arm/smmu-common.c
+++ b/hw/arm/smmu-common.c
@@ -148,13 +148,14 @@ void smmu_iotlb_inv_all(SMMUState *s)
     g_hash_table_remove_all(s->iotlb);
 }
 
-static gboolean smmu_hash_remove_by_asid(gpointer key, gpointer value,
-                                         gpointer user_data)
+static gboolean smmu_hash_remove_by_asid_vmid(gpointer key, gpointer value,
+                                              gpointer user_data)
 {
-    int asid = *(int *)user_data;
+    SMMUIOTLBPageInvInfo *info = (SMMUIOTLBPageInvInfo *)user_data;
     SMMUIOTLBKey *iotlb_key = (SMMUIOTLBKey *)key;
 
-    return SMMU_IOTLB_ASID(*iotlb_key) == asid;
+    return (SMMU_IOTLB_ASID(*iotlb_key) == info->asid) &&
+           (SMMU_IOTLB_VMID(*iotlb_key) == info->vmid);
 }
 
 static gboolean smmu_hash_remove_by_vmid(gpointer key, gpointer value,
@@ -166,6 +167,16 @@ static gboolean smmu_hash_remove_by_vmid(gpointer key, gpointer value,
     return SMMU_IOTLB_VMID(*iotlb_key) == vmid;
 }
 
+static gboolean smmu_hash_remove_by_vmid_s1(gpointer key, gpointer value,
+                                            gpointer user_data)
+{
+    int vmid = *(int *)user_data;
+    SMMUIOTLBKey *iotlb_key = (SMMUIOTLBKey *)key;
+
+    return (SMMU_IOTLB_VMID(*iotlb_key) == vmid) &&
+           (SMMU_IOTLB_ASID(*iotlb_key) >= 0);
+}
+
 static gboolean smmu_hash_remove_by_asid_vmid_iova(gpointer key, gpointer value,
                                               gpointer user_data)
 {
@@ -259,10 +270,15 @@ void smmu_iotlb_inv_ipa(SMMUState *s, int vmid, dma_addr_t ipa, uint8_t tg,
                                 &info);
 }
 
-void smmu_iotlb_inv_asid(SMMUState *s, int asid)
+void smmu_iotlb_inv_asid_vmid(SMMUState *s, int asid, int vmid)
 {
-    trace_smmu_iotlb_inv_asid(asid);
-    g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_asid, &asid);
+    SMMUIOTLBPageInvInfo info = {
+        .asid = asid,
+        .vmid = vmid,
+    };
+
+    trace_smmu_iotlb_inv_asid_vmid(asid, vmid);
+    g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_asid_vmid, &info);
 }
 
 inline void smmu_iotlb_inv_vmid(SMMUState *s, int vmid)
@@ -271,6 +287,12 @@ inline void smmu_iotlb_inv_vmid(SMMUState *s, int vmid)
     g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_vmid, &vmid);
 }
 
+inline void smmu_iotlb_inv_vmid_s1(SMMUState *s, int vmid)
+{
+    trace_smmu_iotlb_inv_vmid_s1(vmid);
+    g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_vmid_s1, &vmid);
+}
+
 /* VMSAv8-64 Translation */
 
 /**
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index 17bbd43c13..ece647b8bf 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -1280,25 +1280,52 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
         case SMMU_CMD_TLBI_NH_ASID:
         {
             int asid = CMD_ASID(&cmd);
+            int vmid = -1;
 
             if (!STAGE1_SUPPORTED(s)) {
                 cmd_error = SMMU_CERROR_ILL;
                 break;
             }
 
+            /*
+             * VMID is only matched when stage 2 is supported for the Security
+             * state corresponding to the command queue that the command was
+             * issued in.
+             * QEMU ignores the field by setting to -1, similarly to what STE
+             * decoding does. And invalidation commands ignore VMID < 0.
+             */
+            if (STAGE2_SUPPORTED(s)) {
+                vmid = CMD_VMID(&cmd);
+            }
+
             trace_smmuv3_cmdq_tlbi_nh_asid(asid);
             smmu_inv_notifiers_all(&s->smmu_state);
-            smmu_iotlb_inv_asid(bs, asid);
+            smmu_iotlb_inv_asid_vmid(bs, asid, vmid);
             break;
         }
         case SMMU_CMD_TLBI_NH_ALL:
+        {
+            int vmid = -1;
+
             if (!STAGE1_SUPPORTED(s)) {
                 cmd_error = SMMU_CERROR_ILL;
                 break;
             }
+
+            /*
+             * If stage-2 is supported, invalidate for this VMID only, otherwise
+             * invalidate the whole thing, see SMMU_CMD_TLBI_NH_ASID()
+             */
+            if (STAGE2_SUPPORTED(s)) {
+                vmid = CMD_VMID(&cmd);
+                trace_smmuv3_cmdq_tlbi_nh(vmid);
+                smmu_iotlb_inv_vmid_s1(bs, vmid);
+                break;
+            }
             QEMU_FALLTHROUGH;
+        }
         case SMMU_CMD_TLBI_NSNH_ALL:
-            trace_smmuv3_cmdq_tlbi_nh();
+            trace_smmuv3_cmdq_tlbi_nsnh();
             smmu_inv_notifiers_all(&s->smmu_state);
             smmu_iotlb_inv_all(bs);
             break;
diff --git a/hw/arm/trace-events b/hw/arm/trace-events
index f5c361d96e..2556f4721a 100644
--- a/hw/arm/trace-events
+++ b/hw/arm/trace-events
@@ -11,8 +11,9 @@ smmu_ptw_page_pte(int stage, int level, uint64_t iova, uint64_t baseaddr, uint6
 smmu_ptw_block_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, uint64_t pte, uint64_t iova, uint64_t gpa, int bsize_mb) "stage=%d level=%d base@=0x%"PRIx64" pte@=0x%"PRIx64" pte=0x%"PRIx64" iova=0x%"PRIx64" block address = 0x%"PRIx64" block size = %d MiB"
 smmu_get_pte(uint64_t baseaddr, int index, uint64_t pteaddr, uint64_t pte) "baseaddr=0x%"PRIx64" index=0x%x, pteaddr=0x%"PRIx64", pte=0x%"PRIx64
 smmu_iotlb_inv_all(void) "IOTLB invalidate all"
-smmu_iotlb_inv_asid(uint16_t asid) "IOTLB invalidate asid=%d"
+smmu_iotlb_inv_asid_vmid(int asid, uint16_t vmid) "IOTLB invalidate asid=%d vmid=%d"
 smmu_iotlb_inv_vmid(uint16_t vmid) "IOTLB invalidate vmid=%d"
+smmu_iotlb_inv_vmid_s1(uint16_t vmid) "IOTLB invalidate vmid=%d"
 smmu_iotlb_inv_iova(uint16_t asid, uint64_t addr) "IOTLB invalidate asid=%d addr=0x%"PRIx64
 smmu_inv_notifiers_mr(const char *name) "iommu mr=%s"
 smmu_iotlb_lookup_hit(int asid, uint16_t vmid, uint64_t addr, uint64_t mask, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache HIT asid=%d vmid=%d addr=0x%"PRIx64" mask=0x%"PRIx64" hit=%d miss=%d hit rate=%d"
@@ -47,7 +48,8 @@ smmuv3_cmdq_cfgi_cd(uint32_t sid) "sid=0x%x"
 smmuv3_config_cache_hit(uint32_t sid, uint32_t hits, uint32_t misses, uint32_t perc) "Config cache HIT for sid=0x%x (hits=%d, misses=%d, hit rate=%d)"
 smmuv3_config_cache_miss(uint32_t sid, uint32_t hits, uint32_t misses, uint32_t perc) "Config cache MISS for sid=0x%x (hits=%d, misses=%d, hit rate=%d)"
 smmuv3_range_inval(int vmid, int asid, uint64_t addr, uint8_t tg, uint64_t num_pages, uint8_t ttl, bool leaf, int stage) "vmid=%d asid=%d addr=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64" ttl=%d leaf=%d stage=%d"
-smmuv3_cmdq_tlbi_nh(void) ""
+smmuv3_cmdq_tlbi_nh(int vmid) "vmid=%d"
+smmuv3_cmdq_tlbi_nsnh(void) ""
 smmuv3_cmdq_tlbi_nh_asid(uint16_t asid) "asid=%d"
 smmuv3_cmdq_tlbi_s12_vmid(uint16_t vmid) "vmid=%d"
 smmuv3_config_cache_inv(uint32_t sid) "Config cache INV for sid=0x%x"
diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h
index df166d8477..67db30e85b 100644
--- a/include/hw/arm/smmu-common.h
+++ b/include/hw/arm/smmu-common.h
@@ -226,8 +226,9 @@ void smmu_iotlb_insert(SMMUState *bs, SMMUTransCfg *cfg, SMMUTLBEntry *entry);
 SMMUIOTLBKey smmu_get_iotlb_key(int asid, int vmid, uint64_t iova,
                                 uint8_t tg, uint8_t level);
 void smmu_iotlb_inv_all(SMMUState *s);
-void smmu_iotlb_inv_asid(SMMUState *s, int asid);
+void smmu_iotlb_inv_asid_vmid(SMMUState *s, int asid, int vmid);
 void smmu_iotlb_inv_vmid(SMMUState *s, int vmid);
+void smmu_iotlb_inv_vmid_s1(SMMUState *s, int vmid);
 void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova,
                          uint8_t tg, uint64_t num_pages, uint8_t ttl);
 void smmu_iotlb_inv_ipa(SMMUState *s, int vmid, dma_addr_t ipa, uint8_t tg,
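As a reading aid (not part of the patch): the series relies on the convention that a cached IOTLB key carries ASID == -1 for entries inserted by stage-2-only translation, and VMID == -1 when stage-2 is not in use. The minimal C sketch below restates the two VMID-scoped match rules from smmu-common.c with a simplified, hypothetical key struct (ExampleKey and the match_* helpers are illustrative names, not QEMU APIs), just to make the ASID >= 0 filter of the new stage-1-only invalidation explicit.

```c
#include <stdbool.h>

/* Simplified stand-in for SMMUIOTLBKey; field layout is illustrative only. */
typedef struct {
    int asid;   /* -1 for entries cached by stage-2-only translation */
    int vmid;   /* -1 when stage-2 is not in use */
} ExampleKey;

/* Like smmu_hash_remove_by_vmid(): match every entry under this VMID. */
static bool match_vmid(const ExampleKey *k, int vmid)
{
    return k->vmid == vmid;
}

/*
 * Like the new smmu_hash_remove_by_vmid_s1(): match only stage-1 entries
 * (ASID >= 0) under this VMID, which is what CMD_TLBI_NH_ALL needs when
 * stage-2 is also supported, so stage-2 (ASID == -1) entries survive.
 */
static bool match_vmid_s1(const ExampleKey *k, int vmid)
{
    return k->vmid == vmid && k->asid >= 0;
}
```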