Message ID | 20240718081636.879544-10-zhenzhong.duan@intel.com
---|---
State | New, archived
Series | intel_iommu: Enable stage-1 translation for emulated device

According to spec, Page-Selective-within-Domain Invalidation (11b):

1. IOTLB entries caching second-stage (PGTT=010b) or pass-through
   (PGTT=100b) mappings associated with the specified domain-id and the
   input-address range are invalidated.
2. IOTLB entries caching first-stage (PGTT=001b) or nested (PGTT=011b)
   mappings associated with the specified domain-id are invalidated.

So per the spec definition, a Page-Selective-within-Domain Invalidation
needs to flush cached first-stage and nested IOTLB entries as well.

We don't support nested yet, and pass-through mappings are never cached,
so the IOTLB cache holds only first-stage and second-stage mappings.

Add a pgtt tag in VTDIOTLBEntry to record the PGTT type of the mapping,
and invalidate entries based on their PGTT type.

Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
---
 include/hw/i386/intel_iommu.h |  1 +
 hw/i386/intel_iommu.c         | 27 +++++++++++++++++++++------
 2 files changed, 22 insertions(+), 6 deletions(-)

diff --git a/include/hw/i386/intel_iommu.h b/include/hw/i386/intel_iommu.h
index fe9057c50d..b843d069cc 100644
--- a/include/hw/i386/intel_iommu.h
+++ b/include/hw/i386/intel_iommu.h
@@ -155,6 +155,7 @@ struct VTDIOTLBEntry {
     uint64_t pte;
     uint64_t mask;
     uint8_t access_flags;
+    uint8_t pgtt;
 };
 
 /* VT-d Source-ID Qualifier types */
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 210df32f01..8d47e5ba78 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -305,9 +305,21 @@ static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value,
     VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data;
     uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask;
     uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K;
-    return (entry->domain_id == info->domain_id) &&
-            (((entry->gfn & info->mask) == gfn) ||
-             (entry->gfn == gfn_tlb));
+
+    if (entry->domain_id != info->domain_id) {
+        return false;
+    }
+
+    /*
+     * According to spec, IOTLB entries caching first-stage (PGTT=001b) or
+     * nested (PGTT=011b) mapping associated with specified domain-id are
+     * invalidated. Nested isn't supported yet, so only need to check 001b.
+     */
+    if (entry->pgtt == VTD_SM_PASID_ENTRY_FLT) {
+        return true;
+    }
+
+    return (entry->gfn & info->mask) == gfn || entry->gfn == gfn_tlb;
 }
 
 /* Reset all the gen of VTDAddressSpace to zero and set the gen of
@@ -382,7 +394,7 @@ out:
 static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
                              uint16_t domain_id, hwaddr addr, uint64_t pte,
                              uint8_t access_flags, uint32_t level,
-                             uint32_t pasid)
+                             uint32_t pasid, uint8_t pgtt)
 {
     VTDIOTLBEntry *entry = g_malloc(sizeof(*entry));
     struct vtd_iotlb_key *key = g_malloc(sizeof(*key));
@@ -400,6 +412,7 @@ static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
     entry->access_flags = access_flags;
     entry->mask = vtd_pt_level_page_mask(level);
     entry->pasid = pasid;
+    entry->pgtt = pgtt;
 
     key->gfn = gfn;
     key->sid = source_id;
@@ -2071,7 +2084,7 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
     bool is_fpd_set = false;
     bool reads = true;
     bool writes = true;
-    uint8_t access_flags;
+    uint8_t access_flags, pgtt;
     bool rid2pasid = (pasid == PCI_NO_PASID) && s->root_scalable;
     VTDIOTLBEntry *iotlb_entry;
 
@@ -2179,9 +2192,11 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
     if (s->scalable_modern && s->root_scalable) {
         ret_fr = vtd_iova_to_flpte(s, &ce, addr, is_write, &pte, &level,
                                    &reads, &writes, s->aw_bits, pasid);
+        pgtt = VTD_SM_PASID_ENTRY_FLT;
     } else {
         ret_fr = vtd_iova_to_slpte(s, &ce, addr, is_write, &pte, &level,
                                    &reads, &writes, s->aw_bits, pasid);
+        pgtt = VTD_SM_PASID_ENTRY_SLT;
     }
     if (ret_fr) {
         vtd_report_fault(s, -ret_fr, is_fpd_set, source_id,
@@ -2192,7 +2207,7 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
     page_mask = vtd_pt_level_page_mask(level);
     access_flags = IOMMU_ACCESS_FLAG(reads, writes);
     vtd_update_iotlb(s, source_id, vtd_get_domain_id(s, &ce, pasid),
-                     addr, pte, access_flags, level, pasid);
+                     addr, pte, access_flags, level, pasid, pgtt);
 out:
     vtd_iommu_unlock(s);
     entry->iova = addr & page_mask;
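
For readers skimming the archive, here is a minimal, self-contained sketch of the match logic this patch installs in vtd_hash_remove_by_page(). It is not QEMU code: IotlbEntry, PageInvInfo, and the PGTT_* constants are simplified stand-ins for the real VTDIOTLBEntry, VTDIOTLBPageInvInfo, and VTD_SM_PASID_ENTRY_* definitions, and it assumes a single-4K-page invalidation where info.mask selects one frame.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_4K 12

/* PGTT encodings from the VT-d PASID entry: 001b first-stage, 010b second-stage. */
enum { PGTT_FLT = 1, PGTT_SLT = 2 };

typedef struct {
    uint16_t domain_id;
    uint64_t gfn;   /* page frame of the cached translation */
    uint64_t mask;  /* address mask of the entry (4K, 2M, 1G page) */
    uint8_t  pgtt;  /* which page-table type produced this entry */
} IotlbEntry;

typedef struct {
    uint16_t domain_id;
    uint64_t addr;  /* invalidation base address */
    uint64_t mask;  /* invalidation range mask, in 4K frames */
} PageInvInfo;

/* Mirrors the reworked predicate: domain check, then PGTT-aware matching. */
static bool entry_matches(const IotlbEntry *entry, const PageInvInfo *info)
{
    uint64_t gfn = (info->addr >> PAGE_SHIFT_4K) & info->mask;
    uint64_t gfn_tlb = (info->addr & entry->mask) >> PAGE_SHIFT_4K;

    if (entry->domain_id != info->domain_id) {
        return false;
    }
    /* First-stage entries are flushed domain-wide; the address is ignored. */
    if (entry->pgtt == PGTT_FLT) {
        return true;
    }
    /* Second-stage entries must hit the range, either as a small page in
     * the range or as a large page covering the invalidation address. */
    return (entry->gfn & info->mask) == gfn || entry->gfn == gfn_tlb;
}

int main(void)
{
    PageInvInfo inv = { .domain_id = 1, .addr = 0x200000, .mask = ~0ULL };
    IotlbEntry fs  = { .domain_id = 1, .gfn = 0x999, .mask = ~0xfffULL, .pgtt = PGTT_FLT };
    IotlbEntry ss  = { .domain_id = 1, .gfn = 0x200, .mask = ~0xfffULL, .pgtt = PGTT_SLT };
    IotlbEntry ss2 = { .domain_id = 1, .gfn = 0x300, .mask = ~0xfffULL, .pgtt = PGTT_SLT };

    printf("first-stage, other addr: %d\n", entry_matches(&fs, &inv));  /* 1 */
    printf("second-stage, hit:       %d\n", entry_matches(&ss, &inv));  /* 1 */
    printf("second-stage, miss:      %d\n", entry_matches(&ss2, &inv)); /* 0 */
    return 0;
}
```

The first case shows the behavioral change: a first-stage entry in the target domain is flushed even though its address lies outside the requested range, which is what rule 2 of the spec text above requires.
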
Reviewed-by: Clément Mathieu--Drif <clement.mathieu--drif@eviden.com>

On 18/07/2024 10:16, Zhenzhong Duan wrote:
> According to spec, Page-Selective-within-Domain Invalidation (11b):
> [...]