@@ -560,6 +560,13 @@ struct VTDPASIDCacheInfo {
};
typedef struct VTDPASIDCacheInfo VTDPASIDCacheInfo;
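+/*
+ * Scope (domain_id + pasid) of a PASID-based IOTLB invalidation,
+ * bundled with the invalidation payload to propagate to the host.
+ */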
+struct VTDPIOTLBInvInfo {
+    uint16_t domain_id;
+    uint32_t pasid;
+    struct iommu_hwpt_vtd_s1_invalidate *inv_data;
+};
+typedef struct VTDPIOTLBInvInfo VTDPIOTLBInvInfo;
+
/* PASID Table Related Definitions */
#define VTD_PASID_DIR_BASE_ADDR_MASK (~0xfffULL)
#define VTD_PASID_TABLE_BASE_ADDR_MASK (~0xfffULL)
@@ -3786,15 +3786,118 @@ static bool vtd_process_pasid_desc(IntelIOMMUState *s,
return !pc_info.error_happened ? true : false;
}
+/**
+ * Caller of this function should hold iommu_lock.
+ */
+static void vtd_invalidate_piotlb(VTDPASIDAddressSpace *vtd_pasid_as,
+                                  struct iommu_hwpt_vtd_s1_invalidate *cache)
+{
+    VTDIOMMUFDDevice *vtd_idev;
+    VTDHwpt *hwpt = &vtd_pasid_as->hwpt;
+    int devfn = vtd_pasid_as->devfn;
+    struct vtd_as_key key = {
+        .bus = vtd_pasid_as->bus,
+        .devfn = devfn,
+    };
+    IntelIOMMUState *s = vtd_pasid_as->iommu_state;
+    uint32_t entry_num = 1; /* Only implement one request for simplicity */
+
+    if (!hwpt) {
+        return;
+    }
+
+    vtd_idev = g_hash_table_lookup(s->vtd_iommufd_dev, &key);
+    if (!vtd_idev || !vtd_idev->idev) {
+        return;
+    }
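+    /*
+     * Propagate the stage-1 cache invalidation to the host via the
+     * iommufd backend for the hwpt backing this PASID address space.
+     */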
+    if (iommufd_backend_invalidate_cache(vtd_idev->idev->iommufd, hwpt->hwpt_id,
+                                         IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
+                                         sizeof(*cache), &entry_num, cache)) {
+        error_report("Cache flush failed, entry_num %d", entry_num);
+    }
+}
+
+/**
+ * This function is an iteration callback for the s->vtd_pasid_as
+ * hash table, using VTDPIOTLBInvInfo as the filter. It propagates
+ * the PASID-based IOTLB invalidation to the host for each matching
+ * entry. Caller of this function should hold iommu_lock.
+ */
+static void vtd_flush_pasid_iotlb(gpointer key, gpointer value,
+                                  gpointer user_data)
+{
+    VTDPIOTLBInvInfo *piotlb_info = user_data;
+    VTDPASIDAddressSpace *vtd_pasid_as = value;
+    VTDPASIDCacheEntry *pc_entry = &vtd_pasid_as->pasid_cache_entry;
+    uint16_t did;
+
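+    /*
+     * Only entries using first-level translation (FLT) are backed by
+     * a host stage-1 page table; nothing to flush otherwise.
+     */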
+    if (!vtd_pe_pgtt_is_flt(&pc_entry->pasid_entry)) {
+        return;
+    }
+
+    did = vtd_pe_get_domain_id(&pc_entry->pasid_entry);
+
+    if ((piotlb_info->domain_id == did) &&
+        (piotlb_info->pasid == vtd_pasid_as->pasid)) {
+        vtd_invalidate_piotlb(vtd_pasid_as,
+                              piotlb_info->inv_data);
+    }
+
+    /*
+     * TODO: also flush QEMU's piotlb cache once its piotlb
+     * infrastructure is in place. For now, propagating to the host
+     * is sufficient for passthrough devices.
+     */
+}
+
static void vtd_piotlb_pasid_invalidate(IntelIOMMUState *s,
                                        uint16_t domain_id, uint32_t pasid)
{
+    struct iommu_hwpt_vtd_s1_invalidate cache_info = { 0 };
+    VTDPIOTLBInvInfo piotlb_info;
+
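+    /* addr 0 + npages (uint64_t)-1 covers the whole address space */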
+    cache_info.addr = 0;
+    cache_info.npages = (uint64_t)-1;
+
+    piotlb_info.domain_id = domain_id;
+    piotlb_info.pasid = pasid;
+    piotlb_info.inv_data = &cache_info;
+
+    vtd_iommu_lock(s);
+    /*
+     * Loop over all vtd_pasid_as instances in s->vtd_pasid_as to find
+     * the affected devices, since a piotlb invalidation has to check
+     * the pasid cache from an architectural point of view.
+     */
+    g_hash_table_foreach(s->vtd_pasid_as,
+                         vtd_flush_pasid_iotlb, &piotlb_info);
+    vtd_iommu_unlock(s);
}
static void vtd_piotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
                                       uint32_t pasid, hwaddr addr, uint8_t am,
                                       bool ih)
{
+    struct iommu_hwpt_vtd_s1_invalidate cache_info = { 0 };
+    VTDPIOTLBInvInfo piotlb_info;
+
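+    /*
+     * Convert the (addr, am) pair from the descriptor into an explicit
+     * page count; the invalidation hint (ih) maps to the leaf-only flag.
+     */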
+    cache_info.addr = addr;
+    cache_info.npages = 1ULL << am;
+    cache_info.flags = ih ? IOMMU_VTD_INV_FLAGS_LEAF : 0;
+
+    piotlb_info.domain_id = domain_id;
+    piotlb_info.pasid = pasid;
+    piotlb_info.inv_data = &cache_info;
+
+    vtd_iommu_lock(s);
+    /*
+     * Loop over all vtd_pasid_as instances in s->vtd_pasid_as to find
+     * the affected devices, since a piotlb invalidation has to check
+     * the pasid cache from an architectural point of view.
+     */
+    g_hash_table_foreach(s->vtd_pasid_as,
+                         vtd_flush_pasid_iotlb, &piotlb_info);
+    vtd_iommu_unlock(s);
}
static bool vtd_process_piotlb_desc(IntelIOMMUState *s,