@@ -3241,7 +3241,7 @@ void device_block_translation(struct device *dev)
if (!dev_is_real_dma_subdevice(dev)) {
if (sm_supported(iommu))
intel_pasid_tear_down_entry(iommu, dev,
- IOMMU_NO_PASID, false);
+ IOMMU_NO_PASID, false, false);
else
domain_context_clear(info);
}
@@ -4060,8 +4060,7 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
intel_iommu_debugfs_remove_dev_pasid(dev_pasid);
kfree(dev_pasid);
out_tear_down:
- intel_pasid_tear_down_entry(iommu, dev, pasid, false);
- intel_drain_pasid_prq(dev, pasid);
+ intel_pasid_tear_down_entry(iommu, dev, pasid, false, true);
}
static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
@@ -233,8 +233,12 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
}
+/*
+ * Not every PASID entry teardown requires a PRQ drain, as the drain may
+ * already be handled in the remove_dev_pasid() path. The caller must be
+ * explicit about whether a drain is needed.
+ */
void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
- u32 pasid, bool fault_ignore)
+ u32 pasid, bool fault_ignore, bool drain_prq)
{
struct pasid_entry *pte;
u16 did, pgtt;
@@ -264,6 +268,9 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
/* Device IOTLB doesn't need to be flushed in caching mode. */
if (!cap_caching_mode(iommu->cap))
devtlb_invalidation_with_pasid(iommu, dev, pasid);
+
+ if (drain_prq)
+ intel_drain_pasid_prq(dev, pasid);
}
/*
@@ -313,9 +313,8 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
struct device *dev, u32 pasid);
int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
u32 pasid, struct dmar_domain *domain);
-void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
- struct device *dev, u32 pasid,
- bool fault_ignore);
+void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
+ u32 pasid, bool fault_ignore, bool drain_prq);
void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
struct device *dev, u32 pasid);
int intel_pasid_setup_sm_context(struct device *dev);
@@ -175,8 +175,12 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain) {
info = dev_iommu_priv_get(dev_pasid->dev);
+ /*
+ * PRQ drain would happen in the remove_dev_pasid() path,
+ * no need to do it here.
+ */
intel_pasid_tear_down_entry(info->iommu, dev_pasid->dev,
- dev_pasid->pasid, true);
+ dev_pasid->pasid, true, false);
}
spin_unlock_irqrestore(&domain->lock, flags);
Draining the PRQ is needed before repurposing a PASID, so it makes sense to invoke the drain in intel_pasid_tear_down_entry(). Signed-off-by: Yi Liu <yi.l.liu@intel.com> --- drivers/iommu/intel/iommu.c | 5 ++--- drivers/iommu/intel/pasid.c | 9 ++++++++- drivers/iommu/intel/pasid.h | 5 ++--- drivers/iommu/intel/svm.c | 6 +++++- 4 files changed, 17 insertions(+), 8 deletions(-)