@@ -226,9 +226,14 @@ static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
- unsigned long iova)
+ unsigned long iova, unsigned int num_inv)
{
- __raw_writel((iova & SPAGE_MASK) | 1, sfrbase + REG_MMU_FLUSH_ENTRY);
+ unsigned int i;
+ for (i = 0; i < num_inv; i++) {
+ __raw_writel((iova & SPAGE_MASK) | 1,
+ sfrbase + REG_MMU_FLUSH_ENTRY);
+ iova += SPAGE_SIZE;
+ }
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase,
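For illustration, a minimal standalone sketch of what the loop above issues: one REG_MMU_FLUSH_ENTRY write per small page, starting from the masked iova. The SPAGE_SIZE/SPAGE_MASK values and the show_flush_entry_writes() helper are assumptions made for the sketch, not driver code (SPAGE being the driver's 4KB small page).

/* Sketch only: models __sysmmu_tlb_invalidate_entry() above.
 * SPAGE_SIZE/SPAGE_MASK values are assumptions (4KB small pages). */
#include <stdio.h>

#define SPAGE_SIZE	4096UL
#define SPAGE_MASK	(~(SPAGE_SIZE - 1))

static void show_flush_entry_writes(unsigned long iova, unsigned int num_inv)
{
	unsigned int i;

	/* One register write per small page, bit 0 set as in the driver. */
	for (i = 0; i < num_inv; i++) {
		printf("REG_MMU_FLUSH_ENTRY <- 0x%08lx\n",
		       (iova & SPAGE_MASK) | 1);
		iova += SPAGE_SIZE;
	}
}

int main(void)
{
	show_flush_entry_writes(0x20010000UL, 4);	/* four 4KB pages */
	return 0;
}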
@@ -452,7 +457,8 @@ static bool exynos_sysmmu_disable(struct device *dev)
return disabled;
}

-static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova)
+static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova,
+ size_t size)
{
unsigned long flags;
struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
@@ -460,9 +466,25 @@ static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova)
read_lock_irqsave(&data->lock, flags);

if (is_sysmmu_active(data)) {
+ unsigned int maj;
+ unsigned int num_inv = 1;
+ maj = __raw_readl(data->sfrbase + REG_MMU_VERSION);
+ /*
+ * L2TLB invalidation required:
+ * 4KB page: 1 invalidation
+ * 64KB page: 16 invalidations
+ * 1MB page: 64 invalidations
+ * because the L2TLB is 8-way set-associative
+ * with 64 sets.
+ * A 1MB page can be cached in any of the 64 sets;
+ * a 64KB page in one of 16 consecutive sets.
+ */
+ if ((maj >> 28) == 2) /* major version is in bits [31:28] */
+ num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);
+
if (sysmmu_block(data->sfrbase)) {
__sysmmu_tlb_invalidate_entry(
- data->sfrbase, iova);
+ data->sfrbase, iova, num_inv);
sysmmu_unblock(data->sfrbase);
}
} else {
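The 64-entry cap implements the reasoning in the comment above. A minimal sketch of the num_inv computation, assuming a 4KB PAGE_SIZE; calc_num_inv() is a hypothetical helper mirroring the min_t() expression, not the driver's API:

/* Sketch only: mirrors the num_inv calculation above.
 * PAGE_SIZE value and calc_num_inv() are assumptions. */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

static unsigned int calc_num_inv(unsigned int maj, size_t size)
{
	unsigned int num_inv = 1;

	/* Version-2 SysMMUs: one flush per 4KB page, capped at 64,
	 * since a 1MB section may sit in any of the 64 L2TLB sets. */
	if (maj == 2)
		num_inv = size / PAGE_SIZE < 64 ? size / PAGE_SIZE : 64;

	return num_inv;
}

int main(void)
{
	printf("4KB: %u, 64KB: %u, 1MB: %u\n",
	       calc_num_inv(2, 4096), calc_num_inv(2, 65536),
	       calc_num_inv(2, 1048576));	/* 1, 16, 64 */
	return 0;
}

For a 64KB large page this yields the 16 consecutive-set flushes the comment calls for, and anything of 1MB or more is clamped to one flush per set.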
@@ -915,7 +937,7 @@ done:

spin_lock_irqsave(&priv->lock, flags);
list_for_each_entry(data, &priv->clients, node)
- sysmmu_tlb_invalidate_entry(data->dev, iova);
+ sysmmu_tlb_invalidate_entry(data->dev, iova, size);
spin_unlock_irqrestore(&priv->lock, flags);

return size;