@@ -173,11 +173,12 @@ static void mtk_iommu_tlb_flush_all(void *cookie)
}
}
-static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
- size_t granule, bool leaf,
- void *cookie)
+static void mtk_iommu_tlb_add_flush(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
struct mtk_iommu_data *data = cookie;
+ int ret;
+ u32 tmp;
for_each_m4u(data) {
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
@@ -188,21 +189,12 @@ static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
data->base + REG_MMU_INVLD_END_A);
writel_relaxed(F_MMU_INV_RANGE,
data->base + REG_MMU_INVALIDATE);
- data->tlb_flush_active = true;
- }
-}
-
-static void mtk_iommu_tlb_sync(void *cookie)
-{
- struct mtk_iommu_data *data = cookie;
- int ret;
- u32 tmp;
-
- for_each_m4u(data) {
- /* Avoid timing out if there's nothing to wait for */
- if (!data->tlb_flush_active)
- return;
+ /*
+ * There is no TLB flush queue in the HW; it strictly expects
+ * tlb_flush and tlb_sync in pairs. Do the tlb_sync right
+ * after the tlb_flush here so the sequence is never broken.
+ */
ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
tmp, tmp != 0, 10, 100000);
if (ret) {
@@ -212,36 +204,21 @@ static void mtk_iommu_tlb_sync(void *cookie)
}
/* Clear the CPE status */
writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
- data->tlb_flush_active = false;
}
}
-static void mtk_iommu_tlb_flush_walk(unsigned long iova, size_t size,
- size_t granule, void *cookie)
-{
- mtk_iommu_tlb_add_flush_nosync(iova, size, granule, false, cookie);
- mtk_iommu_tlb_sync(cookie);
-}
-
-static void mtk_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
- size_t granule, void *cookie)
+static void mtk_iommu_tlb_flush_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
{
- mtk_iommu_tlb_add_flush_nosync(iova, size, granule, true, cookie);
- mtk_iommu_tlb_sync(cookie);
-}
-
-static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t granule,
- void *cookie)
-{
- mtk_iommu_tlb_add_flush_nosync(iova, granule, granule, true, cookie);
+ mtk_iommu_tlb_add_flush(iova, granule, granule, cookie);
}
static const struct iommu_flush_ops mtk_iommu_flush_ops = {
.tlb_flush_all = mtk_iommu_tlb_flush_all,
- .tlb_flush_walk = mtk_iommu_tlb_flush_walk,
- .tlb_flush_leaf = mtk_iommu_tlb_flush_leaf,
- .tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
+ .tlb_flush_walk = mtk_iommu_tlb_add_flush,
+ .tlb_flush_leaf = mtk_iommu_tlb_add_flush,
+ .tlb_add_page = mtk_iommu_tlb_flush_page,
};
static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
@@ -450,12 +427,6 @@ static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data());
}
-static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
- struct iommu_iotlb_gather *gather)
-{
- mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
-}
-
static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
@@ -558,7 +529,7 @@ static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
.map = mtk_iommu_map,
.unmap = mtk_iommu_unmap,
.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
- .iotlb_sync = mtk_iommu_iotlb_sync,
+ /* No iotlb_sync callback here since tlb_sync is always done right after tlb_flush */
.iova_to_phys = mtk_iommu_iova_to_phys,
.add_device = mtk_iommu_add_device,
.remove_device = mtk_iommu_remove_device,
@@ -57,7 +57,6 @@ struct mtk_iommu_data {
struct mtk_iommu_domain *m4u_dom;
struct iommu_group *m4u_group;
bool enable_4GB;
- bool tlb_flush_active;
struct iommu_device iommu;
const struct mtk_iommu_plat_data *plat_data;
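
For reference, this is how the merged helper reads once the hunks above are applied. It is a sketch reconstructed from the visible diff lines only: the invalidate-select and range-register writes and the timeout error path are hidden by the hunk boundaries and appear here as placeholder comments, and the surrounding driver definitions (for_each_m4u, the REG_MMU_* offsets) are assumed unchanged.

static void mtk_iommu_tlb_add_flush(unsigned long iova, size_t size,
				    size_t granule, void *cookie)
{
	struct mtk_iommu_data *data = cookie;
	int ret;
	u32 tmp;

	for_each_m4u(data) {
		/*
		 * Select the invalidation target and program the range
		 * registers (REG_MMU_INVLD_START_A/END_A); those writes
		 * are elided by the hunk boundaries above.
		 */
		writel_relaxed(F_MMU_INV_RANGE,
			       data->base + REG_MMU_INVALIDATE);

		/*
		 * No TLB flush queue in the HW: sync immediately by
		 * polling the completion bit, then clear it.
		 */
		ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
						tmp, tmp != 0, 10, 100000);
		if (ret) {
			/* timeout handling, elided by the hunk boundary above */
		}
		/* Clear the CPE status */
		writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
	}
}

With the sync folded into the flush itself, tlb_flush_walk, tlb_flush_leaf and the tlb_add_page gather path all reuse this one helper, which is why the separate ->iotlb_sync callback and the tlb_flush_active flag can be dropped.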