@@ -2574,6 +2574,29 @@ static int arm_smmu_sync_dirty_log(struct iommu_domain *domain,
base_iova, bitmap_pgshift);
}
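
+/*
+ * Mark pages that are flagged dirty in @bitmap as clean again, so that the
+ * hardware dirty state tracking (HTTU_HD) starts recording new writes to
+ * them. The actual PTE update is done by the io-pgtable walker.
+ */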
+static int arm_smmu_clear_dirty_log(struct iommu_domain *domain,
+ unsigned long iova, size_t size,
+ unsigned long *bitmap,
+ unsigned long base_iova,
+ unsigned long bitmap_pgshift)
+{
+ struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
+ struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
+
+ if (!(smmu->features & ARM_SMMU_FEAT_HTTU_HD)) {
+ dev_err(smmu->dev, "HTTU_HD not supported, cannot clear dirty log\n");
+ return -EPERM;
+ }
+
+ if (!ops || !ops->clear_dirty_log) {
+ pr_err("io-pgtable doesn't support clear dirty log\n");
+ return -ENODEV;
+ }
+
+ return ops->clear_dirty_log(ops, iova, size, bitmap, base_iova,
+ bitmap_pgshift);
+}
+
static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
return iommu_fwspec_add_ids(dev, args->args, 1);
@@ -2676,6 +2699,7 @@ static struct iommu_ops arm_smmu_ops = {
.split_block = arm_smmu_split_block,
.merge_page = arm_smmu_merge_page,
.sync_dirty_log = arm_smmu_sync_dirty_log,
+ .clear_dirty_log = arm_smmu_clear_dirty_log,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
@@ -966,6 +966,100 @@ static int arm_lpae_sync_dirty_log(struct io_pgtable_ops *ops,
bitmap, base_iova, bitmap_pgshift);
}
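
+/*
+ * Walk down to the entries covering [iova, iova + size) and, for each
+ * writable leaf whose pages are all marked dirty in @bitmap, set
+ * ARM_LPAE_PTE_AP_RDONLY again so the entry reads back as clean. A dirty
+ * leaf that is only partially covered by the range cannot be handled here
+ * and makes the walk fail with -EINVAL.
+ */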
+static int __arm_lpae_clear_dirty_log(struct arm_lpae_io_pgtable *data,
+ unsigned long iova, size_t size,
+ int lvl, arm_lpae_iopte *ptep,
+ unsigned long *bitmap,
+ unsigned long base_iova,
+ unsigned long bitmap_pgshift)
+{
+ arm_lpae_iopte pte;
+ struct io_pgtable *iop = &data->iop;
+ unsigned long offset;
+ size_t base, next_size;
+ int nbits, ret, i;
+
+ if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
+ return -EINVAL;
+
+ ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
+ pte = READ_ONCE(*ptep);
+ if (WARN_ON(!pte))
+ return -EINVAL;
+
+ if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
+ if (iopte_leaf(pte, lvl, iop->fmt)) {
+ if (pte & ARM_LPAE_PTE_AP_RDONLY)
+ return 0;
+
+ /* Ensure all corresponding bits are set */
+ nbits = size >> bitmap_pgshift;
+ offset = (iova - base_iova) >> bitmap_pgshift;
+ for (i = offset; i < offset + nbits; i++) {
+ if (!test_bit(i, bitmap))
+ return 0;
+ }
+
+ /*
+ * Every covered page is reported dirty, so no dirty state is
+ * lost by marking the whole block clean (read-only) again.
+ */
+ pte |= ARM_LPAE_PTE_AP_RDONLY;
+ __arm_lpae_set_pte(ptep, pte, &iop->cfg);
+ return 0;
+ } else {
+ /* To traverse next level */
+ next_size = ARM_LPAE_BLOCK_SIZE(lvl + 1, data);
+ ptep = iopte_deref(pte, data);
+ for (base = 0; base < size; base += next_size) {
+ ret = __arm_lpae_clear_dirty_log(data,
+ iova + base, next_size, lvl + 1,
+ ptep, bitmap, base_iova,
+ bitmap_pgshift);
+ if (ret)
+ return ret;
+ }
+ return 0;
+ }
+ } else if (iopte_leaf(pte, lvl, iop->fmt)) {
+ /*
+ * The range is smaller than this leaf block. If the block is
+ * already clean (read-only) there is nothing to clear;
+ * otherwise it would have to be split first, which is not
+ * done here.
+ */
+ if (pte & ARM_LPAE_PTE_AP_RDONLY)
+ return 0;
+
+ return -EINVAL;
+ }
+
+ /* Keep on walkin' */
+ ptep = iopte_deref(pte, data);
+ return __arm_lpae_clear_dirty_log(data, iova, size, lvl + 1, ptep,
+ bitmap, base_iova, bitmap_pgshift);
+}
+
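+/*
+ * Entry point for the clear_dirty_log io_pgtable op. Hardware dirty state
+ * tracking is only wired up for the stage 1 formats, so everything else is
+ * rejected.
+ */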
+static int arm_lpae_clear_dirty_log(struct io_pgtable_ops *ops,
+ unsigned long iova, size_t size,
+ unsigned long *bitmap,
+ unsigned long base_iova,
+ unsigned long bitmap_pgshift)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ arm_lpae_iopte *ptep = data->pgd;
+ int lvl = data->start_level;
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+ long iaext = (s64)iova >> cfg->ias;
+
+ if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
+ return -EINVAL;
+
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
+ iaext = ~iaext;
+ if (WARN_ON(iaext))
+ return -EINVAL;
+
+ if (data->iop.fmt != ARM_64_LPAE_S1 &&
+ data->iop.fmt != ARM_32_LPAE_S1)
+ return -EINVAL;
+
+ return __arm_lpae_clear_dirty_log(data, iova, size, lvl, ptep,
+ bitmap, base_iova, bitmap_pgshift);
+}
+
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
unsigned long granule, page_sizes;
@@ -1047,6 +1141,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
.split_block = arm_lpae_split_block,
.merge_page = arm_lpae_merge_page,
.sync_dirty_log = arm_lpae_sync_dirty_log,
+ .clear_dirty_log = arm_lpae_clear_dirty_log,
};
return data;
@@ -2863,6 +2863,77 @@ int iommu_sync_dirty_log(struct iommu_domain *domain, unsigned long iova,
}
EXPORT_SYMBOL_GPL(iommu_sync_dirty_log);
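
+/*
+ * Clearing the dirty log marks the reported-dirty IOVAs clean again so that
+ * later DMA writes are tracked. A sketch of the expected flow for a caller
+ * that owns @bitmap (e.g. VFIO; assumed here, not part of this file):
+ *
+ * iommu_sync_dirty_log(domain, iova, size, bitmap, iova, PAGE_SHIFT);
+ * ... hand the dirty bits to userspace ...
+ * iommu_clear_dirty_log(domain, iova, size, bitmap, iova, PAGE_SHIFT);
+ */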
+static int __iommu_clear_dirty_log(struct iommu_domain *domain,
+ unsigned long iova, size_t size,
+ unsigned long *bitmap,
+ unsigned long base_iova,
+ unsigned long bitmap_pgshift)
+{
+ const struct iommu_ops *ops = domain->ops;
+ size_t pgsize;
+ int ret = 0;
+
+ if (!ops || !ops->clear_dirty_log) {
+ pr_err("IOMMU driver doesn't support clear dirty log\n");
+ return -ENODEV;
+ }
+
+ while (size) {
+ pgsize = iommu_pgsize(domain, iova, size);
+ ret = ops->clear_dirty_log(domain, iova, pgsize, bitmap,
+ base_iova, bitmap_pgshift);
+
+ if (ret)
+ break;
+
+ pr_debug("dirty_log_clear: iova 0x%lx pagesz 0x%zx\n", iova,
+ pgsize);
+
+ iova += pgsize;
+ size -= pgsize;
+ }
+
+ return ret;
+}
+
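+/**
+ * iommu_clear_dirty_log - clear the dirty state of mapped pages
+ * @domain: the iommu domain to operate on
+ * @iova: start of the IOVA range to clear
+ * @size: length of the range, aligned to the domain's minimum page size
+ * @bitmap: dirty bitmap previously filled by iommu_sync_dirty_log(); only
+ * IOVAs whose bits are set are handed to the driver to be marked clean
+ * @base_iova: the IOVA that corresponds to bit 0 of @bitmap
+ * @bitmap_pgshift: page size granularity of @bitmap, as a shift
+ *
+ * The IOTLB is flushed once after all set regions have been handled, so
+ * that cached writable entries for the cleared range are dropped and later
+ * device writes are tracked again.
+ */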
+int iommu_clear_dirty_log(struct iommu_domain *domain,
+ unsigned long iova, size_t size,
+ unsigned long *bitmap, unsigned long base_iova,
+ unsigned long bitmap_pgshift)
+{
+ unsigned long riova, rsize;
+ unsigned int min_pagesz;
+ bool flush = false;
+ int rs, re, start, end, ret = 0;
+
+ min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
+
+ if (!IS_ALIGNED(iova | size, min_pagesz)) {
+ pr_err("unaligned: iova 0x%lx min_pagesz 0x%x\n",
+ iova, min_pagesz);
+ return -EINVAL;
+ }
+
+ start = (iova - base_iova) >> bitmap_pgshift;
+ end = start + (size >> bitmap_pgshift);
+ bitmap_for_each_set_region(bitmap, rs, re, start, end) {
+ flush = true;
+ riova = iova + ((unsigned long)rs << bitmap_pgshift);
+ rsize = (unsigned long)(re - rs) << bitmap_pgshift;
+ ret = __iommu_clear_dirty_log(domain, riova, rsize, bitmap,
+ base_iova, bitmap_pgshift);
+ if (ret)
+ break;
+ }
+
+ if (flush)
+ iommu_flush_iotlb_all(domain);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_clear_dirty_log);
+
void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
@@ -170,6 +170,10 @@ struct io_pgtable_ops {
unsigned long iova, size_t size,
unsigned long *bitmap, unsigned long base_iova,
unsigned long bitmap_pgshift);
+ int (*clear_dirty_log)(struct io_pgtable_ops *ops,
+ unsigned long iova, size_t size,
+ unsigned long *bitmap, unsigned long base_iova,
+ unsigned long bitmap_pgshift);
};
/**
@@ -266,6 +266,10 @@ struct iommu_ops {
unsigned long iova, size_t size,
unsigned long *bitmap, unsigned long base_iova,
unsigned long bitmap_pgshift);
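+ /* Clear the dirty state of the pages whose bits are set in the bitmap */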
+ int (*clear_dirty_log)(struct iommu_domain *domain,
+ unsigned long iova, size_t size,
+ unsigned long *bitmap, unsigned long base_iova,
+ unsigned long bitmap_pgshift);
/* Request/Free a list of reserved regions for a device */
void (*get_resv_regions)(struct device *dev, struct list_head *list);
@@ -525,6 +529,10 @@ extern int iommu_sync_dirty_log(struct iommu_domain *domain, unsigned long iova,
size_t size, unsigned long *bitmap,
unsigned long base_iova,
unsigned long bitmap_pgshift);
+extern int iommu_clear_dirty_log(struct iommu_domain *domain, unsigned long iova,
+ size_t size, unsigned long *bitmap,
+ unsigned long base_iova,
+ unsigned long bitmap_pgshift);
/* Window handling function prototypes */
extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
@@ -940,6 +948,15 @@ static inline int iommu_sync_dirty_log(struct iommu_domain *domain,
return -EINVAL;
}
+static inline int iommu_clear_dirty_log(struct iommu_domain *domain,
+ unsigned long iova, size_t size,
+ unsigned long *bitmap,
+ unsigned long base_iova,
+ unsigned long bitmap_pgshift)
+{
+ return -EINVAL;
+}
+
static inline int iommu_device_register(struct iommu_device *iommu)
{
return -ENODEV;