@@ -290,13 +290,31 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
 	pm_runtime_put_autosuspend(pfdev->base.dev);
 }
 
+static void mmu_unmap_range(size_t len, u64 iova, struct io_pgtable_ops *ops)
+{
+	size_t pgsize, unmapped_len = 0;
+	size_t unmapped_page, pgcount;
+
+	while (unmapped_len < len) {
+		pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);
+
+		unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);
+		WARN_ON(unmapped_page != pgsize * pgcount);
+
+		iova += pgsize * pgcount;
+		unmapped_len += pgsize * pgcount;
+	}
+}
+
 static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
 		      u64 iova, int prot, struct sg_table *sgt)
 {
 	unsigned int count;
 	struct scatterlist *sgl;
 	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
+	size_t total_mapped = 0;
 	u64 start_iova = iova;
+	int ret = 0;
 
 	for_each_sgtable_dma_sg(sgt, sgl, count) {
 		unsigned long paddr = sg_dma_address(sgl);
@@ -310,10 +328,15 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
 			size_t pgcount, mapped = 0;
 			size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);
 
-			ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
+			ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
 				       GFP_KERNEL, &mapped);
+			if (ret) {
+				mmu_unmap_range(total_mapped + mapped, start_iova, ops);
+				return ret;
+			}
 			/* Don't get stuck if things have gone wrong */
 			mapped = max(mapped, pgsize);
+			total_mapped += mapped;
 			iova += mapped;
 			paddr += mapped;
 			len -= mapped;
@@ -333,6 +356,7 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
 	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
 	struct sg_table *sgt;
 	int prot = IOMMU_READ | IOMMU_WRITE;
+	int ret;
 
 	if (WARN_ON(mapping->active))
 		return 0;
@@ -344,8 +368,13 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
 	if (WARN_ON(IS_ERR(sgt)))
 		return PTR_ERR(sgt);
 
-	mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
-		   prot, sgt);
+	ret = mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
+			 prot, sgt);
+	if (ret) {
+		drm_gem_shmem_put_pages(shmem);
+		return ret;
+	}
+
 	mapping->active = true;
 
 	return 0;
@@ -532,8 +561,10 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 	if (ret)
 		goto err_map;
 
-	mmu_map_sg(pfdev, bomapping->mmu, addr,
-		   IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+	ret = mmu_map_sg(pfdev, bomapping->mmu, addr,
+			 IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+	if (ret)
+		goto err_mmu_map_sg;
 
 	bomapping->active = true;
 	bo->heap_rss_size += SZ_2M;
@@ -547,6 +578,9 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 
 	return 0;
 
+err_mmu_map_sg:
+	dma_unmap_sgtable(pfdev->base.dev, sgt,
+			  DMA_BIDIRECTIONAL, 0);
 err_map:
 	sg_free_table(sgt);
 err_unlock:
When mapping the pages of a BO, either a heap-type BO at page fault time
or a non-heap BO at object creation time, if the ARM page table mapping
function fails, we unmap whatever had been mapped so far and bail out.

Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
---
 drivers/gpu/drm/panfrost/panfrost_mmu.c | 44 ++++++++++++++++++++++---
 1 file changed, 39 insertions(+), 5 deletions(-)
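
For illustration only, below is a minimal user-space sketch of the
unwind-on-failure pattern the patch follows. The toy_* helpers are
hypothetical stand-ins for ops->map_pages() and the new mmu_unmap_range(),
not the io-pgtable API; the point is just that a failure mid-loop rolls
back every page mapped by earlier iterations before bailing out:

/* Build with: cc -std=c11 -Wall sketch.c */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NR_PAGES 8

/* Toy "page table": one flag per page, standing in for real PTEs. */
static bool pte[NR_PAGES];

/* Hypothetical stand-in for ops->map_pages(); fails part-way through. */
static int toy_map_page(size_t idx)
{
	if (idx == 5)	/* simulate e.g. -ENOMEM on the sixth page */
		return -1;
	pte[idx] = true;
	return 0;
}

/* Hypothetical stand-in for mmu_unmap_range(): clear the first @count PTEs. */
static void toy_unmap_range(size_t count)
{
	for (size_t i = 0; i < count; i++)
		pte[i] = false;
}

/* Same shape as the patched mmu_map_sg(): map pages, unwind on error. */
static int toy_map_range(size_t count)
{
	size_t total_mapped = 0;

	while (total_mapped < count) {
		if (toy_map_page(total_mapped)) {
			/* Roll back everything mapped so far, then bail out. */
			toy_unmap_range(total_mapped);
			return -1;
		}
		total_mapped++;
	}
	return 0;
}

int main(void)
{
	int ret = toy_map_range(NR_PAGES);

	/* After the simulated failure, every PTE must be clear again. */
	for (size_t i = 0; i < NR_PAGES; i++)
		printf("page %zu: %s\n", i, pte[i] ? "mapped" : "unmapped");

	return ret ? 1 : 0;
}

Running it prints all eight pages as "unmapped", showing that no partial
mapping survives the failed call.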