[v3,1/3] iommu: io-pgtable: Add ARM Mali midgard MMU page table format

Message ID 20190401074730.12241-2-robh@kernel.org
State New, archived
Series Initial Panfrost driver

Commit Message

Rob Herring April 1, 2019, 7:47 a.m. UTC
The ARM Mali Midgard GPU page table format is similar to the standard
64-bit stage 1 format, but has a few differences. Add a new format type
to represent it. The input address size is 48 bits and the output
address size is 40 bits (and possibly less?). Note that the later
Bifrost GPUs follow the standard 64-bit stage 1 format.

The differences compared to the 64-bit stage 1 format are:

The type bits in 3rd level page entries are 0x1 instead of 0x3.

The access flags are read and write permissions rather than read-only
and unprivileged bits. This is similar to stage 2 entries, but the
memory attributes field matches stage 1 in being an index.

The nG bit is not set by the vendor driver. This one didn't seem to matter,
but we'll keep it aligned to the vendor driver.
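
To make those differences concrete, here is a rough sketch of how a 3rd
level leaf PTE would be built in each format, using the ARM_LPAE_* and
IOMMU_* constants already defined in and around io-pgtable-arm.c. This is
an illustration only, not part of the patch, and assumes paddr is
page-aligned:

/* Sketch: 3rd level leaf PTE, standard 64-bit stage 1 vs. Mali Midgard */
static u64 example_leaf_pte(phys_addr_t paddr, int prot, bool mali)
{
	u64 pte;

	/* Type bits: 0x3 ("page") for stage 1, 0x1 ("block") for Mali */
	pte = mali ? ARM_LPAE_PTE_TYPE_BLOCK : ARM_LPAE_PTE_TYPE_PAGE;

	if (mali) {
		/* Stage-2-style read/write permissions; no nG, no AF */
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	} else {
		pte |= ARM_LPAE_PTE_nG | ARM_LPAE_PTE_AF;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	}

	/* Both formats use a stage-1-style attribute index into MAIR/MEMATTR */
	if (prot & IOMMU_CACHE)
		pte |= ARM_LPAE_MAIR_ATTR_IDX_CACHE << ARM_LPAE_PTE_ATTRINDX_SHIFT;

	return pte | ARM_LPAE_PTE_SH_IS | paddr;
}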

Cc: Will Deacon <will.deacon@arm.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: iommu@lists.linux-foundation.org
Signed-off-by: Rob Herring <robh@kernel.org>
---
Please ack this as I need to apply it to the drm-misc tree. Or we need a
stable branch with this patch.

v3:
- Rework arm_lpae_prot_to_pte as written by Robin
- Fill in complete register values for TRANSTAB and MEMATTR registers


 drivers/iommu/io-pgtable-arm.c | 93 +++++++++++++++++++++++++---------
 drivers/iommu/io-pgtable.c     |  1 +
 include/linux/io-pgtable.h     |  7 +++
 3 files changed, 77 insertions(+), 24 deletions(-)

--
2.19.1

Comments

Robin Murphy April 1, 2019, 7:11 p.m. UTC | #1
On 01/04/2019 08:47, Rob Herring wrote:
> ARM Mali midgard GPU is similar to standard 64-bit stage 1 page tables, but
> have a few differences. Add a new format type to represent the format. The
> input address size is 48-bits and the output address size is 40-bits (and
> possibly less?). Note that the later bifrost GPUs follow the standard
> 64-bit stage 1 format.
> 
> The differences in the format compared to 64-bit stage 1 format are:
> 
> The 3rd level page entry bits are 0x1 instead of 0x3 for page entries.
> 
> The access flags are not read-only and unprivileged, but read and write.
> This is similar to stage 2 entries, but the memory attributes field matches
> stage 1 being an index.
> 
> The nG bit is not set by the vendor driver. This one didn't seem to matter,
> but we'll keep it aligned to the vendor driver.
> 
> Cc: Will Deacon <will.deacon@arm.com>
> Cc: Robin Murphy <robin.murphy@arm.com>
> Cc: Joerg Roedel <joro@8bytes.org>
> Cc: linux-arm-kernel@lists.infradead.org
> Cc: iommu@lists.linux-foundation.org
> Signed-off-by: Rob Herring <robh@kernel.org>
> ---
> Please ack this as I need to apply it to the drm-misc tree. Or we need a
> stable branch with this patch.

With the diff below squashed in to address my outstanding style nits,

Acked-by: Robin Murphy <robin.murphy@arm.com>

I don't foresee any conflicting io-pgtable changes to prevent this going 
via DRM, but I'll leave the final say up to Joerg.

Thanks,
Robin.

----->8-----
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 98551d0cff59..55ed039da166 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -197,12 +197,13 @@ struct arm_lpae_io_pgtable {

  typedef u64 arm_lpae_iopte;

-static inline bool iopte_leaf(arm_lpae_iopte pte, int l, enum io_pgtable_fmt fmt)
+static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
+			      enum io_pgtable_fmt fmt)
  {
-	if ((l == (ARM_LPAE_MAX_LEVELS - 1)) && (fmt != ARM_MALI_LPAE))
-		return iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE;
+	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
+		return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE;

-	return iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK;
+	return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK;
  }

  static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
@@ -310,13 +311,10 @@ static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
  	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
  		pte |= ARM_LPAE_PTE_NS;

-	if (lvl == ARM_LPAE_MAX_LEVELS - 1) {
-		if (data->iop.fmt == ARM_MALI_LPAE)
-			pte |= ARM_LPAE_PTE_TYPE_BLOCK;
-		else
-			pte |= ARM_LPAE_PTE_TYPE_PAGE;
-	} else
+	if (data->iop.fmt != ARM_MALI_LPAE && lvl != ARM_LPAE_MAX_LEVELS - 1)
  		pte |= ARM_LPAE_PTE_TYPE_BLOCK;
+	else
+		pte |= ARM_LPAE_PTE_TYPE_PAGE;

  	if (data->iop.fmt != ARM_MALI_LPAE)
  		pte |= ARM_LPAE_PTE_AF;
Steven Price April 5, 2019, 9:42 a.m. UTC | #2
First let me say congratulations to everyone working on Panfrost - it's
an impressive achievement!

Full disclosure: I used to work on the Mali kbase driver. And have been
playing around with running the Mali user-space blob with the Panfrost
kernel driver.

On 01/04/2019 08:47, Rob Herring wrote:
> ARM Mali midgard GPU is similar to standard 64-bit stage 1 page tables, but
> have a few differences. Add a new format type to represent the format. The
> input address size is 48-bits and the output address size is 40-bits (and
> possibly less?). Note that the later bifrost GPUs follow the standard
> 64-bit stage 1 format.
> 
> The differences in the format compared to 64-bit stage 1 format are:
> 
> The 3rd level page entry bits are 0x1 instead of 0x3 for page entries.
> 
> The access flags are not read-only and unprivileged, but read and write.
> This is similar to stage 2 entries, but the memory attributes field matches
> stage 1 being an index.
> 
> The nG bit is not set by the vendor driver. This one didn't seem to matter,
> but we'll keep it aligned to the vendor driver.

The nG bit should be ignored by the hardware.

The MMU in Midgard/Bifrost has a quirk similar to
IO_PGTABLE_QUIRK_TLBI_ON_MAP - you must perform a cache flush for the
GPU to (reliably) pick up new page table mappings.

You may not have seen this because of the use of the JS_CONFIG_START_MMU
bit - this effectively performs a cache flush and TLB invalidate before
starting a job. However, when using a GPU like the T760 (e.g. on the
Firefly RK3288) this bit isn't being set, and in my testing on the
Firefly board I saw GPU page faults because of this.

There are two options for fixing this: a patch like the one below adds
the quirk mode to the MMU, or alternatively always set JS_CONFIG_START_MMU
on jobs. In my testing both options solve the page faults.

To be honest I don't know the reasoning behind kbase making the
JS_CONFIG_START_MMU bit conditional - I'm not aware of any reason why it
can't always be set. My guess is performance, but I haven't benchmarked
the difference between the two approaches.
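
For reference, the second option is roughly a one-liner in the job
submission path - sketch only, with macro names assumed to match the
Panfrost job frontend rather than taken from a tested patch:

	/* Always request a flush/invalidate and MMU enable when a job starts */
	cfg = JS_CONFIG_THREAD_PRI(8) |
	      JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
	      JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE |
	      JS_CONFIG_START_MMU;	/* unconditionally, not per GPU revision */
	job_write(pfdev, JS_CONFIG(js), cfg);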

-----8<----------
From e3f75c7f04e43238dfc579029b8c11fb6b4a0c18 Mon Sep 17 00:00:00 2001
From: Steven Price <steven.price@arm.com>
Date: Thu, 4 Apr 2019 15:53:17 +0100
Subject: [PATCH] iommu: io-pgtable: IO_PGTABLE_QUIRK_TLBI_ON_MAP for LPAE

Midgard/Bifrost GPUs require a TLB invalidation when mapping pages,
implement IO_PGTABLE_QUIRK_TLBI_ON_MAP for LPAE iommu page table
formats and add the quirk bit to Panfrost.

Signed-off-by: Steven Price <steven.price@arm.com>
---
 drivers/gpu/drm/panfrost/panfrost_mmu.c |  1 +
 drivers/iommu/io-pgtable-arm.c          | 11 +++++++++--
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index f3aad8591cf4..094312074d66 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -343,6 +343,7 @@ int panfrost_mmu_init(struct panfrost_device *pfdev)
 	mmu_write(pfdev, MMU_INT_MASK, ~0);

 	pfdev->mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
+		.quirks		= IO_PGTABLE_QUIRK_TLBI_ON_MAP,
 		.pgsize_bitmap	= SZ_4K, // | SZ_2M | SZ_1G),
 		.ias		= 48,
 		.oas		= 40,	/* Should come from dma mask? */
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 84beea1f47a7..45fd7bbdf9aa 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -505,7 +505,13 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
 	 * Synchronise all PTE updates for the new mapping before there's
 	 * a chance for anything to kick off a table walk for the new iova.
 	 */
-	wmb();
+	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_TLBI_ON_MAP) {
+		io_pgtable_tlb_add_flush(&data->iop, iova, size,
+					 ARM_LPAE_BLOCK_SIZE(2, data), false);
+		io_pgtable_tlb_sync(&data->iop);
+	} else {
+		wmb();
+	}

 	return ret;
 }
@@ -800,7 +806,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 	struct arm_lpae_io_pgtable *data;

 	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA |
-			    IO_PGTABLE_QUIRK_NON_STRICT))
+			    IO_PGTABLE_QUIRK_NON_STRICT |
+			    IO_PGTABLE_QUIRK_TLBI_ON_MAP))
 		return NULL;

 	data = arm_lpae_alloc_pgtable(cfg);
Robin Murphy April 5, 2019, 9:51 a.m. UTC | #3
Hi Steve,

On 05/04/2019 10:42, Steven Price wrote:
> First let me say congratulations to everyone working on Panfrost - it's
> an impressive achievement!
> 
> Full disclosure: I used to work on the Mali kbase driver. And have been
> playing around with running the Mali user-space blob with the Panfrost
> kernel driver.
> 
> On 01/04/2019 08:47, Rob Herring wrote:
>> ARM Mali midgard GPU is similar to standard 64-bit stage 1 page tables, but
>> have a few differences. Add a new format type to represent the format. The
>> input address size is 48-bits and the output address size is 40-bits (and
>> possibly less?). Note that the later bifrost GPUs follow the standard
>> 64-bit stage 1 format.
>>
>> The differences in the format compared to 64-bit stage 1 format are:
>>
>> The 3rd level page entry bits are 0x1 instead of 0x3 for page entries.
>>
>> The access flags are not read-only and unprivileged, but read and write.
>> This is similar to stage 2 entries, but the memory attributes field matches
>> stage 1 being an index.
>>
>> The nG bit is not set by the vendor driver. This one didn't seem to matter,
>> but we'll keep it aligned to the vendor driver.
> 
> The nG bit should be ignored by the hardware.
> 
> The MMU in Midgard/Bifrost has a quirk similar to
> IO_PGTABLE_QUIRK_TLBI_ON_MAP - you must perform a cache flush for the
> GPU to (reliably) pick up new page table mappings.
> 
> You may not have seen this because of the use of the JS_CONFIG_START_MMU
> bit - this effectively performs a cache flush and TLB invalidate before
> starting a job, however when using a GPU like T760 (e.g. on the Firefly
> RK3288) this bit isn't being set. In my testing on the Firefly board I
> saw GPU page faults because of this.
> 
> There's two options for fixing this - a patch like below adds the quirk
> mode to the MMU. Or alternatively always set JS_CONFIG_START_MMU on
> jobs. In my testing both options solve the page faults.
> 
> To be honest I don't know the reasoning behind kbase making the
> JS_CONFIG_START_MMU bit conditional - I'm not aware of any reason why it
> can't always be set. My guess is performance, but I haven't benchmarked
> the difference between this and JS_CONFIG_START_MMU.
> 
> -----8<----------
>  From e3f75c7f04e43238dfc579029b8c11fb6b4a0c18 Mon Sep 17 00:00:00 2001
> From: Steven Price <steven.price@arm.com>
> Date: Thu, 4 Apr 2019 15:53:17 +0100
> Subject: [PATCH] iommu: io-pgtable: IO_PGTABLE_QUIRK_TLBI_ON_MAP for LPAE
> 
> Midgard/Bifrost GPUs require a TLB invalidation when mapping pages,
> implement IO_PGTABLE_QUIRK_TLBI_ON_MAP for LPAE iommu page table
> formats and add the quirk bit to Panfrost.
> 
> Signed-off-by: Steven Price <steven.price@arm.com>
> ---
>   drivers/gpu/drm/panfrost/panfrost_mmu.c |  1 +
>   drivers/iommu/io-pgtable-arm.c          | 11 +++++++++--
>   2 files changed, 10 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> index f3aad8591cf4..094312074d66 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> @@ -343,6 +343,7 @@ int panfrost_mmu_init(struct panfrost_device *pfdev)
>   	mmu_write(pfdev, MMU_INT_MASK, ~0);
> 
>   	pfdev->mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
> +		.quirks		= IO_PGTABLE_QUIRK_TLBI_ON_MAP,
>   		.pgsize_bitmap	= SZ_4K, // | SZ_2M | SZ_1G),
>   		.ias		= 48,
>   		.oas		= 40,	/* Should come from dma mask? */
> diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
> index 84beea1f47a7..45fd7bbdf9aa 100644
> --- a/drivers/iommu/io-pgtable-arm.c
> +++ b/drivers/iommu/io-pgtable-arm.c
> @@ -505,7 +505,13 @@ static int arm_lpae_map(struct io_pgtable_ops *ops,
> unsigned long iova,
>   	 * Synchronise all PTE updates for the new mapping before there's
>   	 * a chance for anything to kick off a table walk for the new iova.
>   	 */
> -	wmb();
> +	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_TLBI_ON_MAP) {
> +		io_pgtable_tlb_add_flush(&data->iop, iova, size,
> +					 ARM_LPAE_BLOCK_SIZE(2, data), false);

For correctness (in case this ever ends up used for something with 
VMSA-like invalidation behaviour), the granule would need to be "size" 
here, rather than effectively hard-coded.
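
That is, the flush would pass the mapped size as the granule as well -
roughly (sketch only):

	io_pgtable_tlb_add_flush(&data->iop, iova, size, size, false);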

However, since Mali's invalidations appear to operate on arbitrary 
ranges, it would probably be a lot more efficient for the driver to 
handle this case directly, by just issuing a single big invalidation 
after the for_each_sg() loop in panfrost_mmu_map().
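
As an illustration of that (not actual Panfrost code - the mapping loop is
simplified and panfrost_mmu_flush_range() is a made-up helper standing in
for whatever issues the GPU's range invalidate):

/* Sketch: map all SG entries first, then one invalidation for the range */
static int panfrost_mmu_map_sketch(struct panfrost_device *pfdev,
				   struct io_pgtable_ops *ops,
				   struct sg_table *sgt, u64 base_iova)
{
	struct scatterlist *sgl;
	u64 iova = base_iova;
	size_t total = 0;
	unsigned int i;

	for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
		dma_addr_t daddr = sg_dma_address(sgl);
		size_t len = sg_dma_len(sgl);

		while (len) {
			ops->map(ops, iova, daddr, PAGE_SIZE,
				 IOMMU_READ | IOMMU_WRITE);
			iova += PAGE_SIZE;
			daddr += PAGE_SIZE;
			len -= PAGE_SIZE;
			total += PAGE_SIZE;
		}
	}

	/* One big invalidation instead of one per mapped page */
	panfrost_mmu_flush_range(pfdev, base_iova, total);

	return 0;
}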

Robin.

> +		io_pgtable_tlb_sync(&data->iop);
> +	} else {
> +		wmb();
> +	}
> 
>   	return ret;
>   }
> @@ -800,7 +806,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg
> *cfg, void *cookie)
>   	struct arm_lpae_io_pgtable *data;
> 
>   	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA |
> -			    IO_PGTABLE_QUIRK_NON_STRICT))
> +			    IO_PGTABLE_QUIRK_NON_STRICT |
> +			    IO_PGTABLE_QUIRK_TLBI_ON_MAP))
>   		return NULL;
> 
>   	data = arm_lpae_alloc_pgtable(cfg);
>
Robin Murphy April 5, 2019, 10:02 a.m. UTC | #4
On 01/04/2019 20:11, Robin Murphy wrote:
> On 01/04/2019 08:47, Rob Herring wrote:
>> ARM Mali midgard GPU is similar to standard 64-bit stage 1 page 
>> tables, but
>> have a few differences. Add a new format type to represent the format. 
>> The
>> input address size is 48-bits and the output address size is 40-bits (and
>> possibly less?). Note that the later bifrost GPUs follow the standard
>> 64-bit stage 1 format.
>>
>> The differences in the format compared to 64-bit stage 1 format are:
>>
>> The 3rd level page entry bits are 0x1 instead of 0x3 for page entries.
>>
>> The access flags are not read-only and unprivileged, but read and write.
>> This is similar to stage 2 entries, but the memory attributes field 
>> matches
>> stage 1 being an index.
>>
>> The nG bit is not set by the vendor driver. This one didn't seem to 
>> matter,
>> but we'll keep it aligned to the vendor driver.
>>
>> Cc: Will Deacon <will.deacon@arm.com>
>> Cc: Robin Murphy <robin.murphy@arm.com>
>> Cc: Joerg Roedel <joro@8bytes.org>
>> Cc: linux-arm-kernel@lists.infradead.org
>> Cc: iommu@lists.linux-foundation.org
>> Signed-off-by: Rob Herring <robh@kernel.org>
>> ---
>> Please ack this as I need to apply it to the drm-misc tree. Or we need a
>> stable branch with this patch.
> 
> With the diff below squashed in to address my outstanding style nits,
> 
> Acked-by: Robin Murphy <robin.murphy@arm.com>
> 
> I don't foresee any conflicting io-pgtable changes to prevent this going 
> via DRM, but I'll leave the final say up to Joerg.

Urgh, sorry, turns out I flipped one condition too many there. On 
reflection I may also forget my clever trick in future and inadvertently 
break it, so it probably warrants a comment. Please supersede my 
previous request with the (actually tested) diff below :)

Thanks,
Robin.

----->8-----
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 98551d0cff59..4f7be5a3e19b 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -197,12 +197,13 @@ struct arm_lpae_io_pgtable {

  typedef u64 arm_lpae_iopte;

-static inline bool iopte_leaf(arm_lpae_iopte pte, int l, enum io_pgtable_fmt fmt)
+static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
+			      enum io_pgtable_fmt fmt)
  {
-	if ((l == (ARM_LPAE_MAX_LEVELS - 1)) && (fmt != ARM_MALI_LPAE))
-		return iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE;
+	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
+		return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE;

-	return iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK;
+	return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK;
  }

  static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
@@ -310,12 +311,9 @@ static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
  	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
  		pte |= ARM_LPAE_PTE_NS;

-	if (lvl == ARM_LPAE_MAX_LEVELS - 1) {
-		if (data->iop.fmt == ARM_MALI_LPAE)
-			pte |= ARM_LPAE_PTE_TYPE_BLOCK;
-		else
-			pte |= ARM_LPAE_PTE_TYPE_PAGE;
-	} else
+	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
+		pte |= ARM_LPAE_PTE_TYPE_PAGE;
+	else
  		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

  	if (data->iop.fmt != ARM_MALI_LPAE)
@@ -452,7 +450,10 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
  		if (prot & IOMMU_WRITE)
  			pte |= ARM_LPAE_PTE_HAP_WRITE;
  	}
-
+	/*
+	 * Note that this logic is structured to accommodate Mali LPAE
+	 * having stage-1-like attributes but stage-2-like permissions.
+	 */
  	if (data->iop.fmt == ARM_64_LPAE_S2 ||
  	    data->iop.fmt == ARM_32_LPAE_S2) {
  		if (prot & IOMMU_MMIO)
Steven Price April 5, 2019, 10:36 a.m. UTC | #5
On 05/04/2019 10:51, Robin Murphy wrote:
> Hi Steve,
> 
> On 05/04/2019 10:42, Steven Price wrote:
>> First let me say congratulations to everyone working on Panfrost - it's
>> an impressive achievement!
>>
>> Full disclosure: I used to work on the Mali kbase driver. And have been
>> playing around with running the Mali user-space blob with the Panfrost
>> kernel driver.
>>
>> On 01/04/2019 08:47, Rob Herring wrote:
>>> ARM Mali midgard GPU is similar to standard 64-bit stage 1 page
>>> tables, but
>>> have a few differences. Add a new format type to represent the
>>> format. The
>>> input address size is 48-bits and the output address size is 40-bits
>>> (and
>>> possibly less?). Note that the later bifrost GPUs follow the standard
>>> 64-bit stage 1 format.
>>>
>>> The differences in the format compared to 64-bit stage 1 format are:
>>>
>>> The 3rd level page entry bits are 0x1 instead of 0x3 for page entries.
>>>
>>> The access flags are not read-only and unprivileged, but read and write.
>>> This is similar to stage 2 entries, but the memory attributes field
>>> matches
>>> stage 1 being an index.
>>>
>>> The nG bit is not set by the vendor driver. This one didn't seem to
>>> matter,
>>> but we'll keep it aligned to the vendor driver.
>>
>> The nG bit should be ignored by the hardware.
>>
>> The MMU in Midgard/Bifrost has a quirk similar to
>> IO_PGTABLE_QUIRK_TLBI_ON_MAP - you must perform a cache flush for the
>> GPU to (reliably) pick up new page table mappings.
>>
>> You may not have seen this because of the use of the JS_CONFIG_START_MMU
>> bit - this effectively performs a cache flush and TLB invalidate before
>> starting a job, however when using a GPU like T760 (e.g. on the Firefly
>> RK3288) this bit isn't being set. In my testing on the Firefly board I
>> saw GPU page faults because of this.
>>
>> There's two options for fixing this - a patch like below adds the quirk
>> mode to the MMU. Or alternatively always set JS_CONFIG_START_MMU on
>> jobs. In my testing both options solve the page faults.
>>
>> To be honest I don't know the reasoning behind kbase making the
>> JS_CONFIG_START_MMU bit conditional - I'm not aware of any reason why it
>> can't always be set. My guess is performance, but I haven't benchmarked
>> the difference between this and JS_CONFIG_START_MMU.
>>
>> -----8<----------
>>  From e3f75c7f04e43238dfc579029b8c11fb6b4a0c18 Mon Sep 17 00:00:00 2001
>> From: Steven Price <steven.price@arm.com>
>> Date: Thu, 4 Apr 2019 15:53:17 +0100
>> Subject: [PATCH] iommu: io-pgtable: IO_PGTABLE_QUIRK_TLBI_ON_MAP for LPAE
>>
>> Midgard/Bifrost GPUs require a TLB invalidation when mapping pages,
>> implement IO_PGTABLE_QUIRK_TLBI_ON_MAP for LPAE iommu page table
>> formats and add the quirk bit to Panfrost.
>>
>> Signed-off-by: Steven Price <steven.price@arm.com>
>> ---
>>   drivers/gpu/drm/panfrost/panfrost_mmu.c |  1 +
>>   drivers/iommu/io-pgtable-arm.c          | 11 +++++++++--
>>   2 files changed, 10 insertions(+), 2 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c
>> b/drivers/gpu/drm/panfrost/panfrost_mmu.c
>> index f3aad8591cf4..094312074d66 100644
>> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
>> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
>> @@ -343,6 +343,7 @@ int panfrost_mmu_init(struct panfrost_device *pfdev)
>>       mmu_write(pfdev, MMU_INT_MASK, ~0);
>>
>>       pfdev->mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
>> +        .quirks        = IO_PGTABLE_QUIRK_TLBI_ON_MAP,
>>           .pgsize_bitmap    = SZ_4K, // | SZ_2M | SZ_1G),
>>           .ias        = 48,
>>           .oas        = 40,    /* Should come from dma mask? */
>> diff --git a/drivers/iommu/io-pgtable-arm.c
>> b/drivers/iommu/io-pgtable-arm.c
>> index 84beea1f47a7..45fd7bbdf9aa 100644
>> --- a/drivers/iommu/io-pgtable-arm.c
>> +++ b/drivers/iommu/io-pgtable-arm.c
>> @@ -505,7 +505,13 @@ static int arm_lpae_map(struct io_pgtable_ops *ops,
>> unsigned long iova,
>>        * Synchronise all PTE updates for the new mapping before there's
>>        * a chance for anything to kick off a table walk for the new iova.
>>        */
>> -    wmb();
>> +    if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_TLBI_ON_MAP) {
>> +        io_pgtable_tlb_add_flush(&data->iop, iova, size,
>> +                     ARM_LPAE_BLOCK_SIZE(2, data), false);
> 
> For correctness (in case this ever ends up used for something with
> VMSA-like invalidation behaviour), the granule would need to be "size"
> here, rather than effectively hard-coded.

Ah yes - I did rather just copy/paste this from io-pgtable-arm-v7s with
minor fix-ups.

> However, since Mali's invalidations appear to operate on arbitrary
> ranges, it would probably be a lot more efficient for the driver to
> handle this case directly, by just issuing a single big invalidation
> after the for_each_sg() loop in panfrost_mmu_map().

Yes - that would probably be a better option. Although I think
personally I'd lean towards just using JS_CONFIG_START_MMU for most
cases. The only thing that won't handle is modifying the MMU while the
job is running (e.g. faulting in pages). But that can be handled
internally in Panfrost by invalidating the exact region which is being
populated.

Steve

> Robin.
> 
>> +        io_pgtable_tlb_sync(&data->iop);
>> +    } else {
>> +        wmb();
>> +    }
>>
>>       return ret;
>>   }
>> @@ -800,7 +806,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg
>> *cfg, void *cookie)
>>       struct arm_lpae_io_pgtable *data;
>>
>>       if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
>> IO_PGTABLE_QUIRK_NO_DMA |
>> -                IO_PGTABLE_QUIRK_NON_STRICT))
>> +                IO_PGTABLE_QUIRK_NON_STRICT |
>> +                IO_PGTABLE_QUIRK_TLBI_ON_MAP))
>>           return NULL;
>>
>>       data = arm_lpae_alloc_pgtable(cfg);
>>
Steven Price April 8, 2019, 8:56 a.m. UTC | #6
On 05/04/2019 11:36, Steven Price wrote:
> On 05/04/2019 10:51, Robin Murphy wrote:
>> Hi Steve,
>>
>> On 05/04/2019 10:42, Steven Price wrote:
>>> First let me say congratulations to everyone working on Panfrost - it's
>>> an impressive achievement!
>>>
>>> Full disclosure: I used to work on the Mali kbase driver. And have been
>>> playing around with running the Mali user-space blob with the Panfrost
>>> kernel driver.
>>>
>>> On 01/04/2019 08:47, Rob Herring wrote:
>>>> ARM Mali midgard GPU is similar to standard 64-bit stage 1 page
>>>> tables, but
>>>> have a few differences. Add a new format type to represent the
>>>> format. The
>>>> input address size is 48-bits and the output address size is 40-bits
>>>> (and
>>>> possibly less?). Note that the later bifrost GPUs follow the standard
>>>> 64-bit stage 1 format.
>>>>
>>>> The differences in the format compared to 64-bit stage 1 format are:
>>>>
>>>> The 3rd level page entry bits are 0x1 instead of 0x3 for page entries.
>>>>
>>>> The access flags are not read-only and unprivileged, but read and write.
>>>> This is similar to stage 2 entries, but the memory attributes field
>>>> matches
>>>> stage 1 being an index.
>>>>
>>>> The nG bit is not set by the vendor driver. This one didn't seem to
>>>> matter,
>>>> but we'll keep it aligned to the vendor driver.
>>>
>>> The nG bit should be ignored by the hardware.
>>>
>>> The MMU in Midgard/Bifrost has a quirk similar to
>>> IO_PGTABLE_QUIRK_TLBI_ON_MAP - you must perform a cache flush for the
>>> GPU to (reliably) pick up new page table mappings.
>>>
>>> You may not have seen this because of the use of the JS_CONFIG_START_MMU
>>> bit - this effectively performs a cache flush and TLB invalidate before
>>> starting a job, however when using a GPU like T760 (e.g. on the Firefly
>>> RK3288) this bit isn't being set. In my testing on the Firefly board I
>>> saw GPU page faults because of this.
>>>
>>> There's two options for fixing this - a patch like below adds the quirk
>>> mode to the MMU. Or alternatively always set JS_CONFIG_START_MMU on
>>> jobs. In my testing both options solve the page faults.
>>>
>>> To be honest I don't know the reasoning behind kbase making the
>>> JS_CONFIG_START_MMU bit conditional - I'm not aware of any reason why it
>>> can't always be set. My guess is performance, but I haven't benchmarked
>>> the difference between this and JS_CONFIG_START_MMU.
>>>
>>> -----8<----------
>>>  From e3f75c7f04e43238dfc579029b8c11fb6b4a0c18 Mon Sep 17 00:00:00 2001
>>> From: Steven Price <steven.price@arm.com>
>>> Date: Thu, 4 Apr 2019 15:53:17 +0100
>>> Subject: [PATCH] iommu: io-pgtable: IO_PGTABLE_QUIRK_TLBI_ON_MAP for LPAE
>>>
>>> Midgard/Bifrost GPUs require a TLB invalidation when mapping pages,
>>> implement IO_PGTABLE_QUIRK_TLBI_ON_MAP for LPAE iommu page table
>>> formats and add the quirk bit to Panfrost.
>>>
>>> Signed-off-by: Steven Price <steven.price@arm.com>
>>> ---
>>>   drivers/gpu/drm/panfrost/panfrost_mmu.c |  1 +
>>>   drivers/iommu/io-pgtable-arm.c          | 11 +++++++++--
>>>   2 files changed, 10 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c
>>> b/drivers/gpu/drm/panfrost/panfrost_mmu.c
>>> index f3aad8591cf4..094312074d66 100644
>>> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
>>> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
>>> @@ -343,6 +343,7 @@ int panfrost_mmu_init(struct panfrost_device *pfdev)
>>>       mmu_write(pfdev, MMU_INT_MASK, ~0);
>>>
>>>       pfdev->mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
>>> +        .quirks        = IO_PGTABLE_QUIRK_TLBI_ON_MAP,
>>>           .pgsize_bitmap    = SZ_4K, // | SZ_2M | SZ_1G),
>>>           .ias        = 48,
>>>           .oas        = 40,    /* Should come from dma mask? */
>>> diff --git a/drivers/iommu/io-pgtable-arm.c
>>> b/drivers/iommu/io-pgtable-arm.c
>>> index 84beea1f47a7..45fd7bbdf9aa 100644
>>> --- a/drivers/iommu/io-pgtable-arm.c
>>> +++ b/drivers/iommu/io-pgtable-arm.c
>>> @@ -505,7 +505,13 @@ static int arm_lpae_map(struct io_pgtable_ops *ops,
>>> unsigned long iova,
>>>        * Synchronise all PTE updates for the new mapping before there's
>>>        * a chance for anything to kick off a table walk for the new iova.
>>>        */
>>> -    wmb();
>>> +    if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_TLBI_ON_MAP) {
>>> +        io_pgtable_tlb_add_flush(&data->iop, iova, size,
>>> +                     ARM_LPAE_BLOCK_SIZE(2, data), false);
>>
>> For correctness (in case this ever ends up used for something with
>> VMSA-like invalidation behaviour), the granule would need to be "size"
>> here, rather than effectively hard-coded.
> 
> Ah yes - I did rather just copy/paste this from io-pgtable-arm-v7s with
> minor fix-ups.
> 
>> However, since Mali's invalidations appear to operate on arbitrary
>> ranges, it would probably be a lot more efficient for the driver to
>> handle this case directly, by just issuing a single big invalidation
>> after the for_each_sg() loop in panfrost_mmu_map().
> 
> Yes - that would probably be a better option. Although I think
> personally I'd lean towards just using JS_CONFIG_START_MMU for most
> cases. The only thing that won't handle is modifying the MMU while the
> job is running (e.g. faulting in pages). But that can be handled
> internally in Panfrost by invalidating the exact region which is being
> populated.

I asked around. Apparently there are some interesting issues with
START_MMU on some hardware revisions. So best to follow mali_kbase here
and only use START_MMU on those hardware revisions that mali_kbase does
(what Panfrost is already doing). Which means we'll definitely need this
quirk in some form.

Steve

> 
> Steve
> 
>> Robin.
>>
>>> +        io_pgtable_tlb_sync(&data->iop);
>>> +    } else {
>>> +        wmb();
>>> +    }
>>>
>>>       return ret;
>>>   }
>>> @@ -800,7 +806,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg
>>> *cfg, void *cookie)
>>>       struct arm_lpae_io_pgtable *data;
>>>
>>>       if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
>>> IO_PGTABLE_QUIRK_NO_DMA |
>>> -                IO_PGTABLE_QUIRK_NON_STRICT))
>>> +                IO_PGTABLE_QUIRK_NON_STRICT |
>>> +                IO_PGTABLE_QUIRK_TLBI_ON_MAP))
>>>           return NULL;
>>>
>>>       data = arm_lpae_alloc_pgtable(cfg);
>>>
Joerg Roedel April 11, 2019, 1:15 p.m. UTC | #7
On Mon, Apr 01, 2019 at 08:11:00PM +0100, Robin Murphy wrote:
> With the diff below squashed in to address my outstanding style nits,
> 
> Acked-by: Robin Murphy <robin.murphy@arm.com>
> 
> I don't foresee any conflicting io-pgtable changes to prevent this going via
> DRM, but I'll leave the final say up to Joerg.

No objection from my side with merging this via DRM. With Robin's
concerns addressed:

	Acked-by: Joerg Roedel <jroedel@suse.de>

> 
> Thanks,
> Robin.
> 
> ----->8-----
> diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
> index 98551d0cff59..55ed039da166 100644
> --- a/drivers/iommu/io-pgtable-arm.c
> +++ b/drivers/iommu/io-pgtable-arm.c
> @@ -197,12 +197,13 @@ struct arm_lpae_io_pgtable {
> 
>  typedef u64 arm_lpae_iopte;
> 
> -static inline bool iopte_leaf(arm_lpae_iopte pte, int l, enum
> io_pgtable_fmt fmt)
> +static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
> +			      enum io_pgtable_fmt fmt)
>  {
> -	if ((l == (ARM_LPAE_MAX_LEVELS - 1)) && (fmt != ARM_MALI_LPAE))
> -		return iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE;
> +	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
> +		return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE;
> 
> -	return iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK;
> +	return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK;
>  }
> 
>  static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
> @@ -310,13 +311,10 @@ static void __arm_lpae_init_pte(struct
> arm_lpae_io_pgtable *data,
>  	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
>  		pte |= ARM_LPAE_PTE_NS;
> 
> -	if (lvl == ARM_LPAE_MAX_LEVELS - 1) {
> -		if (data->iop.fmt == ARM_MALI_LPAE)
> -			pte |= ARM_LPAE_PTE_TYPE_BLOCK;
> -		else
> -			pte |= ARM_LPAE_PTE_TYPE_PAGE;
> -	} else
> +	if (data->iop.fmt != ARM_MALI_LPAE && lvl != ARM_LPAE_MAX_LEVELS - 1)
>  		pte |= ARM_LPAE_PTE_TYPE_BLOCK;
> +	else
> +		pte |= ARM_LPAE_PTE_TYPE_PAGE;
> 
>  	if (data->iop.fmt != ARM_MALI_LPAE)
>  		pte |= ARM_LPAE_PTE_AF;

Patch

diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index d3700ec15cbd..98551d0cff59 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -172,6 +172,10 @@ 
 #define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
 #define ARM_LPAE_MAIR_ATTR_IDX_DEV	2

+#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
+#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
+#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)
+
 /* IOPTE accessors */
 #define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

@@ -180,11 +184,6 @@ 

 #define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

-#define iopte_leaf(pte,l)					\
-	(l == (ARM_LPAE_MAX_LEVELS - 1) ?			\
-		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
-		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))
-
 struct arm_lpae_io_pgtable {
 	struct io_pgtable	iop;

@@ -198,6 +197,14 @@  struct arm_lpae_io_pgtable {

 typedef u64 arm_lpae_iopte;

+static inline bool iopte_leaf(arm_lpae_iopte pte, int l, enum io_pgtable_fmt fmt)
+{
+	if ((l == (ARM_LPAE_MAX_LEVELS - 1)) && (fmt != ARM_MALI_LPAE))
+		return iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE;
+
+	return iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK;
+}
+
 static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
 				     struct arm_lpae_io_pgtable *data)
 {
@@ -303,12 +310,17 @@  static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
 		pte |= ARM_LPAE_PTE_NS;

-	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
-		pte |= ARM_LPAE_PTE_TYPE_PAGE;
-	else
+	if (lvl == ARM_LPAE_MAX_LEVELS - 1) {
+		if (data->iop.fmt == ARM_MALI_LPAE)
+			pte |= ARM_LPAE_PTE_TYPE_BLOCK;
+		else
+			pte |= ARM_LPAE_PTE_TYPE_PAGE;
+	} else
 		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

-	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
+	if (data->iop.fmt != ARM_MALI_LPAE)
+		pte |= ARM_LPAE_PTE_AF;
+	pte |= ARM_LPAE_PTE_SH_IS;
 	pte |= paddr_to_iopte(paddr, data);

 	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
@@ -321,7 +333,7 @@  static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 {
 	arm_lpae_iopte pte = *ptep;

-	if (iopte_leaf(pte, lvl)) {
+	if (iopte_leaf(pte, lvl, data->iop.fmt)) {
 		/* We require an unmap first */
 		WARN_ON(!selftest_running);
 		return -EEXIST;
@@ -409,7 +421,7 @@  static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
 		__arm_lpae_sync_pte(ptep, cfg);
 	}

-	if (pte && !iopte_leaf(pte, lvl)) {
+	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
 		cptep = iopte_deref(pte, data);
 	} else if (pte) {
 		/* We require an unmap first */
@@ -429,31 +441,33 @@  static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
 	if (data->iop.fmt == ARM_64_LPAE_S1 ||
 	    data->iop.fmt == ARM_32_LPAE_S1) {
 		pte = ARM_LPAE_PTE_nG;
-
 		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
 			pte |= ARM_LPAE_PTE_AP_RDONLY;
-
 		if (!(prot & IOMMU_PRIV))
 			pte |= ARM_LPAE_PTE_AP_UNPRIV;
-
-		if (prot & IOMMU_MMIO)
-			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
-				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
-		else if (prot & IOMMU_CACHE)
-			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
-				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
 	} else {
 		pte = ARM_LPAE_PTE_HAP_FAULT;
 		if (prot & IOMMU_READ)
 			pte |= ARM_LPAE_PTE_HAP_READ;
 		if (prot & IOMMU_WRITE)
 			pte |= ARM_LPAE_PTE_HAP_WRITE;
+	}
+
+	if (data->iop.fmt == ARM_64_LPAE_S2 ||
+	    data->iop.fmt == ARM_32_LPAE_S2) {
 		if (prot & IOMMU_MMIO)
 			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
 		else if (prot & IOMMU_CACHE)
 			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
 		else
 			pte |= ARM_LPAE_PTE_MEMATTR_NC;
+	} else {
+		if (prot & IOMMU_MMIO)
+			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
+				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
+		else if (prot & IOMMU_CACHE)
+			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
+				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
 	}

 	if (prot & IOMMU_NOEXEC)
@@ -511,7 +525,7 @@  static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
 	while (ptep != end) {
 		arm_lpae_iopte pte = *ptep++;

-		if (!pte || iopte_leaf(pte, lvl))
+		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
 			continue;

 		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
@@ -602,7 +616,7 @@  static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
 		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

-		if (!iopte_leaf(pte, lvl)) {
+		if (!iopte_leaf(pte, lvl, iop->fmt)) {
 			/* Also flush any partial walks */
 			io_pgtable_tlb_add_flush(iop, iova, size,
 						ARM_LPAE_GRANULE(data), false);
@@ -621,7 +635,7 @@  static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 		}

 		return size;
-	} else if (iopte_leaf(pte, lvl)) {
+	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
 		/*
 		 * Insert a table at the next level to map the old region,
 		 * minus the part we want to unmap
@@ -669,7 +683,7 @@  static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
 			return 0;

 		/* Leaf entry? */
-		if (iopte_leaf(pte,lvl))
+		if (iopte_leaf(pte, lvl, data->iop.fmt))
 			goto found_translation;

 		/* Take it to the next level */
@@ -995,6 +1009,32 @@  arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 	return iop;
 }

+static struct io_pgtable *
+arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
+{
+	struct io_pgtable *iop;
+
+	if (cfg->ias != 48 || cfg->oas > 40)
+		return NULL;
+
+	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
+	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
+	if (iop) {
+		u64 mair, ttbr;
+
+		/* Copy values as union fields overlap */
+		mair = cfg->arm_lpae_s1_cfg.mair[0];
+		ttbr = cfg->arm_lpae_s1_cfg.ttbr[0];
+
+		cfg->arm_mali_lpae_cfg.memattr = mair;
+		cfg->arm_mali_lpae_cfg.transtab = ttbr |
+			ARM_MALI_LPAE_TTBR_READ_INNER |
+			ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
+	}
+
+	return iop;
+}
+
 struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
 	.alloc	= arm_64_lpae_alloc_pgtable_s1,
 	.free	= arm_lpae_free_pgtable,
@@ -1015,6 +1055,11 @@  struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
 	.free	= arm_lpae_free_pgtable,
 };

+struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
+	.alloc	= arm_mali_lpae_alloc_pgtable,
+	.free	= arm_lpae_free_pgtable,
+};
+
 #ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

 static struct io_pgtable_cfg *cfg_cookie;
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index 93f2880be6c6..5227cfdbb65b 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -30,6 +30,7 @@  io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
 	[ARM_32_LPAE_S2] = &io_pgtable_arm_32_lpae_s2_init_fns,
 	[ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns,
 	[ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns,
+	[ARM_MALI_LPAE] = &io_pgtable_arm_mali_lpae_init_fns,
 #endif
 #ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S
 	[ARM_V7S] = &io_pgtable_arm_v7s_init_fns,
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 47d5ae559329..76969a564831 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -12,6 +12,7 @@  enum io_pgtable_fmt {
 	ARM_64_LPAE_S1,
 	ARM_64_LPAE_S2,
 	ARM_V7S,
+	ARM_MALI_LPAE,
 	IO_PGTABLE_NUM_FMTS,
 };

@@ -108,6 +109,11 @@  struct io_pgtable_cfg {
 			u32	nmrr;
 			u32	prrr;
 		} arm_v7s_cfg;
+
+		struct {
+			u64	transtab;
+			u64	memattr;
+		} arm_mali_lpae_cfg;
 	};
 };

@@ -209,5 +215,6 @@  extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
 extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
 extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
 extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns;

 #endif /* __IO_PGTABLE_H */
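
For context, a driver-side consumer of the new format would look roughly
like the sketch below. The structure follows the panfrost_mmu_init()
snippet quoted earlier in the thread; the AS_* register macros, mmu_write()
and mmu_tlb_ops are assumptions for illustration, not verified against the
Panfrost sources:

/* Sketch only: allocate Mali LPAE page tables and program the GPU MMU */
static int mali_mmu_enable_sketch(struct panfrost_device *pfdev)
{
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K,
		.ias		= 48,
		.oas		= 40,
		.tlb		= &mmu_tlb_ops,	/* driver's flush callbacks */
		.iommu_dev	= pfdev->dev,
	};
	struct io_pgtable_ops *ops;

	ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &cfg, pfdev);
	if (!ops)
		return -EINVAL;

	/* The format code hands back complete TRANSTAB/MEMATTR register values */
	mmu_write(pfdev, AS_TRANSTAB_LO(0),
		  lower_32_bits(cfg.arm_mali_lpae_cfg.transtab));
	mmu_write(pfdev, AS_TRANSTAB_HI(0),
		  upper_32_bits(cfg.arm_mali_lpae_cfg.transtab));
	mmu_write(pfdev, AS_MEMATTR_LO(0),
		  lower_32_bits(cfg.arm_mali_lpae_cfg.memattr));
	mmu_write(pfdev, AS_MEMATTR_HI(0),
		  upper_32_bits(cfg.arm_mali_lpae_cfg.memattr));

	pfdev->mmu->pgtbl_ops = ops;
	return 0;
}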