
[v3,2/2] iommu: rockchip: Allocate tables from all available memory for IOMMU v2

Message ID 20230617182540.3091374-3-jonas@kwiboo.se (mailing list archive)
State New, archived
Series iommu: rockchip: Fix directory table address encoding

Commit Message

Jonas Karlman June 17, 2023, 6:25 p.m. UTC
The IOMMU v2 found in newer Rockchip SoCs, e.g. RK356x and RK3588,
supports placing directory and page tables in up to 40-bit addressable
physical memory.

Remove the use of the GFP_DMA32 flag for IOMMU v2 now that the physical
address of the directory table is correctly written to DTE_ADDR.

Signed-off-by: Jonas Karlman <jonas@kwiboo.se>
---
v3:
- rework to only affect IOMMU v2

v2:
- no change

 drivers/iommu/rockchip-iommu.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

Comments

Robin Murphy June 19, 2023, 2:51 p.m. UTC | #1
On 17/06/2023 7:25 pm, Jonas Karlman wrote:
> The IOMMU v2 found in newer Rockchip SoCs, e.g. RK356x and RK3588,
> supports placing directory and page tables in up to 40-bit addressable
> physical memory.
> 
> Remove the use of the GFP_DMA32 flag for IOMMU v2 now that the physical
> address of the directory table is correctly written to DTE_ADDR.

FWIW I'd be tempted to refactor a bit harder since this is closely 
coupled to the DMA mask and both could be calculated from a single data 
value, but there's absolutely nothing wrong with this approach either.
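
A minimal sketch of that single-value idea, for illustration only (the
pa_bits field and rk_table_gfp() helper are hypothetical names, not
existing driver code):

	struct rk_iommu_ops {
		...
		u32 pa_bits;	/* physical address width usable for tables */
	};

	static gfp_t rk_table_gfp(void)
	{
		/* keep tables below 4 GiB when only 32 address bits are usable */
		return rk_ops->pa_bits <= 32 ? GFP_DMA32 : 0;
	}

	/* in probe, instead of a stored dma_bit_mask: */
	dma_set_mask(dev, DMA_BIT_MASK(rk_ops->pa_bits));

	/* at both table allocation sites: */
	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | rk_table_gfp());

with .pa_bits = 32 for the v1 ops and .pa_bits = 40 for v2.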

Reviewed-by: Robin Murphy <robin.murphy@arm.com>

[ In fact if you start down that rabbit-hole, then I think logically it 
leads to an even bigger refactor to convert the whole lot to use 
dma_alloc_pages() instead ]
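
For reference, a rough sketch of what the dma_alloc_pages() route could
look like for the directory table allocation (illustrative only; the
current code keeps get_zeroed_page() plus an explicit dma_map_single()
on the driver's global dma_dev):

	struct page *page;

	/* non-coherent pages, addressable within dma_dev's DMA mask */
	page = dma_alloc_pages(dma_dev, SPAGE_SIZE, &rk_domain->dt_dma,
			       DMA_TO_DEVICE, GFP_KERNEL);
	if (!page)
		goto err_free_domain;
	rk_domain->dt = page_address(page);
	/* zero-initialization of the returned pages may still need checking */

The separate dma_map_single() call would go away, while the existing
dma_sync_single_for_device() flushes after CPU updates would remain,
since dma_alloc_pages() returns non-coherent memory.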

> Signed-off-by: Jonas Karlman <jonas@kwiboo.se>
> ---
> v3:
> - rework to only affect IOMMU v2
> 
> v2:
> - no change
> 
>   drivers/iommu/rockchip-iommu.c | 7 +++++--
>   1 file changed, 5 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
> index ae42959bc490..8ff69fbf9f65 100644
> --- a/drivers/iommu/rockchip-iommu.c
> +++ b/drivers/iommu/rockchip-iommu.c
> @@ -99,6 +99,7 @@ struct rk_iommu_ops {
>   	u32 (*mk_dtentries)(dma_addr_t pt_dma);
>   	u32 (*mk_ptentries)(phys_addr_t page, int prot);
>   	u64 dma_bit_mask;
> +	gfp_t gfp_flags;
>   };
>   
>   struct rk_iommu {
> @@ -727,7 +728,7 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
>   	if (rk_dte_is_pt_valid(dte))
>   		goto done;
>   
> -	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
> +	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | rk_ops->gfp_flags);
>   	if (!page_table)
>   		return ERR_PTR(-ENOMEM);
>   
> @@ -1076,7 +1077,7 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
>   	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
>   	 * Allocate one 4 KiB page for each table.
>   	 */
> -	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
> +	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | rk_ops->gfp_flags);
>   	if (!rk_domain->dt)
>   		goto err_free_domain;
>   
> @@ -1377,6 +1378,7 @@ static struct rk_iommu_ops iommu_data_ops_v1 = {
>   	.mk_dtentries = &rk_mk_dte,
>   	.mk_ptentries = &rk_mk_pte,
>   	.dma_bit_mask = DMA_BIT_MASK(32),
> +	.gfp_flags = GFP_DMA32,
>   };
>   
>   static struct rk_iommu_ops iommu_data_ops_v2 = {
> @@ -1384,6 +1386,7 @@ static struct rk_iommu_ops iommu_data_ops_v2 = {
>   	.mk_dtentries = &rk_mk_dte_v2,
>   	.mk_ptentries = &rk_mk_pte_v2,
>   	.dma_bit_mask = DMA_BIT_MASK(40),
> +	.gfp_flags = 0,
>   };
>   
>   static const struct of_device_id rk_iommu_dt_ids[] = {

Patch

diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index ae42959bc490..8ff69fbf9f65 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -99,6 +99,7 @@  struct rk_iommu_ops {
 	u32 (*mk_dtentries)(dma_addr_t pt_dma);
 	u32 (*mk_ptentries)(phys_addr_t page, int prot);
 	u64 dma_bit_mask;
+	gfp_t gfp_flags;
 };
 
 struct rk_iommu {
@@ -727,7 +728,7 @@  static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
 	if (rk_dte_is_pt_valid(dte))
 		goto done;
 
-	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | rk_ops->gfp_flags);
 	if (!page_table)
 		return ERR_PTR(-ENOMEM);
 
@@ -1076,7 +1077,7 @@  static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
 	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
 	 * Allocate one 4 KiB page for each table.
 	 */
-	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
+	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | rk_ops->gfp_flags);
 	if (!rk_domain->dt)
 		goto err_free_domain;
 
@@ -1377,6 +1378,7 @@  static struct rk_iommu_ops iommu_data_ops_v1 = {
 	.mk_dtentries = &rk_mk_dte,
 	.mk_ptentries = &rk_mk_pte,
 	.dma_bit_mask = DMA_BIT_MASK(32),
+	.gfp_flags = GFP_DMA32,
 };
 
 static struct rk_iommu_ops iommu_data_ops_v2 = {
@@ -1384,6 +1386,7 @@  static struct rk_iommu_ops iommu_data_ops_v2 = {
 	.mk_dtentries = &rk_mk_dte_v2,
 	.mk_ptentries = &rk_mk_pte_v2,
 	.dma_bit_mask = DMA_BIT_MASK(40),
+	.gfp_flags = 0,
 };
 
 static const struct of_device_id rk_iommu_dt_ids[] = {