diff mbox series

[02/11] mm: ioremap: fixup the physical address

Message ID 20220801144029.57829-3-bhe@redhat.com (mailing list archive)
State New
Headers show
Series mm: ioremap: Convert architectures to take GENERIC_IOREMAP way | expand

Commit Message

Baoquan He Aug. 1, 2022, 2:40 p.m. UTC
On some architectures, the I/O address will be fixed up before
mapping, e.g., parisc and mips.

In order to convert them to take the GENERIC_IOREMAP method, we need to
wrap the address fixing-up code into ioremap_allowed() and pass the new
address out for ioremap_prot() handling.

This is a preparation patch, no functionality change.

Signed-off-by: Baoquan He <bhe@redhat.com>
---
 arch/arm64/include/asm/io.h |  3 ++-
 arch/arm64/mm/ioremap.c     | 10 ++++++++--
 include/asm-generic/io.h    |  6 +++---
 mm/ioremap.c                | 19 ++++++++++---------
 4 files changed, 23 insertions(+), 15 deletions(-)

Comments

Alexander Gordeev Aug. 4, 2022, 4:02 p.m. UTC | #1
On Mon, Aug 01, 2022 at 10:40:20PM +0800, Baoquan He wrote:
> This is a preparation patch, no functionality change.

There is, please see below.

> @@ -3,11 +3,17 @@
>  #include <linux/mm.h>
>  #include <linux/io.h>
>  
> -void __iomem *ioremap_allowed(phys_addr_t phys_addr, size_t size, unsigned long prot)
> +void __iomem *
> +ioremap_allowed(phys_addr_t *paddr, size_t size, unsigned long *prot_val)
>  {
> -	unsigned long last_addr = phys_addr + size - 1;
> +	unsigned long last_addr, offset, phys_addr = *paddr;
>  	int ret = -EINVAL;
>  
> +	offset = phys_addr & (~PAGE_MASK);
> +	phys_addr -= offset;

FWIW, phys_addr &= PAGE_MASK looks much more usual.

> @@ -11,13 +11,20 @@
>  #include <linux/io.h>
>  #include <linux/export.h>
>  
> -void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
> +void __iomem *ioremap_prot(phys_addr_t paddr, size_t size,
>  			   unsigned long prot)
>  {
>  	unsigned long offset, vaddr;
> -	phys_addr_t last_addr;
> +	phys_addr_t last_addr, phys_addr = paddr;
>  	struct vm_struct *area;
>  	void __iomem *base;
> +	unsigned long prot_val = prot;

Why prot_val is needed?

> +	base = ioremap_allowed(&phys_addr, size, &prot_val);
> +	if (IS_ERR(base))
> +		return NULL;
> +	else if (base)
> +		return base;

By moving ioremap_allowed() here you allow it to be called
before the wrap-around check, including architectures that
do not do fixups.

And now ioremap_allowed() semantics, prototype and name turn
less than obvious. Why not introduce a separate fixup callback?

>  	/* Disallow wrap-around or zero size */
>  	last_addr = phys_addr + size - 1;
> @@ -29,12 +36,6 @@ void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
>  	phys_addr -= offset;
>  	size = PAGE_ALIGN(size + offset);
>  
> -	base = ioremap_allowed(phys_addr, size, prot);
> -	if (IS_ERR(base))
> -		return NULL;
> -	else if (base)
> -		return base;
> -
>  	area = get_vm_area_caller(size, VM_IOREMAP,
>  			__builtin_return_address(0));
>  	if (!area)
Baoquan He Aug. 7, 2022, 2:11 a.m. UTC | #2
On 08/04/22 at 06:02pm, Alexander Gordeev wrote:
> On Mon, Aug 01, 2022 at 10:40:20PM +0800, Baoquan He wrote:
> > This is a preparation patch, no functionality change.
> 
> There is, please see below.
> 
> > @@ -3,11 +3,17 @@
> >  #include <linux/mm.h>
> >  #include <linux/io.h>
> >  
> > -void __iomem *ioremap_allowed(phys_addr_t phys_addr, size_t size, unsigned long prot)
> > +void __iomem *
> > +ioremap_allowed(phys_addr_t *paddr, size_t size, unsigned long *prot_val)
> >  {
> > -	unsigned long last_addr = phys_addr + size - 1;
> > +	unsigned long last_addr, offset, phys_addr = *paddr;
> >  	int ret = -EINVAL;
> >  
> > +	offset = phys_addr & (~PAGE_MASK);
> > +	phys_addr -= offset;
> 
> FWIW, phys_addr &= PAGE_MASK looks much more usual.

Sure, will change.

> 
> > @@ -11,13 +11,20 @@
> >  #include <linux/io.h>
> >  #include <linux/export.h>
> >  
> > -void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
> > +void __iomem *ioremap_prot(phys_addr_t paddr, size_t size,
> >  			   unsigned long prot)
> >  {
> >  	unsigned long offset, vaddr;
> > -	phys_addr_t last_addr;
> > +	phys_addr_t last_addr, phys_addr = paddr;
> >  	struct vm_struct *area;
> >  	void __iomem *base;
> > +	unsigned long prot_val = prot;
> 
> Why prot_val is needed?

I will remove it and pass &prot to ioremap_allowed(). I could think too
much when I made change here. Thanks.

> 
> > +	base = ioremap_allowed(&phys_addr, size, &prot_val);
> > +	if (IS_ERR(base))
> > +		return NULL;
> > +	else if (base)
> > +		return base;
> 
> By moving ioremap_allowed() here you allow it to be called
> before the wrap-around check, including architectures that
> do not do fixups.

Yes, just as you say.

> 
> And now ioremap_allowed() semantics, prototype and name turn
> less than obvious. Why not introduce a separate fixup callback?

I can do that, though I am a little worried about introducing too many
hooks. I will introduce a separate fixup hook in v2 if there is no other
suggestion or concern. Thanks.


> 
> >  	/* Disallow wrap-around or zero size */
> >  	last_addr = phys_addr + size - 1;
> > @@ -29,12 +36,6 @@ void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
> >  	phys_addr -= offset;
> >  	size = PAGE_ALIGN(size + offset);
> >  
> > -	base = ioremap_allowed(phys_addr, size, prot);
> > -	if (IS_ERR(base))
> > -		return NULL;
> > -	else if (base)
> > -		return base;
> > -
> >  	area = get_vm_area_caller(size, VM_IOREMAP,
> >  			__builtin_return_address(0));
> >  	if (!area)
>
Baoquan He Aug. 20, 2022, 12:49 a.m. UTC | #3
On 08/04/22 at 06:02pm, Alexander Gordeev wrote:
> On Mon, Aug 01, 2022 at 10:40:20PM +0800, Baoquan He wrote:
> > This is a preparation patch, no functionality change.
> 
> There is, please see below.
> 
> > @@ -3,11 +3,17 @@
> >  #include <linux/mm.h>
> >  #include <linux/io.h>
> >  
> > -void __iomem *ioremap_allowed(phys_addr_t phys_addr, size_t size, unsigned long prot)
> > +void __iomem *
> > +ioremap_allowed(phys_addr_t *paddr, size_t size, unsigned long *prot_val)
> >  {
> > -	unsigned long last_addr = phys_addr + size - 1;
> > +	unsigned long last_addr, offset, phys_addr = *paddr;
> >  	int ret = -EINVAL;
> >  
> > +	offset = phys_addr & (~PAGE_MASK);
> > +	phys_addr -= offset;
> 
> FWIW, phys_addr &= PAGE_MASK looks much more usual.
> 
> > @@ -11,13 +11,20 @@
> >  #include <linux/io.h>
> >  #include <linux/export.h>
> >  
> > -void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
> > +void __iomem *ioremap_prot(phys_addr_t paddr, size_t size,
> >  			   unsigned long prot)
> >  {
> >  	unsigned long offset, vaddr;
> > -	phys_addr_t last_addr;
> > +	phys_addr_t last_addr, phys_addr = paddr;
> >  	struct vm_struct *area;
> >  	void __iomem *base;
> > +	unsigned long prot_val = prot;
> 
> Why prot_val is needed?
> 
> > +	base = ioremap_allowed(&phys_addr, size, &prot_val);
> > +	if (IS_ERR(base))
> > +		return NULL;
> > +	else if (base)
> > +		return base;
> 
> By moving ioremap_allowed() here you allow it to be called
> before the wrap-around check, including architectures that
> do not do fixups.
> 
> And now ioremap_allowed() semantics, prototype and name turn
> less than obvious. Why not introduce a separate fixup callback?

I finally renamed ioremap_allowed()/iounmap_allowed() to arch_ioremap()
and arch_iounmap(). I didn't introduce a separate fixup callback, and
have added more explanation to the logs of patches 1~2; please check that.
diff mbox series

Patch

diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 9c56a077b687..7f0c5f60d946 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -164,7 +164,8 @@  extern void __memset_io(volatile void __iomem *, int, size_t);
  * I/O memory mapping functions.
  */
 
-void __iomem *ioremap_allowed(phys_addr_t phys_addr, size_t size, unsigned long prot);
+void __iomem *
+ioremap_allowed(phys_addr_t *paddr, size_t size, unsigned long *prot_val);
 #define ioremap_allowed ioremap_allowed
 
 #define _PAGE_IOREMAP PROT_DEVICE_nGnRE
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index 49467c781283..b9febcf0f5f4 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -3,11 +3,17 @@ 
 #include <linux/mm.h>
 #include <linux/io.h>
 
-void __iomem *ioremap_allowed(phys_addr_t phys_addr, size_t size, unsigned long prot)
+void __iomem *
+ioremap_allowed(phys_addr_t *paddr, size_t size, unsigned long *prot_val)
 {
-	unsigned long last_addr = phys_addr + size - 1;
+	unsigned long last_addr, offset, phys_addr = *paddr;
 	int ret = -EINVAL;
 
+	offset = phys_addr & (~PAGE_MASK);
+	phys_addr -= offset;
+	size = PAGE_ALIGN(size + offset);
+	last_addr = phys_addr + size - 1;
+
 	/* Don't allow outside PHYS_MASK */
 	if (last_addr & ~PHYS_MASK)
 		return IOMEM_ERR_PTR(ret);
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index d72eb310fb3c..0b5cd3cef99d 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -976,8 +976,8 @@  static inline void iounmap(volatile void __iomem *addr)
  */
 #ifndef ioremap_allowed
 #define ioremap_allowed ioremap_allowed
-static inline void __iomem *ioremap_allowed(phys_addr_t phys_addr, size_t size,
-				   unsigned long prot)
+static inline void __iomem *ioremap_allowed(phys_addr_t *paddr, size_t size,
+				   unsigned long *prot_val)
 {
 	return NULL;
 }
@@ -991,7 +991,7 @@  static inline int iounmap_allowed(void *addr)
 }
 #endif
 
-void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+void __iomem *ioremap_prot(phys_addr_t paddr, size_t size,
 			   unsigned long prot);
 void iounmap(volatile void __iomem *addr);
 
diff --git a/mm/ioremap.c b/mm/ioremap.c
index b16ee9debea2..01439882112e 100644
--- a/mm/ioremap.c
+++ b/mm/ioremap.c
@@ -11,13 +11,20 @@ 
 #include <linux/io.h>
 #include <linux/export.h>
 
-void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+void __iomem *ioremap_prot(phys_addr_t paddr, size_t size,
 			   unsigned long prot)
 {
 	unsigned long offset, vaddr;
-	phys_addr_t last_addr;
+	phys_addr_t last_addr, phys_addr = paddr;
 	struct vm_struct *area;
 	void __iomem *base;
+	unsigned long prot_val = prot;
+
+	base = ioremap_allowed(&phys_addr, size, &prot_val);
+	if (IS_ERR(base))
+		return NULL;
+	else if (base)
+		return base;
 
 	/* Disallow wrap-around or zero size */
 	last_addr = phys_addr + size - 1;
@@ -29,12 +36,6 @@  void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
 	phys_addr -= offset;
 	size = PAGE_ALIGN(size + offset);
 
-	base = ioremap_allowed(phys_addr, size, prot);
-	if (IS_ERR(base))
-		return NULL;
-	else if (base)
-		return base;
-
 	area = get_vm_area_caller(size, VM_IOREMAP,
 			__builtin_return_address(0));
 	if (!area)
@@ -43,7 +44,7 @@  void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
 	area->phys_addr = phys_addr;
 
 	if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
-			       __pgprot(prot))) {
+			       __pgprot(prot_val))) {
 		free_vm_area(area);
 		return NULL;
 	}