
[v5,4/6] mm: ioremap: Add ioremap/iounmap_allowed()

Message ID 20220607125027.44946-5-wangkefeng.wang@huawei.com (mailing list archive)
State New
Series arm64: Cleanup ioremap() and support ioremap_prot()

Commit Message

Kefeng Wang June 7, 2022, 12:50 p.m. UTC
Add special hooks for the architecture to verify the addr, size or prot
when calling ioremap() or iounmap(), which makes the generic ioremap
more useful.

  ioremap_allowed() returns a bool:
    - true means continue to remap
    - false means skip remap and return directly
  iounmap_allowed() returns a bool:
    - true means continue to vunmap
    - false means skip vunmap and return directly

In addition, only vunmap the address when it lies in the vmalloc area,
as the generic ioremap only returns vmalloc addresses.

Acked-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 include/asm-generic/io.h | 26 ++++++++++++++++++++++++++
 mm/ioremap.c             | 11 ++++++++++-
 2 files changed, 36 insertions(+), 1 deletion(-)
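
For illustration only (not part of this patch): an architecture that
selects GENERIC_IOREMAP can override a hook in its asm/io.h before
including asm-generic/io.h. A minimal sketch, assuming the arch wants to
refuse remapping of anything covered by its kernel memory map; the exact
policy and header-ordering details are up to each architecture:

#define ioremap_allowed ioremap_allowed
static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
                                   unsigned long prot)
{
        unsigned long pfn, last_pfn = PHYS_PFN(phys_addr + size - 1);

        /* Reject ranges that have a memmap entry, i.e. look like RAM. */
        for (pfn = PHYS_PFN(phys_addr); pfn <= last_pfn; pfn++)
                if (pfn_valid(pfn))
                        return false;

        return true;
}

#include <asm-generic/io.h>
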

Comments

Baoquan He June 8, 2022, 4:18 a.m. UTC | #1
On 06/07/22 at 08:50pm, Kefeng Wang wrote:
> Add special hooks for the architecture to verify the addr, size or prot
> when calling ioremap() or iounmap(), which makes the generic ioremap
> more useful.
> 
>   ioremap_allowed() returns a bool:
>     - true means continue to remap
>     - false means skip remap and return directly
>   iounmap_allowed() returns a bool:
>     - true means continue to vunmap
>     - false means skip vunmap and return directly
> 
> In addition, only vunmap the address when it lies in the vmalloc area,
> as the generic ioremap only returns vmalloc addresses.

LGTM,

Reviewed-by: Baoquan He <bhe@redhat.com>

> 
> Acked-by: Andrew Morton <akpm@linux-foundation.org>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---
>  include/asm-generic/io.h | 26 ++++++++++++++++++++++++++
>  mm/ioremap.c             | 11 ++++++++++-
>  2 files changed, 36 insertions(+), 1 deletion(-)
> 
> diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
> index b76379628a02..db5b890eaff7 100644
> --- a/include/asm-generic/io.h
> +++ b/include/asm-generic/io.h
> @@ -964,6 +964,32 @@ static inline void iounmap(volatile void __iomem *addr)
>  #elif defined(CONFIG_GENERIC_IOREMAP)
>  #include <linux/pgtable.h>
>  
> +/*
> + * Arch code can implement the following two hooks when using GENERIC_IOREMAP
> + * ioremap_allowed() return a bool,
> + *   - true means continue to remap
> + *   - false means skip remap and return directly
> + * iounmap_allowed() return a bool,
> + *   - true means continue to vunmap
> + *   - false means skip vunmap and return directly
> + */
> +#ifndef ioremap_allowed
> +#define ioremap_allowed ioremap_allowed
> +static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
> +				   unsigned long prot)
> +{
> +	return true;
> +}
> +#endif
> +
> +#ifndef iounmap_allowed
> +#define iounmap_allowed iounmap_allowed
> +static inline bool iounmap_allowed(void *addr)
> +{
> +	return true;
> +}
> +#endif
> +
>  void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
>  			   unsigned long prot);
>  void iounmap(volatile void __iomem *addr);
> diff --git a/mm/ioremap.c b/mm/ioremap.c
> index e1d008e8f87f..8652426282cc 100644
> --- a/mm/ioremap.c
> +++ b/mm/ioremap.c
> @@ -28,6 +28,9 @@ void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
>  	phys_addr -= offset;
>  	size = PAGE_ALIGN(size + offset);
>  
> +	if (!ioremap_allowed(phys_addr, size, prot))
> +		return NULL;
> +
>  	area = get_vm_area_caller(size, VM_IOREMAP,
>  			__builtin_return_address(0));
>  	if (!area)
> @@ -47,6 +50,12 @@ EXPORT_SYMBOL(ioremap_prot);
>  
>  void iounmap(volatile void __iomem *addr)
>  {
> -	vunmap((void *)((unsigned long)addr & PAGE_MASK));
> +	void *vaddr = (void *)((unsigned long)addr & PAGE_MASK);
> +
> +	if (!iounmap_allowed(vaddr))
> +		return;
> +
> +	if (is_vmalloc_addr(vaddr))
> +		vunmap(vaddr);
>  }
>  EXPORT_SYMBOL(iounmap);
> -- 
> 2.35.3
> 
>
Christoph Hellwig June 8, 2022, 6:10 a.m. UTC | #2
Looks good:

Reviewed-by: Christoph Hellwig <hch@lst.de>

Patch

diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index b76379628a02..db5b890eaff7 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -964,6 +964,32 @@  static inline void iounmap(volatile void __iomem *addr)
 #elif defined(CONFIG_GENERIC_IOREMAP)
 #include <linux/pgtable.h>
 
+/*
+ * Arch code can implement the following two hooks when using GENERIC_IOREMAP
+ * ioremap_allowed() return a bool,
+ *   - true means continue to remap
+ *   - false means skip remap and return directly
+ * iounmap_allowed() return a bool,
+ *   - true means continue to vunmap
+ *   - false means skip vunmap and return directly
+ */
+#ifndef ioremap_allowed
+#define ioremap_allowed ioremap_allowed
+static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
+				   unsigned long prot)
+{
+	return true;
+}
+#endif
+
+#ifndef iounmap_allowed
+#define iounmap_allowed iounmap_allowed
+static inline bool iounmap_allowed(void *addr)
+{
+	return true;
+}
+#endif
+
 void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
 			   unsigned long prot);
 void iounmap(volatile void __iomem *addr);
diff --git a/mm/ioremap.c b/mm/ioremap.c
index e1d008e8f87f..8652426282cc 100644
--- a/mm/ioremap.c
+++ b/mm/ioremap.c
@@ -28,6 +28,9 @@  void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
 	phys_addr -= offset;
 	size = PAGE_ALIGN(size + offset);
 
+	if (!ioremap_allowed(phys_addr, size, prot))
+		return NULL;
+
 	area = get_vm_area_caller(size, VM_IOREMAP,
 			__builtin_return_address(0));
 	if (!area)
@@ -47,6 +50,12 @@  EXPORT_SYMBOL(ioremap_prot);
 
 void iounmap(volatile void __iomem *addr)
 {
-	vunmap((void *)((unsigned long)addr & PAGE_MASK));
+	void *vaddr = (void *)((unsigned long)addr & PAGE_MASK);
+
+	if (!iounmap_allowed(vaddr))
+		return;
+
+	if (is_vmalloc_addr(vaddr))
+		vunmap(vaddr);
 }
 EXPORT_SYMBOL(iounmap);
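
For the unmap side, a purely hypothetical sketch of an arch override;
FIXED_IO_START/FIXED_IO_END are made-up names for a window of permanent
I/O mappings, not an existing API:

#define iounmap_allowed iounmap_allowed
static inline bool iounmap_allowed(void *addr)
{
        unsigned long va = (unsigned long)addr;

        /* Keep permanent I/O mappings alive; refuse to tear them down. */
        if (va >= FIXED_IO_START && va < FIXED_IO_END)
                return false;

        return true;
}

When an architecture defines neither hook, the #ifndef fallbacks in
asm-generic/io.h always return true, so existing GENERIC_IOREMAP users keep
the current behaviour.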