[5/5] mm: ioremap: rename ioremap_page_range() to ioremap_range()

Message ID 20220606083909.363350-6-bhe@redhat.com (mailing list archive)
State New
Series Cleanup patches of vmalloc

Commit Message

Baoquan He June 6, 2022, 8:39 a.m. UTC
The current ioremap_page_range() only maps an IO address to a kernel
virtual address; no struct page pointer is passed in and no page
handling is involved. So rename it accordingly.

The renaming is done with the below command:
sed -i "s/ioremap_page_range/ioremap_range/g" `git grep -l ioremap_page_range`

Signed-off-by: Baoquan He <bhe@redhat.com>
---
 Documentation/ia64/aliasing.rst  | 2 +-
 arch/alpha/kernel/proto.h        | 2 +-
 arch/arc/mm/ioremap.c            | 2 +-
 arch/arm/mm/ioremap.c            | 6 +++---
 arch/arm64/mm/ioremap.c          | 2 +-
 arch/hexagon/mm/ioremap.c        | 2 +-
 arch/ia64/mm/ioremap.c           | 2 +-
 arch/mips/loongson64/init.c      | 2 +-
 arch/mips/mm/ioremap.c           | 2 +-
 arch/openrisc/mm/ioremap.c       | 2 +-
 arch/parisc/mm/ioremap.c         | 2 +-
 arch/powerpc/kernel/isa-bridge.c | 2 +-
 arch/powerpc/kernel/pci_64.c     | 2 +-
 arch/powerpc/mm/ioremap.c        | 2 +-
 arch/s390/pci/pci.c              | 2 +-
 arch/sh/kernel/cpu/sh4/sq.c      | 2 +-
 arch/sh/mm/ioremap.c             | 2 +-
 arch/x86/mm/ioremap.c            | 2 +-
 arch/xtensa/mm/ioremap.c         | 2 +-
 drivers/pci/pci.c                | 2 +-
 include/linux/io.h               | 4 ++--
 mm/ioremap.c                     | 2 +-
 mm/vmalloc.c                     | 2 +-
 23 files changed, 26 insertions(+), 26 deletions(-)

Comments

Christoph Hellwig June 7, 2022, 5:27 a.m. UTC | #1
On Mon, Jun 06, 2022 at 04:39:09PM +0800, Baoquan He wrote:
> The current ioremap_page_range() only maps an IO address to a kernel
> virtual address; no struct page pointer is passed in and no page
> handling is involved. So rename it accordingly.
> 
> The renaming is done with the below command:
> sed -i "s/ioremap_page_range/ioremap_range/g" `git grep -l ioremap_page_range`

This creates a lot of churn without much of a benefit.  If you want
to get rid of the name, please convert most architectures to the
generic ioremap code first so that all these callers go away.
Baoquan He June 7, 2022, 7:53 a.m. UTC | #2
On 06/06/22 at 10:27pm, Christoph Hellwig wrote:
> On Mon, Jun 06, 2022 at 04:39:09PM +0800, Baoquan He wrote:
> > The current ioremap_page_range() only maps an IO address to a kernel
> > virtual address; no struct page pointer is passed in and no page
> > handling is involved. So rename it accordingly.
> > 
> > The renaming is done with the below command:
> > sed -i "s/ioremap_page_range/ioremap_range/g" `git grep -l ioremap_page_range`
> 
> This creates a lot of churn without much of a benefit.  If you want
> to get rid of the name, please convert most architectures to the
> generic ioremap code first so that all these callers go away.

Thanks for checking.

Yeah, I didn't manually adjust the indentation of the call sites after
doing the replacement with the sed command. Still, the name
ioremap_page_range() is misleading relative to its implementation.

Converting most architectures to the generic ioremap code sounds like
a good idea, just like what the arm/arm64 conversion patchset is doing.
From a quick look, not every place can be converted, e.g. the call in
pci_remap_iospace() under drivers/. So what I need to do is:

1) drop this patch;
2) convert as many architectures to the generic ioremap code as possible
   (see the rough sketch below);
3) rename the remaining ioremap_page_range() call sites to ioremap_range().
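
To make sure I get 2) right: for an architecture without special caching
or fixmap requirements, the conversion would roughly be selecting
GENERIC_IOREMAP in the arch Kconfig and supplying the protection bits
that the generic ioremap() in asm-generic/io.h passes to ioremap_prot()
in mm/ioremap.c. A minimal sketch (the <arch> paths and the exact
_PAGE_* bits below are only placeholders, the real values are per arch):

	/* arch/<arch>/Kconfig: select GENERIC_IOREMAP */

	/* arch/<arch>/include/asm/io.h */
	/* protection bits the generic ioremap() hands to ioremap_prot() */
	#define _PAGE_IOREMAP	(_PAGE_PRESENT | _PAGE_NOCACHE)	/* placeholder */

	#include <asm-generic/io.h>

The arch's own get_vm_area()/ioremap_page_range() open coding can then
be removed, and the generic ioremap_prot() becomes the only ioremap-side
caller of ioremap_page_range() on that architecture. pci_remap_iospace()
in drivers/pci/pci.c still calls it directly, hence step 3).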

Please check if this is what you are suggesting.

Thanks
Baoquan

Patch

diff --git a/Documentation/ia64/aliasing.rst b/Documentation/ia64/aliasing.rst
index a08b36aba015..1736be3e9820 100644
--- a/Documentation/ia64/aliasing.rst
+++ b/Documentation/ia64/aliasing.rst
@@ -165,7 +165,7 @@  ioremap()
 
 	If the granule contains non-WB memory, but we can cover the
 	region safely with kernel page table mappings, we can use
-	ioremap_page_range() as most other architectures do.
+	ioremap_range() as most other architectures do.
 
 	Failing all of the above, we have to fall back to a UC mapping.
 
diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h
index 5816a31c1b38..7a1aad5f7777 100644
--- a/arch/alpha/kernel/proto.h
+++ b/arch/alpha/kernel/proto.h
@@ -187,7 +187,7 @@  __alpha_remap_area_pages(unsigned long address, unsigned long phys_addr,
 
 	prot = __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE
 			| _PAGE_KWE | flags);
-	return ioremap_page_range(address, address + size, phys_addr, prot);
+	return ioremap_range(address, address + size, phys_addr, prot);
 }
 
 /* irq.c */
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
index 0ee75aca6e10..1b9d6ba7e46a 100644
--- a/arch/arc/mm/ioremap.c
+++ b/arch/arc/mm/ioremap.c
@@ -85,7 +85,7 @@  void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 		return NULL;
 	area->phys_addr = paddr;
 	vaddr = (unsigned long)area->addr;
-	if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
+	if (ioremap_range(vaddr, vaddr + size, paddr, prot)) {
 		vunmap((void __force *)vaddr);
 		return NULL;
 	}
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 576c0e6c92fc..6c942409f188 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -110,7 +110,7 @@  void __init add_static_vm_early(struct static_vm *svm)
 int ioremap_page(unsigned long virt, unsigned long phys,
 		 const struct mem_type *mtype)
 {
-	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
+	return ioremap_range(virt, virt + PAGE_SIZE, phys,
 				  __pgprot(mtype->prot_pte));
 }
 EXPORT_SYMBOL(ioremap_page);
@@ -312,7 +312,7 @@  static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 		err = remap_area_sections(addr, pfn, size, type);
 	} else
 #endif
-		err = ioremap_page_range(addr, addr + size, paddr,
+		err = ioremap_range(addr, addr + size, paddr,
 					 __pgprot(type->prot_pte));
 
 	if (err) {
@@ -473,7 +473,7 @@  int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
 	if (res->end > IO_SPACE_LIMIT)
 		return -EINVAL;
 
-	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
+	return ioremap_range(vaddr, vaddr + resource_size(res), phys_addr,
 				  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
 }
 EXPORT_SYMBOL(pci_remap_iospace);
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index b21f91cd830d..573621e7b173 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -52,7 +52,7 @@  static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
 	addr = (unsigned long)area->addr;
 	area->phys_addr = phys_addr;
 
-	err = ioremap_page_range(addr, addr + size, phys_addr, prot);
+	err = ioremap_range(addr, addr + size, phys_addr, prot);
 	if (err) {
 		vunmap((void *)addr);
 		return NULL;
diff --git a/arch/hexagon/mm/ioremap.c b/arch/hexagon/mm/ioremap.c
index 255c5b1ee1a7..90a6cd75d1e1 100644
--- a/arch/hexagon/mm/ioremap.c
+++ b/arch/hexagon/mm/ioremap.c
@@ -30,7 +30,7 @@  void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
 	area = get_vm_area(size, VM_IOREMAP);
 	addr = (unsigned long)area->addr;
 
-	if (ioremap_page_range(addr, addr+size, phys_addr, prot)) {
+	if (ioremap_range(addr, addr+size, phys_addr, prot)) {
 		vunmap((void *)addr);
 		return NULL;
 	}
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 55fd3eb753ff..ef2e52591247 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -86,7 +86,7 @@  ioremap (unsigned long phys_addr, unsigned long size)
 
 		area->phys_addr = phys_addr;
 		addr = (void __iomem *) area->addr;
-		if (ioremap_page_range((unsigned long) addr,
+		if (ioremap_range((unsigned long) addr,
 				(unsigned long) addr + size, phys_addr, prot)) {
 			vunmap((void __force *) addr);
 			return NULL;
diff --git a/arch/mips/loongson64/init.c b/arch/mips/loongson64/init.c
index ee8de1735b7c..53f148522663 100644
--- a/arch/mips/loongson64/init.c
+++ b/arch/mips/loongson64/init.c
@@ -162,7 +162,7 @@  static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, resource_size_
 
 	vaddr = PCI_IOBASE + range->io_start;
 
-	ioremap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
+	ioremap_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
 
 	return 0;
 }
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index b6dad2fd5575..b1cfeee648a5 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -101,7 +101,7 @@  void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size,
 	vaddr = (unsigned long)area->addr;
 
 	flags |= _PAGE_GLOBAL | _PAGE_PRESENT | __READABLE | __WRITEABLE;
-	if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
+	if (ioremap_range(vaddr, vaddr + size, phys_addr,
 			__pgprot(flags))) {
 		free_vm_area(area);
 		return NULL;
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index daae13a76743..f5dc775f5715 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -64,7 +64,7 @@  void __iomem *__ref ioremap(phys_addr_t addr, unsigned long size)
 		fixmaps_used += (size >> PAGE_SHIFT);
 	}
 
-	if (ioremap_page_range(v, v + size, p,
+	if (ioremap_range(v, v + size, p,
 			__pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_CI))) {
 		if (likely(mem_init_done))
 			vfree(area->addr);
diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c
index 345ff0b66499..74c940a70b82 100644
--- a/arch/parisc/mm/ioremap.c
+++ b/arch/parisc/mm/ioremap.c
@@ -80,7 +80,7 @@  void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
 		return NULL;
 
 	addr = (void __iomem *) area->addr;
-	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
+	if (ioremap_range((unsigned long)addr, (unsigned long)addr + size,
 			       phys_addr, pgprot)) {
 		vunmap(addr);
 		return NULL;
diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c
index dc746611ebc0..0eb13278a96e 100644
--- a/arch/powerpc/kernel/isa-bridge.c
+++ b/arch/powerpc/kernel/isa-bridge.c
@@ -46,7 +46,7 @@  static void remap_isa_base(phys_addr_t pa, unsigned long size)
 	WARN_ON_ONCE(size & ~PAGE_MASK);
 
 	if (slab_is_available()) {
-		if (ioremap_page_range(ISA_IO_BASE, ISA_IO_BASE + size, pa,
+		if (ioremap_range(ISA_IO_BASE, ISA_IO_BASE + size, pa,
 				pgprot_noncached(PAGE_KERNEL)))
 			vunmap_range(ISA_IO_BASE, ISA_IO_BASE + size);
 	} else {
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 19b03ddf5631..7adee5cc58a4 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -138,7 +138,7 @@  void __iomem *ioremap_phb(phys_addr_t paddr, unsigned long size)
 		return NULL;
 
 	addr = (unsigned long)area->addr;
-	if (ioremap_page_range(addr, addr + size, paddr,
+	if (ioremap_range(addr, addr + size, paddr,
 			pgprot_noncached(PAGE_KERNEL))) {
 		vunmap_range(addr, addr + size);
 		return NULL;
diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
index 4f12504fb405..0cac4e5c8a8f 100644
--- a/arch/powerpc/mm/ioremap.c
+++ b/arch/powerpc/mm/ioremap.c
@@ -89,7 +89,7 @@  void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
 	area->phys_addr = pa;
 	va = (unsigned long)area->addr;
 
-	ret = ioremap_page_range(va, va + size, pa, prot);
+	ret = ioremap_range(va, va + size, pa, prot);
 	if (!ret)
 		return (void __iomem *)area->addr + offset;
 
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index bc980fd313d5..f0568be4ac4b 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -252,7 +252,7 @@  static void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot)
 		return NULL;
 
 	vaddr = (unsigned long) area->addr;
-	if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
+	if (ioremap_range(vaddr, vaddr + size, addr, prot)) {
 		free_vm_area(area);
 		return NULL;
 	}
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index a76b94e41e91..a85f0c05e9d6 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -110,7 +110,7 @@  static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
 
 	vma->phys_addr = map->addr;
 
-	if (ioremap_page_range((unsigned long)vma->addr,
+	if (ioremap_range((unsigned long)vma->addr,
 			       (unsigned long)vma->addr + map->size,
 			       vma->phys_addr, prot)) {
 		vunmap(vma->addr);
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index 21342581144d..60a582e5b4f3 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -132,7 +132,7 @@  __ioremap_caller(phys_addr_t phys_addr, unsigned long size,
 	area->phys_addr = phys_addr;
 	orig_addr = addr = (unsigned long)area->addr;
 
-	if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
+	if (ioremap_range(addr, addr + size, phys_addr, pgprot)) {
 		vunmap((void *)orig_addr);
 		return NULL;
 	}
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 1ad0228f8ceb..0c245f4eaa60 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -286,7 +286,7 @@  __ioremap_caller(resource_size_t phys_addr, unsigned long size,
 	if (memtype_kernel_map_sync(phys_addr, size, pcm))
 		goto err_free_area;
 
-	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
+	if (ioremap_range(vaddr, vaddr + size, phys_addr, prot))
 		goto err_free_area;
 
 	ret_addr = (void __iomem *) (vaddr + offset);
diff --git a/arch/xtensa/mm/ioremap.c b/arch/xtensa/mm/ioremap.c
index a400188c16b9..8e914a16c04f 100644
--- a/arch/xtensa/mm/ioremap.c
+++ b/arch/xtensa/mm/ioremap.c
@@ -33,7 +33,7 @@  static void __iomem *xtensa_ioremap(unsigned long paddr, unsigned long size,
 	vaddr = (unsigned long)area->addr;
 	area->phys_addr = paddr;
 
-	err = ioremap_page_range(vaddr, vaddr + size, paddr, prot);
+	err = ioremap_range(vaddr, vaddr + size, paddr, prot);
 
 	if (err) {
 		vunmap((void *)vaddr);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index cfaf40a540a8..d77219be295d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -4234,7 +4234,7 @@  int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
 	if (res->end > IO_SPACE_LIMIT)
 		return -EINVAL;
 
-	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
+	return ioremap_range(vaddr, vaddr + resource_size(res), phys_addr,
 				  pgprot_device(PAGE_KERNEL));
 #else
 	/*
diff --git a/include/linux/io.h b/include/linux/io.h
index 5fc800390fe4..80974fa37d53 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -21,10 +21,10 @@  void __ioread32_copy(void *to, const void __iomem *from, size_t count);
 void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
 
 #ifdef CONFIG_MMU
-int ioremap_page_range(unsigned long addr, unsigned long end,
+int ioremap_range(unsigned long addr, unsigned long end,
 		       phys_addr_t phys_addr, pgprot_t prot);
 #else
-static inline int ioremap_page_range(unsigned long addr, unsigned long end,
+static inline int ioremap_range(unsigned long addr, unsigned long end,
 				     phys_addr_t phys_addr, pgprot_t prot)
 {
 	return 0;
diff --git a/mm/ioremap.c b/mm/ioremap.c
index 5fe598ecd9b7..d08b30db332b 100644
--- a/mm/ioremap.c
+++ b/mm/ioremap.c
@@ -33,7 +33,7 @@  void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
 		return NULL;
 	vaddr = (unsigned long)area->addr;
 
-	if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
+	if (ioremap_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
 		free_vm_area(area);
 		return NULL;
 	}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 860ed9986775..32a18ae16bf5 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -312,7 +312,7 @@  static int vmap_range_noflush(unsigned long addr, unsigned long end,
 	return err;
 }
 
-int ioremap_page_range(unsigned long addr, unsigned long end,
+int ioremap_range(unsigned long addr, unsigned long end,
 		phys_addr_t phys_addr, pgprot_t prot)
 {
 	int err;