Message ID | 20250320084428.51151-1-cuiyunhui@bytedance.com (mailing list archive) |
---|---|
State | New |
Headers | show |
Series | riscv: introduce the ioremap_prot() function | expand |
Context | Check | Description |
---|---|---|
bjorn/pre-ci_am | fail | Failed to apply series |
On Thu, Mar 20, 2025, at 09:44, Yunhui Cui wrote: > It's advisable to avoid mapping memory with the non-cache attribute. > This is because issues may arise when the same physical address is > mapped as both cacheable and non-cacheable simultaneously, such as > in the case of hardware prefetching. > > Signed-off-by: Yunhui Cui <cuiyunhui@bytedance.com> Makes sense to me. Ideally we'd have the check in generic_ioremap_prot(), but I believe this would break on (at least) x86 because of legacy callers of ioremap() on memory. > diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h > index a0e51840b9db..736c5557bd06 100644 > --- a/arch/riscv/include/asm/io.h > +++ b/arch/riscv/include/asm/io.h > @@ -133,6 +133,8 @@ __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw()) > #define outsq(addr, buffer, count) __outsq(PCI_IOBASE + (addr), buffer, count) > #endif > > +#define ioremap_prot ioremap_prot > + > #include <asm-generic/io.h> > > #ifdef CONFIG_MMU This feels slightly wrong to me, the "#define foo foo" method is normally used to override a declaration or inline function with another one, but this one only overrides the implementation, not the declaration. I see the same is done on arc, arm64, parisc, powerpc, s390, sh and xtensa, so we can keep this one as well, but it would be nice to change all of these to a less surprising approach. Maybe we should just remove these macros from asm/io.h and the trivial wrapper from mm/ioremap.c, and instead change the other architectures that have GENERIC_IOREMAP to use #define ioremap_prot generic_ioremap_prot It seems this would be only csky, hexagon, (some) loongarch and openrisc. Arnd
> +{ > + phys_addr_t addr = PFN_PHYS(pfn); > + > + /* avoid false positives for bogus PFNs, see comment in pfn_valid() */ > + if (PHYS_PFN(addr) != pfn) > + return 0; > + > + return memblock_is_map_memory(addr); > +} > +EXPORT_SYMBOL(pfn_is_map_memory); This should not be exported or even exposed. Please just open code it in the only caller. > +void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, > + pgprot_t pgprot) > +{ > + /* Don't allow RAM to be mapped. */ > + if (WARN_ON(pfn_is_map_memory(__phys_to_pfn(phys_addr)))) > + return NULL; Or just use pfn_valid as that's what we usually use for that check. Or am I missing something specific here? It would also be nice to just lift this to common code.
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h index a0e51840b9db..736c5557bd06 100644 --- a/arch/riscv/include/asm/io.h +++ b/arch/riscv/include/asm/io.h @@ -133,6 +133,8 @@ __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw()) #define outsq(addr, buffer, count) __outsq(PCI_IOBASE + (addr), buffer, count) #endif +#define ioremap_prot ioremap_prot + #include <asm-generic/io.h> #ifdef CONFIG_MMU diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index 125f5ecd9565..181d2d3a0922 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -202,6 +202,8 @@ static __always_inline void *pfn_to_kaddr(unsigned long pfn) return __va(pfn << PAGE_SHIFT); } +int pfn_is_map_memory(unsigned long pfn); + #endif /* __ASSEMBLY__ */ #define virt_addr_valid(vaddr) ({ \ diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile index b916a68d324a..58a75f7d66e9 100644 --- a/arch/riscv/mm/Makefile +++ b/arch/riscv/mm/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o pgtable.o tlbflush.o obj-y += cacheflush.o obj-y += context.o obj-y += pmem.o +obj-y += ioremap.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_PTDUMP) += ptdump.o diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 3ec9bfaa088a..74ad96e973a4 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -92,6 +92,18 @@ static void __init zone_sizes_init(void) free_area_init(max_zone_pfns); } +int pfn_is_map_memory(unsigned long pfn) +{ + phys_addr_t addr = PFN_PHYS(pfn); + + /* avoid false positives for bogus PFNs, see comment in pfn_valid() */ + if (PHYS_PFN(addr) != pfn) + return 0; + + return memblock_is_map_memory(addr); +} +EXPORT_SYMBOL(pfn_is_map_memory); + #if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM) #define LOG2_SZ_1K ilog2(SZ_1K) diff --git a/arch/riscv/mm/ioremap.c b/arch/riscv/mm/ioremap.c new file mode 100644 index 000000000000..037f87dfe88d --- /dev/null +++ 
b/arch/riscv/mm/ioremap.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/mm.h> +#include <linux/io.h> + +void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, + pgprot_t pgprot) +{ + /* Don't allow RAM to be mapped. */ + if (WARN_ON(pfn_is_map_memory(__phys_to_pfn(phys_addr)))) + return NULL; + + return generic_ioremap_prot(phys_addr, size, pgprot); +} +EXPORT_SYMBOL(ioremap_prot);
It's advisable to avoid mapping memory with the non-cache attribute. This is because issues may arise when the same physical address is mapped as both cacheable and non-cacheable simultaneously, such as in the case of hardware prefetching. Signed-off-by: Yunhui Cui <cuiyunhui@bytedance.com> --- arch/riscv/include/asm/io.h | 2 ++ arch/riscv/include/asm/page.h | 2 ++ arch/riscv/mm/Makefile | 1 + arch/riscv/mm/init.c | 12 ++++++++++++ arch/riscv/mm/ioremap.c | 15 +++++++++++++++ 5 files changed, 32 insertions(+) create mode 100644 arch/riscv/mm/ioremap.c