| Message ID | 20250320174118.39173-3-Jonathan.Cameron@huawei.com |
|---|---|
| State | New |
| Headers | show |
| Series | Cache coherency management subsystem \| expand |
On Thu, Mar 20, 2025 at 05:41:14PM +0000, Jonathan Cameron wrote: > +struct system_cache_flush_method { > + int (*invalidate_memregion)(int res_desc, > + phys_addr_t start, size_t len); > +}; [...] > +int cpu_cache_invalidate_memregion(int res_desc, phys_addr_t start, size_t len) > +{ > + guard(spinlock_irqsave)(&scfm_lock); > + if (!scfm_data) > + return -EOPNOTSUPP; > + > + return scfm_data->invalidate_memregion(res_desc, start, len); > +} WBINVD on x86 deals with the CPU caches as well. Even the API naming in Linux implies CPU caches. IIUC, devices registering to the above on Arm SoCs can only deal with system caches. Is it sufficient?
On 2025/3/29 2:22, Catalin Marinas wrote: > On Thu, Mar 20, 2025 at 05:41:14PM +0000, Jonathan Cameron wrote: >> +struct system_cache_flush_method { >> + int (*invalidate_memregion)(int res_desc, >> + phys_addr_t start, size_t len); >> +}; > [...] >> +int cpu_cache_invalidate_memregion(int res_desc, phys_addr_t start, size_t len) >> +{ >> + guard(spinlock_irqsave)(&scfm_lock); >> + if (!scfm_data) >> + return -EOPNOTSUPP; >> + >> + return scfm_data->invalidate_memregion(res_desc, start, len); >> +} > > WBINVD on x86 deals with the CPU caches as well. Even the API naming in > Linux implies CPU caches. IIUC, devices registering to the above on Arm > SoCs can only deal with system caches. Is it sufficient? > The device driver that registers this method should handle this. If the hardware supports maintaining coherency across the system — for example, if on a system cache invalidation the hardware is also able to invalidate the involved cachelines in all the subordinate caches (L1/L2/etc., by back-invalidate snoop or other means) — then software does not need to invalidate the non-system caches explicitly. Otherwise the driver needs to invalidate the non-system caches explicitly in its scfm_data::invalidate_memregion() method. Here in the generic code we simply don't know the capability of the hardware. Thanks.
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 940343beb3d4..11ecd20ec3b8 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -21,6 +21,7 @@ config ARM64 select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE select ARCH_HAS_CACHE_LINE_SIZE select ARCH_HAS_CC_PLATFORM + select ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION select ARCH_HAS_CRC32 select ARCH_HAS_CRC_T10DIF if KERNEL_MODE_NEON select ARCH_HAS_CURRENT_STACK_POINTER diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 28ab96e808ef..b8eb8738c965 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -139,6 +139,20 @@ static __always_inline void icache_inval_all_pou(void) dsb(ish); } +#ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION + +#include <linux/memregion.h> + +struct system_cache_flush_method { + int (*invalidate_memregion)(int res_desc, + phys_addr_t start, size_t len); +}; + +void arm64_set_sys_cache_flush_method(const struct system_cache_flush_method *method); +void arm64_clr_sys_cache_flush_method(const struct system_cache_flush_method *method); + +#endif /* CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION */ + #include <asm-generic/cacheflush.h> #endif /* __ASM_CACHEFLUSH_H */ diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index 013eead9b695..d822406d925d 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c @@ -10,6 +10,7 @@ #include <linux/mm.h> #include <linux/libnvdimm.h> #include <linux/pagemap.h> +#include <linux/memregion.h> #include <asm/cacheflush.h> #include <asm/cache.h> @@ -100,3 +101,44 @@ void arch_invalidate_pmem(void *addr, size_t size) } EXPORT_SYMBOL_GPL(arch_invalidate_pmem); #endif + +#ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION + +static const struct system_cache_flush_method *scfm_data; +DEFINE_SPINLOCK(scfm_lock); + +void arm64_set_sys_cache_flush_method(const struct system_cache_flush_method *method) +{ + guard(spinlock_irqsave)(&scfm_lock); + 
if (scfm_data || !method || !method->invalidate_memregion) + return; + + scfm_data = method; +} +EXPORT_SYMBOL_GPL(arm64_set_sys_cache_flush_method); + +void arm64_clr_sys_cache_flush_method(const struct system_cache_flush_method *method) +{ + guard(spinlock_irqsave)(&scfm_lock); + if (scfm_data && scfm_data == method) + scfm_data = NULL; +} + +int cpu_cache_invalidate_memregion(int res_desc, phys_addr_t start, size_t len) +{ + guard(spinlock_irqsave)(&scfm_lock); + if (!scfm_data) + return -EOPNOTSUPP; + + return scfm_data->invalidate_memregion(res_desc, start, len); +} +EXPORT_SYMBOL_NS_GPL(cpu_cache_invalidate_memregion, "DEVMEM"); + +bool cpu_cache_has_invalidate_memregion(void) +{ + guard(spinlock_irqsave)(&scfm_lock); + return !!scfm_data; +} +EXPORT_SYMBOL_NS_GPL(cpu_cache_has_invalidate_memregion, "DEVMEM"); + +#endif /* CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION */