[RFC,2/6] arm64: Support ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION

Message ID 20250320174118.39173-3-Jonathan.Cameron@huawei.com
State New
Series Cache coherency management subsystem

Commit Message

Jonathan Cameron March 20, 2025, 5:41 p.m. UTC
From: Yicong Yang <yangyicong@hisilicon.com>

ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION provides the mechanism for
invalidating certain memory regions in a cache-incoherent manner.
It is currently used for NVDIMM and CXL memory. On arm64 this is
mainly done by a system component and is implementation defined per
spec. Provide a method for platforms to register their own invalidate
routine and implement ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION.

Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
---
 arch/arm64/Kconfig                  |  1 +
 arch/arm64/include/asm/cacheflush.h | 14 ++++++++++
 arch/arm64/mm/flush.c               | 42 +++++++++++++++++++++++++++++
 3 files changed, 57 insertions(+)
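
For context, a platform driver would hook into the registration
interface added by this patch roughly as follows. This is a sketch
only, not part of the series: all foo_* names are illustrative, and
the hardware invalidate operation is an assumption.

#include <linux/platform_device.h>
#include <asm/cacheflush.h>

/* Hypothetical system-cache controller driver. */
static int foo_invalidate_memregion(int res_desc, phys_addr_t start,
				    size_t len)
{
	/* Ask the (assumed) hardware to invalidate [start, start + len). */
	return foo_hw_invalidate(start, len);
}

static const struct system_cache_flush_method foo_scfm = {
	.invalidate_memregion = foo_invalidate_memregion,
};

static int foo_probe(struct platform_device *pdev)
{
	arm64_set_sys_cache_flush_method(&foo_scfm);
	return 0;
}

static void foo_remove(struct platform_device *pdev)
{
	arm64_clr_sys_cache_flush_method(&foo_scfm);
}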

Comments

Catalin Marinas March 28, 2025, 6:22 p.m. UTC | #1
On Thu, Mar 20, 2025 at 05:41:14PM +0000, Jonathan Cameron wrote:
> +struct system_cache_flush_method {
> +	int (*invalidate_memregion)(int res_desc,
> +				    phys_addr_t start, size_t len);
> +};
[...]
> +int cpu_cache_invalidate_memregion(int res_desc, phys_addr_t start, size_t len)
> +{
> +	guard(spinlock_irqsave)(&scfm_lock);
> +	if (!scfm_data)
> +		return -EOPNOTSUPP;
> +
> +	return scfm_data->invalidate_memregion(res_desc, start, len);
> +}

WBINVD on x86 deals with the CPU caches as well. Even the API naming in
Linux implies CPU caches. IIUC, devices registering to the above on Arm
SoCs can only deal with system caches. Is it sufficient?

Yicong Yang March 29, 2025, 7:14 a.m. UTC | #2
On 2025/3/29 2:22, Catalin Marinas wrote:
> On Thu, Mar 20, 2025 at 05:41:14PM +0000, Jonathan Cameron wrote:
>> +struct system_cache_flush_method {
>> +	int (*invalidate_memregion)(int res_desc,
>> +				    phys_addr_t start, size_t len);
>> +};
> [...]
>> +int cpu_cache_invalidate_memregion(int res_desc, phys_addr_t start, size_t len)
>> +{
>> +	guard(spinlock_irqsave)(&scfm_lock);
>> +	if (!scfm_data)
>> +		return -EOPNOTSUPP;
>> +
>> +	return scfm_data->invalidate_memregion(res_desc, start, len);
>> +}
> 
> WBINVD on x86 deals with the CPU caches as well. Even the API naming in
> Linux implies CPU caches. IIUC, devices registering to the above on Arm
> SoCs can only deal with system caches. Is it sufficient?
> 

The device driver that registers this method should handle this. If
the hardware supports maintaining coherency across the system, for
example if on a system cache invalidation it can also invalidate the
involved cachelines in all the subordinate caches (L1/L2/etc, by
back-invalidate snoops or other means), then software doesn't need to
invalidate the non-system caches explicitly. Otherwise the driver
needs to invalidate the non-system caches explicitly in its
scfm_data::invalidate_memregion() method. Here in the generic code we
simply don't know the capability of the hardware.

Thanks.
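
What Yicong describes might look roughly like the sketch below for a
platform whose hardware cannot back-invalidate CPU caches. The foo_*
names are hypothetical, and it assumes the region is still covered by
the kernel linear map so cache maintenance by VA is possible.

static int foo_invalidate_memregion(int res_desc, phys_addr_t start,
				    size_t len)
{
	unsigned long vstart = (unsigned long)phys_to_virt(start);

	/*
	 * The hardware cannot back-invalidate CPU caches, so invalidate
	 * them by VA first (only valid while the region is in the
	 * linear map), then have the system cache do its part.
	 */
	dcache_inval_poc(vstart, vstart + len);

	return foo_system_cache_invalidate(start, len); /* hypothetical */
}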

Patch

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 940343beb3d4..11ecd20ec3b8 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -21,6 +21,7 @@ config ARM64
 	select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
 	select ARCH_HAS_CACHE_LINE_SIZE
 	select ARCH_HAS_CC_PLATFORM
+	select ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
 	select ARCH_HAS_CRC32
 	select ARCH_HAS_CRC_T10DIF if KERNEL_MODE_NEON
 	select ARCH_HAS_CURRENT_STACK_POINTER
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 28ab96e808ef..b8eb8738c965 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -139,6 +139,20 @@ static __always_inline void icache_inval_all_pou(void)
 	dsb(ish);
 }
 
+#ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
+
+#include <linux/memregion.h>
+
+struct system_cache_flush_method {
+	int (*invalidate_memregion)(int res_desc,
+				    phys_addr_t start, size_t len);
+};
+
+void arm64_set_sys_cache_flush_method(const struct system_cache_flush_method *method);
+void arm64_clr_sys_cache_flush_method(const struct system_cache_flush_method *method);
+
+#endif /* CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION */
+
 #include <asm-generic/cacheflush.h>
 
 #endif /* __ASM_CACHEFLUSH_H */
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 013eead9b695..d822406d925d 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -10,6 +10,7 @@ 
 #include <linux/mm.h>
 #include <linux/libnvdimm.h>
 #include <linux/pagemap.h>
+#include <linux/memregion.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cache.h>
@@ -100,3 +101,44 @@ void arch_invalidate_pmem(void *addr, size_t size)
 }
 EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
 #endif
+
+#ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
+
+static const struct system_cache_flush_method *scfm_data;
+DEFINE_SPINLOCK(scfm_lock);
+
+void arm64_set_sys_cache_flush_method(const struct system_cache_flush_method *method)
+{
+	guard(spinlock_irqsave)(&scfm_lock);
+	if (scfm_data || !method || !method->invalidate_memregion)
+		return;
+
+	scfm_data = method;
+}
+EXPORT_SYMBOL_GPL(arm64_set_sys_cache_flush_method);
+
+void arm64_clr_sys_cache_flush_method(const struct system_cache_flush_method *method)
+{
+	guard(spinlock_irqsave)(&scfm_lock);
+	if (scfm_data && scfm_data == method)
+		scfm_data = NULL;
+}
+
+int cpu_cache_invalidate_memregion(int res_desc, phys_addr_t start, size_t len)
+{
+	guard(spinlock_irqsave)(&scfm_lock);
+	if (!scfm_data)
+		return -EOPNOTSUPP;
+
+	return scfm_data->invalidate_memregion(res_desc, start, len);
+}
+EXPORT_SYMBOL_NS_GPL(cpu_cache_invalidate_memregion, "DEVMEM");
+
+bool cpu_cache_has_invalidate_memregion(void)
+{
+	guard(spinlock_irqsave)(&scfm_lock);
+	return !!scfm_data;
+}
+EXPORT_SYMBOL_NS_GPL(cpu_cache_has_invalidate_memregion, "DEVMEM");
+
+#endif /* CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION */
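
For reference, a consumer (e.g. a CXL region teardown or nvdimm
secure-erase path) would use the interface along the lines below. Note
that this series extends the existing cpu_cache_invalidate_memregion()
signature with start/len; the foo_* caller is hypothetical.

#include <linux/ioport.h>
#include <linux/memregion.h>

static int foo_flush_region(struct resource *res)
{
	if (!cpu_cache_has_invalidate_memregion())
		return -ENXIO;

	return cpu_cache_invalidate_memregion(IORES_DESC_CXL, res->start,
					      resource_size(res));
}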