Add arch_clear_page_uncached_threshold(), which returns a machine
specific size threshold above which clear_page_uncached() would be
used.

The ideal threshold value depends on the CPU model and on where the
performance curves for cached and uncached stores intersect. A safe
value is the LLC size, so we use that of the boot CPU.

Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
 arch/x86/include/asm/cacheinfo.h |  1 +
 arch/x86/kernel/cpu/cacheinfo.c  | 18 ++++++++++++++++++
 arch/x86/kernel/setup.c          | 10 ++++++++++
 3 files changed, 29 insertions(+)

diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h
--- a/arch/x86/include/asm/cacheinfo.h
+++ b/arch/x86/include/asm/cacheinfo.h
@@ -4,5 +4,6 @@
 void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu);
 void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);
+int cacheinfo_lookup_max_size(int cpu);
 #endif /* _ASM_X86_CACHEINFO_H */
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -1033,3 +1033,21 @@ int populate_cache_leaves(unsigned int cpu)
 	return 0;
 }
+
+int cacheinfo_lookup_max_size(int cpu)
+{
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+	struct cacheinfo *max_leaf;
+
+	/* No cacheinfo populated: report 0 rather than deref a NULL leaf. */
+	if (!this_leaf || !this_cpu_ci->num_leaves)
+		return 0;
+
+	/*
+	 * Assume that cache sizes always increase with level, so the
+	 * last leaf is the largest cache, i.e. the LLC.
+	 */
+	max_leaf = this_leaf + this_cpu_ci->num_leaves - 1;
+	return max_leaf->size;
+}
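
The lookup above leans on the leaves in info_list being ordered by cache
level. If that ordering assumption ever needs to go, a full scan over the
leaves is just as cheap at init time. A minimal sketch, not part of this
patch (the function name is hypothetical; it needs linux/cacheinfo.h and
linux/minmax.h):

	/*
	 * Hypothetical alternative: avoid the sortedness assumption by
	 * scanning every leaf and keeping the largest size seen.
	 */
	static unsigned int cacheinfo_scan_max_size(unsigned int cpu)
	{
		struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
		unsigned int i, largest = 0;

		for (i = 0; i < this_cpu_ci->num_leaves; i++)
			largest = max(largest, this_cpu_ci->info_list[i].size);

		return largest;
	}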
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -49,6 +49,7 @@
 #include <asm/thermal.h>
 #include <asm/unwind.h>
 #include <asm/vsyscall.h>
+#include <asm/cacheinfo.h>
 #include <linux/vmalloc.h>
 
 /*
@@ -1250,3 +1251,12 @@ static int __init register_kernel_offset_dumper(void)
 	return 0;
 }
 __initcall(register_kernel_offset_dumper);
+
+/*
+ * Machine-specific size threshold above which clear_page_uncached() is
+ * preferred: use the boot CPU's LLC size as a safe cutover point.
+ */
+unsigned long __init arch_clear_page_uncached_threshold(void)
+{
+	return cacheinfo_lookup_max_size(0);
+}
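
For context, the generic side of this hook is presumably a __weak default
that the x86 definition above overrides; that counterpart is not part of
this patch, and the fallback value below is invented for illustration:

	/*
	 * Presumed generic-side counterpart (sketch only): a weak default
	 * that arch definitions override. The 8MB fallback is made up.
	 */
	unsigned long __init __weak arch_clear_page_uncached_threshold(void)
	{
		return 8 << 20;
	}

A caller in the page-clearing path would then compare the extent being
cleared against this threshold to choose clear_page_uncached() over
clear_page().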