Message ID | 20250122013438.731416-2-kevinloughlin@google.com
---|---
State | New
Series | KVM: SEV: Prefer WBNOINVD over WBINVD for cache maintenance efficiency
On Wed, Jan 22, 2025 at 01:34:37AM +0000, Kevin Loughlin wrote:
> In line with WBINVD usage, add WBNOINVD helper functions. For the
> wbnoinvd() helper, fall back to WBINVD if X86_FEATURE_WBNOINVD is not
> present.
>
> Signed-off-by: Kevin Loughlin <kevinloughlin@google.com>
> ---
>  arch/x86/include/asm/smp.h           |  7 +++++++
>  arch/x86/include/asm/special_insns.h | 15 ++++++++++++++-
>  arch/x86/lib/cache-smp.c             | 12 ++++++++++++
>  3 files changed, 33 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
> index ca073f40698f..ecf93a243b83 100644
> --- a/arch/x86/include/asm/smp.h
> +++ b/arch/x86/include/asm/smp.h
> @@ -112,6 +112,7 @@ void native_play_dead(void);
>  void play_dead_common(void);
>  void wbinvd_on_cpu(int cpu);
>  int wbinvd_on_all_cpus(void);
> +int wbnoinvd_on_all_cpus(void);
>
>  void smp_kick_mwait_play_dead(void);
>
> @@ -160,6 +161,12 @@ static inline int wbinvd_on_all_cpus(void)
>  	return 0;
>  }
>
> +static inline int wbnoinvd_on_all_cpus(void)
> +{
> +	wbnoinvd();
> +	return 0;
> +}
> +
>  static inline struct cpumask *cpu_llc_shared_mask(int cpu)
>  {
>  	return (struct cpumask *)cpumask_of(0);
> diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
> index 03e7c2d49559..94640c3491d7 100644
> --- a/arch/x86/include/asm/special_insns.h
> +++ b/arch/x86/include/asm/special_insns.h
> @@ -117,7 +117,20 @@ static inline void wrpkru(u32 pkru)
>
>  static __always_inline void wbinvd(void)
>  {
> -	asm volatile("wbinvd": : :"memory");
> +	asm volatile("wbinvd" : : : "memory");
> +}
> +
> +/*
> + * Cheaper version of wbinvd(). Call when caches
> + * need to be written back but not invalidated.
> + */
> +static __always_inline void wbnoinvd(void)
> +{
> +	/*
> +	 * Use the compatible but more destructive "invalidate"
> +	 * variant when no-invalidate is unavailable.
> +	 */
> +	alternative("wbinvd", "wbnoinvd", X86_FEATURE_WBNOINVD);

The minimal version of binutils the kernel supports is 2.25, which doesn't
know about WBNOINVD. I think you need to do something like:

	alternative("wbinvd", ".byte 0xf3; wbinvd", X86_FEATURE_WBNOINVD);

Or propose bumping the minimal binutils version.
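For reference, a minimal sketch of the helper written the way the reviewer suggests, with WBNOINVD spelled out as its raw encoding (an 0xf3 prefix in front of WBINVD) so that binutils 2.25 can still assemble it. This illustrates the suggestion only; it is not the patch as applied.

static __always_inline void wbnoinvd(void)
{
	/*
	 * WBNOINVD is encoded as F3 0F 09, i.e. WBINVD (0F 09) with an F3
	 * prefix, so emitting ".byte 0xf3; wbinvd" avoids requiring an
	 * assembler that knows the WBNOINVD mnemonic.
	 */
	alternative("wbinvd", ".byte 0xf3; wbinvd", X86_FEATURE_WBNOINVD);
}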
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index ca073f40698f..ecf93a243b83 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -112,6 +112,7 @@ void native_play_dead(void);
 void play_dead_common(void);
 void wbinvd_on_cpu(int cpu);
 int wbinvd_on_all_cpus(void);
+int wbnoinvd_on_all_cpus(void);
 
 void smp_kick_mwait_play_dead(void);
 
@@ -160,6 +161,12 @@ static inline int wbinvd_on_all_cpus(void)
 	return 0;
 }
 
+static inline int wbnoinvd_on_all_cpus(void)
+{
+	wbnoinvd();
+	return 0;
+}
+
 static inline struct cpumask *cpu_llc_shared_mask(int cpu)
 {
 	return (struct cpumask *)cpumask_of(0);
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 03e7c2d49559..94640c3491d7 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -117,7 +117,20 @@ static inline void wrpkru(u32 pkru)
 
 static __always_inline void wbinvd(void)
 {
-	asm volatile("wbinvd": : :"memory");
+	asm volatile("wbinvd" : : : "memory");
+}
+
+/*
+ * Cheaper version of wbinvd(). Call when caches
+ * need to be written back but not invalidated.
+ */
+static __always_inline void wbnoinvd(void)
+{
+	/*
+	 * Use the compatible but more destructive "invalidate"
+	 * variant when no-invalidate is unavailable.
+	 */
+	alternative("wbinvd", "wbnoinvd", X86_FEATURE_WBNOINVD);
 }
 
 static inline unsigned long __read_cr4(void)
diff --git a/arch/x86/lib/cache-smp.c b/arch/x86/lib/cache-smp.c
index 7af743bd3b13..7ac5cca53031 100644
--- a/arch/x86/lib/cache-smp.c
+++ b/arch/x86/lib/cache-smp.c
@@ -20,3 +20,15 @@ int wbinvd_on_all_cpus(void)
 	return 0;
 }
 EXPORT_SYMBOL(wbinvd_on_all_cpus);
+
+static void __wbnoinvd(void *dummy)
+{
+	wbnoinvd();
+}
+
+int wbnoinvd_on_all_cpus(void)
+{
+	on_each_cpu(__wbnoinvd, NULL, 1);
+	return 0;
+}
+EXPORT_SYMBOL(wbnoinvd_on_all_cpus);
In line with WBINVD usage, add WBNOINVD helper functions. For the
wbnoinvd() helper, fall back to WBINVD if X86_FEATURE_WBNOINVD is not
present.

Signed-off-by: Kevin Loughlin <kevinloughlin@google.com>
---
 arch/x86/include/asm/smp.h           |  7 +++++++
 arch/x86/include/asm/special_insns.h | 15 ++++++++++++++-
 arch/x86/lib/cache-smp.c             | 12 ++++++++++++
 3 files changed, 33 insertions(+), 1 deletion(-)
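As context for how the new helper is intended to be consumed (the KVM SEV call-site conversions come later in the series), a hypothetical caller would look like the sketch below; sev_writeback_caches_example() is an invented name used purely for illustration.

/* Hypothetical example; the function name is invented for this sketch. */
static void sev_writeback_caches_example(void)
{
	/*
	 * Only a writeback of dirty cache lines is needed here, so prefer
	 * the WBNOINVD-backed helper; it transparently falls back to WBINVD
	 * on CPUs without X86_FEATURE_WBNOINVD.
	 */
	wbnoinvd_on_all_cpus();
}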