Message ID | 20250227014858.3244505-4-seanjc@google.com (mailing list archive) |
---|---|
State | New |
Series | x86, KVM: Optimize SEV cache flushing |
On Wed, 2025-02-26 at 17:48 -0800, Sean Christopherson wrote:
> From: Kevin Loughlin <kevinloughlin@google.com>
>
> In line with WBINVD usage, add WBNOINVD helper functions. Fall back to
> WBINVD (via alternative()) if WBNOINVD isn't supported, as WBINVD provides
> a superset of functionality, just more slowly.
>
> Note, alternative() ensures compatibility with early boot code as needed.
>
> Signed-off-by: Kevin Loughlin <kevinloughlin@google.com>
> Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
> [sean: massage changelog and comments, use ASM_WBNOINVD and _ASM_BYTES]
> Signed-off-by: Sean Christopherson <seanjc@google.com>

Reviewed-by: Kai Huang <kai.huang@intel.com>

[...]

> static __always_inline void wbinvd(void)
> {
> -	asm volatile("wbinvd": : :"memory");
> +	asm volatile("wbinvd" : : : "memory");
> +}
>

Nit: this is not related, though.
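[Editor's note: for readers unfamiliar with alternative(), the kernel rewrites the instruction in place when alternatives are applied during boot, based on the CPU feature bit, so the fallback costs no runtime branch. Below is a rough sketch of the behavior the patched helper ends up with; it is illustrative only, not the patch's actual code, and wbnoinvd_sketch is a made-up name.]

```c
#include <asm/cpufeature.h>

/*
 * Illustrative sketch of what wbnoinvd() amounts to after alternatives
 * patching.  The real helper emits a single instruction with no runtime
 * branch; the feature check below stands in for the boot-time patching.
 */
static __always_inline void wbnoinvd_sketch(void)
{
	if (boot_cpu_has(X86_FEATURE_WBNOINVD))
		/* WBNOINVD: F3 0F 09, i.e. a REP-prefixed WBINVD (0F 09) */
		asm volatile(".byte 0xf3, 0x0f, 0x09" ::: "memory");
	else
		/* Fallback: write back *and* invalidate all cache lines */
		asm volatile("wbinvd" ::: "memory");
}
```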
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index ee61e322e2a1..d4c50128aa6c 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -112,6 +112,7 @@ void native_play_dead(void);
 void play_dead_common(void);
 void wbinvd_on_cpu(int cpu);
 void wbinvd_on_all_cpus(void);
+void wbnoinvd_on_all_cpus(void);
 
 void smp_kick_mwait_play_dead(void);
 
@@ -159,6 +160,11 @@ static inline void wbinvd_on_all_cpus(void)
 	wbinvd();
 }
 
+static inline void wbnoinvd_on_all_cpus(void)
+{
+	wbnoinvd();
+}
+
 static inline struct cpumask *cpu_llc_shared_mask(int cpu)
 {
 	return (struct cpumask *)cpumask_of(0);
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 03e7c2d49559..962477a83584 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -117,7 +117,24 @@ static inline void wrpkru(u32 pkru)
 
 static __always_inline void wbinvd(void)
 {
-	asm volatile("wbinvd": : :"memory");
+	asm volatile("wbinvd" : : : "memory");
+}
+
+/* Instruction encoding provided for binutils backwards compatibility. */
+#define ASM_WBNOINVD _ASM_BYTES(0xf3,0x0f,0x09)
+
+/*
+ * Cheaper version of wbinvd(). Call when caches need to be written back but
+ * not invalidated.
+ */
+static __always_inline void wbnoinvd(void)
+{
+	/*
+	 * If WBNOINVD is unavailable, fall back to the compatible but
+	 * more destructive WBINVD (which still writes the caches back
+	 * but also invalidates them).
+	 */
+	alternative("wbinvd", ASM_WBNOINVD, X86_FEATURE_WBNOINVD);
 }
 
 static inline unsigned long __read_cr4(void)
diff --git a/arch/x86/lib/cache-smp.c b/arch/x86/lib/cache-smp.c
index 079c3f3cd32c..1789db5d8825 100644
--- a/arch/x86/lib/cache-smp.c
+++ b/arch/x86/lib/cache-smp.c
@@ -19,3 +19,14 @@ void wbinvd_on_all_cpus(void)
 	on_each_cpu(__wbinvd, NULL, 1);
 }
 EXPORT_SYMBOL(wbinvd_on_all_cpus);
+
+static void __wbnoinvd(void *dummy)
+{
+	wbnoinvd();
+}
+
+void wbnoinvd_on_all_cpus(void)
+{
+	on_each_cpu(__wbnoinvd, NULL, 1);
+}
+EXPORT_SYMBOL(wbnoinvd_on_all_cpus);
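[Editor's note: for context on the series title ("x86, KVM: Optimize SEV cache flushing"), the payoff is at call sites that currently broadcast WBINVD merely to get dirty encrypted lines written back. The sketch below shows what such a caller might look like; the function name is invented for illustration, and the actual conversions are presumably done by later patches in the series, not this one.]

```c
#include <asm/smp.h>

/*
 * Hypothetical SEV-style reclaim path, sketched for illustration.
 * Dirty (encrypted) cache lines must reach memory, but nothing requires
 * invalidating them, so the cheaper write-back-only flush suffices.
 * CPUs without WBNOINVD transparently execute WBINVD instead.
 */
static void sev_writeback_caches_sketch(void)
{
	wbnoinvd_on_all_cpus();
}
```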