Message ID | 20201215182805.53913-3-ubizjak@gmail.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | x86/KVM/VMX: Introduce and use try_cmpxchg64() | expand |
On Tue, Dec 15, 2020 at 7:28 PM Uros Bizjak <ubizjak@gmail.com> wrote: > > Add arch_try_cmpxchg64(), similar to arch_try_cmpxchg(), that > operates with 64 bit operands. This function provides the same > interface for 32 bit and 64 bit targets. > > Signed-off-by: Uros Bizjak <ubizjak@gmail.com> > Cc: Thomas Gleixner <tglx@linutronix.de> > Cc: Ingo Molnar <mingo@redhat.com> > Cc: Borislav Petkov <bp@alien8.de> > Cc: "H. Peter Anvin" <hpa@zytor.com> > --- > arch/x86/include/asm/cmpxchg_32.h | 62 ++++++++++++++++++++++++++----- > arch/x86/include/asm/cmpxchg_64.h | 6 +++ > 2 files changed, 59 insertions(+), 9 deletions(-) > > diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h > index 0a7fe0321613..8dcde400244e 100644 > --- a/arch/x86/include/asm/cmpxchg_32.h > +++ b/arch/x86/include/asm/cmpxchg_32.h > @@ -35,15 +35,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 value) > : "memory"); > } > > -#ifdef CONFIG_X86_CMPXCHG64 Oops, I didn't notice that I had left a reversed #ifdef condition in the patch (to test 32 bit target without X86_CMPXCHG64). Obviously, CONFIG_X86_CMPXCHG64 has to be defined to use CMPXCHG8B, so please use #ifdef here. Uros. 
> -#define arch_cmpxchg64(ptr, o, n) \ > - ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ > - (unsigned long long)(n))) > -#define arch_cmpxchg64_local(ptr, o, n) \ > - ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \ > - (unsigned long long)(n))) > -#endif > - > static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new) > { > u64 prev; > @@ -71,6 +62,39 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new) > } > > #ifndef CONFIG_X86_CMPXCHG64 > +#define arch_cmpxchg64(ptr, o, n) \ > + ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ > + (unsigned long long)(n))) > +#define arch_cmpxchg64_local(ptr, o, n) \ > + ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \ > + (unsigned long long)(n))) > + > +#define __raw_try_cmpxchg64(_ptr, _pold, _new, lock) \ > +({ \ > + bool success; \ > + __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \ > + __typeof__(*(_ptr)) __old = *_old; \ > + __typeof__(*(_ptr)) __new = (_new); \ > + asm volatile(lock "cmpxchg8b %1" \ > + CC_SET(z) \ > + : CC_OUT(z) (success), \ > + "+m" (*_ptr), \ > + "+A" (__old) \ > + : "b" ((unsigned int)__new), \ > + "c" ((unsigned int)(__new>>32)) \ > + : "memory"); \ > + if (unlikely(!success)) \ > + *_old = __old; \ > + likely(success); \ > +}) > + > +#define __try_cmpxchg64(ptr, pold, new) \ > + __raw_try_cmpxchg64((ptr), (pold), (new), LOCK_PREFIX) > + > +#define arch_try_cmpxchg64(ptr, pold, new) \ > + __try_cmpxchg64((ptr), (pold), (new)) > + > +#else > + > /* > * Building a kernel capable running on 80386 and 80486. It may be necessary > * to simulate the cmpxchg8b on the 80386 and 80486 CPU. 
> @@ -108,6 +132,26 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new) > : "memory"); \ > __ret; }) > > +#define arch_try_cmpxchg64(ptr, po, n) \ > +({ \ > + bool success; \ > + __typeof__(ptr) _old = (__typeof__(ptr))(po); \ > + __typeof__(*(ptr)) __old = *_old; \ > + __typeof__(*(ptr)) __new = (n); \ > + alternative_io(LOCK_PREFIX_HERE \ > + "call cmpxchg8b_emu", \ > + "lock; cmpxchg8b (%%esi)" , \ > + X86_FEATURE_CX8, \ > + "+A" (__old), \ > + "S" ((ptr)), \ > + "b" ((unsigned int)__new), \ > + "c" ((unsigned int)(__new>>32)) \ > + : "memory"); \ > + success = (__old == *_old); \ > + if (unlikely(!success)) \ > + *_old = __old; \ > + likely(success); \ > +}) > #endif > > #define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX8) > diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h > index 072e5459fe2f..250187ac8248 100644 > --- a/arch/x86/include/asm/cmpxchg_64.h > +++ b/arch/x86/include/asm/cmpxchg_64.h > @@ -19,6 +19,12 @@ static inline void set_64bit(volatile u64 *ptr, u64 val) > arch_cmpxchg_local((ptr), (o), (n)); \ > }) > > +#define arch_try_cmpxchg64(ptr, po, n) \ > +({ \ > + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ > + arch_try_cmpxchg((ptr), (po), (n)); \ > +}) > + > #define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX16) > > #endif /* _ASM_X86_CMPXCHG_64_H */ > -- > 2.26.2 >
Hi Uros, Thank you for the patch! Yet something to improve: [auto build test ERROR on tip/master] [also build test ERROR on next-20201215] [cannot apply to tip/x86/core kvm/linux-next v5.10] [If your patch is applied to the wrong git tree, kindly drop us a note. And when submitting patch, we suggest to use '--base' as documented in https://git-scm.com/docs/git-format-patch] url: https://github.com/0day-ci/linux/commits/Uros-Bizjak/x86-KVM-VMX-Introduce-and-use-try_cmpxchg64/20201216-024049 base: https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git d1c29f5debd4633eb0e9ea1bc00aaad48b077a9b config: i386-tinyconfig (attached as .config) compiler: gcc-9 (Debian 9.3.0-15) 9.3.0 reproduce (this is a W=1 build): # https://github.com/0day-ci/linux/commit/60a11e7e63e120b5fd41b5346cf5a05ea71c7cb2 git remote add linux-review https://github.com/0day-ci/linux git fetch --no-tags linux-review Uros-Bizjak/x86-KVM-VMX-Introduce-and-use-try_cmpxchg64/20201216-024049 git checkout 60a11e7e63e120b5fd41b5346cf5a05ea71c7cb2 # save the attached .config to linux build tree make W=1 ARCH=i386 If you fix the issue, kindly add following tag as appropriate Reported-by: kernel test robot <lkp@intel.com> All errors (new ones prefixed by >>): ld: arch/x86/events/core.o: in function `x86_perf_event_update': >> core.c:(.text+0x847): undefined reference to `cmpxchg8b_emu' ld: kernel/sched/clock.o: in function `sched_clock_local.constprop.0': >> clock.c:(.text+0x1b4): undefined reference to `cmpxchg8b_emu' ld: kernel/sched/clock.o: in function `sched_clock_cpu': clock.c:(.text+0x293): undefined reference to `cmpxchg8b_emu' >> ld: clock.c:(.text+0x2e3): undefined reference to `cmpxchg8b_emu' ld: kernel/events/core.o: in function `perf_swevent_set_period': core.c:(.text+0x8cbb): undefined reference to `cmpxchg8b_emu' ld: fs/inode.o:inode.c:(.text+0x10ca): more undefined references to `cmpxchg8b_emu' follow --- 0-DAY CI Kernel Test Service, Intel Corporation 
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h index 0a7fe0321613..8dcde400244e 100644 --- a/arch/x86/include/asm/cmpxchg_32.h +++ b/arch/x86/include/asm/cmpxchg_32.h @@ -35,15 +35,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 value) : "memory"); } -#ifdef CONFIG_X86_CMPXCHG64 -#define arch_cmpxchg64(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ - (unsigned long long)(n))) -#define arch_cmpxchg64_local(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \ - (unsigned long long)(n))) -#endif - static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new) { u64 prev; @@ -71,6 +62,39 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new) } #ifndef CONFIG_X86_CMPXCHG64 +#define arch_cmpxchg64(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ + (unsigned long long)(n))) +#define arch_cmpxchg64_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \ + (unsigned long long)(n))) + +#define __raw_try_cmpxchg64(_ptr, _pold, _new, lock) \ +({ \ + bool success; \ + __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \ + __typeof__(*(_ptr)) __old = *_old; \ + __typeof__(*(_ptr)) __new = (_new); \ + asm volatile(lock "cmpxchg8b %1" \ + CC_SET(z) \ + : CC_OUT(z) (success), \ + "+m" (*_ptr), \ + "+A" (__old) \ + : "b" ((unsigned int)__new), \ + "c" ((unsigned int)(__new>>32)) \ + : "memory"); \ + if (unlikely(!success)) \ + *_old = __old; \ + likely(success); \ +}) + +#define __try_cmpxchg64(ptr, pold, new) \ + __raw_try_cmpxchg64((ptr), (pold), (new), LOCK_PREFIX) + +#define arch_try_cmpxchg64(ptr, pold, new) \ + __try_cmpxchg64((ptr), (pold), (new)) + +#else + /* * Building a kernel capable running on 80386 and 80486. It may be necessary * to simulate the cmpxchg8b on the 80386 and 80486 CPU. 
@@ -108,6 +132,26 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new) : "memory"); \ __ret; }) +#define arch_try_cmpxchg64(ptr, po, n) \ +({ \ + bool success; \ + __typeof__(ptr) _old = (__typeof__(ptr))(po); \ + __typeof__(*(ptr)) __old = *_old; \ + __typeof__(*(ptr)) __new = (n); \ + alternative_io(LOCK_PREFIX_HERE \ + "call cmpxchg8b_emu", \ + "lock; cmpxchg8b (%%esi)" , \ + X86_FEATURE_CX8, \ + "+A" (__old), \ + "S" ((ptr)), \ + "b" ((unsigned int)__new), \ + "c" ((unsigned int)(__new>>32)) \ + : "memory"); \ + success = (__old == *_old); \ + if (unlikely(!success)) \ + *_old = __old; \ + likely(success); \ +}) #endif #define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX8) diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h index 072e5459fe2f..250187ac8248 100644 --- a/arch/x86/include/asm/cmpxchg_64.h +++ b/arch/x86/include/asm/cmpxchg_64.h @@ -19,6 +19,12 @@ static inline void set_64bit(volatile u64 *ptr, u64 val) arch_cmpxchg_local((ptr), (o), (n)); \ }) +#define arch_try_cmpxchg64(ptr, po, n) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + arch_try_cmpxchg((ptr), (po), (n)); \ +}) + #define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX16) #endif /* _ASM_X86_CMPXCHG_64_H */
Add arch_try_cmpxchg64(), similar to arch_try_cmpxchg(), that operates with 64 bit operands. This function provides the same interface for 32 bit and 64 bit targets. Signed-off-by: Uros Bizjak <ubizjak@gmail.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Ingo Molnar <mingo@redhat.com> Cc: Borislav Petkov <bp@alien8.de> Cc: "H. Peter Anvin" <hpa@zytor.com> --- arch/x86/include/asm/cmpxchg_32.h | 62 ++++++++++++++++++++++++++----- arch/x86/include/asm/cmpxchg_64.h | 6 +++ 2 files changed, 59 insertions(+), 9 deletions(-)