@@ -62,7 +62,7 @@ __XCHG_CASE( , , mb_, 64, dmb ish, nop, , a, l, "memory")
#undef __XCHG_CASE
#define __XCHG_GEN(sfx) \
-static __always_inline unsigned long __xchg##sfx(unsigned long x, \
+static __always_inline unsigned long __arch_xchg##sfx(unsigned long x, \
volatile void *ptr, \
int size) \
{ \
@@ -93,7 +93,7 @@ __XCHG_GEN(_mb)
({ \
__typeof__(*(ptr)) __ret; \
__ret = (__typeof__(*(ptr))) \
- __xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
+ __arch_xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
__ret; \
})
__xchg will be used for the non-atomic xchg macro.

Signed-off-by: Andrzej Hajda <andrzej.hajda@intel.com>
---
 arch/arm64/include/asm/cmpxchg.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
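
For context, the rename only frees up the plain __xchg identifier on arm64; callers keep going through the arch wrappers, which now expand to __arch_xchg##sfx. Below is a minimal sketch of the kind of generic, non-atomic exchange helper that could then claim the __xchg name. This is an illustrative assumption, not part of this diff: the helper's name reuse is what the commit message anticipates, but its exact shape is not defined here.

/*
 * Illustrative sketch only: a generic, non-atomic exchange helper of
 * the kind the freed-up __xchg name could serve. Plain load and store,
 * no barriers or LL/SC, so it must not be used on concurrently
 * accessed data. Not part of this patch.
 */
#define __xchg(ptr, val)					\
({								\
	__auto_type __xchg_ptr = (ptr);				\
	__auto_type __xchg_old = *__xchg_ptr;			\
								\
	*__xchg_ptr = (val);					\
	__xchg_old;	/* return the previous value */		\
})

Since the arm64 internals are now named __arch_xchg##sfx, such a generic helper would no longer collide with the arch-internal name.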