@@ -22,7 +22,7 @@ extern unsigned long __xchg64(unsigned long, volatile unsigned long *);
 
 /* optimizer better get rid of switch since size is a constant */
 static inline unsigned long
-__xchg(unsigned long x, volatile void *ptr, int size)
+__arch_xchg(unsigned long x, volatile void *ptr, int size)
 {
 	switch (size) {
 #ifdef CONFIG_64BIT
@@ -49,7 +49,7 @@ __xchg(unsigned long x, volatile void *ptr, int size)
 	__typeof__(*(ptr)) __ret;					\
 	__typeof__(*(ptr)) _x_ = (x);					\
 	__ret = (__typeof__(*(ptr)))					\
-		__xchg((unsigned long)_x_, (ptr), sizeof(*(ptr)));	\
+		__arch_xchg((unsigned long)_x_, (ptr), sizeof(*(ptr)));	\
 	__ret;								\
 })
 
__xchg will be used for a non-atomic xchg macro.

Signed-off-by: Andrzej Hajda <andrzej.hajda@intel.com>
---
 arch/parisc/include/asm/cmpxchg.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
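For readers outside the kernel tree, here is a minimal standalone sketch of the pattern this patch renames: a type-generic xchg() macro that preserves the pointee's type via __typeof__ and forwards to a helper that switches on sizeof(). The mimic_* names are hypothetical, the swap is deliberately non-atomic (the kernel's per-size cases call real atomic primitives), and it uses GNU C extensions (statement expressions, __typeof__), so build with GCC or Clang.

#include <stdio.h>

/* Sketch of the size-dispatching helper.  The switch disappears at
 * compile time because `size` always comes from sizeof() and is a
 * constant, which is what the "optimizer better get rid of switch"
 * comment in the patched file is counting on. */
static inline unsigned long
mimic_arch_xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 4: {
		unsigned int old = *(volatile unsigned int *)ptr;
		*(volatile unsigned int *)ptr = (unsigned int)x;
		return old;
	}
	case 8: {
		unsigned long old = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		return old;
	}
	}
	return 0;
}

/* __typeof__ keeps the macro type-generic: the value passes through
 * the helper as unsigned long, but callers get back the pointee's
 * own type. */
#define mimic_xchg(ptr, x)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) _x_ = (x);					\
	__ret = (__typeof__(*(ptr)))					\
		mimic_arch_xchg((unsigned long)_x_, (ptr),		\
				sizeof(*(ptr)));			\
	__ret;								\
})

int main(void)
{
	volatile unsigned int v = 1;
	unsigned int old = mimic_xchg(&v, 2u);

	printf("old=%u new=%u\n", old, v);	/* prints: old=1 new=2 */
	return 0;
}

The sizeof()-based dispatch is what lets one macro serve 32-bit and 64-bit operands without casts at the call site, which is also why the helper must keep an arch-prefixed internal name once a generic __xchg macro exists.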