@@ -15,7 +15,7 @@
unsigned long __xchg_u32(volatile u32 *m, u32 new);
void __xchg_called_with_bad_pointer(void);
-static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
+static inline unsigned long __arch_xchg(unsigned long x, __volatile__ void * ptr, int size)
{
switch (size) {
case 4:
@@ -25,7 +25,7 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
return x;
}
-#define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
+#define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__arch_xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
/* Emulate cmpxchg() the same way we emulate atomics,
* by hashing the object address and indexing into an array
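For reference, the emulation scheme that comment describes can be sketched in plain C. The sketch below is illustrative only (userspace pthread mutexes standing in for the kernel's spinlock array, and all names invented), not the sparc32 implementation:

#include <stdint.h>
#include <pthread.h>

#define XCHG_HASH_SIZE 4
static pthread_mutex_t xchg_hash[XCHG_HASH_SIZE] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* Hash the object address into the lock array; shifting out the low
 * alignment bits spreads neighbouring objects across different locks. */
static pthread_mutex_t *xchg_lock(volatile void *ptr)
{
	return &xchg_hash[((uintptr_t)ptr >> 4) % XCHG_HASH_SIZE];
}

static uint32_t xchg_u32_emulated(volatile uint32_t *m, uint32_t new_val)
{
	pthread_mutex_t *lock = xchg_lock((volatile void *)m);
	uint32_t old;

	/* All accessors of *m hash to the same lock, so the
	 * load-and-store pair below is exchange-atomic. */
	pthread_mutex_lock(lock);
	old = *m;
	*m = new_val;
	pthread_mutex_unlock(lock);
	return old;
}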
@@ -55,7 +55,7 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
#define arch_xchg(ptr,x) \
({ __typeof__(*(ptr)) __ret; \
__ret = (__typeof__(*(ptr))) \
- __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
+ __arch_xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
__ret; \
})
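A side note on the macro shape this hunk keeps: the statement expression plus __typeof__ is what lets arch_xchg() hand back a value in the pointee's own type, while sizeof(*(ptr)) picks the width at compile time. A self-contained sketch of the same idiom, assuming GCC extensions (the demo_* names are invented for illustration, and the helper is deliberately non-atomic):

#include <stdio.h>

/* Invented stand-in for __arch_xchg(): size-dispatched, non-atomic. */
static inline unsigned long demo_xchg(unsigned long x, volatile void *ptr,
				      int size)
{
	if (size == 4) {
		volatile unsigned int *p = ptr;
		unsigned int old = *p;

		*p = (unsigned int)x;	/* not atomic; illustration only */
		return old;
	}
	volatile unsigned long *p = ptr;
	unsigned long old = *p;

	*p = x;
	return old;
}

/* Same shape as arch_xchg(): sizeof(*(ptr)) selects the width and
 * __typeof__ casts the result back to the caller's own type. */
#define demo_arch_xchg(ptr, x) ({					\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		demo_xchg((unsigned long)(x), (ptr), sizeof(*(ptr)));	\
	__ret;								\
})

int main(void)
{
	unsigned long v = 42;
	unsigned long old = demo_arch_xchg(&v, 7UL);

	printf("old=%lu, v=%lu\n", old, v);	/* old=42, v=7 */
	return 0;
}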
@@ -87,7 +87,7 @@ xchg16(__volatile__ unsigned short *m, unsigned short val)
return (load32 & mask) >> bit_shift;
}
-static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
+static inline unsigned long __arch_xchg(unsigned long x, __volatile__ void * ptr,
int size)
{
switch (size) {
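The xchg16() context above ends with "(load32 & mask) >> bit_shift", i.e. the halfword result is carved out of the aligned 32-bit word that contains it. A rough userspace sketch of that idea using a C11 compare-and-swap loop; this is an assumption for illustration, not the sparc code (it hardcodes little-endian halfword selection, whereas sparc is big-endian, and aliasing an _Atomic word like this is not portable standard C):

#include <stdint.h>
#include <stdatomic.h>

static uint16_t xchg16_sketch(volatile uint16_t *m, uint16_t val)
{
	uintptr_t addr = (uintptr_t)m;
	_Atomic uint32_t *word = (_Atomic uint32_t *)(addr & ~(uintptr_t)3);
	int bit_shift = (addr & 2) * 8;	/* 0 or 16; little-endian assumed */
	uint32_t mask = (uint32_t)0xffff << bit_shift;
	uint32_t old32 = atomic_load(word);
	uint32_t new32;

	/* Splice the new halfword into the containing word; retry if
	 * another CPU changed the word underneath us. */
	do {
		new32 = (old32 & ~mask) | ((uint32_t)val << bit_shift);
	} while (!atomic_compare_exchange_weak(word, &old32, new32));

	return (old32 & mask) >> bit_shift;
}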
__xchg will be used for a non-atomic xchg macro.

Signed-off-by: Andrzej Hajda <andrzej.hajda@intel.com>
---
 arch/sparc/include/asm/cmpxchg_32.h | 4 ++--
 arch/sparc/include/asm/cmpxchg_64.h | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
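For context on the motivation: freeing the bare __xchg name lets a generic, non-atomic exchange helper take it over. One plausible shape for such a macro, assuming GCC statement expressions (a guess at the idea only, not the definition actually proposed):

/* Hypothetical non-atomic exchange: store val, return the old value,
 * with no atomicity or ordering guarantees whatsoever. */
#define __xchg_nonatomic(ptr, val) ({		\
	__typeof__(ptr) __ptr = (ptr);		\
	__typeof__(*__ptr) __old = *__ptr;	\
	*__ptr = (val);				\
	__old;					\
})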