[1/2] sh: Relax inline assembly constraints

Message ID 1232486078-11227-1-git-send-email-matt@console-pimps.org (mailing list archive)
State Accepted
Delegated to: Paul Mundt

Commit Message

Matt Fleming Jan. 20, 2009, 9:14 p.m. UTC
When dereferencing the memory address contained in a register and
modifying the value at that memory address, the register should not be
listed in the inline asm outputs. The value at the memory address is an
output (which is taken care of with the "memory" clobber), not the register.

Signed-off-by: Matt Fleming <matt@console-pimps.org>
---
 arch/sh/include/asm/bitops-llsc.h  |   72 ++++++++++++++++++------------------
 arch/sh/include/asm/cmpxchg-llsc.h |   38 +++++++++---------
 2 files changed, 55 insertions(+), 55 deletions(-)
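
To illustrate the constraint change, here is the set_bit() pattern before and after the patch, excerpted from the diff below (variable declarations elided). Before, the address register was redundantly listed as an output ("=r" (a)) and tied back to itself as an input ("1" (a)); after, it is a plain input, since only the memory it points to is modified, and that is already described by the "memory" clobber:

	/* Before: address register listed as an output and tied to itself */
	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! set_bit		\n\t"
		"or		%3, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp), "=r" (a)
		: "1" (a), "r" (mask)
		: "t", "memory"
	);

	/* After: address register is a plain input; the store through it
	 * is already covered by the "memory" clobber */
	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! set_bit		\n\t"
		"or		%2, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (a), "r" (mask)
		: "t", "memory"
	);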

Comments

Paul Mundt Jan. 29, 2009, 6:39 a.m. UTC | #1
On Tue, Jan 20, 2009 at 09:14:37PM +0000, Matt Fleming wrote:
> When dereferencing the memory address contained in a register and
> modifying the value at that memory address, the register should not be
> listed in the inline asm outputs. The value at the memory address is an
> output (which is taken care of with the "memory" clobber), not the register.
> 
> Signed-off-by: Matt Fleming <matt@console-pimps.org>

On Tue, Jan 20, 2009 at 09:14:38PM +0000, Matt Fleming wrote:
> Now that atomic_t is a generic opaque type for all architectures, it is
> unwise to use intimate knowledge of its internals when manipulating it.
> 
> Instead of relying on the "counter" member being at offset 0 from the
> beginning of an atomic_t, explicitly reference the member. This guards
> us from any changes to the layout of the beginning of the atomic_t type.
> 
> Signed-off-by: Matt Fleming <matt@console-pimps.org>

Applied, thanks.
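
The second patch quoted above (2/2) replaces direct casts of an atomic_t pointer with an explicit reference to its counter member. A minimal sketch of the idea, patterned on the SH movli.l/movco.l atomics (the exact function bodies in the applied patch may differ):

static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! atomic_add		\n\t"
		"add		%1, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (i), "r" (&v->counter)	/* not "(long *)v" */
		: "t", "memory");
}

Taking &v->counter keeps working even if fields are ever added before counter, whereas casting v itself silently assumes the counter sits at offset 0.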

Patch

diff --git a/arch/sh/include/asm/bitops-llsc.h b/arch/sh/include/asm/bitops-llsc.h
index 1d2fc0b..d8328be 100644
--- a/arch/sh/include/asm/bitops-llsc.h
+++ b/arch/sh/include/asm/bitops-llsc.h
@@ -1,7 +1,7 @@ 
 #ifndef __ASM_SH_BITOPS_LLSC_H
 #define __ASM_SH_BITOPS_LLSC_H
 
-static inline void set_bit(int nr, volatile void * addr)
+static inline void set_bit(int nr, volatile void *addr)
 {
 	int	mask;
 	volatile unsigned int *a = addr;
@@ -13,16 +13,16 @@  static inline void set_bit(int nr, volatile void * addr)
 	__asm__ __volatile__ (
 		"1:						\n\t"
 		"movli.l	@%1, %0	! set_bit		\n\t"
-		"or		%3, %0				\n\t"
+		"or		%2, %0				\n\t"
 		"movco.l	%0, @%1				\n\t"
 		"bf		1b				\n\t"
-		: "=&z" (tmp), "=r" (a)
-		: "1" (a), "r" (mask)
+		: "=&z" (tmp)
+		: "r" (a), "r" (mask)
 		: "t", "memory"
 	);
 }
 
-static inline void clear_bit(int nr, volatile void * addr)
+static inline void clear_bit(int nr, volatile void *addr)
 {
 	int	mask;
 	volatile unsigned int *a = addr;
@@ -34,16 +34,16 @@  static inline void clear_bit(int nr, volatile void * addr)
 	__asm__ __volatile__ (
 		"1:						\n\t"
 		"movli.l	@%1, %0	! clear_bit		\n\t"
-		"and		%3, %0				\n\t"
+		"and		%2, %0				\n\t"
 		"movco.l	%0, @%1				\n\t"
 		"bf		1b				\n\t"
-		: "=&z" (tmp), "=r" (a)
-		: "1" (a), "r" (~mask)
+		: "=&z" (tmp)
+		: "r" (a), "r" (~mask)
 		: "t", "memory"
 	);
 }
 
-static inline void change_bit(int nr, volatile void * addr)
+static inline void change_bit(int nr, volatile void *addr)
 {
 	int	mask;
 	volatile unsigned int *a = addr;
@@ -55,16 +55,16 @@  static inline void change_bit(int nr, volatile void * addr)
 	__asm__ __volatile__ (
 		"1:						\n\t"
 		"movli.l	@%1, %0	! change_bit		\n\t"
-		"xor		%3, %0				\n\t"
+		"xor		%2, %0				\n\t"
 		"movco.l	%0, @%1				\n\t"
 		"bf		1b				\n\t"
-		: "=&z" (tmp), "=r" (a)
-		: "1" (a), "r" (mask)
+		: "=&z" (tmp)
+		: "r" (a), "r" (mask)
 		: "t", "memory"
 	);
 }
 
-static inline int test_and_set_bit(int nr, volatile void * addr)
+static inline int test_and_set_bit(int nr, volatile void *addr)
 {
 	int	mask, retval;
 	volatile unsigned int *a = addr;
@@ -75,21 +75,21 @@  static inline int test_and_set_bit(int nr, volatile void * addr)
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%1, %0	! test_and_set_bit	\n\t"
-		"mov		%0, %2				\n\t"
-		"or		%4, %0				\n\t"
-		"movco.l	%0, @%1				\n\t"
+		"movli.l	@%2, %0	! test_and_set_bit	\n\t"
+		"mov		%0, %1				\n\t"
+		"or		%3, %0				\n\t"
+		"movco.l	%0, @%2				\n\t"
 		"bf		1b				\n\t"
-		"and		%4, %2				\n\t"
-		: "=&z" (tmp), "=r" (a), "=&r" (retval)
-		: "1" (a), "r" (mask)
+		"and		%3, %1				\n\t"
+		: "=&z" (tmp), "=&r" (retval)
+		: "r" (a), "r" (mask)
 		: "t", "memory"
 	);
 
 	return retval != 0;
 }
 
-static inline int test_and_clear_bit(int nr, volatile void * addr)
+static inline int test_and_clear_bit(int nr, volatile void *addr)
 {
 	int	mask, retval;
 	volatile unsigned int *a = addr;
@@ -100,22 +100,22 @@  static inline int test_and_clear_bit(int nr, volatile void * addr)
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%1, %0	! test_and_clear_bit	\n\t"
-		"mov		%0, %2				\n\t"
-		"and		%5, %0				\n\t"
-		"movco.l	%0, @%1				\n\t"
+		"movli.l	@%2, %0	! test_and_clear_bit	\n\t"
+		"mov		%0, %1				\n\t"
+		"and		%4, %0				\n\t"
+		"movco.l	%0, @%2				\n\t"
 		"bf		1b				\n\t"
-		"and		%4, %2				\n\t"
+		"and		%3, %1				\n\t"
 		"synco						\n\t"
-		: "=&z" (tmp), "=r" (a), "=&r" (retval)
-		: "1" (a), "r" (mask), "r" (~mask)
+		: "=&z" (tmp), "=&r" (retval)
+		: "r" (a), "r" (mask), "r" (~mask)
 		: "t", "memory"
 	);
 
 	return retval != 0;
 }
 
-static inline int test_and_change_bit(int nr, volatile void * addr)
+static inline int test_and_change_bit(int nr, volatile void *addr)
 {
 	int	mask, retval;
 	volatile unsigned int *a = addr;
@@ -126,15 +126,15 @@  static inline int test_and_change_bit(int nr, volatile void * addr)
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%1, %0	! test_and_change_bit	\n\t"
-		"mov		%0, %2				\n\t"
-		"xor		%4, %0				\n\t"
-		"movco.l	%0, @%1				\n\t"
+		"movli.l	@%2, %0	! test_and_change_bit	\n\t"
+		"mov		%0, %1				\n\t"
+		"xor		%3, %0				\n\t"
+		"movco.l	%0, @%2				\n\t"
 		"bf		1b				\n\t"
-		"and		%4, %2				\n\t"
+		"and		%3, %1				\n\t"
 		"synco						\n\t"
-		: "=&z" (tmp), "=r" (a), "=&r" (retval)
-		: "1" (a), "r" (mask)
+		: "=&z" (tmp), "=&r" (retval)
+		: "r" (a), "r" (mask)
 		: "t", "memory"
 	);
 
diff --git a/arch/sh/include/asm/cmpxchg-llsc.h b/arch/sh/include/asm/cmpxchg-llsc.h
index aee3bf2..0fac3da 100644
--- a/arch/sh/include/asm/cmpxchg-llsc.h
+++ b/arch/sh/include/asm/cmpxchg-llsc.h
@@ -8,14 +8,14 @@  static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 
 	__asm__ __volatile__ (
 		"1:					\n\t"
-		"movli.l	@%1, %0	! xchg_u32	\n\t"
-		"mov		%0, %2			\n\t"
-		"mov		%4, %0			\n\t"
-		"movco.l	%0, @%1			\n\t"
+		"movli.l	@%2, %0	! xchg_u32	\n\t"
+		"mov		%0, %1			\n\t"
+		"mov		%3, %0			\n\t"
+		"movco.l	%0, @%2			\n\t"
 		"bf		1b			\n\t"
 		"synco					\n\t"
-		: "=&z"(tmp), "=r" (m), "=&r" (retval)
-		: "1" (m), "r" (val)
+		: "=&z"(tmp), "=&r" (retval)
+		: "r" (m), "r" (val)
 		: "t", "memory"
 	);
 
@@ -29,14 +29,14 @@  static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
 
 	__asm__ __volatile__ (
 		"1:					\n\t"
-		"movli.l	@%1, %0	! xchg_u8	\n\t"
-		"mov		%0, %2			\n\t"
-		"mov		%4, %0			\n\t"
-		"movco.l	%0, @%1			\n\t"
+		"movli.l	@%2, %0	! xchg_u8	\n\t"
+		"mov		%0, %1			\n\t"
+		"mov		%3, %0			\n\t"
+		"movco.l	%0, @%2			\n\t"
 		"bf		1b			\n\t"
 		"synco					\n\t"
-		: "=&z"(tmp), "=r" (m), "=&r" (retval)
-		: "1" (m), "r" (val & 0xff)
+		: "=&z"(tmp), "=&r" (retval)
+		: "r" (m), "r" (val & 0xff)
 		: "t", "memory"
 	);
 
@@ -51,17 +51,17 @@  __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%1, %0	! __cmpxchg_u32		\n\t"
-		"mov		%0, %2				\n\t"
-		"cmp/eq		%2, %4				\n\t"
+		"movli.l	@%2, %0	! __cmpxchg_u32		\n\t"
+		"mov		%0, %1				\n\t"
+		"cmp/eq		%1, %3				\n\t"
 		"bf		2f				\n\t"
-		"mov		%5, %0				\n\t"
+		"mov		%3, %0				\n\t"
 		"2:						\n\t"
-		"movco.l	%0, @%1				\n\t"
+		"movco.l	%0, @%2				\n\t"
 		"bf		1b				\n\t"
 		"synco						\n\t"
-		: "=&z" (tmp), "=r" (m), "=&r" (retval)
-		: "1" (m), "r" (old), "r" (new)
+		: "=&z" (tmp), "=&r" (retval)
+		: "r" (m), "r" (old), "r" (new)
 		: "t", "memory"
 	);