[v2,5/9] LoongArch: Simplify the invtlb wrappers

Message ID 20230624184055.3000636-6-kernel@xen0n.name (mailing list archive)
State New, archived
Series LoongArch: Preliminary ClangBuiltLinux enablement

Commit Message

WANG Xuerui June 24, 2023, 6:40 p.m. UTC
From: WANG Xuerui <git@xen0n.name>

The invtlb instruction has been supported by upstream LoongArch
toolchains from day one, so ditch the raw opcode trickery and just use
plain inline asm for it.

While at it, also make the invtlb asm statements barriers, for proper
modeling of the side effects.

The signatures of the other, more specific invtlb wrappers contain unused
arguments right now, but these are not removed right away, in order to
keep the patch focused. In the meantime, assertions are added to ensure
no accidental misuse happens before the refactor. (The more specific
wrappers cannot re-use the generic invtlb wrapper, because the ISA
manual says $zero shall be used when a particular op does not take
the respective argument; re-using the generic wrapper would mean losing
control over the register usage.)
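
To illustrate the last point with a hypothetical sketch (not part of this
patch): if e.g. invtlb_all() simply called the generic wrapper, the zero
operands would go through "r" constraints, so the compiler could pick any
register that happens to hold 0, and the encoding would not be guaranteed
to use $zero:

	/* Hypothetical reuse of the generic wrapper; not what this patch does. */
	static inline void invtlb_all(u32 op, u32 info, u64 addr)
	{
		invtlb(op, 0, 0);	/* "r"(0): any register holding 0 may be chosen */
	}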

Signed-off-by: WANG Xuerui <git@xen0n.name>
---
 arch/loongarch/include/asm/tlb.h | 39 ++++++++++++++++----------------
 1 file changed, 19 insertions(+), 20 deletions(-)

Comments

Xi Ruoyao June 24, 2023, 6:55 p.m. UTC | #1
On Sun, 2023-06-25 at 02:40 +0800, WANG Xuerui wrote:
> From: WANG Xuerui <git@xen0n.name>
> 
> The invtlb instruction has been supported by upstream LoongArch
> toolchains from day one, so ditch the raw opcode trickery and just use
> plain inline asm for it.
> 
> While at it, also make the invtlb asm statements barriers, for proper
> modeling of the side effects.
> 
> The signature of the other more specific invtlb wrappers contain unused
> arguments right now, but these are not removed right away in order for
> the patch to be focused. In the meantime, assertions are added to ensure
> no accidental misuse happens before the refactor. (The more specific
> wrappers cannot re-use the generic invtlb wrapper, because the ISA
> manual says $zero shall be used in case a particular op does not take
> the respective argument: re-using the generic wrapper would mean losing
> control over the register usage.)
> 
> Signed-off-by: WANG Xuerui <git@xen0n.name>
> ---
>  arch/loongarch/include/asm/tlb.h | 39 ++++++++++++++++----------------
>  1 file changed, 19 insertions(+), 20 deletions(-)
> 
> diff --git a/arch/loongarch/include/asm/tlb.h b/arch/loongarch/include/asm/tlb.h
> index 0dc9ee2b05d2..15750900540c 100644
> --- a/arch/loongarch/include/asm/tlb.h
> +++ b/arch/loongarch/include/asm/tlb.h
> @@ -88,52 +88,51 @@ enum invtlb_ops {
>         INVTLB_GID_ADDR = 0x16,
>  };
>  
> -/*
> - * invtlb op info addr
> - * (0x1 << 26) | (0x24 << 20) | (0x13 << 15) |
> - * (addr << 10) | (info << 5) | op
> - */
>  static inline void invtlb(u32 op, u32 info, u64 addr)

Oh, technically these wrappers should be __always_inline, not just
inline, because they don't work at all if not inlined.  Should we change
them to __always_inline in this patch, by the way?
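
For reference, a minimal sketch of the suggested change (the reviewer's
proposal, not the code as posted); forced inlining means the asm always
sees the caller's op, so, assuming callers pass constants, the "i"
constraint can be satisfied:

	static __always_inline void invtlb(u32 op, u32 info, u64 addr)
	{
		__asm__ __volatile__(
			"invtlb %0, %1, %2\n\t"
			:
			: "i"(op), "r"(info), "r"(addr)
			: "memory"
			);
	}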

>  {
> +       BUILD_BUG_ON(!__builtin_constant_p(op));

Hmm, I guess it's redundant.  If op is not a compile-time constant, it
won't satisfy the "i" constraint, and the compiler will complain anyway.
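
For illustration, a hypothetical caller like the following already fails
to compile, with or without the BUILD_BUG_ON, because %0 requires an
immediate operand:

	/* Hypothetical: op is not a compile-time constant here. */
	void flush_one(u32 runtime_op, u64 addr)
	{
		invtlb(runtime_op, 0, addr);	/* "i"(op) cannot be satisfied */
	}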

>         __asm__ __volatile__(
> -               "parse_r addr,%0\n\t"
> -               "parse_r info,%1\n\t"
> -               ".word ((0x6498000) | (addr << 10) | (info << 5) | %2)\n\t"
> -               :
> -               : "r"(addr), "r"(info), "i"(op)
> +               "invtlb %0, %1, %2\n\t"
>                 :
> +               : "i"(op), "r"(info), "r"(addr)
> +               : "memory"
>                 );
>  }

Likewise for other wrappers.

/* snip */
WANG Xuerui June 24, 2023, 7:03 p.m. UTC | #2
Good evening! (I really have to get some sleep real soon.)

On 6/25/23 02:55, Xi Ruoyao wrote:
> On Sun, 2023-06-25 at 02:40 +0800, WANG Xuerui wrote:
>> From: WANG Xuerui <git@xen0n.name>
>>
>> The invtlb instruction has been supported by upstream LoongArch
>> toolchains from day one, so ditch the raw opcode trickery and just use
>> plain inline asm for it.
>>
>> While at it, also make the invtlb asm statements barriers, for proper
>> modeling of the side effects.
>>
>> The signature of the other more specific invtlb wrappers contain unused
>> arguments right now, but these are not removed right away in order for
>> the patch to be focused. In the meantime, assertions are added to ensure
>> no accidental misuse happens before the refactor. (The more specific
>> wrappers cannot re-use the generic invtlb wrapper, because the ISA
>> manual says $zero shall be used in case a particular op does not take
>> the respective argument: re-using the generic wrapper would mean losing
>> control over the register usage.)
>>
>> Signed-off-by: WANG Xuerui <git@xen0n.name>
>> ---
>>   arch/loongarch/include/asm/tlb.h | 39 ++++++++++++++++----------------
>>   1 file changed, 19 insertions(+), 20 deletions(-)
>>
>> diff --git a/arch/loongarch/include/asm/tlb.h b/arch/loongarch/include/asm/tlb.h
>> index 0dc9ee2b05d2..15750900540c 100644
>> --- a/arch/loongarch/include/asm/tlb.h
>> +++ b/arch/loongarch/include/asm/tlb.h
>> @@ -88,52 +88,51 @@ enum invtlb_ops {
>>          INVTLB_GID_ADDR = 0x16,
>>   };
>>   
>> -/*
>> - * invtlb op info addr
>> - * (0x1 << 26) | (0x24 << 20) | (0x13 << 15) |
>> - * (addr << 10) | (info << 5) | op
>> - */
>>   static inline void invtlb(u32 op, u32 info, u64 addr)
> Oh, technically these wrappers should be __always_inline, not only
> inline because they don't work at all if not inlined.  Should we change
> them to __always_inline in this patch by the way?
Makes sense... let me send a quick v3 tomorrow (and maybe after Huacai 
has taken a look).
>
>>   {
>> +       BUILD_BUG_ON(!__builtin_constant_p(op));
> Hmm, I guess it's redundant.  If op is not a compile-time constant, it
> won't satisfy the "i" constraint then the compiler will complain anyway.
You're right (in v1 this wasn't done, yet it compiled just fine with GCC 
and Clang). I'll remove the op assertions in v3. Thanks for the reviews.
>
>>          __asm__ __volatile__(
>> -               "parse_r addr,%0\n\t"
>> -               "parse_r info,%1\n\t"
>> -               ".word ((0x6498000) | (addr << 10) | (info << 5) | %2)\n\t"
>> -               :
>> -               : "r"(addr), "r"(info), "i"(op)
>> +               "invtlb %0, %1, %2\n\t"
>>                  :
>> +               : "i"(op), "r"(info), "r"(addr)
>> +               : "memory"
>>                  );
>>   }
> Likewise for other wrappers.
>
> /* snip */
>
Bibo Mao June 25, 2023, 3:17 a.m. UTC | #3
On 2023/6/25 02:40, WANG Xuerui wrote:
> From: WANG Xuerui <git@xen0n.name>
> 
> The invtlb instruction has been supported by upstream LoongArch
> toolchains from day one, so ditch the raw opcode trickery and just use
> plain inline asm for it.
> 
> While at it, also make the invtlb asm statements barriers, for proper
> modeling of the side effects.
> 
> The signature of the other more specific invtlb wrappers contain unused
> arguments right now, but these are not removed right away in order for
> the patch to be focused. In the meantime, assertions are added to ensure
> no accidental misuse happens before the refactor. (The more specific
> wrappers cannot re-use the generic invtlb wrapper, because the ISA
> manual says $zero shall be used in case a particular op does not take
> the respective argument: re-using the generic wrapper would mean losing
> control over the register usage.)
> 
> Signed-off-by: WANG Xuerui <git@xen0n.name>
> ---
>  arch/loongarch/include/asm/tlb.h | 39 ++++++++++++++++----------------
>  1 file changed, 19 insertions(+), 20 deletions(-)
> 
> diff --git a/arch/loongarch/include/asm/tlb.h b/arch/loongarch/include/asm/tlb.h
> index 0dc9ee2b05d2..15750900540c 100644
> --- a/arch/loongarch/include/asm/tlb.h
> +++ b/arch/loongarch/include/asm/tlb.h
> @@ -88,52 +88,51 @@ enum invtlb_ops {
>  	INVTLB_GID_ADDR = 0x16,
>  };
>  
> -/*
> - * invtlb op info addr
> - * (0x1 << 26) | (0x24 << 20) | (0x13 << 15) |
> - * (addr << 10) | (info << 5) | op
> - */
>  static inline void invtlb(u32 op, u32 info, u64 addr)
>  {
Inlining is not guaranteed for a plain inline function; it may be emitted
as an out-of-line function, in which case op is not a constant. It would
be better to define these as macros or change inline to __always_inline.
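
A rough sketch of the macro alternative, for illustration only (whether
v3 goes this way or the __always_inline way is the author's call):

	#define invtlb(op, info, addr)					\
		__asm__ __volatile__(					\
			"invtlb %0, %1, %2\n\t"				\
			:						\
			: "i"(op), "r"(info), "r"(addr)			\
			: "memory")

Either form avoids the out-of-line copy of the wrapper that would make op
non-constant at the asm statement.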

Regards
Bibo Mao

> +	BUILD_BUG_ON(!__builtin_constant_p(op));
>  	__asm__ __volatile__(
> -		"parse_r addr,%0\n\t"
> -		"parse_r info,%1\n\t"
> -		".word ((0x6498000) | (addr << 10) | (info << 5) | %2)\n\t"
> -		:
> -		: "r"(addr), "r"(info), "i"(op)
> +		"invtlb %0, %1, %2\n\t"
>  		:
> +		: "i"(op), "r"(info), "r"(addr)
> +		: "memory"
>  		);
>  }
>  
>  static inline void invtlb_addr(u32 op, u32 info, u64 addr)
>  {
> +	BUILD_BUG_ON(!__builtin_constant_p(op));
> +	BUILD_BUG_ON(!__builtin_constant_p(info) || info != 0);
>  	__asm__ __volatile__(
> -		"parse_r addr,%0\n\t"
> -		".word ((0x6498000) | (addr << 10) | (0 << 5) | %1)\n\t"
> -		:
> -		: "r"(addr), "i"(op)
> +		"invtlb %0, $zero, %1\n\t"
>  		:
> +		: "i"(op), "r"(addr)
> +		: "memory"
>  		);
>  }
>  
>  static inline void invtlb_info(u32 op, u32 info, u64 addr)
>  {
> +	BUILD_BUG_ON(!__builtin_constant_p(op));
> +	BUILD_BUG_ON(!__builtin_constant_p(addr) || addr != 0);
>  	__asm__ __volatile__(
> -		"parse_r info,%0\n\t"
> -		".word ((0x6498000) | (0 << 10) | (info << 5) | %1)\n\t"
> -		:
> -		: "r"(info), "i"(op)
> +		"invtlb %0, %1, $zero\n\t"
>  		:
> +		: "i"(op), "r"(info)
> +		: "memory"
>  		);
>  }
>  
>  static inline void invtlb_all(u32 op, u32 info, u64 addr)
>  {
> +	BUILD_BUG_ON(!__builtin_constant_p(op));
> +	BUILD_BUG_ON(!__builtin_constant_p(info) || info != 0);
> +	BUILD_BUG_ON(!__builtin_constant_p(addr) || addr != 0);
>  	__asm__ __volatile__(
> -		".word ((0x6498000) | (0 << 10) | (0 << 5) | %0)\n\t"
> +		"invtlb %0, $zero, $zero\n\t"
>  		:
>  		: "i"(op)
> -		:
> +		: "memory"
>  		);
>  }
>
Patch

diff --git a/arch/loongarch/include/asm/tlb.h b/arch/loongarch/include/asm/tlb.h
index 0dc9ee2b05d2..15750900540c 100644
--- a/arch/loongarch/include/asm/tlb.h
+++ b/arch/loongarch/include/asm/tlb.h
@@ -88,52 +88,51 @@  enum invtlb_ops {
 	INVTLB_GID_ADDR = 0x16,
 };
 
-/*
- * invtlb op info addr
- * (0x1 << 26) | (0x24 << 20) | (0x13 << 15) |
- * (addr << 10) | (info << 5) | op
- */
 static inline void invtlb(u32 op, u32 info, u64 addr)
 {
+	BUILD_BUG_ON(!__builtin_constant_p(op));
 	__asm__ __volatile__(
-		"parse_r addr,%0\n\t"
-		"parse_r info,%1\n\t"
-		".word ((0x6498000) | (addr << 10) | (info << 5) | %2)\n\t"
-		:
-		: "r"(addr), "r"(info), "i"(op)
+		"invtlb %0, %1, %2\n\t"
 		:
+		: "i"(op), "r"(info), "r"(addr)
+		: "memory"
 		);
 }
 
 static inline void invtlb_addr(u32 op, u32 info, u64 addr)
 {
+	BUILD_BUG_ON(!__builtin_constant_p(op));
+	BUILD_BUG_ON(!__builtin_constant_p(info) || info != 0);
 	__asm__ __volatile__(
-		"parse_r addr,%0\n\t"
-		".word ((0x6498000) | (addr << 10) | (0 << 5) | %1)\n\t"
-		:
-		: "r"(addr), "i"(op)
+		"invtlb %0, $zero, %1\n\t"
 		:
+		: "i"(op), "r"(addr)
+		: "memory"
 		);
 }
 
 static inline void invtlb_info(u32 op, u32 info, u64 addr)
 {
+	BUILD_BUG_ON(!__builtin_constant_p(op));
+	BUILD_BUG_ON(!__builtin_constant_p(addr) || addr != 0);
 	__asm__ __volatile__(
-		"parse_r info,%0\n\t"
-		".word ((0x6498000) | (0 << 10) | (info << 5) | %1)\n\t"
-		:
-		: "r"(info), "i"(op)
+		"invtlb %0, %1, $zero\n\t"
 		:
+		: "i"(op), "r"(info)
+		: "memory"
 		);
 }
 
 static inline void invtlb_all(u32 op, u32 info, u64 addr)
 {
+	BUILD_BUG_ON(!__builtin_constant_p(op));
+	BUILD_BUG_ON(!__builtin_constant_p(info) || info != 0);
+	BUILD_BUG_ON(!__builtin_constant_p(addr) || addr != 0);
 	__asm__ __volatile__(
-		".word ((0x6498000) | (0 << 10) | (0 << 5) | %0)\n\t"
+		"invtlb %0, $zero, $zero\n\t"
 		:
 		: "i"(op)
-		:
+		: "memory"
 		);
 }
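
For context, a rough sketch of how these wrappers end up being used in
the LoongArch TLB flushing code (simplified illustration, assuming the
INVTLB_CURRENT_ALL op value from the enum at the top of this header):

	void local_flush_tlb_all(void)
	{
		invtlb_all(INVTLB_CURRENT_ALL, 0, 0);
	}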