powerpc/bpf: Use bctrl for making function calls

Message ID: 20210609090024.1446800-1-naveen.n.rao@linux.vnet.ibm.com (mailing list archive)
State: Changes Requested
Delegated to: BPF
Series: powerpc/bpf: Use bctrl for making function calls

Checks

netdev/tree_selection: success (Not a local patch)

Commit Message

Naveen N. Rao June 9, 2021, 9 a.m. UTC
blrl corrupts the link stack. Instead use bctrl when making function
calls from BPF programs.

Reported-by: Anton Blanchard <anton@ozlabs.org>
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/ppc-opcode.h |  1 +
 arch/powerpc/net/bpf_jit_comp32.c     |  4 ++--
 arch/powerpc/net/bpf_jit_comp64.c     | 12 ++++++------
 3 files changed, 9 insertions(+), 8 deletions(-)


base-commit: 112f47a1484ddca610b70cbe4a99f0d0f1701daf
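
For quick reference, the sequence the JIT emits for an indirect call after
this change, taken from the 64-bit bpf_jit_emit_func_call_rel() path in the
patch below, with the assembly mnemonics added as comments (illustrative
only):

	EMIT(PPC_RAW_MTCTR(12));	/* mtctr r12  (previously: mtlr r12) */
	EMIT(PPC_RAW_BCTRL());		/* bctrl      (previously: blrl)     */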

Comments

Christophe Leroy June 9, 2021, 9:42 a.m. UTC | #1
On 09/06/2021 at 11:00, Naveen N. Rao wrote:
> blrl corrupts the link stack. Instead use bctrl when making function
> calls from BPF programs.

What's the link stack? Is it the PPC64 branch predictor stack?

> 
> Reported-by: Anton Blanchard <anton@ozlabs.org>
> Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
> ---
>   arch/powerpc/include/asm/ppc-opcode.h |  1 +
>   arch/powerpc/net/bpf_jit_comp32.c     |  4 ++--
>   arch/powerpc/net/bpf_jit_comp64.c     | 12 ++++++------
>   3 files changed, 9 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
> index ac41776661e963..1abacb8417d562 100644
> --- a/arch/powerpc/include/asm/ppc-opcode.h
> +++ b/arch/powerpc/include/asm/ppc-opcode.h
> @@ -451,6 +451,7 @@
>   #define PPC_RAW_MTLR(r)			(0x7c0803a6 | ___PPC_RT(r))
>   #define PPC_RAW_MFLR(t)			(PPC_INST_MFLR | ___PPC_RT(t))
>   #define PPC_RAW_BCTR()			(PPC_INST_BCTR)
> +#define PPC_RAW_BCTRL()			(PPC_INST_BCTRL)

Can you use the numeric value instead of PPC_INST_BCTRL, to avoid a conflict with
https://patchwork.ozlabs.org/project/linuxppc-dev/patch/4ca2bfdca2f47a293d05f61eb3c4e487ee170f1f.1621506159.git.christophe.leroy@csgroup.eu/

>   #define PPC_RAW_MTCTR(r)		(PPC_INST_MTCTR | ___PPC_RT(r))
>   #define PPC_RAW_ADDI(d, a, i)		(PPC_INST_ADDI | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
>   #define PPC_RAW_LI(r, i)		PPC_RAW_ADDI(r, 0, i)
> diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
> index bbb16099e8c7fa..40ab50bea61c02 100644
> --- a/arch/powerpc/net/bpf_jit_comp32.c
> +++ b/arch/powerpc/net/bpf_jit_comp32.c
> @@ -195,8 +195,8 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
>   		/* Load function address into r0 */
>   		EMIT(PPC_RAW_LIS(__REG_R0, IMM_H(func)));
>   		EMIT(PPC_RAW_ORI(__REG_R0, __REG_R0, IMM_L(func)));
> -		EMIT(PPC_RAW_MTLR(__REG_R0));
> -		EMIT(PPC_RAW_BLRL());
> +		EMIT(PPC_RAW_MTCTR(__REG_R0));
> +		EMIT(PPC_RAW_BCTRL());
>   	}
>   }
>   
> diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
> index 57a8c1153851a0..ae9a6532be6ad4 100644
> --- a/arch/powerpc/net/bpf_jit_comp64.c
> +++ b/arch/powerpc/net/bpf_jit_comp64.c
> @@ -153,8 +153,8 @@ static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
>   	PPC_LI64(b2p[TMP_REG_2], func);
>   	/* Load actual entry point from function descriptor */
>   	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
> -	/* ... and move it to LR */
> -	EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
> +	/* ... and move it to CTR */
> +	EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
>   	/*
>   	 * Load TOC from function descriptor at offset 8.
>   	 * We can clobber r2 since we get called through a
> @@ -165,9 +165,9 @@ static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
>   #else
>   	/* We can clobber r12 */
>   	PPC_FUNC_ADDR(12, func);
> -	EMIT(PPC_RAW_MTLR(12));
> +	EMIT(PPC_RAW_MTCTR(12));
>   #endif
> -	EMIT(PPC_RAW_BLRL());
> +	EMIT(PPC_RAW_BCTRL());
>   }
>   
>   void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
> @@ -202,8 +202,8 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
>   	PPC_BPF_LL(12, 12, 0);
>   #endif
>   
> -	EMIT(PPC_RAW_MTLR(12));
> -	EMIT(PPC_RAW_BLRL());
> +	EMIT(PPC_RAW_MTCTR(12));
> +	EMIT(PPC_RAW_BCTRL());
>   }
>   
>   static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
> 
> base-commit: 112f47a1484ddca610b70cbe4a99f0d0f1701daf
>
Naveen N. Rao June 9, 2021, 1:11 p.m. UTC | #2
Christophe Leroy wrote:
> 
> 
> On 09/06/2021 at 11:00, Naveen N. Rao wrote:
>> blrl corrupts the link stack. Instead use bctrl when making function
>> calls from BPF programs.
> 
> What's the link stack? Is it the PPC64 branch predictor stack?

c974809a26a13e ("powerpc/vdso: Avoid link stack corruption in
__get_datapage()") has a good write-up on the link stack.

> 
>> 
>> Reported-by: Anton Blanchard <anton@ozlabs.org>
>> Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
>> ---
>>   arch/powerpc/include/asm/ppc-opcode.h |  1 +
>>   arch/powerpc/net/bpf_jit_comp32.c     |  4 ++--
>>   arch/powerpc/net/bpf_jit_comp64.c     | 12 ++++++------
>>   3 files changed, 9 insertions(+), 8 deletions(-)
>> 
>> diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
>> index ac41776661e963..1abacb8417d562 100644
>> --- a/arch/powerpc/include/asm/ppc-opcode.h
>> +++ b/arch/powerpc/include/asm/ppc-opcode.h
>> @@ -451,6 +451,7 @@
>>   #define PPC_RAW_MTLR(r)			(0x7c0803a6 | ___PPC_RT(r))
>>   #define PPC_RAW_MFLR(t)			(PPC_INST_MFLR | ___PPC_RT(t))
>>   #define PPC_RAW_BCTR()			(PPC_INST_BCTR)
>> +#define PPC_RAW_BCTRL()			(PPC_INST_BCTRL)
> 
> Can you use the numeric value instead of PPC_INST_BCTRL, to avoid a conflict with
> https://patchwork.ozlabs.org/project/linuxppc-dev/patch/4ca2bfdca2f47a293d05f61eb3c4e487ee170f1f.1621506159.git.christophe.leroy@csgroup.eu/

Sure. I'll post a v2.

- Naveen
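
For reference, a minimal sketch of what that change could look like; the
0x4e800421 value is my own reading of the bctrl encoding (bctr's 0x4e800420
with the LK bit set) and is not quoted from the posted v2:

	#define PPC_RAW_BCTRL()		(0x4e800421)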
Michael Ellerman June 26, 2021, 10:37 a.m. UTC | #3
On Wed, 9 Jun 2021 14:30:24 +0530, Naveen N. Rao wrote:
> blrl corrupts the link stack. Instead use bctrl when making function
> calls from BPF programs.

Applied to powerpc/next.

[1/1] powerpc/bpf: Use bctrl for making function calls
      https://git.kernel.org/powerpc/c/20ccb004bad659c186f9091015a956da220d615d

cheers

Patch

diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index ac41776661e963..1abacb8417d562 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -451,6 +451,7 @@
 #define PPC_RAW_MTLR(r)			(0x7c0803a6 | ___PPC_RT(r))
 #define PPC_RAW_MFLR(t)			(PPC_INST_MFLR | ___PPC_RT(t))
 #define PPC_RAW_BCTR()			(PPC_INST_BCTR)
+#define PPC_RAW_BCTRL()			(PPC_INST_BCTRL)
 #define PPC_RAW_MTCTR(r)		(PPC_INST_MTCTR | ___PPC_RT(r))
 #define PPC_RAW_ADDI(d, a, i)		(PPC_INST_ADDI | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
 #define PPC_RAW_LI(r, i)		PPC_RAW_ADDI(r, 0, i)
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index bbb16099e8c7fa..40ab50bea61c02 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -195,8 +195,8 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
 		/* Load function address into r0 */
 		EMIT(PPC_RAW_LIS(__REG_R0, IMM_H(func)));
 		EMIT(PPC_RAW_ORI(__REG_R0, __REG_R0, IMM_L(func)));
-		EMIT(PPC_RAW_MTLR(__REG_R0));
-		EMIT(PPC_RAW_BLRL());
+		EMIT(PPC_RAW_MTCTR(__REG_R0));
+		EMIT(PPC_RAW_BCTRL());
 	}
 }
 
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 57a8c1153851a0..ae9a6532be6ad4 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -153,8 +153,8 @@ static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
 	PPC_LI64(b2p[TMP_REG_2], func);
 	/* Load actual entry point from function descriptor */
 	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
-	/* ... and move it to LR */
-	EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
+	/* ... and move it to CTR */
+	EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
 	/*
 	 * Load TOC from function descriptor at offset 8.
 	 * We can clobber r2 since we get called through a
@@ -165,9 +165,9 @@ static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
 #else
 	/* We can clobber r12 */
 	PPC_FUNC_ADDR(12, func);
-	EMIT(PPC_RAW_MTLR(12));
+	EMIT(PPC_RAW_MTCTR(12));
 #endif
-	EMIT(PPC_RAW_BLRL());
+	EMIT(PPC_RAW_BCTRL());
 }
 
 void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
@@ -202,8 +202,8 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
 	PPC_BPF_LL(12, 12, 0);
 #endif
 
-	EMIT(PPC_RAW_MTLR(12));
-	EMIT(PPC_RAW_BLRL());
+	EMIT(PPC_RAW_MTCTR(12));
+	EMIT(PPC_RAW_BCTRL());
 }
 
 static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
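
A note on the function-descriptor path in bpf_jit_emit_func_call_hlp() above,
for readers unfamiliar with the ELF ABI v1 case it handles: there a function
"address" points at a descriptor rather than at code. Roughly (struct and
field names are mine; the 0 and 8 byte offsets come from the code comments
above, and the third field is the ABI's environment pointer):

	struct elfv1_func_desc {
		unsigned long entry;	/* offset 0: actual entry point      */
		unsigned long toc;	/* offset 8: TOC (r2) for the callee */
		unsigned long env;	/* offset 16: environment pointer    */
	};

The JIT loads ->entry into CTR (previously LR) and, per the comment, the TOC
from offset 8 into r2 before issuing the bctrl.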