[bpf-next,v3,13/13] bpf/tests: Add tail call limit test with external function call

Message ID 20210909143303.811171-14-johan.almbladh@anyfinetworks.com (mailing list archive)
State Superseded
Delegated to: BPF
Series bpf/tests: Extend JIT test suite coverage

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
netdev/cover_letter success
netdev/fixes_present success
netdev/patch_count success
netdev/tree_selection success Clearly marked for bpf-next
netdev/subject_prefix success
netdev/cc_maintainers success CCed 10 of 10 maintainers
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success
netdev/module_param success Was 0 now: 0
netdev/build_32bit fail Errors and warnings before: 100 this patch: 101
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/verify_fixes success
netdev/checkpatch warning CHECK: Please use a blank line after function/struct/union/enum declarations
netdev/build_allmodconfig_warn fail Errors and warnings before: 100 this patch: 101
netdev/header_inline success
bpf/vmtest-bpf-next success VM_Test

Commit Message

Johan Almbladh Sept. 9, 2021, 2:33 p.m. UTC
This patch adds a tail call limit test where the program also emits
a BPF_CALL to an external function prior to the tail call. It mainly
tests that a JITed program preserves its internal register state, for
example the tail call count, across such external calls.

Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
---
 lib/test_bpf.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 80 insertions(+), 3 deletions(-)
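
For orientation, here is a rough C-level sketch of what the new test
program does. This is an illustration of the BPF instruction sequence
in the patch below, not code from the patch itself; prog() and
tail_call() are hypothetical stand-ins for the BPF mechanics:

	/* Entered once directly, then re-entered via each tail call.
	 * The invocation count arrives in R1 and must survive the
	 * external calls, which are free to clobber any caller-saved
	 * CPU register the JIT may use for its own state.
	 */
	u64 prog(u64 count)
	{
		count++;		/* BPF_ALU64_IMM(BPF_ADD, R1, 1) */
		/* spill count to the stack at fp-8 */
		numa_node_id();		/* external calls begin here */
		ktime_get_ns();
		/* ... more helper calls, ending with bpf_test_func() */
		/* reload count from fp-8 and move it to R0 */
		tail_call(prog);	/* re-enter until the limit is hit */
		return count;		/* reached once the tail call fails */
	}

If a JIT keeps its tail call counter in a caller-saved register and
fails to preserve it around the external calls, the chain is either cut
short or never terminates, and the expected result of
MAX_TAIL_CALL_CNT + 1 is not produced.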

Comments

Daniel Borkmann Sept. 10, 2021, 7:47 p.m. UTC | #1
On 9/9/21 4:33 PM, Johan Almbladh wrote:
> This patch adds a tail call limit test where the program also emits
> a BPF_CALL to an external function prior to the tail call. It mainly
> tests that a JITed program preserves its internal register state, for
> example the tail call count, across such external calls.
> 
> Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
> ---
>   lib/test_bpf.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++++--
>   1 file changed, 80 insertions(+), 3 deletions(-)
> 
> diff --git a/lib/test_bpf.c b/lib/test_bpf.c
> index 7475abfd2186..152193b4080f 100644
> --- a/lib/test_bpf.c
> +++ b/lib/test_bpf.c
> @@ -12202,6 +12202,30 @@ struct tail_call_test {
>   		     offset, TAIL_CALL_MARKER),	       \
>   	BPF_JMP_IMM(BPF_TAIL_CALL, 0, 0, 0)
>   
> +/*
> + * A test function to be called from a BPF program, clobbering a lot of
> + * CPU registers in the process. A JITed BPF program calling this function
> + * must save and restore any caller-saved registers it uses for internal
> + * state, for example the current tail call count.
> + */
> +BPF_CALL_1(bpf_test_func, u64, arg)
> +{
> +	char buf[64];
> +	long a = 0;
> +	long b = 1;
> +	long c = 2;
> +	long d = 3;
> +	long e = 4;
> +	long f = 5;
> +	long g = 6;
> +	long h = 7;
> +
> +	return snprintf(buf, sizeof(buf),
> +			"%ld %lu %lx %ld %lu %lx %ld %lu %x",
> +			a, b, c, d, e, f, g, h, (int)arg);
> +}
> +#define BPF_FUNC_test_func __BPF_FUNC_MAX_ID
> +
>   /*
>    * Tail call tests. Each test case may call any other test in the table,
>    * including itself, specified as a relative index offset from the calling
> @@ -12259,6 +12283,25 @@ static struct tail_call_test tail_call_tests[] = {
>   		},
>   		.result = MAX_TAIL_CALL_CNT + 1,
>   	},
> +	{
> +		"Tail call count preserved across function calls",
> +		.insns = {
> +			BPF_ALU64_IMM(BPF_ADD, R1, 1),
> +			BPF_STX_MEM(BPF_DW, R10, R1, -8),
> +			BPF_CALL_REL(BPF_FUNC_get_numa_node_id),
> +			BPF_CALL_REL(BPF_FUNC_ktime_get_ns),
> +			BPF_CALL_REL(BPF_FUNC_ktime_get_boot_ns),
> +			BPF_CALL_REL(BPF_FUNC_ktime_get_coarse_ns),
> +			BPF_CALL_REL(BPF_FUNC_jiffies64),
> +			BPF_CALL_REL(BPF_FUNC_test_func),
> +			BPF_LDX_MEM(BPF_DW, R1, R10, -8),
> +			BPF_ALU32_REG(BPF_MOV, R0, R1),
> +			TAIL_CALL(0),
> +			BPF_EXIT_INSN(),

From discussion with Johan, there'll be a v4 respin, since the
assumption of R0 being valid before the exit insn does not hold when
going through the verifier. Fixing it confirmed the 33 limit for the
x86 JIT as well, so both the interpreter and the JIT are 33-aligned.

> +		},
> +		.stack_depth = 8,
> +		.result = MAX_TAIL_CALL_CNT + 1,
> +	},
>   	{
>   		"Tail call error path, NULL target",
>   		.insns = {
> @@ -12333,17 +12376,19 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
>   		/* Relocate runtime tail call offsets and addresses */
>   		for (i = 0; i < len; i++) {
>   			struct bpf_insn *insn = &fp->insnsi[i];
> -
> -			if (insn->imm != TAIL_CALL_MARKER)
> -				continue;
> +			long addr = 0;
>   
>   			switch (insn->code) {
>   			case BPF_LD | BPF_DW | BPF_IMM:
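
For context on the 33 limit mentioned above: at the time of this
series, the interpreter's tail call handling in kernel/bpf/core.c
performed roughly the following check (a sketch of the then-current
sources, trimmed to the relevant lines):

	/* JMP_TAIL_CALL in ___bpf_prog_run() */
	if (unlikely(index >= array->map.max_entries))
		goto out;
	if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
		goto out;
	tail_call_cnt++;

With MAX_TAIL_CALL_CNT defined as 32 at the time, the strict '>'
comparison lets one more tail call through than the constant's name
suggests, which is where the figure of 33 comes from. The test's
expected result of MAX_TAIL_CALL_CNT + 1 pins that behaviour down so
the interpreter and the JITs stay aligned on it.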
Patch

diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 7475abfd2186..152193b4080f 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -12202,6 +12202,30 @@ struct tail_call_test {
 		     offset, TAIL_CALL_MARKER),	       \
 	BPF_JMP_IMM(BPF_TAIL_CALL, 0, 0, 0)
 
+/*
+ * A test function to be called from a BPF program, clobbering a lot of
+ * CPU registers in the process. A JITed BPF program calling this function
+ * must save and restore any caller-saved registers it uses for internal
+ * state, for example the current tail call count.
+ */
+BPF_CALL_1(bpf_test_func, u64, arg)
+{
+	char buf[64];
+	long a = 0;
+	long b = 1;
+	long c = 2;
+	long d = 3;
+	long e = 4;
+	long f = 5;
+	long g = 6;
+	long h = 7;
+
+	return snprintf(buf, sizeof(buf),
+			"%ld %lu %lx %ld %lu %lx %ld %lu %x",
+			a, b, c, d, e, f, g, h, (int)arg);
+}
+#define BPF_FUNC_test_func __BPF_FUNC_MAX_ID
+
 /*
  * Tail call tests. Each test case may call any other test in the table,
  * including itself, specified as a relative index offset from the calling
@@ -12259,6 +12283,25 @@ static struct tail_call_test tail_call_tests[] = {
 		},
 		.result = MAX_TAIL_CALL_CNT + 1,
 	},
+	{
+		"Tail call count preserved across function calls",
+		.insns = {
+			BPF_ALU64_IMM(BPF_ADD, R1, 1),
+			BPF_STX_MEM(BPF_DW, R10, R1, -8),
+			BPF_CALL_REL(BPF_FUNC_get_numa_node_id),
+			BPF_CALL_REL(BPF_FUNC_ktime_get_ns),
+			BPF_CALL_REL(BPF_FUNC_ktime_get_boot_ns),
+			BPF_CALL_REL(BPF_FUNC_ktime_get_coarse_ns),
+			BPF_CALL_REL(BPF_FUNC_jiffies64),
+			BPF_CALL_REL(BPF_FUNC_test_func),
+			BPF_LDX_MEM(BPF_DW, R1, R10, -8),
+			BPF_ALU32_REG(BPF_MOV, R0, R1),
+			TAIL_CALL(0),
+			BPF_EXIT_INSN(),
+		},
+		.stack_depth = 8,
+		.result = MAX_TAIL_CALL_CNT + 1,
+	},
 	{
 		"Tail call error path, NULL target",
 		.insns = {
@@ -12333,17 +12376,19 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
 		/* Relocate runtime tail call offsets and addresses */
 		for (i = 0; i < len; i++) {
 			struct bpf_insn *insn = &fp->insnsi[i];
-
-			if (insn->imm != TAIL_CALL_MARKER)
-				continue;
+			long addr = 0;
 
 			switch (insn->code) {
 			case BPF_LD | BPF_DW | BPF_IMM:
+				if (insn->imm != TAIL_CALL_MARKER)
+					break;
 				insn[0].imm = (u32)(long)progs;
 				insn[1].imm = ((u64)(long)progs) >> 32;
 				break;
 
 			case BPF_ALU | BPF_MOV | BPF_K:
+				if (insn->imm != TAIL_CALL_MARKER)
+					break;
 				if (insn->off == TAIL_CALL_NULL)
 					insn->imm = ntests;
 				else if (insn->off == TAIL_CALL_INVALID)
@@ -12351,6 +12396,38 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
 				else
 					insn->imm = which + insn->off;
 				insn->off = 0;
+				break;
+
+			case BPF_JMP | BPF_CALL:
+				if (insn->src_reg != BPF_PSEUDO_CALL)
+					break;
+				switch (insn->imm) {
+				case BPF_FUNC_get_numa_node_id:
+					addr = (long)&numa_node_id;
+					break;
+				case BPF_FUNC_ktime_get_ns:
+					addr = (long)&ktime_get_ns;
+					break;
+				case BPF_FUNC_ktime_get_boot_ns:
+					addr = (long)&ktime_get_boot_fast_ns;
+					break;
+				case BPF_FUNC_ktime_get_coarse_ns:
+					addr = (long)&ktime_get_coarse_ns;
+					break;
+				case BPF_FUNC_jiffies64:
+					addr = (long)&get_jiffies_64;
+					break;
+				case BPF_FUNC_test_func:
+					addr = (long)&bpf_test_func;
+					break;
+				default:
+					err = -EFAULT;
+					goto out_err;
+				}
+				*insn = BPF_EMIT_CALL(BPF_CAST_CALL(addr));
+				if ((long)__bpf_call_base + insn->imm != addr)
+					*insn = BPF_JMP_A(0); /* Skip: NOP */
+				break;
 			}
 		}
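
A note on the BPF_JMP | BPF_CALL relocation above: BPF_EMIT_CALL()
encodes the call target as a 32-bit offset from __bpf_call_base. At
the time of this series, the macro in include/linux/filter.h read
roughly as follows:

	#define BPF_EMIT_CALL(FUNC)					\
		((struct bpf_insn) {					\
			.code  = BPF_JMP | BPF_CALL,			\
			.dst_reg = 0,					\
			.src_reg = 0,					\
			.off   = 0,					\
			.imm   = ((FUNC) - __bpf_call_base) })

Because insn->imm is only 32 bits wide, a target that lies farther
than +/- 2 GB from __bpf_call_base cannot be encoded; this can happen
for example when test_bpf is built as a module on a 64-bit kernel.
The round-trip check

	if ((long)__bpf_call_base + insn->imm != addr)
		*insn = BPF_JMP_A(0); /* Skip: NOP */

catches exactly that case and downgrades the call to a NOP jump, so
the tail call counting logic is still exercised even when a given
helper call cannot be emitted.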