
[14/14] bpf/tests: Add tail call test suite

Message ID 20210728170502.351010-15-johan.almbladh@anyfinetworks.com
State Superseded
Delegated to: BPF
Series bpf/tests: Extend the eBPF test suite

Checks

netdev/tree_selection: success (Not a local patch)

Commit Message

Johan Almbladh July 28, 2021, 5:05 p.m. UTC
While BPF_CALL instructions were tested implicitly by the cBPF-to-eBPF
translation, there have been no tests for BPF_TAIL_CALL instructions.
The new test suite includes tests for tail call chaining, tail call count
tracking and error paths. It is mainly intended for JIT development and
testing.

Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
Reported-by: kernel test robot <lkp@intel.com>
---
 lib/test_bpf.c | 249 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 249 insertions(+)

Comments

Yonghong Song July 29, 2021, 2:56 a.m. UTC | #1
On 7/28/21 10:05 AM, Johan Almbladh wrote:
> While BPF_CALL instructions were tested implicitly by the cBPF-to-eBPF
> translation, there have been no tests for BPF_TAIL_CALL instructions.
> The new test suite includes tests for tail call chaining, tail call count
> tracking and error paths. It is mainly intended for JIT development and
> testing.
> 
> Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
> Reported-by: kernel test robot <lkp@intel.com>

The above Reported-by tag can be removed. This patch itself is not
about fixing an issue reported by the kernel test robot.

The patch looks good to me except a few minor comments below.

Acked-by: Yonghong Song <yhs@fb.com>

> ---
>   lib/test_bpf.c | 249 +++++++++++++++++++++++++++++++++++++++++++++++++
>   1 file changed, 249 insertions(+)
> 
> diff --git a/lib/test_bpf.c b/lib/test_bpf.c
> index af5758151d0a..222d454b2ed4 100644
> --- a/lib/test_bpf.c
> +++ b/lib/test_bpf.c
> @@ -8981,8 +8981,249 @@ static __init int test_bpf(void)
>   	return err_cnt ? -EINVAL : 0;
>   }
>   
> +struct tail_call_test {
> +	const char *descr;
> +	struct bpf_insn insns[MAX_INSNS];
> +	int result;
> +	int stack_depth;
> +};
> +
> +/*
> + * Magic marker used in test snippets for tail calls below.
> + * The BPF_LD to R2 and the BPF_MOV to R3 carrying this immediate
> + * value are replaced with the proper values by the test runner.
> + */
> +#define TAIL_CALL_MARKER 0x7a11ca11
> +
> +/* Special offset to indicate a NULL call target */
> +#define TAIL_CALL_NULL 0x7fff
> +
> +#define TAIL_CALL(offset)			       \
> +	BPF_LD_IMM64(R2, TAIL_CALL_MARKER),	       \
> +	BPF_RAW_INSN(BPF_ALU | BPF_MOV | BPF_K, R3, 0, \
> +		     offset, TAIL_CALL_MARKER),	       \
> +	BPF_JMP_IMM(BPF_TAIL_CALL, 0, 0, 0)
> +
> +/*
> + * Tail call tests. Each test case may call any other test in the table,
> + * including itself, specified as a relative index offset from the calling
> + * test. The index TAIL_CALL_NULL can be used to specify a NULL target
> + * function to test the JIT error path.
> + */
> +static struct tail_call_test tail_call_tests[] = {
> +	{
> +		"Tail call leaf",
> +		.insns = {
> +			BPF_ALU64_REG(BPF_MOV, R0, R1),
> +			BPF_ALU64_IMM(BPF_ADD, R0, 1),
> +			BPF_EXIT_INSN(),
> +		},
> +		.result = 1,
> +	},
> +	{
> +		"Tail call 2",
> +		.insns = {
> +			BPF_ALU64_IMM(BPF_ADD, R1, 2),
> +			TAIL_CALL(-1),
> +			BPF_ALU64_IMM(BPF_MOV, R0, -1),
> +			BPF_EXIT_INSN(),
> +		},
> +		.result = 3,
> +	},
> +	{
> +		"Tail call 3",
> +		.insns = {
> +			BPF_ALU64_IMM(BPF_ADD, R1, 3),
> +			TAIL_CALL(-1),
> +			BPF_ALU64_IMM(BPF_MOV, R0, -1),
> +			BPF_EXIT_INSN(),
> +		},
> +		.result = 6,
> +	},
> +	{
> +		"Tail call 4",
> +		.insns = {
> +			BPF_ALU64_IMM(BPF_ADD, R1, 4),
> +			TAIL_CALL(-1),
> +			BPF_ALU64_IMM(BPF_MOV, R0, -1),
> +			BPF_EXIT_INSN(),
> +		},
> +		.result = 10,
> +	},
> +	{
> +		"Tail call error path, max count reached",
> +		.insns = {
> +			BPF_ALU64_IMM(BPF_ADD, R1, 1),
> +			BPF_ALU64_REG(BPF_MOV, R0, R1),
> +			TAIL_CALL(0),
> +			BPF_EXIT_INSN(),
> +		},
> +		.result = MAX_TAIL_CALL_CNT + 1,
> +	},
> +	{
> +		"Tail call error path, NULL target",
> +		.insns = {
> +			BPF_ALU64_IMM(BPF_MOV, R0, -1),
> +			TAIL_CALL(TAIL_CALL_NULL),
> +			BPF_ALU64_IMM(BPF_MOV, R0, 1),
> +			BPF_EXIT_INSN(),
> +		},
> +		.result = 1,
> +	},
> +	{
> +		/* Must be the last test */
> +		"Tail call error path, index out of range",
> +		.insns = {
> +			BPF_ALU64_IMM(BPF_MOV, R0, -1),
> +			TAIL_CALL(1),    /* Index out of range */
> +			BPF_ALU64_IMM(BPF_MOV, R0, 1),
> +			BPF_EXIT_INSN(),
> +		},
> +		.result = 1,
> +	},
> +};
> +
> +static void __init destroy_tail_call_tests(struct bpf_array *progs)
> +{
> +	int i;
> +
> +	for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++)
> +		if (progs->ptrs[i])
> +			bpf_prog_free(progs->ptrs[i]);
> +	kfree(progs);
> +}
> +
> +static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
> +{
> +	struct bpf_array *progs;
> +	int ntests = ARRAY_SIZE(tail_call_tests);
> +	int which, err;

reverse christmas tree?
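
i.e., order the local declarations from longest line to shortest, e.g.:

	int ntests = ARRAY_SIZE(tail_call_tests);
	struct bpf_array *progs;
	int which, err;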

> +
> +	/* Allocate the table of programs to be used for tail calls */
> +	progs = kzalloc(sizeof(*progs) + (ntests + 1) * sizeof(progs->ptrs[0]),
> +			GFP_KERNEL);
> +	if (!progs)
> +		goto out_nomem;
> +
> +	/* Create all eBPF programs and populate the table */
> +	for (which = 0; which < ntests; which++) {
> +		struct tail_call_test *test = &tail_call_tests[which];
> +		struct bpf_prog *fp;
> +		int len, i;
> +
> +		/* Compute the number of program instructions */
> +		for (len = 0; len < MAX_INSNS; len++) {
> +			struct bpf_insn *insn = &test->insns[len];
> +
> +			if (len < MAX_INSNS - 1 &&
> +			    insn->code == (BPF_LD | BPF_DW | BPF_IMM))
> +				len++;
> +			if (insn->code == 0)
> +				break;
> +		}
> +
> +		/* Allocate and initialize the program */
> +		fp = bpf_prog_alloc(bpf_prog_size(len), 0);
> +		if (!fp)
> +			goto out_nomem;
> +
> +		fp->len = len;
> +		fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
> +		fp->aux->stack_depth = test->stack_depth;
> +		memcpy(fp->insnsi, test->insns, len * sizeof(struct bpf_insn));
> +
> +		/* Relocate runtime tail call offsets and addresses */
> +		for (i = 0; i < len; i++) {
> +			struct bpf_insn *insn = &fp->insnsi[i];
> +			int target;
> +
> +			if (insn->imm != TAIL_CALL_MARKER)
> +				continue;
> +
> +			switch (insn->code) {
> +			case BPF_LD | BPF_DW | BPF_IMM:
> +				if (insn->dst_reg == R2) {

Looks like the above condition is not needed. It is always true.

> +					insn[0].imm = (u32)(long)progs;
> +					insn[1].imm = ((u64)(long)progs) >> 32;
> +				}
> +				break;
> +
> +			case BPF_ALU | BPF_MOV | BPF_K:
> +			case BPF_ALU64 | BPF_MOV | BPF_K:

case BPF_ALU64 | BPF_MOV | BPF_K is not needed.

> +				if (insn->off == TAIL_CALL_NULL)
> +					target = ntests;
> +				else
> +					target = which + insn->off;
> +				if (insn->dst_reg == R3)

The same applies here: the insn->dst_reg == R3 check is not needed; it is
always true.

I suggest setting insn->off = 0 as well; otherwise it is an illegal insn.
We won't hit an issue here because we don't invoke the verifier, but it
is still good to make the insn legal.
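
Something like this (untested sketch, folding in the comments above):

	case BPF_ALU | BPF_MOV | BPF_K:
		if (insn->off == TAIL_CALL_NULL)
			insn->imm = ntests;
		else
			insn->imm = which + insn->off;
		insn->off = 0;
		break;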

> +					insn->imm = target;
> +				break;
> +			}
> +		}
> +
> +		fp = bpf_prog_select_runtime(fp, &err);
> +		if (err)
> +			goto out_err;
> +
> +		progs->ptrs[which] = fp;
> +	}
> +
> +	/* The last entry contains a NULL program pointer */
> +	progs->map.max_entries = ntests + 1;
> +	*pprogs = progs;
> +	return 0;
> +
> +out_nomem:
> +	err = -ENOMEM;
> +
> +out_err:
> +	if (progs)
> +		destroy_tail_call_tests(progs);
> +	return err;
> +}
> +
[...]
Johan Almbladh July 29, 2021, 8:44 p.m. UTC | #2
On Thu, Jul 29, 2021 at 4:56 AM Yonghong Song <yhs@fb.com> wrote:
> > +static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
> > +{
> > +     struct bpf_array *progs;
> > +     int ntests = ARRAY_SIZE(tail_call_tests);
> > +     int which, err;
>
> reverse christmas tree?

Will do.

> > +
> > +     /* Allocate the table of programs to be used for tail calls */
> > +     progs = kzalloc(sizeof(*progs) + (ntests + 1) * sizeof(progs->ptrs[0]),
> > +                     GFP_KERNEL);
> > +     if (!progs)
> > +             goto out_nomem;
> > +
> > +     /* Create all eBPF programs and populate the table */
> > +     for (which = 0; which < ntests; which++) {
> > +             struct tail_call_test *test = &tail_call_tests[which];
> > +             struct bpf_prog *fp;
> > +             int len, i;
> > +
> > +             /* Compute the number of program instructions */
> > +             for (len = 0; len < MAX_INSNS; len++) {
> > +                     struct bpf_insn *insn = &test->insns[len];
> > +
> > +                     if (len < MAX_INSNS - 1 &&
> > +                         insn->code == (BPF_LD | BPF_DW | BPF_IMM))
> > +                             len++;
> > +                     if (insn->code == 0)
> > +                             break;
> > +             }
> > +
> > +             /* Allocate and initialize the program */
> > +             fp = bpf_prog_alloc(bpf_prog_size(len), 0);
> > +             if (!fp)
> > +                     goto out_nomem;
> > +
> > +             fp->len = len;
> > +             fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
> > +             fp->aux->stack_depth = test->stack_depth;
> > +             memcpy(fp->insnsi, test->insns, len * sizeof(struct bpf_insn));
> > +
> > +             /* Relocate runtime tail call offsets and addresses */
> > +             for (i = 0; i < len; i++) {
> > +                     struct bpf_insn *insn = &fp->insnsi[i];
> > +                     int target;
> > +
> > +                     if (insn->imm != TAIL_CALL_MARKER)
> > +                             continue;
> > +
> > +                     switch (insn->code) {
> > +                     case BPF_LD | BPF_DW | BPF_IMM:
> > +                             if (insn->dst_reg == R2) {
>
> Looks like the above condition is not needed. It is always true.
>
> > +                                     insn[0].imm = (u32)(long)progs;
> > +                                     insn[1].imm = ((u64)(long)progs) >> 32;
> > +                             }
> > +                             break;
> > +
> > +                     case BPF_ALU | BPF_MOV | BPF_K:
> > +                     case BPF_ALU64 | BPF_MOV | BPF_K:
>
> case BPF_ALU64 | BPF_MOV | BPF_K is not needed.
>
> > +                             if (insn->off == TAIL_CALL_NULL)
> > +                                     target = ntests;
> > +                             else
> > +                                     target = which + insn->off;
> > +                             if (insn->dst_reg == R3)
>
> The same applies here: the insn->dst_reg == R3 check is not needed; it is always true.

I added the register checks to further restrict the cases where
rewriting is done, but it may be clearer to always rewrite the
instruction whenever the tail call marker is set. I will remove the
unnecessary conditions.
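
For the LD_IMM64 case the body would then simply be (sketch):

	case BPF_LD | BPF_DW | BPF_IMM:
		insn[0].imm = (u32)(long)progs;
		insn[1].imm = ((u64)(long)progs) >> 32;
		break;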

> I suggest setting insn->off = 0 as well; otherwise it is an illegal insn.
> We won't hit an issue here because we don't invoke the verifier, but it
> is still good to make the insn legal.

I agree. Fixing it.

Patch

diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index af5758151d0a..222d454b2ed4 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -8981,8 +8981,249 @@ static __init int test_bpf(void)
 	return err_cnt ? -EINVAL : 0;
 }
 
+struct tail_call_test {
+	const char *descr;
+	struct bpf_insn insns[MAX_INSNS];
+	int result;
+	int stack_depth;
+};
+
+/*
+ * Magic marker used in test snippets for tail calls below.
+ * The BPF_LD to R2 and the BPF_MOV to R3 carrying this immediate
+ * value are replaced with the proper values by the test runner.
+ */
+#define TAIL_CALL_MARKER 0x7a11ca11
+
+/* Special offset to indicate a NULL call target */
+#define TAIL_CALL_NULL 0x7fff
+
+#define TAIL_CALL(offset)			       \
+	BPF_LD_IMM64(R2, TAIL_CALL_MARKER),	       \
+	BPF_RAW_INSN(BPF_ALU | BPF_MOV | BPF_K, R3, 0, \
+		     offset, TAIL_CALL_MARKER),	       \
+	BPF_JMP_IMM(BPF_TAIL_CALL, 0, 0, 0)
+
+/*
+ * Tail call tests. Each test case may call any other test in the table,
+ * including itself, specified as a relative index offset from the calling
+ * test. The index TAIL_CALL_NULL can be used to specify a NULL target
+ * function to test the JIT error path.
+ */
+static struct tail_call_test tail_call_tests[] = {
+	{
+		"Tail call leaf",
+		.insns = {
+			BPF_ALU64_REG(BPF_MOV, R0, R1),
+			BPF_ALU64_IMM(BPF_ADD, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.result = 1,
+	},
+	{
+		"Tail call 2",
+		.insns = {
+			BPF_ALU64_IMM(BPF_ADD, R1, 2),
+			TAIL_CALL(-1),
+			BPF_ALU64_IMM(BPF_MOV, R0, -1),
+			BPF_EXIT_INSN(),
+		},
+		.result = 3,
+	},
+	{
+		"Tail call 3",
+		.insns = {
+			BPF_ALU64_IMM(BPF_ADD, R1, 3),
+			TAIL_CALL(-1),
+			BPF_ALU64_IMM(BPF_MOV, R0, -1),
+			BPF_EXIT_INSN(),
+		},
+		.result = 6,
+	},
+	{
+		"Tail call 4",
+		.insns = {
+			BPF_ALU64_IMM(BPF_ADD, R1, 4),
+			TAIL_CALL(-1),
+			BPF_ALU64_IMM(BPF_MOV, R0, -1),
+			BPF_EXIT_INSN(),
+		},
+		.result = 10,
+	},
+	{
+		"Tail call error path, max count reached",
+		.insns = {
+			BPF_ALU64_IMM(BPF_ADD, R1, 1),
+			BPF_ALU64_REG(BPF_MOV, R0, R1),
+			TAIL_CALL(0),
+			BPF_EXIT_INSN(),
+		},
+		.result = MAX_TAIL_CALL_CNT + 1,
+	},
+	{
+		"Tail call error path, NULL target",
+		.insns = {
+			BPF_ALU64_IMM(BPF_MOV, R0, -1),
+			TAIL_CALL(TAIL_CALL_NULL),
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.result = 1,
+	},
+	{
+		/* Must be the last test */
+		"Tail call error path, index out of range",
+		.insns = {
+			BPF_ALU64_IMM(BPF_MOV, R0, -1),
+			TAIL_CALL(1),    /* Index out of range */
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.result = 1,
+	},
+};
+
+static void __init destroy_tail_call_tests(struct bpf_array *progs)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++)
+		if (progs->ptrs[i])
+			bpf_prog_free(progs->ptrs[i]);
+	kfree(progs);
+}
+
+static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
+{
+	struct bpf_array *progs;
+	int ntests = ARRAY_SIZE(tail_call_tests);
+	int which, err;
+
+	/* Allocate the table of programs to be used for tail calls */
+	progs = kzalloc(sizeof(*progs) + (ntests + 1) * sizeof(progs->ptrs[0]),
+			GFP_KERNEL);
+	if (!progs)
+		goto out_nomem;
+
+	/* Create all eBPF programs and populate the table */
+	for (which = 0; which < ntests; which++) {
+		struct tail_call_test *test = &tail_call_tests[which];
+		struct bpf_prog *fp;
+		int len, i;
+
+		/* Compute the number of program instructions */
+		for (len = 0; len < MAX_INSNS; len++) {
+			struct bpf_insn *insn = &test->insns[len];
+
+			if (len < MAX_INSNS - 1 &&
+			    insn->code == (BPF_LD | BPF_DW | BPF_IMM))
+				len++;
+			if (insn->code == 0)
+				break;
+		}
+
+		/* Allocate and initialize the program */
+		fp = bpf_prog_alloc(bpf_prog_size(len), 0);
+		if (!fp)
+			goto out_nomem;
+
+		fp->len = len;
+		fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
+		fp->aux->stack_depth = test->stack_depth;
+		memcpy(fp->insnsi, test->insns, len * sizeof(struct bpf_insn));
+
+		/* Relocate runtime tail call offsets and addresses */
+		for (i = 0; i < len; i++) {
+			struct bpf_insn *insn = &fp->insnsi[i];
+			int target;
+
+			if (insn->imm != TAIL_CALL_MARKER)
+				continue;
+
+			switch (insn->code) {
+			case BPF_LD | BPF_DW | BPF_IMM:
+				if (insn->dst_reg == R2) {
+					insn[0].imm = (u32)(long)progs;
+					insn[1].imm = ((u64)(long)progs) >> 32;
+				}
+				break;
+
+			case BPF_ALU | BPF_MOV | BPF_K:
+			case BPF_ALU64 | BPF_MOV | BPF_K:
+				if (insn->off == TAIL_CALL_NULL)
+					target = ntests;
+				else
+					target = which + insn->off;
+				if (insn->dst_reg == R3)
+					insn->imm = target;
+				break;
+			}
+		}
+
+		fp = bpf_prog_select_runtime(fp, &err);
+		if (err)
+			goto out_err;
+
+		progs->ptrs[which] = fp;
+	}
+
+	/* The last entry contains a NULL program pointer */
+	progs->map.max_entries = ntests + 1;
+	*pprogs = progs;
+	return 0;
+
+out_nomem:
+	err = -ENOMEM;
+
+out_err:
+	if (progs)
+		destroy_tail_call_tests(progs);
+	return err;
+}
+
+static __init int test_tail_calls(struct bpf_array *progs)
+{
+	int i, err_cnt = 0, pass_cnt = 0;
+	int jit_cnt = 0, run_cnt = 0;
+
+	for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++) {
+		struct tail_call_test *test = &tail_call_tests[i];
+		struct bpf_prog *fp = progs->ptrs[i];
+		u64 duration;
+		int ret;
+
+		cond_resched();
+
+		pr_info("#%d %s ", i, test->descr);
+		if (!fp) {
+			err_cnt++;
+			continue;
+		}
+		pr_cont("jited:%u ", fp->jited);
+
+		run_cnt++;
+		if (fp->jited)
+			jit_cnt++;
+
+		ret = __run_one(fp, NULL, MAX_TESTRUNS, &duration);
+		if (ret == test->result) {
+			pr_cont("%lld PASS", duration);
+			pass_cnt++;
+		} else {
+			pr_cont("ret %d != %d FAIL", ret, test->result);
+			err_cnt++;
+		}
+	}
+
+	pr_info("%s: Summary: %d PASSED, %d FAILED, [%d/%d JIT'ed]\n",
+		__func__, pass_cnt, err_cnt, jit_cnt, run_cnt);
+
+	return err_cnt ? -EINVAL : 0;
+}
+
 static int __init test_bpf_init(void)
 {
+	struct bpf_array *progs = NULL;
 	int ret;
 
 	ret = prepare_bpf_tests();
@@ -8994,6 +9235,14 @@ static int __init test_bpf_init(void)
 	if (ret)
 		return ret;
 
+	ret = prepare_tail_call_tests(&progs);
+	if (ret)
+		return ret;
+	ret = test_tail_calls(progs);
+	destroy_tail_call_tests(progs);
+	if (ret)
+		return ret;
+
 	return test_skb_segment();
 }