
[bpf-next,2/3] bpf/selftests: add bpf_get_task_stack retval bounds verifier test

Message ID 20210416025537.2352753-3-davemarchevsky@fb.com (mailing list archive)
State Superseded
Delegated to: BPF
Series bpf: refine retval for bpf_get_task_stack helper

Checks

Context Check Description
netdev/cover_letter success
netdev/fixes_present success
netdev/patch_count success
netdev/tree_selection success Clearly marked for bpf-next
netdev/subject_prefix success
netdev/cc_maintainers warning 7 maintainers not CCed: linux-kselftest@vger.kernel.org netdev@vger.kernel.org kpsingh@kernel.org andrii@kernel.org kafai@fb.com john.fastabend@gmail.com shuah@kernel.org
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success
netdev/module_param success Was 0 now: 0
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/verify_fixes success
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 46 lines checked
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/header_inline success

Commit Message

Dave Marchevsky April 16, 2021, 2:55 a.m. UTC
Add a bpf_iter test which feeds bpf_get_task_stack's return value into
bpf_seq_write after confirming it is positive. No attempt is made to
bound the value from above.

The program will fail to load if the verifier does not refine the
retval range based on the buffer size passed to bpf_get_task_stack.

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
---
 .../selftests/bpf/verifier/bpf_get_stack.c    | 43 +++++++++++++++++++
 1 file changed, 43 insertions(+)
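
For readers less fluent in raw insns, the test body corresponds roughly
to the following BPF C iterator program. This is an illustrative sketch
only: the program name, the BUF_SZ macro, and the static buffer are
stand-ins (the actual test uses a 48-byte array map value supplied via
fixup_map_array_48b), and none of this code appears in the patch.

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>

	char _license[] SEC("license") = "GPL";

	#define BUF_SZ 48	/* matches the size passed in R3 below */

	SEC("iter/task")
	int dump_task_stack(struct bpf_iter__task *ctx)
	{
		struct seq_file *seq = ctx->meta->seq;	/* the ctx->meta->seq load */
		struct task_struct *task = ctx->task;	/* the ctx->task load */
		static __u64 buf[BUF_SZ / 8];		/* stand-in for the map value */
		long ret;

		if (!task)
			return 0;

		ret = bpf_get_task_stack(task, buf, BUF_SZ, 0);
		if (ret <= 0)
			return 0;

		/* ret is only checked from below; the verifier must know its
		 * upper bound is BUF_SZ, or this variable-length write is
		 * rejected at load time. */
		bpf_seq_write(seq, buf, ret);
		return 0;
	}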

Comments

Song Liu April 16, 2021, 5:09 p.m. UTC | #1
> On Apr 15, 2021, at 7:55 PM, Dave Marchevsky <davemarchevsky@fb.com> wrote:
> 
> Add a bpf_iter test which feeds bpf_get_task_stack's return value into
> bpf_seq_write after confirming it is positive. No attempt is made to
> bound the value from above.
> 
> The program will fail to load if the verifier does not refine the
> retval range based on the buffer size passed to bpf_get_task_stack.
> 
> Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>

Acked-by: Song Liu <songliubraving@fb.com>

> [...]

Patch

diff --git a/tools/testing/selftests/bpf/verifier/bpf_get_stack.c b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c
index 69b048cf46d9..0e8299c043d4 100644
--- a/tools/testing/selftests/bpf/verifier/bpf_get_stack.c
+++ b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c
@@ -42,3 +42,46 @@ 
 	.result = ACCEPT,
 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
 },
+{
+	"bpf_get_task_stack return R0 range is refined",
+	.insns = {
+	BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+	BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_6, 0), // ctx->meta->seq
+	BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1, 8), // ctx->task
+	BPF_LD_MAP_FD(BPF_REG_1, 0), // fixup_map_array_48b
+	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+	BPF_MOV64_REG(BPF_REG_9, BPF_REG_0), // keep buf for seq_write
+	BPF_MOV64_IMM(BPF_REG_3, 48),
+	BPF_MOV64_IMM(BPF_REG_4, 0),
+	BPF_EMIT_CALL(BPF_FUNC_get_task_stack),
+	BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 0, 2),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_9),
+	BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+	BPF_EMIT_CALL(BPF_FUNC_seq_write),
+
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_TRACING,
+	.expected_attach_type = BPF_TRACE_ITER,
+	.kfunc = "task",
+	.runs = -1, // Don't run, just load
+	.fixup_map_array_48b = { 3 },
+},
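
For context, a sketch of the verifier's view of R0 across the key
instructions, assuming the retval refinement added earlier in this
series. This is an annotated summary of the reasoning, not actual
verifier log output:

	/*
	 * BPF_EMIT_CALL(BPF_FUNC_get_task_stack)
	 *     -> R0's signed max refined to 48 (the size passed in R3)
	 * BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 0, 2)
	 *     -> on the taken path: R0 in [1, 48]
	 * BPF_MOV64_REG(BPF_REG_3, BPF_REG_0)
	 *     -> seq_write's length argument is provably <= 48
	 * BPF_EMIT_CALL(BPF_FUNC_seq_write)
	 *     -> R2 points to the 48-byte map value and the length
	 *        fits, so the program loads (ACCEPT)
	 */

Without the refinement, the JSGT check only establishes a lower bound:
R0's upper bound after bpf_get_task_stack stays unknown, the length
passed to seq_write cannot be proven to fit the 48-byte buffer, and the
load is rejected.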