@@ -18,25 +18,41 @@ static bool check_prog_load(int prog_fd, bool expect_err, const char *tag)
if (!ASSERT_GT(prog_fd, 0, tag))
return false;
}
+ if (prog_fd >= 0)
+ close(prog_fd);
return true;
}
+static struct {
+ /* strategically placed before others to avoid accidental modification by kernel */
+ char filler[1024];
+ char buf[1024];
+ /* strategically placed after buf[] to catch more accidental corruptions */
+ char reference[1024];
+} logs;
+static const struct bpf_insn *insns;
+static size_t insn_cnt;
+
+static int load_prog(struct bpf_prog_load_opts *opts, bool expect_load_error)
+{
+ int prog_fd;
+
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "log_prog",
+ "GPL", insns, insn_cnt, opts);
+ check_prog_load(prog_fd, expect_load_error, "prog_load");
+
+ return prog_fd;
+}
+
static void verif_log_subtest(const char *name, bool expect_load_error, int log_level)
{
LIBBPF_OPTS(bpf_prog_load_opts, opts);
- struct {
- /* strategically placed before others to avoid accidental modification by kernel */
- char filler[1024];
- char buf[1024];
- /* strategically placed after buf[] to catch more accidental corruptions */
- char reference[1024];
- } logs;
char *exp_log, prog_name[16], op_name[32];
struct test_log_buf *skel;
struct bpf_program *prog;
- const struct bpf_insn *insns;
- size_t insn_cnt, fixed_log_sz;
- int i, err, prog_fd;
+ size_t fixed_log_sz;
+ __u32 log_sz_actual_fixed, log_sz_actual_rolling;
+ int i, err, prog_fd, res;
skel = test_log_buf__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
@@ -61,11 +77,7 @@ static void verif_log_subtest(const char *name, bool expect_load_error, int log_
opts.log_buf = logs.reference;
opts.log_size = sizeof(logs.reference);
opts.log_level = log_level | 8 /* BPF_LOG_FIXED */;
- prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "log_fixed",
- "GPL", insns, insn_cnt, &opts);
- if (!check_prog_load(prog_fd, expect_load_error, "fixed_buf_prog_load"))
- goto cleanup;
- close(prog_fd);
+ load_prog(&opts, expect_load_error);
fixed_log_sz = strlen(logs.reference) + 1;
if (!ASSERT_GT(fixed_log_sz, 50, "fixed_log_sz"))
@@ -89,7 +101,7 @@ static void verif_log_subtest(const char *name, bool expect_load_error, int log_
opts.log_level = log_level | 8; /* fixed-length log */
opts.log_size = 25;
- prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "log_fixed50",
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "log_fixed25",
"GPL", insns, insn_cnt, &opts);
if (!ASSERT_EQ(prog_fd, -ENOSPC, "unexpected_log_fixed_prog_load_result")) {
if (prog_fd >= 0)
@@ -147,6 +159,54 @@ static void verif_log_subtest(const char *name, bool expect_load_error, int log_
}
}
+ /* (FIXED) get actual log size */
+ opts.log_buf = logs.buf;
+ opts.log_level = log_level | 8; /* BPF_LOG_FIXED */
+ opts.log_size = sizeof(logs.buf);
+ res = load_prog(&opts, expect_load_error);
+ ASSERT_NEQ(res, -ENOSPC, "prog_load_res_fixed");
+
+ log_sz_actual_fixed = opts.log_size_actual;
+ ASSERT_GT(log_sz_actual_fixed, 0, "log_sz_actual_fixed");
+
+ /* (ROLLING) get actual log size */
+ opts.log_buf = logs.buf;
+ opts.log_level = log_level;
+ opts.log_size = sizeof(logs.buf);
+ res = load_prog(&opts, expect_load_error);
+ ASSERT_NEQ(res, -ENOSPC, "prog_load_res_rolling");
+
+ log_sz_actual_rolling = opts.log_size_actual;
+ ASSERT_EQ(log_sz_actual_rolling, log_sz_actual_fixed, "log_sz_actual_eq");
+
+ /* (FIXED) expect -ENOSPC for one byte short log */
+ opts.log_buf = logs.buf;
+ opts.log_level = log_level | 8; /* BPF_LOG_FIXED */
+ opts.log_size = log_sz_actual_fixed - 1;
+ res = load_prog(&opts, true /* should fail */);
+ ASSERT_EQ(res, -ENOSPC, "prog_load_res_too_short_fixed");
+
+ /* (FIXED) expect *not* -ENOSPC with exact log_size_actual buffer */
+ opts.log_buf = logs.buf;
+ opts.log_level = log_level | 8; /* BPF_LOG_FIXED */
+ opts.log_size = log_sz_actual_fixed;
+ res = load_prog(&opts, expect_load_error);
+ ASSERT_NEQ(res, -ENOSPC, "prog_load_res_just_right_fixed");
+
+ /* (ROLLING) expect -ENOSPC for one byte short log */
+ opts.log_buf = logs.buf;
+ opts.log_level = log_level;
+ opts.log_size = log_sz_actual_rolling - 1;
+ res = load_prog(&opts, true /* should fail */);
+ ASSERT_EQ(res, -ENOSPC, "prog_load_res_too_short_rolling");
+
+ /* (ROLLING) expect *not* -ENOSPC with exact log_size_actual buffer */
+ opts.log_buf = logs.buf;
+ opts.log_level = log_level;
+ opts.log_size = log_sz_actual_rolling;
+ res = load_prog(&opts, expect_load_error);
+ ASSERT_NEQ(res, -ENOSPC, "prog_load_res_just_right_rolling");
+
cleanup:
test_log_buf__destroy(skel);
}
Add additional test cases validating that log_size_actual is consistent
between fixed and rolling log modes, and that log_size_actual can be used
*exactly* without causing -ENOSPC, while using just 1 byte shorter log
buffer would cause -ENOSPC.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
 .../selftests/bpf/prog_tests/verifier_log.c | 92 +++++++++++++++----
 1 file changed, 76 insertions(+), 16 deletions(-)