
[bpf-next] selftests/bpf: add --json-summary option to test_progs

Message ID 20230316063901.3619730-1-chantr4@gmail.com (mailing list archive)
State Superseded
Delegated to: BPF
Series [bpf-next] selftests/bpf: add --json-summary option to test_progs

Checks

Context Check Description
netdev/series_format success Single patches do not need cover letters
netdev/tree_selection success Clearly marked for bpf-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 20 this patch: 20
netdev/cc_maintainers warning 8 maintainers not CCed: song@kernel.org shuah@kernel.org sdf@google.com haoluo@google.com john.fastabend@gmail.com kpsingh@kernel.org jolsa@kernel.org linux-kselftest@vger.kernel.org
netdev/build_clang success Errors and warnings before: 18 this patch: 18
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 20 this patch: 20
netdev/checkpatch warning CHECK: Blank lines aren't necessary before a close brace '}' CHECK: Comparison to NULL could be written "!env->json" CHECK: spaces preferred around that '+' (ctx:VxV) CHECK: spaces preferred around that '/' (ctx:VxV) WARNING: Missing or malformed SPDX-License-Identifier tag in line 1 WARNING: added, moved or deleted file(s), does MAINTAINERS need updating? WARNING: adding a line without newline at end of file WARNING: line length of 100 exceeds 80 columns WARNING: line length of 82 exceeds 80 columns WARNING: line length of 83 exceeds 80 columns WARNING: line length of 84 exceeds 80 columns WARNING: line length of 85 exceeds 80 columns WARNING: line length of 88 exceeds 80 columns WARNING: line length of 90 exceeds 80 columns WARNING: line length of 99 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-5 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-7 success Logs for llvm-toolchain
bpf/vmtest-bpf-next-VM_Test-8 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-2 success Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-4 success Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-11 pending Logs for test_maps on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-16 pending Logs for test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-21 pending Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-34 pending Logs for test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for test_maps on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-12 success Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for test_maps on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-14 success Logs for test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-17 success Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-18 success Logs for test_progs on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-19 success Logs for test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-22 fail Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for test_progs_no_alu32 on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-24 success Logs for test_progs_no_alu32_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for test_progs_no_alu32_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-28 success Logs for test_progs_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-30 success Logs for test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-31 success Logs for test_progs_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-32 success Logs for test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-33 success Logs for test_verifier on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-35 success Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-36 success Logs for test_verifier on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-15 success Logs for test_progs on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-20 success Logs for test_progs_no_alu32 on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-25 success Logs for test_progs_no_alu32_parallel on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-29 success Logs for test_progs_parallel on aarch64 with llvm-16

Commit Message

Manu Bretelle March 16, 2023, 6:39 a.m. UTC
Currently, test_progs outputs all stdout/stderr as it runs, and when it
is done, prints a summary.

It is non-trivial for tooling to parse that output and extract meaningful
information from it.

This change adds a new option, `--json-summary`/`-J`, that lets the caller
specify a file where `test_progs{,-no_alu32}` can write a summary of the
run in a json format that can later be parsed by tooling.

Currently, it creates a summary section with successes/skipped/failures
followed by a list of failed tests/subtests.

A test contains the following fields:
- test_name: the name of the test
- test_number: the number of the test
- message: the log message that was printed by the test.
- failed: A boolean indicating whether the test failed or not. Currently
we only output failed tests, but in the future, successful tests could
be added.

A subtest contains the following fields:
- test_name: same as above
- test_number: same as above
- subtest_name: the name of the subtest
- subtest_number: the number of the subtest.
- message: the log message that was printed by the subtest.
- is_subtest: a boolean indicating that the entry is a subtest.
- failed: same as above but for the subtest

```
$ sudo ./test_progs -a $(grep -v '^#' ./DENYLIST.aarch64 | awk '{print
$1","}' | tr -d '\n') -j -J /tmp/test_progs.json
$ jq . < /tmp/test_progs.json | head -n 30
$ head -n 30 /tmp/test_progs.json
{
    "success": 29,
    "success_subtest": 23,
    "skipped": 3,
    "failed": 27,
    "results": [{
            "test_name": "bpf_cookie",
            "test_number": 10,
            "message": "test_bpf_cookie:PASS:skel_open 0 nsec\n",
            "failed": true
        },{
            "test_name": "bpf_cookie",
            "subtest_name": "multi_kprobe_link_api",
            "test_number": 10,
            "subtest_number": 2,
            "message": "kprobe_multi_link_api_subtest:PASS:load_kallsyms
0 nsec\nlibbpf: extern 'bpf_testmod_fentry_test1' (strong): not
resolved\nlibbpf: failed to load object 'kprobe_multi'\nlibbpf: failed
to load BPF skeleton 'kprobe_multi':
-3\nkprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected
error: -3\n",
            "is_subtest": true,
            "failed": true
        },{
            "test_name": "bpf_cookie",
            "subtest_name": "multi_kprobe_attach_api",
            "test_number": 10,
            "subtest_number": 3,
            "message": "libbpf: extern 'bpf_testmod_fentry_test1'
(strong): not resolved\nlibbpf: failed to load object
'kprobe_multi'\nlibbpf: failed to load BPF skeleton 'kprobe_multi':
-3\nkprobe_multi_attach_api_subtest:FAIL:fentry_raw_skel_load unexpected
error: -3\n",
            "is_subtest": true,
            "failed": true
        },{
            "test_name": "bpf_cookie",
            "subtest_name": "lsm",
            "test_number": 10,
```

The file can then be used to print a summary of the test run and list of
failing tests/subtests:

```
$ jq -r < /tmp/test_progs.json '"Success:
\(.success)/\(.success_subtest), Skipped: \(.skipped), Failed:
\(.failed)"'

Success: 29/23, Skipped: 3, Failed: 27
$ jq -r <
/tmp/test_progs.json  '.results[] | if .is_subtest then
"#\(.test_number)/\(.subtest_number) \(.test_name)/\(.subtest_name)"
else "#\(.test_number) \(.test_name)" end'
```
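
Not part of this patch, but as an illustration of consumption by tooling:
a CI wrapper could gate on the summary file with something like the
following sketch (the path is just the one from the examples above):

```
# Hypothetical gating step: succeed only when no test failed.
# `jq -e` sets its exit status from the truthiness of the result.
if ! jq -e '.failed == 0' /tmp/test_progs.json > /dev/null; then
	echo "test_progs reported $(jq .failed /tmp/test_progs.json) failure(s)"
	exit 1
fi
```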

Signed-off-by: Manu Bretelle <chantr4@gmail.com>
---
 tools/testing/selftests/bpf/Makefile      |  4 +-
 tools/testing/selftests/bpf/json_writer.c |  1 +
 tools/testing/selftests/bpf/json_writer.h |  1 +
 tools/testing/selftests/bpf/test_progs.c  | 83 +++++++++++++++++++++--
 tools/testing/selftests/bpf/test_progs.h  |  1 +
 5 files changed, 84 insertions(+), 6 deletions(-)
 create mode 120000 tools/testing/selftests/bpf/json_writer.c
 create mode 120000 tools/testing/selftests/bpf/json_writer.h

Comments

Eduard Zingerman March 16, 2023, 3:03 p.m. UTC | #1
On Wed, 2023-03-15 at 23:39 -0700, Manu Bretelle wrote:
> Currently, test_progs outputs all stdout/stderr as it runs, and when it
> is done, prints a summary.
> 
> It is non-trivial for tooling to parse that output and extract meaningful
> information from it.
> 
> This change adds a new option, `--json-summary`/`-J`, that lets the caller
> specify a file where `test_progs{,-no_alu32}` can write a summary of the
> run in a json format that can later be parsed by tooling.
> 
> Currently, it creates a summary section with successes/skipped/failures
> followed by a list of failed tests/subtests.
> 
> A test contains the following fields:
> - test_name: the name of the test
> - test_number: the number of the test
> - message: the log message that was printed by the test.
> - failed: A boolean indicating whether the test failed or not. Currently
> we only output failed tests, but in the future, successful tests could
> be added.
> 
> A subtest contains the following fields:
> - test_name: same as above
> - test_number: same as above
> - subtest_name: the name of the subtest
> - subtest_number: the number of the subtest.
> - message: the log message that was printed by the subtest.
> - is_subtest: a boolean indicating that the entry is a subtest.
> - failed: same as above but for the subtest
> 

Looks like a great feature!
A few nitpicks below.

> ```
> $ sudo ./test_progs -a $(grep -v '^#' ./DENYLIST.aarch64 | awk '{print
> $1","}' | tr -d '\n') -j -J /tmp/test_progs.json
> $ jq . < /tmp/test_progs.json | head -n 30
> $ head -n 30 /tmp/test_progs.json
> {
>     "success": 29,
>     "success_subtest": 23,
>     "skipped": 3,
>     "failed": 27,
>     "results": [{
>             "test_name": "bpf_cookie",
>             "test_number": 10,
>             "message": "test_bpf_cookie:PASS:skel_open 0 nsec\n",
>             "failed": true
>         },{
>             "test_name": "bpf_cookie",
>             "subtest_name": "multi_kprobe_link_api",
>             "test_number": 10,
>             "subtest_number": 2,
>             "message": "kprobe_multi_link_api_subtest:PASS:load_kallsyms
> 0 nsec\nlibbpf: extern 'bpf_testmod_fentry_test1' (strong): not
> resolved\nlibbpf: failed to load object 'kprobe_multi'\nlibbpf: failed
> to load BPF skeleton 'kprobe_multi':
> -3\nkprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected
> error: -3\n",
>             "is_subtest": true,
>             "failed": true
>         },{
>             "test_name": "bpf_cookie",
>             "subtest_name": "multi_kprobe_attach_api",
>             "test_number": 10,
>             "subtest_number": 3,
>             "message": "libbpf: extern 'bpf_testmod_fentry_test1'
> (strong): not resolved\nlibbpf: failed to load object
> 'kprobe_multi'\nlibbpf: failed to load BPF skeleton 'kprobe_multi':
> -3\nkprobe_multi_attach_api_subtest:FAIL:fentry_raw_skel_load unexpected
> error: -3\n",
>             "is_subtest": true,
>             "failed": true
>         },{
>             "test_name": "bpf_cookie",
>             "subtest_name": "lsm",
>             "test_number": 10,
> ```
> 
> The file can then be used to print a summary of the test run and list of
> failing tests/subtests:
> 
> ```
> $ jq -r < /tmp/test_progs.json '"Success:
> \(.success)/\(.success_subtest), Skipped: \(.skipped), Failed:
> \(.failed)"'
> 
> Success: 29/23, Skipped: 3, Failed: 27
> $ jq -r <
> /tmp/test_progs.json  '.results[] | if .is_subtest then
> "#\(.test_number)/\(.subtest_number) \(.test_name)/\(.subtest_name)"
> else "#\(.test_number) \(.test_name)" end'
> ```
> 
> Signed-off-by: Manu Bretelle <chantr4@gmail.com>
> ---
>  tools/testing/selftests/bpf/Makefile      |  4 +-
>  tools/testing/selftests/bpf/json_writer.c |  1 +
>  tools/testing/selftests/bpf/json_writer.h |  1 +
>  tools/testing/selftests/bpf/test_progs.c  | 83 +++++++++++++++++++++--
>  tools/testing/selftests/bpf/test_progs.h  |  1 +
>  5 files changed, 84 insertions(+), 6 deletions(-)
>  create mode 120000 tools/testing/selftests/bpf/json_writer.c
>  create mode 120000 tools/testing/selftests/bpf/json_writer.h
> 
> diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
> index 55811c448eb7..fc092582d16d 100644
> --- a/tools/testing/selftests/bpf/Makefile
> +++ b/tools/testing/selftests/bpf/Makefile
> @@ -234,6 +234,7 @@ $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(BPFOBJ)
>  CGROUP_HELPERS	:= $(OUTPUT)/cgroup_helpers.o
>  TESTING_HELPERS	:= $(OUTPUT)/testing_helpers.o
>  TRACE_HELPERS	:= $(OUTPUT)/trace_helpers.o
> +JSON_WRITER		:= $(OUTPUT)/json_writer.o
>  CAP_HELPERS	:= $(OUTPUT)/cap_helpers.o
>  
>  $(OUTPUT)/test_dev_cgroup: $(CGROUP_HELPERS) $(TESTING_HELPERS)
> @@ -559,7 +560,8 @@ TRUNNER_BPF_PROGS_DIR := progs
>  TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c	\
>  			 network_helpers.c testing_helpers.c		\
>  			 btf_helpers.c flow_dissector_load.h		\
> -			 cap_helpers.c test_loader.c xsk.c disasm.c
> +			 cap_helpers.c test_loader.c xsk.c disasm.c \
> +			 json_writer.c
>  TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko	\
>  		       $(OUTPUT)/liburandom_read.so			\
>  		       $(OUTPUT)/xdp_synproxy				\
> diff --git a/tools/testing/selftests/bpf/json_writer.c b/tools/testing/selftests/bpf/json_writer.c
> new file mode 120000
> index 000000000000..5effa31e2f39
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/json_writer.c
> @@ -0,0 +1 @@
> +../../../bpf/bpftool/json_writer.c
> \ No newline at end of file
> diff --git a/tools/testing/selftests/bpf/json_writer.h b/tools/testing/selftests/bpf/json_writer.h
> new file mode 120000
> index 000000000000..e0a264c26752
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/json_writer.h
> @@ -0,0 +1 @@
> +../../../bpf/bpftool/json_writer.h
> \ No newline at end of file
> diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
> index 6d5e3022c75f..cf56f6a4e1af 100644
> --- a/tools/testing/selftests/bpf/test_progs.c
> +++ b/tools/testing/selftests/bpf/test_progs.c
> @@ -18,6 +18,7 @@
>  #include <sys/socket.h>
>  #include <sys/un.h>
>  #include <bpf/btf.h>
> +#include "json_writer.h"
>  
>  static bool verbose(void)
>  {
> @@ -269,10 +270,22 @@ static void print_subtest_name(int test_num, int subtest_num,
>  	fprintf(env.stdout, "\n");
>  }
>  
> +static void jsonw_write_log_message(json_writer_t *w, char *log_buf, size_t log_cnt)
> +{
> +	// open_memstream (from stdio_hijack_init) ensures that log_bug is terminated by a
> +	// null byte. Yet in parralel mode, log_buf will be NULL if there is no message.
> +	if (log_cnt) {
> +		jsonw_string_field(w, "message", log_buf);
> +	} else {
> +		jsonw_string_field(w, "message", "");
> +	}
> +}
> +
>  static void dump_test_log(const struct prog_test_def *test,
>  			  const struct test_state *test_state,
>  			  bool skip_ok_subtests,
> -			  bool par_exec_result)
> +			  bool par_exec_result,
> +			  json_writer_t *w)
>  {
>  	bool test_failed = test_state->error_cnt > 0;
>  	bool force_log = test_state->force_log;
> @@ -296,6 +309,15 @@ static void dump_test_log(const struct prog_test_def *test,
>  	if (test_state->log_cnt && print_test)
>  		print_test_log(test_state->log_buf, test_state->log_cnt);
>  
> +	if (w && print_test) {
> +		jsonw_start_object(w);
> +		jsonw_string_field(w, "test_name", test->test_name);
> +		jsonw_uint_field(w, "test_number", test->test_num);
> +		jsonw_write_log_message(w, test_state->log_buf, test_state->log_cnt);
> +		jsonw_bool_field(w, "failed", true);

The `print_test` is used as a precondition, but it is defined as follows:

	bool print_test = verbose() || force_log || test_failed;

Maybe change the line above to:

		jsonw_bool_field(w, "failed", test_failed);

Or use `test_failed` as a precondition?

> +		jsonw_end_object(w);
> +	}
> +
>  	for (i = 0; i < test_state->subtest_num; i++) {
>  		subtest_state = &test_state->subtest_states[i];
>  		subtest_failed = subtest_state->error_cnt;
> @@ -314,6 +336,19 @@ static void dump_test_log(const struct prog_test_def *test,
>  				   test->test_name, subtest_state->name,
>  				   test_result(subtest_state->error_cnt,
>  					       subtest_state->skipped));
> +
> +		if (w && print_subtest) {
> +			jsonw_start_object(w);
> +			jsonw_string_field(w, "test_name", test->test_name);
> +			jsonw_string_field(w, "subtest_name", subtest_state->name);
> +			jsonw_uint_field(w, "test_number", test->test_num);
> +			jsonw_uint_field(w, "subtest_number", i+1);
> +			jsonw_write_log_message(w, subtest_state->log_buf, subtest_state->log_cnt);
> +			jsonw_bool_field(w, "is_subtest", true);
> +			jsonw_bool_field(w, "failed", true);
> +			jsonw_end_object(w);
> +		}
> +

Maybe organize failed subtests as a field of a top-level result?
E.g. as follows:

{
    "success": 295,
    "success_subtest": 1771,
    "skipped": 18,
    "failed": 3,
    "results": [
        {
            "test_name": "btf_tag",
            "test_number": 29,
            "message": "",
            "failed": true
            "subtests": [
                {
                    "test_name": "btf_tag",
                    "subtest_name": "btf_type_tag_percpu_mod1",
                    "test_number": 29,
                    "subtest_number": 6,
                    "message": "...",
                    "failed": true
                }
            ]
        }
    ]
}

>  	}
>  
>  	print_test_result(test, test_state);
> @@ -715,6 +750,7 @@ enum ARG_KEYS {
>  	ARG_TEST_NAME_GLOB_DENYLIST = 'd',
>  	ARG_NUM_WORKERS = 'j',
>  	ARG_DEBUG = -1,
> +	ARG_JSON_SUMMARY = 'J'
>  };
>  
>  static const struct argp_option opts[] = {
> @@ -740,6 +776,7 @@ static const struct argp_option opts[] = {
>  	  "Number of workers to run in parallel, default to number of cpus." },
>  	{ "debug", ARG_DEBUG, NULL, 0,
>  	  "print extra debug information for test_progs." },
> +	{ "json-summary", ARG_JSON_SUMMARY, "FILE", 0, "Write report in json format to this file."},
>  	{},
>  };
>  
> @@ -870,6 +907,13 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
>  	case ARG_DEBUG:
>  		env->debug = true;
>  		break;
> +	case ARG_JSON_SUMMARY:
> +		env->json = fopen(arg, "w");
> +		if (env->json == NULL) {
> +			perror("Failed to open json summary file");
> +			return -errno;
> +		}
> +		break;
>  	case ARGP_KEY_ARG:
>  		argp_usage(state);
>  		break;
> @@ -1017,7 +1061,7 @@ void crash_handler(int signum)
>  		stdio_restore();
>  	if (env.test) {
>  		env.test_state->error_cnt++;
> -		dump_test_log(env.test, env.test_state, true, false);
> +		dump_test_log(env.test, env.test_state, true, false, NULL);
>  	}
>  	if (env.worker_id != -1)
>  		fprintf(stderr, "[%d]: ", env.worker_id);
> @@ -1124,7 +1168,7 @@ static void run_one_test(int test_num)
>  
>  	stdio_restore();
>  
> -	dump_test_log(test, state, false, false);
> +	dump_test_log(test, state, false, false, NULL);
>  }
>  
>  struct dispatch_data {
> @@ -1283,7 +1327,7 @@ static void *dispatch_thread(void *ctx)
>  		} while (false);
>  
>  		pthread_mutex_lock(&stdout_output_lock);
> -		dump_test_log(test, state, false, true);
> +		dump_test_log(test, state, false, true, NULL);
>  		pthread_mutex_unlock(&stdout_output_lock);
>  	} /* while (true) */
>  error:
> @@ -1322,8 +1366,28 @@ static void calculate_summary_and_print_errors(struct test_env *env)
>  			fail_cnt++;
>  		else
>  			succ_cnt++;
> +
> +	}
> +
> +	json_writer_t *w = NULL;
> +
> +	if (env->json) {
> +		w = jsonw_new(env->json);
> +		if (!w)
> +			fprintf(env->stderr, "Failed to create new JSON stream.");
>  	}
>  
> +	if (w) {
> +		jsonw_pretty(w, 1);
> +		jsonw_start_object(w);
> +		jsonw_uint_field(w, "success", succ_cnt);
> +		jsonw_uint_field(w, "success_subtest", sub_succ_cnt);
> +		jsonw_uint_field(w, "skipped", skip_cnt);
> +		jsonw_uint_field(w, "failed", fail_cnt);
> +		jsonw_name(w, "results");
> +		jsonw_start_array(w);
> +
> +	}
>  	/*
>  	 * We only print error logs summary when there are failed tests and
>  	 * verbose mode is not enabled. Otherwise, results may be incosistent.
> @@ -1340,10 +1404,19 @@ static void calculate_summary_and_print_errors(struct test_env *env)
>  			if (!state->tested || !state->error_cnt)
>  				continue;
>  
> -			dump_test_log(test, state, true, true);
> +			dump_test_log(test, state, true, true, w);
>  		}
>  	}
>  
> +	if (w) {
> +		jsonw_end_array(w);
> +		jsonw_end_object(w);
> +		jsonw_destroy(&w);
> +	}
> +
> +	if (env->json)
> +		fclose(env->json);
> +
>  	printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
>  	       succ_cnt, sub_succ_cnt, skip_cnt, fail_cnt);
>  
> diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
> index 3cbf005747ed..4b06b8347cd4 100644
> --- a/tools/testing/selftests/bpf/test_progs.h
> +++ b/tools/testing/selftests/bpf/test_progs.h
> @@ -114,6 +114,7 @@ struct test_env {
>  	FILE *stdout;
>  	FILE *stderr;
>  	int nr_cpus;
> +	FILE *json;
>  
>  	int succ_cnt; /* successful tests */
>  	int sub_succ_cnt; /* successful sub-tests */
Manu Bretelle March 16, 2023, 4:38 p.m. UTC | #2
On Thu, Mar 16, 2023 at 05:03:01PM +0200, Eduard Zingerman wrote:
> On Wed, 2023-03-15 at 23:39 -0700, Manu Bretelle wrote:
> > Currently, test_progs outputs all stdout/stderr as it runs, and when it
> > is done, prints a summary.
> > 
> > It is non-trivial for tooling to parse that output and extract meaningful
> > information from it.
> > 
> > This change adds a new option, `--json-summary`/`-J`, that lets the caller
> > specify a file where `test_progs{,-no_alu32}` can write a summary of the
> > run in a json format that can later be parsed by tooling.
> > 
> > Currently, it creates a summary section with successes/skipped/failures
> > followed by a list of failed tests/subtests.
> > 
> > A test contains the following fields:
> > - test_name: the name of the test
> > - test_number: the number of the test
> > - message: the log message that was printed by the test.
> > - failed: A boolean indicating whether the test failed or not. Currently
> > we only output failed tests, but in the future, successful tests could
> > be added.
> > 
> > A subtest contains the following fields:
> > - test_name: same as above
> > - test_number: same as above
> > - subtest_name: the name of the subtest
> > - subtest_number: the number of the subtest.
> > - message: the log message that was printed by the subtest.
> > - is_subtest: a boolean indicating that the entry is a subtest.
> > - failed: same as above but for the subtest
> > 
> 
> Looks like a great feature!
> A few nitpicks below.
> 
> > ```
> > $ sudo ./test_progs -a $(grep -v '^#' ./DENYLIST.aarch64 | awk '{print
> > $1","}' | tr -d '\n') -j -J /tmp/test_progs.json
> > $ jq . < /tmp/test_progs.json | head -n 30
> > $ head -n 30 /tmp/test_progs.json
> > {
> >     "success": 29,
> >     "success_subtest": 23,
> >     "skipped": 3,
> >     "failed": 27,
> >     "results": [{
> >             "test_name": "bpf_cookie",
> >             "test_number": 10,
> >             "message": "test_bpf_cookie:PASS:skel_open 0 nsec\n",
> >             "failed": true
> >         },{
> >             "test_name": "bpf_cookie",
> >             "subtest_name": "multi_kprobe_link_api",
> >             "test_number": 10,
> >             "subtest_number": 2,
> >             "message": "kprobe_multi_link_api_subtest:PASS:load_kallsyms
> > 0 nsec\nlibbpf: extern 'bpf_testmod_fentry_test1' (strong): not
> > resolved\nlibbpf: failed to load object 'kprobe_multi'\nlibbpf: failed
> > to load BPF skeleton 'kprobe_multi':
> > -3\nkprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected
> > error: -3\n",
> >             "is_subtest": true,
> >             "failed": true
> >         },{
> >             "test_name": "bpf_cookie",
> >             "subtest_name": "multi_kprobe_attach_api",
> >             "test_number": 10,
> >             "subtest_number": 3,
> >             "message": "libbpf: extern 'bpf_testmod_fentry_test1'
> > (strong): not resolved\nlibbpf: failed to load object
> > 'kprobe_multi'\nlibbpf: failed to load BPF skeleton 'kprobe_multi':
> > -3\nkprobe_multi_attach_api_subtest:FAIL:fentry_raw_skel_load unexpected
> > error: -3\n",
> >             "is_subtest": true,
> >             "failed": true
> >         },{
> >             "test_name": "bpf_cookie",
> >             "subtest_name": "lsm",
> >             "test_number": 10,
> > ```
> > 
> > The file can then be used to print a summary of the test run and list of
> > failing tests/subtests:
> > 
> > ```
> > $ jq -r < /tmp/test_progs.json '"Success:
> > \(.success)/\(.success_subtest), Skipped: \(.skipped), Failed:
> > \(.failed)"'
> > 
> > Success: 29/23, Skipped: 3, Failed: 27
> > $ jq -r <
> > /tmp/test_progs.json  '.results[] | if .is_subtest then
> > "#\(.test_number)/\(.subtest_number) \(.test_name)/\(.subtest_name)"
> > else "#\(.test_number) \(.test_name)" end'
> > ```
> > 
> > Signed-off-by: Manu Bretelle <chantr4@gmail.com>
> > ---
> >  tools/testing/selftests/bpf/Makefile      |  4 +-
> >  tools/testing/selftests/bpf/json_writer.c |  1 +
> >  tools/testing/selftests/bpf/json_writer.h |  1 +
> >  tools/testing/selftests/bpf/test_progs.c  | 83 +++++++++++++++++++++--
> >  tools/testing/selftests/bpf/test_progs.h  |  1 +
> >  5 files changed, 84 insertions(+), 6 deletions(-)
> >  create mode 120000 tools/testing/selftests/bpf/json_writer.c
> >  create mode 120000 tools/testing/selftests/bpf/json_writer.h
> > 
> > diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
> > index 55811c448eb7..fc092582d16d 100644
> > --- a/tools/testing/selftests/bpf/Makefile
> > +++ b/tools/testing/selftests/bpf/Makefile
> > @@ -234,6 +234,7 @@ $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(BPFOBJ)
> >  CGROUP_HELPERS	:= $(OUTPUT)/cgroup_helpers.o
> >  TESTING_HELPERS	:= $(OUTPUT)/testing_helpers.o
> >  TRACE_HELPERS	:= $(OUTPUT)/trace_helpers.o
> > +JSON_WRITER		:= $(OUTPUT)/json_writer.o
> >  CAP_HELPERS	:= $(OUTPUT)/cap_helpers.o
> >  
> >  $(OUTPUT)/test_dev_cgroup: $(CGROUP_HELPERS) $(TESTING_HELPERS)
> > @@ -559,7 +560,8 @@ TRUNNER_BPF_PROGS_DIR := progs
> >  TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c	\
> >  			 network_helpers.c testing_helpers.c		\
> >  			 btf_helpers.c flow_dissector_load.h		\
> > -			 cap_helpers.c test_loader.c xsk.c disasm.c
> > +			 cap_helpers.c test_loader.c xsk.c disasm.c \
> > +			 json_writer.c
> >  TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko	\
> >  		       $(OUTPUT)/liburandom_read.so			\
> >  		       $(OUTPUT)/xdp_synproxy				\
> > diff --git a/tools/testing/selftests/bpf/json_writer.c b/tools/testing/selftests/bpf/json_writer.c
> > new file mode 120000
> > index 000000000000..5effa31e2f39
> > --- /dev/null
> > +++ b/tools/testing/selftests/bpf/json_writer.c
> > @@ -0,0 +1 @@
> > +../../../bpf/bpftool/json_writer.c
> > \ No newline at end of file
> > diff --git a/tools/testing/selftests/bpf/json_writer.h b/tools/testing/selftests/bpf/json_writer.h
> > new file mode 120000
> > index 000000000000..e0a264c26752
> > --- /dev/null
> > +++ b/tools/testing/selftests/bpf/json_writer.h
> > @@ -0,0 +1 @@
> > +../../../bpf/bpftool/json_writer.h
> > \ No newline at end of file
> > diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
> > index 6d5e3022c75f..cf56f6a4e1af 100644
> > --- a/tools/testing/selftests/bpf/test_progs.c
> > +++ b/tools/testing/selftests/bpf/test_progs.c
> > @@ -18,6 +18,7 @@
> >  #include <sys/socket.h>
> >  #include <sys/un.h>
> >  #include <bpf/btf.h>
> > +#include "json_writer.h"
> >  
> >  static bool verbose(void)
> >  {
> > @@ -269,10 +270,22 @@ static void print_subtest_name(int test_num, int subtest_num,
> >  	fprintf(env.stdout, "\n");
> >  }
> >  
> > +static void jsonw_write_log_message(json_writer_t *w, char *log_buf, size_t log_cnt)
> > +{
> > +	// open_memstream (from stdio_hijack_init) ensures that log_bug is terminated by a
> > +	// null byte. Yet in parralel mode, log_buf will be NULL if there is no message.
> > +	if (log_cnt) {
> > +		jsonw_string_field(w, "message", log_buf);
> > +	} else {
> > +		jsonw_string_field(w, "message", "");
> > +	}
> > +}
> > +
> >  static void dump_test_log(const struct prog_test_def *test,
> >  			  const struct test_state *test_state,
> >  			  bool skip_ok_subtests,
> > -			  bool par_exec_result)
> > +			  bool par_exec_result,
> > +			  json_writer_t *w)
> >  {
> >  	bool test_failed = test_state->error_cnt > 0;
> >  	bool force_log = test_state->force_log;
> > @@ -296,6 +309,15 @@ static void dump_test_log(const struct prog_test_def *test,
> >  	if (test_state->log_cnt && print_test)
> >  		print_test_log(test_state->log_buf, test_state->log_cnt);
> >  
> > +	if (w && print_test) {
> > +		jsonw_start_object(w);
> > +		jsonw_string_field(w, "test_name", test->test_name);
> > +		jsonw_uint_field(w, "test_number", test->test_num);
> > +		jsonw_write_log_message(w, test_state->log_buf, test_state->log_cnt);
> > +		jsonw_bool_field(w, "failed", true);
> 
> The `print_test` is used as a precondition, but it is defined as follows:
> 
> 	bool print_test = verbose() || force_log || test_failed;
> 
> Maybe change the line above to:
> 
> 		jsonw_bool_field(w, "failed", test_failed);
> 

Good point/catch. Thanks.

> Or use `test_failed` as a precondition?
> 
> > +		jsonw_end_object(w);
> > +	}
> > +
> >  	for (i = 0; i < test_state->subtest_num; i++) {
> >  		subtest_state = &test_state->subtest_states[i];
> >  		subtest_failed = subtest_state->error_cnt;
> > @@ -314,6 +336,19 @@ static void dump_test_log(const struct prog_test_def *test,
> >  				   test->test_name, subtest_state->name,
> >  				   test_result(subtest_state->error_cnt,
> >  					       subtest_state->skipped));
> > +
> > +		if (w && print_subtest) {
> > +			jsonw_start_object(w);
> > +			jsonw_string_field(w, "test_name", test->test_name);
> > +			jsonw_string_field(w, "subtest_name", subtest_state->name);
> > +			jsonw_uint_field(w, "test_number", test->test_num);
> > +			jsonw_uint_field(w, "subtest_number", i+1);
> > +			jsonw_write_log_message(w, subtest_state->log_buf, subtest_state->log_cnt);
> > +			jsonw_bool_field(w, "is_subtest", true);
> > +			jsonw_bool_field(w, "failed", true);
> > +			jsonw_end_object(w);
> > +		}
> > +
> 
> Maybe organize failed subtests as a field of a top-level result?
> E.g. as follows:
> 
> {
>     "success": 295,
>     "success_subtest": 1771,
>     "skipped": 18,
>     "failed": 3,
>     "results": [
>         {
>             "test_name": "btf_tag",
>             "test_number": 29,
>             "message": "",
>             "failed": true
>             "subtests": [
>                 {
>                     "test_name": "btf_tag",
>                     "subtest_name": "btf_type_tag_percpu_mod1",
>                     "test_number": 29,
>                     "subtest_number": 6,
>                     "message": "...",
>                     "failed": true
>                 }
>             ]
>         }
>     ]
> }

I was originally going to do a nested structure similar to this too
(minus the repeat of test_* entries for subtests). But while discussing this
offline with Andrii, a flatter structure seemed to be easier to parse/manage
with tools such as `jq`. I am also very probably missing the right
incantation for `jq`.

Finding whether a test has subtests (currently only adding failed ones,
but this could change in the future) would be easier (essentially checking
length(subtests)). But neither is it difficult to reconstruct using a
higher-level language.
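
For instance, a rough (untested) jq sketch that rebuilds a nested view
from the flat layout by grouping on test_number:

```
jq '.results
    | group_by(.test_number)
    | map({test_name: .[0].test_name,
           test_number: .[0].test_number,
           subtests: map(select(.is_subtest))})' /tmp/test_progs.json
```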

In terms of logical structure and maybe extensibility, this is more appropriate;
in terms of pragmatism, maybe less.

I don't have strong opinions and can see benefit for both.

> 
> >  	}
> >  
> >  	print_test_result(test, test_state);
> > @@ -715,6 +750,7 @@ enum ARG_KEYS {
> >  	ARG_TEST_NAME_GLOB_DENYLIST = 'd',
> >  	ARG_NUM_WORKERS = 'j',
> >  	ARG_DEBUG = -1,
> > +	ARG_JSON_SUMMARY = 'J'
> >  };
> >  
> >  static const struct argp_option opts[] = {
> > @@ -740,6 +776,7 @@ static const struct argp_option opts[] = {
> >  	  "Number of workers to run in parallel, default to number of cpus." },
> >  	{ "debug", ARG_DEBUG, NULL, 0,
> >  	  "print extra debug information for test_progs." },
> > +	{ "json-summary", ARG_JSON_SUMMARY, "FILE", 0, "Write report in json format to this file."},
> >  	{},
> >  };
> >  
> > @@ -870,6 +907,13 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
> >  	case ARG_DEBUG:
> >  		env->debug = true;
> >  		break;
> > +	case ARG_JSON_SUMMARY:
> > +		env->json = fopen(arg, "w");
> > +		if (env->json == NULL) {
> > +			perror("Failed to open json summary file");
> > +			return -errno;
> > +		}
> > +		break;
> >  	case ARGP_KEY_ARG:
> >  		argp_usage(state);
> >  		break;
> > @@ -1017,7 +1061,7 @@ void crash_handler(int signum)
> >  		stdio_restore();
> >  	if (env.test) {
> >  		env.test_state->error_cnt++;
> > -		dump_test_log(env.test, env.test_state, true, false);
> > +		dump_test_log(env.test, env.test_state, true, false, NULL);
> >  	}
> >  	if (env.worker_id != -1)
> >  		fprintf(stderr, "[%d]: ", env.worker_id);
> > @@ -1124,7 +1168,7 @@ static void run_one_test(int test_num)
> >  
> >  	stdio_restore();
> >  
> > -	dump_test_log(test, state, false, false);
> > +	dump_test_log(test, state, false, false, NULL);
> >  }
> >  
> >  struct dispatch_data {
> > @@ -1283,7 +1327,7 @@ static void *dispatch_thread(void *ctx)
> >  		} while (false);
> >  
> >  		pthread_mutex_lock(&stdout_output_lock);
> > -		dump_test_log(test, state, false, true);
> > +		dump_test_log(test, state, false, true, NULL);
> >  		pthread_mutex_unlock(&stdout_output_lock);
> >  	} /* while (true) */
> >  error:
> > @@ -1322,8 +1366,28 @@ static void calculate_summary_and_print_errors(struct test_env *env)
> >  			fail_cnt++;
> >  		else
> >  			succ_cnt++;
> > +
> > +	}
> > +
> > +	json_writer_t *w = NULL;
> > +
> > +	if (env->json) {
> > +		w = jsonw_new(env->json);
> > +		if (!w)
> > +			fprintf(env->stderr, "Failed to create new JSON stream.");
> >  	}
> >  
> > +	if (w) {
> > +		jsonw_pretty(w, 1);
> > +		jsonw_start_object(w);
> > +		jsonw_uint_field(w, "success", succ_cnt);
> > +		jsonw_uint_field(w, "success_subtest", sub_succ_cnt);
> > +		jsonw_uint_field(w, "skipped", skip_cnt);
> > +		jsonw_uint_field(w, "failed", fail_cnt);
> > +		jsonw_name(w, "results");
> > +		jsonw_start_array(w);
> > +
> > +	}
> >  	/*
> >  	 * We only print error logs summary when there are failed tests and
> >  	 * verbose mode is not enabled. Otherwise, results may be incosistent.
> > @@ -1340,10 +1404,19 @@ static void calculate_summary_and_print_errors(struct test_env *env)
> >  			if (!state->tested || !state->error_cnt)
> >  				continue;
> >  
> > -			dump_test_log(test, state, true, true);
> > +			dump_test_log(test, state, true, true, w);
> >  		}
> >  	}
> >  
> > +	if (w) {
> > +		jsonw_end_array(w);
> > +		jsonw_end_object(w);
> > +		jsonw_destroy(&w);
> > +	}
> > +
> > +	if (env->json)
> > +		fclose(env->json);
> > +
> >  	printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
> >  	       succ_cnt, sub_succ_cnt, skip_cnt, fail_cnt);
> >  
> > diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
> > index 3cbf005747ed..4b06b8347cd4 100644
> > --- a/tools/testing/selftests/bpf/test_progs.h
> > +++ b/tools/testing/selftests/bpf/test_progs.h
> > @@ -114,6 +114,7 @@ struct test_env {
> >  	FILE *stdout;
> >  	FILE *stderr;
> >  	int nr_cpus;
> > +	FILE *json;
> >  
> >  	int succ_cnt; /* successful tests */
> >  	int sub_succ_cnt; /* successful sub-tests */
>
Eduard Zingerman March 16, 2023, 7:18 p.m. UTC | #3
On Thu, 2023-03-16 at 09:38 -0700, Manu Bretelle wrote:
> [...]
>
> I was originally going to do a nested structure similar to this too
> (minus the repeat of test_* entries for subtests). But while discussing this
> offline with Andrii, a flatter structure seemed to be easier to parse/manage
> with tools such as `jq`. I am also very probably missing the right
> incantation for `jq`.
> 
> Finding whether a test has subtests (currently only adding failed ones,
> but this could change in the future) would be easier (essentially checking
> length(subtests)). But neither is it difficult to reconstruct using a
> higher-level language.
> 

`jq` query is a bit more complicated with nested structure, but not terribly so:

  $ cat query.jq
  .results | map([
                  .test_name,
                  (.subtests | map([([.test_name, .subtest_name] | join("/")) ]))
                 ])
           | flatten

  $ jq -f query.jq test.json
  [
    "test_global_funcs",
    "test_global_funcs/global_func16"
  ]

Test data for reference:

  $ cat test.json | sed -r 's/"[^"]{20,}"/"..."/g'
  {
      "success": 1,
      "success_subtest": 24,
      "skipped": 0,
      "failed": 1,
      "results": [{
              "test_name": "test_global_funcs",
              "test_number": 223,
              "message": "...",
              "failed": true,
              "subtests": [{
                      "test_name": "test_global_funcs",
                      "subtest_name": "global_func16",
                      "test_number": 223,
                      "subtest_number": 16,
                      "message": "...",
                      "is_subtest": true,
                      "failed": true
                  }
              ]
          }
      ]
  }

> In terms of logical structure and maybe extensibility, this is more appropriate;
> in terms of pragmatism, maybe less.
> 
> I don't have strong opinions and can see benefit for both.

idk, I don't have a strong opinion either.

> [...]
Andrii Nakryiko March 16, 2023, 11:21 p.m. UTC | #4
On Wed, Mar 15, 2023 at 11:39 PM Manu Bretelle <chantr4@gmail.com> wrote:
>
> Currently, test_progs outputs all stdout/stderr as it runs, and when it
> is done, prints a summary.
>
> It is non-trivial for tooling to parse that output and extract meaningful
> information from it.
>
> This change adds a new option, `--json-summary`/`-J`, that lets the caller
> specify a file where `test_progs{,-no_alu32}` can write a summary of the
> run in a json format that can later be parsed by tooling.
>
> Currently, it creates a summary section with successes/skipped/failures
> followed by a list of failed tests/subtests.
>
> A test contains the following fields:
> - test_name: the name of the test
> - test_number: the number of the test
> - message: the log message that was printed by the test.
> - failed: A boolean indicating whether the test failed or not. Currently
> we only output failed tests, but in the future, successful tests could
> be added.
>
> A subtest contains the following fields:
> - test_name: same as above
> - test_number: same as above
> - subtest_name: the name of the subtest
> - subtest_number: the number of the subtest.
> - message: the log message that was printed by the subtest.
> - is_subtest: a boolean indicating that the entry is a subtest.

"is_" prefix stands out compared to other bool field ("failed"),
should we call this just "subtest" for consistency?

> - failed: same as above but for the subtest
>
> ```
> $ sudo ./test_progs -a $(grep -v '^#' ./DENYLIST.aarch64 | awk '{print
> $1","}' | tr -d '\n') -j -J /tmp/test_progs.json
> $ jq . < /tmp/test_progs.json | head -n 30
> $ head -n 30 /tmp/test_progs.json
> {
>     "success": 29,
>     "success_subtest": 23,
>     "skipped": 3,
>     "failed": 27,
>     "results": [{
>             "test_name": "bpf_cookie",
>             "test_number": 10,
>             "message": "test_bpf_cookie:PASS:skel_open 0 nsec\n",
>             "failed": true
>         },{
>             "test_name": "bpf_cookie",
>             "subtest_name": "multi_kprobe_link_api",
>             "test_number": 10,
>             "subtest_number": 2,
>             "message": "kprobe_multi_link_api_subtest:PASS:load_kallsyms
> 0 nsec\nlibbpf: extern 'bpf_testmod_fentry_test1' (strong): not
> resolved\nlibbpf: failed to load object 'kprobe_multi'\nlibbpf: failed
> to load BPF skeleton 'kprobe_multi':
> -3\nkprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected
> error: -3\n",
>             "is_subtest": true,
>             "failed": true
>         },{
>             "test_name": "bpf_cookie",
>             "subtest_name": "multi_kprobe_attach_api",
>             "test_number": 10,
>             "subtest_number": 3,
>             "message": "libbpf: extern 'bpf_testmod_fentry_test1'
> (strong): not resolved\nlibbpf: failed to load object
> 'kprobe_multi'\nlibbpf: failed to load BPF skeleton 'kprobe_multi':
> -3\nkprobe_multi_attach_api_subtest:FAIL:fentry_raw_skel_load unexpected
> error: -3\n",
>             "is_subtest": true,
>             "failed": true
>         },{
>             "test_name": "bpf_cookie",
>             "subtest_name": "lsm",
>             "test_number": 10,
> ```
>
> The file can then be used to print a summary of the test run and list of
> failing tests/subtests:
>
> ```
> $ jq -r < /tmp/test_progs.json '"Success:
> \(.success)/\(.success_subtest), Skipped: \(.skipped), Failed:
> \(.failed)"'
>
> Success: 29/23, Skipped: 3, Failed: 27
> $ jq -r <
> /tmp/test_progs.json  '.results[] | if .is_subtest then
> "#\(.test_number)/\(.subtest_number) \(.test_name)/\(.subtest_name)"
> else "#\(.test_number) \(.test_name)" end'
> ```
>
> Signed-off-by: Manu Bretelle <chantr4@gmail.com>
> ---

Looks great, some nits below.

>  tools/testing/selftests/bpf/Makefile      |  4 +-
>  tools/testing/selftests/bpf/json_writer.c |  1 +
>  tools/testing/selftests/bpf/json_writer.h |  1 +
>  tools/testing/selftests/bpf/test_progs.c  | 83 +++++++++++++++++++++--
>  tools/testing/selftests/bpf/test_progs.h  |  1 +
>  5 files changed, 84 insertions(+), 6 deletions(-)
>  create mode 120000 tools/testing/selftests/bpf/json_writer.c
>  create mode 120000 tools/testing/selftests/bpf/json_writer.h
>

[...]

> @@ -269,10 +270,22 @@ static void print_subtest_name(int test_num, int subtest_num,
>         fprintf(env.stdout, "\n");
>  }
>
> +static void jsonw_write_log_message(json_writer_t *w, char *log_buf, size_t log_cnt)
> +{
> +       // open_memstream (from stdio_hijack_init) ensures that log_bug is terminated by a
> +       // null byte. Yet in parralel mode, log_buf will be NULL if there is no message.

please don't use C++-style comments, let's use /* */ consistently

also typo: parallel

> +       if (log_cnt) {
> +               jsonw_string_field(w, "message", log_buf);
> +       } else {
> +               jsonw_string_field(w, "message", "");
> +       }
> +}
> +

[...]

> @@ -1283,7 +1327,7 @@ static void *dispatch_thread(void *ctx)
>                 } while (false);
>
>                 pthread_mutex_lock(&stdout_output_lock);
> -               dump_test_log(test, state, false, true);
> +               dump_test_log(test, state, false, true, NULL);
>                 pthread_mutex_unlock(&stdout_output_lock);
>         } /* while (true) */
>  error:
> @@ -1322,8 +1366,28 @@ static void calculate_summary_and_print_errors(struct test_env *env)
>                         fail_cnt++;
>                 else
>                         succ_cnt++;
> +
> +       }
> +
> +       json_writer_t *w = NULL;

please declare variable at the top to follow C89 style

> +
> +       if (env->json) {
> +               w = jsonw_new(env->json);
> +               if (!w)
> +                       fprintf(env->stderr, "Failed to create new JSON stream.");
>         }
>
> +       if (w) {
> +               jsonw_pretty(w, 1);

true, it's bool

> +               jsonw_start_object(w);
> +               jsonw_uint_field(w, "success", succ_cnt);
> +               jsonw_uint_field(w, "success_subtest", sub_succ_cnt);
> +               jsonw_uint_field(w, "skipped", skip_cnt);
> +               jsonw_uint_field(w, "failed", fail_cnt);
> +               jsonw_name(w, "results");
> +               jsonw_start_array(w);
> +
> +       }

[...]
Andrii Nakryiko March 16, 2023, 11:23 p.m. UTC | #5
On Thu, Mar 16, 2023 at 12:18 PM Eduard Zingerman <eddyz87@gmail.com> wrote:
>
> On Thu, 2023-03-16 at 09:38 -0700, Manu Bretelle wrote:
> > [...]
> >
> > I was originally going to do a nested structure similar to this too
> > (minus the repeat of test_* entries for subtests). But while discussing this
> > offline with Andrii, a flatter structure seemed to be easier to parse/manage
> > with tools such as `jq`. I am also very probably missing the right
> > incantation for `jq`.
> >
> > Finding whether a test has subtests (currently only adding failed ones,
> > but this could change in the future) would be easier (essentially checking
> > length(subtests)). But neither is it difficult to reconstruct using a
> > higher-level language.
> >
>
> `jq` query is a bit more complicated with nested structure, but not terribly so:
>
>   $ cat query.jq
>   .results | map([
>                   .test_name,
>                   (.subtests | map([([.test_name, .subtest_name] | join("/")) ]))
>                  ])
>            | flatten

we should record this in the commit, if we go with nested structure :)
it's not "terribly more complicated", but not obvious either for
someone who uses jq very occasionally :)

>
>   $ jq -f query.jq test.json
>   [
>     "test_global_funcs",
>     "test_global_funcs/global_func16"
>   ]
>
> Test data for reference:
>
>   $ cat test.json | sed -r 's/"[^"]{20,}"/"..."/g'
>   {
>       "success": 1,
>       "success_subtest": 24,
>       "skipped": 0,
>       "failed": 1,
>       "results": [{
>               "test_name": "test_global_funcs",
>               "test_number": 223,
>               "message": "...",
>               "failed": true,
>               "subtests": [{
>                       "test_name": "test_global_funcs",
>                       "subtest_name": "global_func16",
>                       "test_number": 223,
>                       "subtest_number": 16,
>                       "message": "...",
>                       "is_subtest": true,
>                       "failed": true
>                   }
>               ]
>           }
>       ]
>   }
>
> > In terms of logical structure and maybe extensibility, this is more appropriate;
> > in terms of pragmatism, maybe less.
> >
> > I don't have strong opinions and can see benefit for both.
>
> idk, I don't have a strong opinion either.

me neither, flatter struct would be simple to work with either with jq
or hacky grepping, so I guess the question would be how much do we
lose by using flatter structure?

>
> > [...]
Eduard Zingerman March 16, 2023, 11:33 p.m. UTC | #6
On Thu, 2023-03-16 at 16:23 -0700, Andrii Nakryiko wrote:
> > [...]
> > 
> > > In terms of logical structure and maybe extensibility, this is more appropriate;
> > > in terms of pragmatism, maybe less.
> > > 
> > > I don't have strong opinions and can see benefit for both.
> > 
> > idk, I don't have a strong opinion either.
> 
> me neither, flatter struct would be simple to work with either with jq
> or hacky grepping, so I guess the question would be how much do we
> lose by using flatter structure?

Okay, okay, no one wants to read the jq manual, I get it :)

I assume that the current plan is to consume this output by some CI script
(and e.g. provide a foldable list of failed tests/sub-tests in the
final output). So, we should probably use whatever makes more sense
for those scripts. If you and Manu think that flat structure works
best -- so be it.
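
For illustration only, a rough sketch of such a step (shell + jq, reusing
the query from the commit message; the ::group::/::endgroup:: markers are
GitHub-Actions-style log folding and purely hypothetical here):

  sum=/tmp/test_progs.json
  echo "::group::$(jq -r '"Failed: \(.failed), Skipped: \(.skipped)"' "$sum")"
  jq -r '.results[] | if .is_subtest
         then "#\(.test_number)/\(.subtest_number) \(.test_name)/\(.subtest_name)"
         else "#\(.test_number) \(.test_name)" end' "$sum"
  echo "::endgroup::"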
Andrii Nakryiko March 17, 2023, 12:07 a.m. UTC | #7
On Thu, Mar 16, 2023 at 4:34 PM Eduard Zingerman <eddyz87@gmail.com> wrote:
>
> On Thu, 2023-03-16 at 16:23 -0700, Andrii Nakryiko wrote:
> > > [...]
> > >
> > > > In terms of logical structure and maybe extensibility, this is more appropriate;
> > > > in terms of pragmatism, maybe less.
> > > >
> > > > I don't have strong opinions and can see benefit for both.
> > >
> > > idk, I don't have a strong opinion either.
> >
> > me neither, flatter struct would be simple to work with either with jq
> > or hacky grepping, so I guess the question would be how much do we
> > lose by using flatter structure?
>
> Okay, okay, no one wants to read the jq manual, I get it :)

guilty ;)

>
> I assume that the current plan is to consume this output by some CI script
> (and e.g. provide a foldable list of failed tests/sub-tests in the
> final output). So, we should probably use whatever makes more sense
> for those scripts. If you and Manu think that flat structure works
> best -- so be it.

As I said, I don't care. The immediate need is to produce error
summary output from this json. If you are a power user of jq and can
help us achieve this with nested structure json, that would be great.
Code wins.
Manu Bretelle March 17, 2023, 12:24 a.m. UTC | #8
On Fri, Mar 17, 2023 at 01:33:56AM +0200, Eduard Zingerman wrote:
> On Thu, 2023-03-16 at 16:23 -0700, Andrii Nakryiko wrote:
> > > [...]
> > > 
> > > > In terms of logical structure and maybe extensibility, this is more appropriate;
> > > > in terms of pragmatism, maybe less.
> > > > 
> > > > I don't have strong opinions and can see benefit for both.
> > > 
> > > idk, I don't have a strong opinion either.
> > 
> > me neither, flatter struct would be simple to work with either with jq
> > or hacky grepping, so I guess the question would be how much do we
> > lose by using flatter structure?
> 
> Okay, okay, no one wants to read the jq manual, I get it :)
> 
> I assume that the current plan is to consume this output by some CI script
> (and e.g. provide a foldable list of failed tests/sub-tests in the
> final output).

Correct.

> So, we should probably use whatever makes more sense
> for those scripts. If you and Manu think that flat structure works
> best -- so be it.

Nested is the most logical representation of the data. Given the
one-liner you provided, let me play a bit more with it and see what it
would take to get minimal info out of it.
Manu Bretelle March 17, 2023, 12:40 a.m. UTC | #9
On Thu, Mar 16, 2023 at 04:21:52PM -0700, Andrii Nakryiko wrote:
> On Wed, Mar 15, 2023 at 11:39 PM Manu Bretelle <chantr4@gmail.com> wrote:
> >
> > Currently, test_progs outputs all stdout/stderr as it runs, and when it
> > is done, prints a summary.
> >
> > It is non-trivial for tooling to parse that output and extract meaningful
> > information from it.
> >
> > This change adds a new option, `--json-summary`/`-J`, that lets the caller
> > specify a file where `test_progs{,-no_alu32}` can write a summary of the
> > run in a json format that can later be parsed by tooling.
> >
> > Currently, it creates a summary section with successes/skipped/failures
> > followed by a list of failed tests/subtests.
> >
> > A test contains the following fields:
> > - test_name: the name of the test
> > - test_number: the number of the test
> > - message: the log message that was printed by the test.
> > - failed: A boolean indicating whether the test failed or not. Currently
> > we only output failed tests, but in the future, successful tests could
> > be added.
> >
> > A subtest contains the following fields:
> > - test_name: same as above
> > - test_number: same as above
> > - subtest_name: the name of the subtest
> > - subtest_number: the number of the subtest.
> > - message: the log message that was printed by the subtest.
> > - is_subtest: a boolean indicating that the entry is a subtest.
> 
> "is_" prefix stands out compared to other bool field ("failed"),
> should we call this just "subtest" for consistency?
> 

yes, I will give the nested representation a try and play a bit more
with jq. In any case, this will become `subtest[s]`.

> > - failed: same as above but for the subtest
> >
> > ```
> > $ sudo ./test_progs -a $(grep -v '^#' ./DENYLIST.aarch64 | awk '{print
> > $1","}' | tr -d '\n') -j -J /tmp/test_progs.json
> > $ jq . < /tmp/test_progs.json | head -n 30
> > $ head -n 30 /tmp/test_progs.json
> > {
> >     "success": 29,
> >     "success_subtest": 23,
> >     "skipped": 3,
> >     "failed": 27,
> >     "results": [{
> >             "test_name": "bpf_cookie",
> >             "test_number": 10,
> >             "message": "test_bpf_cookie:PASS:skel_open 0 nsec\n",
> >             "failed": true
> >         },{
> >             "test_name": "bpf_cookie",
> >             "subtest_name": "multi_kprobe_link_api",
> >             "test_number": 10,
> >             "subtest_number": 2,
> >             "message": "kprobe_multi_link_api_subtest:PASS:load_kallsyms
> > 0 nsec\nlibbpf: extern 'bpf_testmod_fentry_test1' (strong): not
> > resolved\nlibbpf: failed to load object 'kprobe_multi'\nlibbpf: failed
> > to load BPF skeleton 'kprobe_multi':
> > -3\nkprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected
> > error: -3\n",
> >             "is_subtest": true,
> >             "failed": true
> >         },{
> >             "test_name": "bpf_cookie",
> >             "subtest_name": "multi_kprobe_attach_api",
> >             "test_number": 10,
> >             "subtest_number": 3,
> >             "message": "libbpf: extern 'bpf_testmod_fentry_test1'
> > (strong): not resolved\nlibbpf: failed to load object
> > 'kprobe_multi'\nlibbpf: failed to load BPF skeleton 'kprobe_multi':
> > -3\nkprobe_multi_attach_api_subtest:FAIL:fentry_raw_skel_load unexpected
> > error: -3\n",
> >             "is_subtest": true,
> >             "failed": true
> >         },{
> >             "test_name": "bpf_cookie",
> >             "subtest_name": "lsm",
> >             "test_number": 10,
> > ```
> >
> > The file can then be used to print a summary of the test run and list of
> > failing tests/subtests:
> >
> > ```
> > $ jq -r < /tmp/test_progs.json '"Success:
> > \(.success)/\(.success_subtest), Skipped: \(.skipped), Failed:
> > \(.failed)"'
> >
> > Success: 29/23, Skipped: 3, Failed: 27
> > $ jq -r <
> > /tmp/test_progs.json  '.results[] | if .is_subtest then
> > "#\(.test_number)/\(.subtest_number) \(.test_name)/\(.subtest_name)"
> > else "#\(.test_number) \(.test_name)" end'
> > ```
> >
> > Signed-off-by: Manu Bretelle <chantr4@gmail.com>
> > ---
> 
> Looks great, some nits below.
> 
> >  tools/testing/selftests/bpf/Makefile      |  4 +-
> >  tools/testing/selftests/bpf/json_writer.c |  1 +
> >  tools/testing/selftests/bpf/json_writer.h |  1 +
> >  tools/testing/selftests/bpf/test_progs.c  | 83 +++++++++++++++++++++--
> >  tools/testing/selftests/bpf/test_progs.h  |  1 +
> >  5 files changed, 84 insertions(+), 6 deletions(-)
> >  create mode 120000 tools/testing/selftests/bpf/json_writer.c
> >  create mode 120000 tools/testing/selftests/bpf/json_writer.h
> >
> 
> [...]
> 
> > @@ -269,10 +270,22 @@ static void print_subtest_name(int test_num, int subtest_num,
> >         fprintf(env.stdout, "\n");
> >  }
> >
> > +static void jsonw_write_log_message(json_writer_t *w, char *log_buf, size_t log_cnt)
> > +{
> > +       // open_memstream (from stdio_hijack_init) ensures that log_bug is terminated by a
> > +       // null byte. Yet in parralel mode, log_buf will be NULL if there is no message.
> 
> please don't use C++-style comments, let's use /* */ consistently
> 
> also typo: parallel
> 
ack

> > +       if (log_cnt) {
> > +               jsonw_string_field(w, "message", log_buf);
> > +       } else {
> > +               jsonw_string_field(w, "message", "");
> > +       }
> > +}
> > +
> 
> [...]
> 
> > @@ -1283,7 +1327,7 @@ static void *dispatch_thread(void *ctx)
> >                 } while (false);
> >
> >                 pthread_mutex_lock(&stdout_output_lock);
> > -               dump_test_log(test, state, false, true);
> > +               dump_test_log(test, state, false, true, NULL);
> >                 pthread_mutex_unlock(&stdout_output_lock);
> >         } /* while (true) */
> >  error:
> > @@ -1322,8 +1366,28 @@ static void calculate_summary_and_print_errors(struct test_env *env)
> >                         fail_cnt++;
> >                 else
> >                         succ_cnt++;
> > +
> > +       }
> > +
> > +       json_writer_t *w = NULL;
> 
> please declare the variable at the top to follow C89 style
> 
ack
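
i.e. hoisted up with the other locals at the top of
calculate_summary_and_print_errors(), something like:

```
	int succ_cnt = 0, sub_succ_cnt = 0, skip_cnt = 0, fail_cnt = 0;
	json_writer_t *w = NULL;
```
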
> > +
> > +       if (env->json) {
> > +               w = jsonw_new(env->json);
> > +               if (!w)
> > +                       fprintf(env->stderr, "Failed to create new JSON stream.");
> >         }
> >
> > +       if (w) {
> > +               jsonw_pretty(w, 1);
> 
> true, it's a bool
> 

actually, given that this is not printed to stdout, and jq is widely
available... there is probably no need to pretty-print the output.
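
anyone who wants a human-readable dump can pipe the file through jq's
identity filter, e.g. "jq . /tmp/test_progs.json".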

> > +               jsonw_start_object(w);
> > +               jsonw_uint_field(w, "success", succ_cnt);
> > +               jsonw_uint_field(w, "success_subtest", sub_succ_cnt);
> > +               jsonw_uint_field(w, "skipped", skip_cnt);
> > +               jsonw_uint_field(w, "failed", fail_cnt);
> > +               jsonw_name(w, "results");
> > +               jsonw_start_array(w);
> > +
> > +       }
> 
> [...]

Patch

diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 55811c448eb7..fc092582d16d 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -234,6 +234,7 @@  $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(BPFOBJ)
 CGROUP_HELPERS	:= $(OUTPUT)/cgroup_helpers.o
 TESTING_HELPERS	:= $(OUTPUT)/testing_helpers.o
 TRACE_HELPERS	:= $(OUTPUT)/trace_helpers.o
+JSON_WRITER		:= $(OUTPUT)/json_writer.o
 CAP_HELPERS	:= $(OUTPUT)/cap_helpers.o
 
 $(OUTPUT)/test_dev_cgroup: $(CGROUP_HELPERS) $(TESTING_HELPERS)
@@ -559,7 +560,8 @@  TRUNNER_BPF_PROGS_DIR := progs
 TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c	\
 			 network_helpers.c testing_helpers.c		\
 			 btf_helpers.c flow_dissector_load.h		\
-			 cap_helpers.c test_loader.c xsk.c disasm.c
+			 cap_helpers.c test_loader.c xsk.c disasm.c \
+			 json_writer.c
 TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko	\
 		       $(OUTPUT)/liburandom_read.so			\
 		       $(OUTPUT)/xdp_synproxy				\
diff --git a/tools/testing/selftests/bpf/json_writer.c b/tools/testing/selftests/bpf/json_writer.c
new file mode 120000
index 000000000000..5effa31e2f39
--- /dev/null
+++ b/tools/testing/selftests/bpf/json_writer.c
@@ -0,0 +1 @@ 
+../../../bpf/bpftool/json_writer.c
\ No newline at end of file
diff --git a/tools/testing/selftests/bpf/json_writer.h b/tools/testing/selftests/bpf/json_writer.h
new file mode 120000
index 000000000000..e0a264c26752
--- /dev/null
+++ b/tools/testing/selftests/bpf/json_writer.h
@@ -0,0 +1 @@ 
+../../../bpf/bpftool/json_writer.h
\ No newline at end of file
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 6d5e3022c75f..cf56f6a4e1af 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -18,6 +18,7 @@ 
 #include <sys/socket.h>
 #include <sys/un.h>
 #include <bpf/btf.h>
+#include "json_writer.h"
 
 static bool verbose(void)
 {
@@ -269,10 +270,22 @@  static void print_subtest_name(int test_num, int subtest_num,
 	fprintf(env.stdout, "\n");
 }
 
+static void jsonw_write_log_message(json_writer_t *w, char *log_buf, size_t log_cnt)
+{
+	// open_memstream (from stdio_hijack_init) ensures that log_buf is terminated by a
+	// null byte. Yet in parralel mode, log_buf will be NULL if there is no message.
+	if (log_cnt) {
+		jsonw_string_field(w, "message", log_buf);
+	} else {
+		jsonw_string_field(w, "message", "");
+	}
+}
+
 static void dump_test_log(const struct prog_test_def *test,
 			  const struct test_state *test_state,
 			  bool skip_ok_subtests,
-			  bool par_exec_result)
+			  bool par_exec_result,
+			  json_writer_t *w)
 {
 	bool test_failed = test_state->error_cnt > 0;
 	bool force_log = test_state->force_log;
@@ -296,6 +309,15 @@  static void dump_test_log(const struct prog_test_def *test,
 	if (test_state->log_cnt && print_test)
 		print_test_log(test_state->log_buf, test_state->log_cnt);
 
+	if (w && print_test) {
+		jsonw_start_object(w);
+		jsonw_string_field(w, "test_name", test->test_name);
+		jsonw_uint_field(w, "test_number", test->test_num);
+		jsonw_write_log_message(w, test_state->log_buf, test_state->log_cnt);
+		jsonw_bool_field(w, "failed", true);
+		jsonw_end_object(w);
+	}
+
 	for (i = 0; i < test_state->subtest_num; i++) {
 		subtest_state = &test_state->subtest_states[i];
 		subtest_failed = subtest_state->error_cnt;
@@ -314,6 +336,19 @@  static void dump_test_log(const struct prog_test_def *test,
 				   test->test_name, subtest_state->name,
 				   test_result(subtest_state->error_cnt,
 					       subtest_state->skipped));
+
+		if (w && print_subtest) {
+			jsonw_start_object(w);
+			jsonw_string_field(w, "test_name", test->test_name);
+			jsonw_string_field(w, "subtest_name", subtest_state->name);
+			jsonw_uint_field(w, "test_number", test->test_num);
+			jsonw_uint_field(w, "subtest_number", i+1);
+			jsonw_write_log_message(w, subtest_state->log_buf, subtest_state->log_cnt);
+			jsonw_bool_field(w, "is_subtest", true);
+			jsonw_bool_field(w, "failed", true);
+			jsonw_end_object(w);
+		}
+
 	}
 
 	print_test_result(test, test_state);
@@ -715,6 +750,7 @@  enum ARG_KEYS {
 	ARG_TEST_NAME_GLOB_DENYLIST = 'd',
 	ARG_NUM_WORKERS = 'j',
 	ARG_DEBUG = -1,
+	ARG_JSON_SUMMARY = 'J'
 };
 
 static const struct argp_option opts[] = {
@@ -740,6 +776,7 @@  static const struct argp_option opts[] = {
 	  "Number of workers to run in parallel, default to number of cpus." },
 	{ "debug", ARG_DEBUG, NULL, 0,
 	  "print extra debug information for test_progs." },
+	{ "json-summary", ARG_JSON_SUMMARY, "FILE", 0, "Write report in json format to this file."},
 	{},
 };
 
@@ -870,6 +907,13 @@  static error_t parse_arg(int key, char *arg, struct argp_state *state)
 	case ARG_DEBUG:
 		env->debug = true;
 		break;
+	case ARG_JSON_SUMMARY:
+		env->json = fopen(arg, "w");
+		if (env->json == NULL) {
+			perror("Failed to open json summary file");
+			return -errno;
+		}
+		break;
 	case ARGP_KEY_ARG:
 		argp_usage(state);
 		break;
@@ -1017,7 +1061,7 @@  void crash_handler(int signum)
 		stdio_restore();
 	if (env.test) {
 		env.test_state->error_cnt++;
-		dump_test_log(env.test, env.test_state, true, false);
+		dump_test_log(env.test, env.test_state, true, false, NULL);
 	}
 	if (env.worker_id != -1)
 		fprintf(stderr, "[%d]: ", env.worker_id);
@@ -1124,7 +1168,7 @@  static void run_one_test(int test_num)
 
 	stdio_restore();
 
-	dump_test_log(test, state, false, false);
+	dump_test_log(test, state, false, false, NULL);
 }
 
 struct dispatch_data {
@@ -1283,7 +1327,7 @@  static void *dispatch_thread(void *ctx)
 		} while (false);
 
 		pthread_mutex_lock(&stdout_output_lock);
-		dump_test_log(test, state, false, true);
+		dump_test_log(test, state, false, true, NULL);
 		pthread_mutex_unlock(&stdout_output_lock);
 	} /* while (true) */
 error:
@@ -1322,8 +1366,28 @@  static void calculate_summary_and_print_errors(struct test_env *env)
 			fail_cnt++;
 		else
 			succ_cnt++;
+
+	}
+
+	json_writer_t *w = NULL;
+
+	if (env->json) {
+		w = jsonw_new(env->json);
+		if (!w)
+			fprintf(env->stderr, "Failed to create new JSON stream.");
 	}
 
+	if (w) {
+		jsonw_pretty(w, 1);
+		jsonw_start_object(w);
+		jsonw_uint_field(w, "success", succ_cnt);
+		jsonw_uint_field(w, "success_subtest", sub_succ_cnt);
+		jsonw_uint_field(w, "skipped", skip_cnt);
+		jsonw_uint_field(w, "failed", fail_cnt);
+		jsonw_name(w, "results");
+		jsonw_start_array(w);
+
+	}
 	/*
 	 * We only print error logs summary when there are failed tests and
 	 * verbose mode is not enabled. Otherwise, results may be inconsistent.
@@ -1340,10 +1404,19 @@  static void calculate_summary_and_print_errors(struct test_env *env)
 			if (!state->tested || !state->error_cnt)
 				continue;
 
-			dump_test_log(test, state, true, true);
+			dump_test_log(test, state, true, true, w);
 		}
 	}
 
+	if (w) {
+		jsonw_end_array(w);
+		jsonw_end_object(w);
+		jsonw_destroy(&w);
+	}
+
+	if (env->json)
+		fclose(env->json);
+
 	printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
 	       succ_cnt, sub_succ_cnt, skip_cnt, fail_cnt);
 
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index 3cbf005747ed..4b06b8347cd4 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -114,6 +114,7 @@  struct test_env {
 	FILE *stdout;
 	FILE *stderr;
 	int nr_cpus;
+	FILE *json;
 
 	int succ_cnt; /* successful tests */
 	int sub_succ_cnt; /* successful sub-tests */