@@ -113,6 +113,8 @@ static void test_event_introspection(void) {}
static void test_event_counter_config(void) {}
static void test_basic_event_count(void) {}
static void test_mem_access(void) {}
+static void test_chained_counters(void) {}
+static void test_chained_sw_incr(void) {}
#elif defined(__aarch64__)
#define ID_AA64DFR0_PERFMON_SHIFT 8
@@ -459,6 +461,126 @@ static void test_mem_access(void)
read_sysreg(pmovsclr_el0));
}
+static void test_chained_counters(void)
+{
+	uint32_t events[] = { 0x11 /* CPU_CYCLES */, 0x1E /* CHAIN */};
+
+	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
+		return;
+
+	pmu_reset();
+
+	write_regn(pmevtyper, 0, events[0] | PMEVTYPER_EXCLUDE_EL0);
+	write_regn(pmevtyper, 1, events[1] | PMEVTYPER_EXCLUDE_EL0);
+	/* enable counters #0 and #1 */
+	write_sysreg_s(0x3, PMCNTENSET_EL0);
+	/* preset counter #0 at 0xFFFFFFF0 */
+	write_regn(pmevcntr, 0, 0xFFFFFFF0);
+
+	precise_instrs_loop(22, pmu.pmcr_ro | PMU_PMCR_E);
+
+	report(read_regn(pmevcntr, 1) == 1, "CHAIN counter #1 incremented");
+	report(!read_sysreg(pmovsclr_el0), "check no overflow is recorded");
+
+	/* test 64b overflow */
+
+	pmu_reset();
+	write_sysreg_s(0x3, PMCNTENSET_EL0);
+
+	write_regn(pmevcntr, 0, 0xFFFFFFF0);
+	write_regn(pmevcntr, 1, 0x1);
+	precise_instrs_loop(22, pmu.pmcr_ro | PMU_PMCR_E);
+	report_info("overflow reg = 0x%lx", read_sysreg(pmovsclr_el0));
+	report(read_regn(pmevcntr, 1) == 2, "CHAIN counter #1 incremented");
+	report(!read_sysreg(pmovsclr_el0), "check no overflow is recorded");
+
+	write_regn(pmevcntr, 0, 0xFFFFFFF0);
+	write_regn(pmevcntr, 1, 0xFFFFFFFF);
+
+	precise_instrs_loop(22, pmu.pmcr_ro | PMU_PMCR_E);
+	report_info("overflow reg = 0x%lx", read_sysreg(pmovsclr_el0));
+	report(!read_regn(pmevcntr, 1), "CHAIN counter #1 wrapped");
+	report(read_sysreg(pmovsclr_el0) == 0x2,
+		"check overflow is recorded on the CHAIN counter");
+}
+
+static void test_chained_sw_incr(void)
+{
+	uint32_t events[] = { 0x0 /* SW_INCR */, 0x0 /* SW_INCR */};
+	int i;
+
+	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
+		return;
+
+	pmu_reset();
+
+	write_regn(pmevtyper, 0, events[0] | PMEVTYPER_EXCLUDE_EL0);
+	write_regn(pmevtyper, 1, events[1] | PMEVTYPER_EXCLUDE_EL0);
+	/* enable counters #0 and #1 */
+	write_sysreg_s(0x3, PMCNTENSET_EL0);
+
+	/* preset counter #0 at 0xFFFFFFF0 */
+	write_regn(pmevcntr, 0, 0xFFFFFFF0);
+
+	for (i = 0; i < 100; i++)
+		write_sysreg(0x1, pmswinc_el0);
+
+	report_info("SW_INCR counter #0 has value %ld", read_regn(pmevcntr, 0));
+	report(read_regn(pmevcntr, 0) == 0xFFFFFFF0,
+		"PMSWINC does not increment if PMCR.E is unset");
+
+	pmu_reset();
+
+	write_regn(pmevcntr, 0, 0xFFFFFFF0);
+	write_sysreg_s(0x3, PMCNTENSET_EL0);
+	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
+
+	for (i = 0; i < 100; i++)
+		write_sysreg(0x3, pmswinc_el0);
+
+	report(read_regn(pmevcntr, 0) == 84, "counter #0 after 100 SW_INCR");
+	report(read_regn(pmevcntr, 1) == 100,
+		"counter #1 after 100 SW_INCR");
+	report_info("counter values after 100 SW_INCR #0=%ld #1=%ld",
+		    read_regn(pmevcntr, 0), read_regn(pmevcntr, 1));
+	report(read_sysreg(pmovsclr_el0) == 0x1,
+		"overflow on counter #0 after 100 SW_INCR");
+
+	/* 64b SW_INCR */
+	pmu_reset();
+
+	events[1] = 0x1E /* CHAIN */;
+	write_regn(pmevtyper, 1, events[1] | PMEVTYPER_EXCLUDE_EL0);
+	write_regn(pmevcntr, 0, 0xFFFFFFF0);
+	write_sysreg_s(0x3, PMCNTENSET_EL0);
+	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
+	for (i = 0; i < 100; i++)
+		write_sysreg(0x3, pmswinc_el0);
+
+	report(!read_sysreg(pmovsclr_el0) && (read_regn(pmevcntr, 1) == 1),
+		"no overflow and CHAIN counter incremented after 100 SW_INCR/CHAIN");
+	report_info("overflow=0x%lx, #0=%ld #1=%ld", read_sysreg(pmovsclr_el0),
+		    read_regn(pmevcntr, 0), read_regn(pmevcntr, 1));
+
+	/* 64b SW_INCR and overflow on CHAIN counter */
+	pmu_reset();
+
+	write_regn(pmevtyper, 1, events[1] | PMEVTYPER_EXCLUDE_EL0);
+	write_regn(pmevcntr, 0, 0xFFFFFFF0);
+	write_regn(pmevcntr, 1, 0xFFFFFFFF);
+	write_sysreg_s(0x3, PMCNTENSET_EL0);
+	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
+	for (i = 0; i < 100; i++)
+		write_sysreg(0x3, pmswinc_el0);
+
+	report((read_sysreg(pmovsclr_el0) == 0x2) &&
+		(read_regn(pmevcntr, 1) == 0) &&
+		(read_regn(pmevcntr, 0) == 84),
+		"overflow on CHAIN counter after 100 SW_INCR/CHAIN");
+	report_info("overflow=0x%lx, #0=%ld #1=%ld", read_sysreg(pmovsclr_el0),
+		    read_regn(pmevcntr, 0), read_regn(pmevcntr, 1));
+}
+
#endif
/*
@@ -658,6 +780,12 @@ int main(int argc, char *argv[])
} else if (strcmp(argv[1], "mem-access") == 0) {
report_prefix_push(argv[1]);
test_mem_access();
+ } else if (strcmp(argv[1], "chained-counters") == 0) {
+ report_prefix_push(argv[1]);
+ test_chained_counters();
+ } else if (strcmp(argv[1], "chained-sw-incr") == 0) {
+ report_prefix_push(argv[1]);
+ test_chained_sw_incr();
} else {
report_abort("Unknown sub-test '%s'", argv[1]);
}
@@ -90,6 +90,18 @@ groups = pmu
arch = arm64
extra_params = -append 'mem-access'
+[pmu-chained-counters]
+file = pmu.flat
+groups = pmu
+arch = arm64
+extra_params = -append 'chained-counters'
+
+[pmu-chained-sw-incr]
+file = pmu.flat
+groups = pmu
+arch = arm64
+extra_params = -append 'chained-sw-incr'
+
# Test PMU support (TCG) with -icount IPC=1
#[pmu-tcg-icount-1]
#file = pmu.flat
Add 2 tests exercising chained counters. The first one uses CPU_CYCLES and the second one uses SW_INCR. Signed-off-by: Eric Auger <eric.auger@redhat.com> --- arm/pmu.c | 128 ++++++++++++++++++++++++++++++++++++++++++++++ arm/unittests.cfg | 12 +++++ 2 files changed, 140 insertions(+)