@@ -115,6 +115,7 @@ static void test_basic_event_count(void) {}
static void test_mem_access(void) {}
static void test_chained_counters(void) {}
static void test_chained_sw_incr(void) {}
+static void test_chain_promotion(void) {}
#elif defined(__aarch64__)
#define ID_AA64DFR0_PERFMON_SHIFT 8
@@ -581,6 +582,137 @@ static void test_chained_sw_incr(void)
read_regn(pmevcntr, 0), read_regn(pmevcntr, 1));
}
+static void test_chain_promotion(void)
+{
+	uint32_t events[] = { 0x13 /* MEM_ACCESS */, 0x1E /* CHAIN */};
+	void *addr = malloc(PAGE_SIZE); /* NOTE(review): unchecked and never freed - confirm harness policy */
+
+	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
+		return;
+
+	/* Only enable CHAIN counter */
+	pmu_reset();
+	write_regn(pmevtyper, 0, events[0] | PMEVTYPER_EXCLUDE_EL0);
+	write_regn(pmevtyper, 1, events[1] | PMEVTYPER_EXCLUDE_EL0);
+	write_sysreg_s(0x2, PMCNTENSET_EL0);
+	isb();
+
+	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
+	report(!read_regn(pmevcntr, 0),
+		"chain counter not counting if even counter is disabled");
+
+	/* Only enable even counter */
+	pmu_reset();
+	write_regn(pmevcntr, 0, 0xFFFFFFF0);
+	write_sysreg_s(0x1, PMCNTENSET_EL0);
+	isb();
+
+	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
+	report(!read_regn(pmevcntr, 1) && (read_sysreg(pmovsclr_el0) == 0x1),
+		"odd counter did not increment on overflow if disabled");
+	report_info("MEM_ACCESS counter #0 has value 0x%lx",
+		    read_regn(pmevcntr, 0));
+	report_info("CHAIN counter #1 has value 0x%lx",
+		    read_regn(pmevcntr, 1));
+	report_info("overflow counter 0x%lx", read_sysreg(pmovsclr_el0));
+
+	/* start at 0xFFFFFFDC, +20 with CHAIN enabled, +20 with CHAIN disabled */
+	pmu_reset();
+	write_sysreg_s(0x3, PMCNTENSET_EL0);
+	write_regn(pmevcntr, 0, 0xFFFFFFDC);
+	isb();
+
+	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
+	report_info("MEM_ACCESS counter #0 has value 0x%lx",
+		    read_regn(pmevcntr, 0));
+
+	/* disable the CHAIN event */
+	write_sysreg_s(0x2, PMCNTENCLR_EL0);
+	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
+	report_info("MEM_ACCESS counter #0 has value 0x%lx",
+		    read_regn(pmevcntr, 0));
+	report(read_sysreg(pmovsclr_el0) == 0x1,
+		"should have triggered an overflow on #0");
+	report(!read_regn(pmevcntr, 1),
+		"CHAIN counter #1 shouldn't have incremented");
+
+	/* start at 0xFFFFFFDC, +20 with CHAIN disabled, +20 with CHAIN enabled */
+
+	pmu_reset();
+	write_sysreg_s(0x1, PMCNTENSET_EL0);
+	write_regn(pmevcntr, 0, 0xFFFFFFDC);
+	isb();
+	report_info("counter #0 = 0x%lx, counter #1 = 0x%lx overflow=0x%lx",
+		    read_regn(pmevcntr, 0), read_regn(pmevcntr, 1),
+		    read_sysreg(pmovsclr_el0));
+
+	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
+	report_info("MEM_ACCESS counter #0 has value 0x%lx",
+		    read_regn(pmevcntr, 0));
+
+	/* enable the CHAIN event */
+	write_sysreg_s(0x3, PMCNTENSET_EL0);
+	isb();
+	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
+	report_info("MEM_ACCESS counter #0 has value 0x%lx",
+		    read_regn(pmevcntr, 0));
+
+	report((read_regn(pmevcntr, 1) == 1) && !read_sysreg(pmovsclr_el0),
+		"CHAIN counter #1 should have incremented and no overflow expected");
+
+	report_info("CHAIN counter #1 = 0x%lx, overflow=0x%lx",
+		    read_regn(pmevcntr, 1), read_sysreg(pmovsclr_el0));
+
+	/* start as MEM_ACCESS/CPU_CYCLES and move to CHAIN/MEM_ACCESS */
+	pmu_reset();
+	write_regn(pmevtyper, 0, 0x13 /* MEM_ACCESS */ | PMEVTYPER_EXCLUDE_EL0);
+	write_regn(pmevtyper, 1, 0x11 /* CPU_CYCLES */ | PMEVTYPER_EXCLUDE_EL0);
+	write_sysreg_s(0x3, PMCNTENSET_EL0);
+	write_regn(pmevcntr, 0, 0xFFFFFFDC);
+	isb();
+
+	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
+	report_info("MEM_ACCESS counter #0 has value 0x%lx",
+		    read_regn(pmevcntr, 0));
+
+	/* 0 becomes CHAINED */
+	write_sysreg_s(0x3, PMCNTENCLR_EL0); /* PMCNTENSET is W1S: writing 0 would be a no-op */
+	write_regn(pmevtyper, 1, 0x1E /* CHAIN */ | PMEVTYPER_EXCLUDE_EL0);
+	write_sysreg_s(0x3, PMCNTENSET_EL0);
+	isb();
+	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
+	report_info("MEM_ACCESS counter #0 has value 0x%lx",
+		    read_regn(pmevcntr, 0));
+
+	report((read_regn(pmevcntr, 1) == 1) && !read_sysreg(pmovsclr_el0),
+		"CHAIN counter #1 should have incremented and no overflow expected");
+
+	report_info("CHAIN counter #1 = 0x%lx, overflow=0x%lx",
+		    read_regn(pmevcntr, 1), read_sysreg(pmovsclr_el0));
+
+	/* start as CHAIN/MEM_ACCESS and move to MEM_ACCESS/CPU_CYCLES */
+	pmu_reset();
+	write_regn(pmevtyper, 0, 0x13 /* MEM_ACCESS */ | PMEVTYPER_EXCLUDE_EL0);
+	write_regn(pmevtyper, 1, 0x1E /* CHAIN */ | PMEVTYPER_EXCLUDE_EL0);
+	write_regn(pmevcntr, 0, 0xFFFFFFDC);
+	write_sysreg_s(0x3, PMCNTENSET_EL0);
+	isb();
+	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
+	report_info("counter #0=0x%lx, counter #1=0x%lx",
+		    read_regn(pmevcntr, 0), read_regn(pmevcntr, 1));
+
+	write_sysreg_s(0x3, PMCNTENCLR_EL0); /* PMCNTENSET is W1S: writing 0 would be a no-op */
+	write_regn(pmevtyper, 1, 0x11 /* CPU_CYCLES */ | PMEVTYPER_EXCLUDE_EL0);
+	write_sysreg_s(0x3, PMCNTENSET_EL0);
+	isb();
+	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
+	report(read_sysreg(pmovsclr_el0) == 1,
+		"overflow is expected on counter 0");
+	report_info("counter #0=0x%lx, counter #1=0x%lx overflow=0x%lx",
+		    read_regn(pmevcntr, 0), read_regn(pmevcntr, 1),
+		    read_sysreg(pmovsclr_el0));
+}
+
#endif
/*
@@ -786,6 +918,9 @@ int main(int argc, char *argv[])
} else if (strcmp(argv[1], "chained-sw-incr") == 0) {
report_prefix_push(argv[1]);
test_chained_sw_incr();
+ } else if (strcmp(argv[1], "chain-promotion") == 0) {
+ report_prefix_push(argv[1]);
+ test_chain_promotion();
} else {
report_abort("Unknown sub-test '%s'", argv[1]);
}
@@ -102,6 +102,12 @@ groups = pmu
arch = arm64
extra_params = -append 'chained-sw-incr'
+[pmu-chain-promotion]
+file = pmu.flat
+groups = pmu
+arch = arm64
+extra_params = -append 'chain-promotion'
+
# Test PMU support (TCG) with -icount IPC=1
#[pmu-tcg-icount-1]
#file = pmu.flat
Test configurations where we transition from 32-bit to 64-bit counters and conversely. Also test the configuration where chain counters are configured but only one counter of the pair is enabled. Signed-off-by: Eric Auger <eric.auger@redhat.com> --- arm/pmu.c | 135 ++++++++++++++++++++++++++++++++++++++++++++++ arm/unittests.cfg | 6 +++ 2 files changed, 141 insertions(+)