@@ -42,6 +42,18 @@
#define SYSREG_OSLSR_EL1 SYSREG(2, 0, 1, 1, 4)
#define SYSREG_OSDLR_EL1 SYSREG(2, 0, 1, 3, 4)
#define SYSREG_CNTPCT_EL0 SYSREG(3, 3, 14, 0, 1)
+#define SYSREG_PMCR_EL0 SYSREG(3, 3, 9, 12, 0)
+#define SYSREG_PMUSERENR_EL0 SYSREG(3, 3, 9, 14, 0)
+#define SYSREG_PMCNTENSET_EL0 SYSREG(3, 3, 9, 12, 1)
+#define SYSREG_PMCNTENCLR_EL0 SYSREG(3, 3, 9, 12, 2)
+#define SYSREG_PMINTENCLR_EL1 SYSREG(3, 0, 9, 14, 2)
+#define SYSREG_PMOVSCLR_EL0 SYSREG(3, 3, 9, 12, 3)
+#define SYSREG_PMSWINC_EL0 SYSREG(3, 3, 9, 12, 4)
+#define SYSREG_PMSELR_EL0 SYSREG(3, 3, 9, 12, 5)
+#define SYSREG_PMCEID0_EL0 SYSREG(3, 3, 9, 12, 6)
+#define SYSREG_PMCEID1_EL0 SYSREG(3, 3, 9, 12, 7)
+#define SYSREG_PMCCNTR_EL0 SYSREG(3, 3, 9, 13, 0)
+#define SYSREG_PMCCFILTR_EL0 SYSREG(3, 3, 14, 15, 7)
#define WFX_IS_WFE (1 << 0)
@@ -723,6 +735,40 @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
gt_cntfrq_period_ns(arm_cpu);
break;
+ case SYSREG_PMCR_EL0:
+ val = env->cp15.c9_pmcr;
+ break;
+ case SYSREG_PMCCNTR_EL0:
+ pmu_op_start(env);
+ val = env->cp15.c15_ccnt;
+ pmu_op_finish(env);
+ break;
+ case SYSREG_PMCNTENCLR_EL0:
+ val = env->cp15.c9_pmcnten;
+ break;
+ case SYSREG_PMOVSCLR_EL0:
+ val = env->cp15.c9_pmovsr;
+ break;
+ case SYSREG_PMSELR_EL0:
+ val = env->cp15.c9_pmselr;
+ break;
+ case SYSREG_PMINTENCLR_EL1:
+ val = env->cp15.c9_pminten;
+ break;
+ case SYSREG_PMCCFILTR_EL0:
+ val = env->cp15.pmccfiltr_el0;
+ break;
+ case SYSREG_PMCNTENSET_EL0:
+ val = env->cp15.c9_pmcnten;
+ break;
+ case SYSREG_PMUSERENR_EL0:
+ val = env->cp15.c9_pmuserenr;
+ break;
+ case SYSREG_PMCEID0_EL0:
+ case SYSREG_PMCEID1_EL0:
+ /* We can't really count anything yet, declare all events invalid */
+ val = 0;
+ break;
case SYSREG_OSLSR_EL1:
val = env->cp15.oslsr_el1;
break;
@@ -753,6 +799,82 @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
return 0;
}
+static void pmu_update_irq(CPUARMState *env) /* refresh PMU IRQ line from current state */
+{
+    ARMCPU *cpu = env_archcpu(env);
+    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
+            (env->cp15.c9_pminten & env->cp15.c9_pmovsr)); /* level = PMCR.E && (some counter both overflowed and interrupt-enabled) */
+}
+
+static bool pmu_event_supported(uint16_t number) /* is architectural event 'number' countable? */
+{
+    return false; /* no events modelled yet; only the cycle counter (PMCCNTR) works */
+}
+
+/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
+ * the current EL, security state, and register configuration.
+ */
+static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
+{
+    uint64_t filter;
+    bool enabled, filtered = true;
+    int el = arm_current_el(env); /* NOTE(review): only EL0/EL1 filters handled below; assumes no EL2/EL3 guest under hvf -- confirm */
+
+    enabled = (env->cp15.c9_pmcr & PMCRE) && /* PMCR.E global enable ... */
+              (env->cp15.c9_pmcnten & (1 << counter)); /* ... plus per-counter PMCNTEN bit */
+
+    if (counter == 31) { /* 31 selects the cycle counter, PMCCNTR */
+        filter = env->cp15.pmccfiltr_el0;
+    } else {
+        filter = env->cp15.c14_pmevtyper[counter];
+    }
+
+    if (el == 0) {
+        filtered = filter & PMXEVTYPER_U; /* U bit set => suppress counting at EL0 */
+    } else if (el == 1) {
+        filtered = filter & PMXEVTYPER_P; /* P bit set => suppress counting at EL1 */
+    }
+
+    if (counter != 31) {
+        /*
+         * If not checking PMCCNTR, ensure the counter is setup to an event we
+         * support
+         */
+        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
+        if (!pmu_event_supported(event)) { /* always false today: no events modelled */
+            return false;
+        }
+    }
+
+    return enabled && !filtered;
+}
+
+static void pmswinc_write(CPUARMState *env, uint64_t value) /* PMSWINC_EL0: software-increment selected event counters */
+{
+    unsigned int i;
+    for (i = 0; i < pmu_num_counters(env); i++) {
+        /* Increment a counter's count iff: */
+        if ((value & (1 << i)) && /* counter's bit is set */
+                /* counter is enabled and not filtered */
+                pmu_counter_enabled(env, i) &&
+                /* counter is SW_INCR */
+                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
+            /*
+             * Detect if this write causes an overflow since we can't predict
+             * PMSWINC overflows like we can for other events
+             */
+            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
+
+            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) { /* bit 31 went 1 -> 0: 32-bit wrap, i.e. overflow */
+                env->cp15.c9_pmovsr |= (1 << i); /* latch overflow status ... */
+                pmu_update_irq(env); /* ... and re-evaluate the PMU IRQ line */
+            }
+
+            env->cp15.c14_pmevcntr[i] = new_pmswinc;
+        }
+    }
+}
+
static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
{
ARMCPU *arm_cpu = ARM_CPU(cpu);
@@ -767,6 +889,63 @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
val);
switch (reg) {
+ case SYSREG_PMCCNTR_EL0:
+ pmu_op_start(env);
+ env->cp15.c15_ccnt = val;
+ pmu_op_finish(env);
+ break;
+ case SYSREG_PMCR_EL0:
+ pmu_op_start(env);
+
+ if (val & PMCRC) {
+ /* The counter has been reset */
+ env->cp15.c15_ccnt = 0;
+ }
+
+ if (val & PMCRP) {
+ unsigned int i;
+ for (i = 0; i < pmu_num_counters(env); i++) {
+ env->cp15.c14_pmevcntr[i] = 0;
+ }
+ }
+
+ env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
+ env->cp15.c9_pmcr |= (val & PMCR_WRITEABLE_MASK);
+
+ pmu_op_finish(env);
+ break;
+ case SYSREG_PMUSERENR_EL0:
+ env->cp15.c9_pmuserenr = val & 0xf;
+ break;
+ case SYSREG_PMCNTENSET_EL0:
+ env->cp15.c9_pmcnten |= (val & pmu_counter_mask(env));
+ break;
+ case SYSREG_PMCNTENCLR_EL0:
+ env->cp15.c9_pmcnten &= ~(val & pmu_counter_mask(env));
+ break;
+ case SYSREG_PMINTENCLR_EL1:
+ pmu_op_start(env);
+ env->cp15.c9_pminten |= val;
+ pmu_op_finish(env);
+ break;
+ case SYSREG_PMOVSCLR_EL0:
+ pmu_op_start(env);
+ env->cp15.c9_pmovsr &= ~val;
+ pmu_op_finish(env);
+ break;
+ case SYSREG_PMSWINC_EL0:
+ pmu_op_start(env);
+ pmswinc_write(env, val);
+ pmu_op_finish(env);
+ break;
+ case SYSREG_PMSELR_EL0:
+ env->cp15.c9_pmselr = val & 0x1f;
+ break;
+ case SYSREG_PMCCFILTR_EL0:
+ pmu_op_start(env);
+ env->cp15.pmccfiltr_el0 = val & PMCCFILTR_EL0;
+ pmu_op_finish(env);
+ break;
case SYSREG_OSLAR_EL1:
env->cp15.oslsr_el1 = val & 1;
break;
We can expose cycle counters on the PMU easily. To be as compatible as possible, let's do so, but make sure we don't expose any other architectural counters that we cannot model yet. This allows OSs that require PMU support to work. Signed-off-by: Alexander Graf <agraf@csgraf.de> --- target/arm/hvf/hvf.c | 179 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 179 insertions(+)