@@ -281,3 +281,91 @@ void riscv_iommu_process_hpmcycle_write(RISCVIOMMUState *s)
s->hpmcycle_prev = get_cycles();
hpm_setup_timer(s, s->hpmcycle_val);
}
+
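+/*
+ * A valid EventID lies strictly between RISCV_IOMMU_HPMEVENT_INVALID and
+ * RISCV_IOMMU_HPMEVENT_MAX; anything else is rejected by the WARL logic
+ * in riscv_iommu_process_hpmevt_write().
+ */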
+static inline bool check_valid_event_id(unsigned event_id)
+{
+ return event_id > RISCV_IOMMU_HPMEVENT_INVALID &&
+ event_id < RISCV_IOMMU_HPMEVENT_MAX;
+}
+
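+/*
+ * GHRFunc for g_hash_table_find(): match the event whose counter mask has
+ * bit pair[0] (the counter index) set, returning its EventID via pair[1].
+ */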
+static gboolean hpm_event_equal(gpointer key, gpointer value, gpointer udata)
+{
+ uint32_t *pair = udata;
+
+ if (GPOINTER_TO_UINT(value) & (1 << pair[0])) {
+ pair[1] = GPOINTER_TO_UINT(key);
+ return true;
+ }
+
+ return false;
+}
+
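+/*
+ * Illustrative example: if counters 2 and 5 both monitor event 7, the map
+ * holds { 7 -> (1 << 2) | (1 << 5) }; programming counter 2 with the
+ * invalid EventID then shrinks the entry to { 7 -> (1 << 5) }.
+ */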
+/* Caller must check ctr_idx against hpm_cntrs to see if it's supported. */
+static void update_event_map(RISCVIOMMUState *s, uint64_t value,
+ uint32_t ctr_idx)
+{
+ unsigned event_id = get_field(value, RISCV_IOMMU_IOHPMEVT_EVENT_ID);
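+    /* pair[0]: counter index to look up; pair[1]: EventID found (out). */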
+ uint32_t pair[2] = { ctr_idx, RISCV_IOMMU_HPMEVENT_INVALID };
+ uint32_t new_value = 1 << ctr_idx;
+ gpointer data;
+
+    /*
+     * If the EventID field is RISCV_IOMMU_HPMEVENT_INVALID,
+     * remove this counter from the current mapping.
+     */
+ if (event_id == RISCV_IOMMU_HPMEVENT_INVALID) {
+ data = g_hash_table_find(s->hpm_event_ctr_map, hpm_event_equal, pair);
+
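+        /*
+         * If no event currently maps this counter, data is NULL and
+         * new_value ends up 0, making the removal below a no-op.
+         */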
+ new_value = GPOINTER_TO_UINT(data) & ~(new_value);
+ if (new_value != 0) {
+ g_hash_table_replace(s->hpm_event_ctr_map,
+ GUINT_TO_POINTER(pair[1]),
+ GUINT_TO_POINTER(new_value));
+ } else {
+ g_hash_table_remove(s->hpm_event_ctr_map,
+ GUINT_TO_POINTER(pair[1]));
+ }
+
+ return;
+ }
+
+    /* Merge with the existing counter mask if the event is already in use. */
+ if (g_hash_table_lookup_extended(s->hpm_event_ctr_map,
+ GUINT_TO_POINTER(event_id),
+ NULL,
+ &data)) {
+ new_value |= GPOINTER_TO_UINT(data);
+ }
+
+ g_hash_table_insert(s->hpm_event_ctr_map,
+ GUINT_TO_POINTER(event_id),
+ GUINT_TO_POINTER(new_value));
+}
+
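+/*
+ * Handle a write to an IOHPMEVT register: clear a stale overflow bit,
+ * sanitize the WARL EventID field and refresh the event-to-counter map.
+ */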
+void riscv_iommu_process_hpmevt_write(RISCVIOMMUState *s, uint32_t evt_reg)
+{
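+    /* IOHPMEVT registers are 64-bit wide, hence 8 bytes apart. */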
+ const uint32_t ctr_idx = (evt_reg - RISCV_IOMMU_REG_IOHPMEVT_BASE) >> 3;
+ const uint32_t ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);
+ uint64_t val = riscv_iommu_reg_get64(s, evt_reg);
+
+ if (ctr_idx >= s->hpm_cntrs) {
+ return;
+ }
+
+    /* Clear OF bit in IOCOUNTOVF if it's being cleared in IOHPMEVT. */
+ if (get_field(ovf, BIT(ctr_idx + 1)) &&
+ !get_field(val, RISCV_IOMMU_IOHPMEVT_OF)) {
+        /* +1 because IOCOUNTOVF bit 0 is the CYCLE counter's OF bit. */
+ riscv_iommu_reg_mod32(
+ s, RISCV_IOMMU_REG_IOCOUNTOVF, 0, BIT(ctr_idx + 1));
+ }
+
+ if (!check_valid_event_id(get_field(val, RISCV_IOMMU_IOHPMEVT_EVENT_ID))) {
+ /* Reset EventID (WARL) field to invalid. */
+ val = set_field(val, RISCV_IOMMU_IOHPMEVT_EVENT_ID,
+ RISCV_IOMMU_HPMEVENT_INVALID);
+ riscv_iommu_reg_set64(s, evt_reg, val);
+ }
+
+ update_event_map(s, val, ctr_idx);
+}
@@ -28,5 +28,6 @@ void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
void riscv_iommu_hpm_timer_cb(void *priv);
void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh);
void riscv_iommu_process_hpmcycle_write(RISCVIOMMUState *s);
+void riscv_iommu_process_hpmevt_write(RISCVIOMMUState *s, uint32_t evt_reg);
#endif
@@ -1939,7 +1939,7 @@ static void riscv_iommu_process_hpm_writes(RISCVIOMMUState *s,
case RISCV_IOMMU_REG_IOHPMEVT_BASE ...
RISCV_IOMMU_REG_IOHPMEVT(RISCV_IOMMU_IOCOUNT_NUM) + 4:
- /* not yet implemented */
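+        /* Mask off the low bits to get the 8-byte register base. */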
+ riscv_iommu_process_hpmevt_write(s, regb & ~7);
break;
}
}
@@ -2386,6 +2386,8 @@ static Property riscv_iommu_properties[] = {
DEFINE_PROP_BOOL("g-stage", RISCVIOMMUState, enable_g_stage, TRUE),
DEFINE_PROP_LINK("downstream-mr", RISCVIOMMUState, target_mr,
TYPE_MEMORY_REGION, MemoryRegion *),
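+    /* Number of implemented HPM counters (default: all). */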
+ DEFINE_PROP_UINT8("hpm-counters", RISCVIOMMUState, hpm_cntrs,
+ RISCV_IOMMU_IOCOUNT_NUM),
DEFINE_PROP_END_OF_LIST(),
};
@@ -95,6 +95,7 @@ struct RISCVIOMMUState {
/* HPM event counters */
GHashTable *hpm_event_ctr_map; /* Mapping of events to counters */
+    uint8_t hpm_cntrs;             /* Number of implemented HPM counters */
};
void riscv_iommu_pci_setup_iommu(RISCVIOMMUState *iommu, PCIBus *bus,