@@ -307,6 +307,56 @@ static struct sbi_pmu_event_data pmu_cache_event_sbi_map[PERF_COUNT_HW_CACHE_MAX
},
};
+/*
+ * Vendor specific PMU events.
+ */
+struct riscv_pmu_event {
+        u64 event_id;
+        u32 counter_mask;
+};
+
+struct riscv_vendor_pmu_events {
+        unsigned long vendorid;
+        unsigned long archid;
+        unsigned long implid;
+        const struct riscv_pmu_event *hw_event_map;
+        const struct riscv_pmu_event (*cache_event_map)[PERF_COUNT_HW_CACHE_OP_MAX]
+                                                       [PERF_COUNT_HW_CACHE_RESULT_MAX];
+};
+
+#define RISCV_VENDOR_PMU_EVENTS(_vendorid, _archid, _implid, _hw_event_map, _cache_event_map) \
+        { .vendorid = _vendorid, .archid = _archid, .implid = _implid, \
+          .hw_event_map = _hw_event_map, .cache_event_map = _cache_event_map },
+
+static struct riscv_vendor_pmu_events pmu_vendor_events_table[] = {
+};
+
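+/* Active event maps for this platform; NULL when no vendor entry matches */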
+const struct riscv_pmu_event *current_pmu_hw_event_map;
+const struct riscv_pmu_event (*current_pmu_cache_event_map)[PERF_COUNT_HW_CACHE_OP_MAX]
+                                                           [PERF_COUNT_HW_CACHE_RESULT_MAX];
+
+static void rvpmu_vendor_register_events(void)
+{
+        int cpu = raw_smp_processor_id();
+        unsigned long vendor_id = riscv_cached_mvendorid(cpu);
+        unsigned long impl_id = riscv_cached_mimpid(cpu);
+        unsigned long arch_id = riscv_cached_marchid(cpu);
+
+        for (int i = 0; i < ARRAY_SIZE(pmu_vendor_events_table); i++) {
+                if (pmu_vendor_events_table[i].vendorid == vendor_id &&
+                    pmu_vendor_events_table[i].implid == impl_id &&
+                    pmu_vendor_events_table[i].archid == arch_id) {
+                        current_pmu_hw_event_map = pmu_vendor_events_table[i].hw_event_map;
+                        current_pmu_cache_event_map = pmu_vendor_events_table[i].cache_event_map;
+                        break;
+                }
+        }
+
+        if (!current_pmu_hw_event_map || !current_pmu_cache_event_map)
+                pr_info("No default PMU events found\n");
+}
+
static void rvpmu_sbi_check_event(struct sbi_pmu_event_data *edata)
{
        struct sbiret ret;
@@ -1547,6 +1597,7 @@ static int __init rvpmu_devinit(void)
            riscv_isa_extension_available(NULL, SSCSRIND)) {
                static_branch_enable(&riscv_pmu_cdeleg_available);
                cdeleg_available = true;
+                rvpmu_vendor_register_events();
        }
        if (!(sbi_available || cdeleg_available))
@@ -28,6 +28,19 @@
#define RISCV_PMU_CONFIG1_GUEST_EVENTS 0x1
+#define HW_OP_UNSUPPORTED 0xFFFF
+#define CACHE_OP_UNSUPPORTED 0xFFFF
+
+#define PERF_MAP_ALL_UNSUPPORTED \
+        [0 ... PERF_COUNT_HW_MAX - 1] = {HW_OP_UNSUPPORTED, 0x0}
+
+#define PERF_CACHE_MAP_ALL_UNSUPPORTED \
+[0 ... C(MAX) - 1] = { \
+        [0 ... C(OP_MAX) - 1] = { \
+                [0 ... C(RESULT_MAX) - 1] = {CACHE_OP_UNSUPPORTED, 0x0} \
+        }, \
+}
+
struct cpu_hw_events {
/* currently enabled events */
int n_events;
The RISC-V ISA doesn't define any standard event encodings or specify any
event-to-counter mapping. Thus, the event encoding information and the
corresponding counter mapping for those events need to be provided in the
driver for each vendor. Add a framework to support that. The individual
platform events will be added later.

Signed-off-by: Atish Patra <atishp@rivosinc.com>
---
 drivers/perf/riscv_pmu_dev.c   | 51 ++++++++++++++++++++++++++++++++++++++++++
 include/linux/perf/riscv_pmu.h | 13 +++++++++++
 2 files changed, 64 insertions(+)
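
As an illustration of how a platform would plug into this framework, a
vendor entry might look like the sketch below. It is hypothetical and not
part of this patch: the ID values, event encodings, counter masks, and
example_* names are placeholders, and PERF_CACHE_MAP_ALL_UNSUPPORTED
assumes the usual C(x) -> PERF_COUNT_HW_CACHE_##x helper is in scope.

/* Hypothetical event maps for an imaginary vendor (illustrative only). */
static const struct riscv_pmu_event example_hw_event_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        /* Later designated initializers override the all-unsupported defaults. */
        [PERF_COUNT_HW_CPU_CYCLES]      = {0x01, 0xFFFFFFFF},
        [PERF_COUNT_HW_INSTRUCTIONS]    = {0x02, 0xFFFFFFFF},
};

static const struct riscv_pmu_event example_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
        [PERF_COUNT_HW_CACHE_OP_MAX][PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CACHE_L1D][PERF_COUNT_HW_CACHE_OP_READ]
                [PERF_COUNT_HW_CACHE_RESULT_ACCESS] = {0x10, 0xFFFFFFFF},
        [PERF_COUNT_HW_CACHE_L1D][PERF_COUNT_HW_CACHE_OP_READ]
                [PERF_COUNT_HW_CACHE_RESULT_MISS]   = {0x11, 0xFFFFFFFF},
};

static struct riscv_vendor_pmu_events pmu_vendor_events_table[] = {
        /* 0x1234/0x1/0x1 are made-up mvendorid/marchid/mimplid values. */
        RISCV_VENDOR_PMU_EVENTS(0x1234, 0x1, 0x1,
                                example_hw_event_map, example_cache_event_map)
};

At boot, rvpmu_vendor_register_events() compares each table entry against
the CPU's cached mvendorid/marchid/mimplid and, on a match, points
current_pmu_hw_event_map and current_pmu_cache_event_map at the vendor's
tables. The 0xFFFFFFFF masks above stand in for "any counter may count
this event", assuming counter_mask is a bitmap of usable counters.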