@@ -49,6 +49,7 @@ static ssize_t __maybe_unused rvpmu_format_show(struct device *dev,
RVPMU_ATTR_ENTRY(_name, rvpmu_format_show, (char *)_config)
PMU_FORMAT_ATTR(firmware, "config:63");
+PMU_FORMAT_ATTR(counterid_mask, "config2:0-31");
static DEFINE_STATIC_KEY_FALSE(riscv_pmu_sbi_available);
static DEFINE_STATIC_KEY_FALSE(riscv_pmu_cdeleg_available);
@@ -74,6 +75,7 @@ static const struct attribute_group *riscv_sbi_pmu_attr_groups[] = {
static struct attribute *riscv_cdeleg_pmu_formats_attr[] = {
RVPMU_FORMAT_ATTR_ENTRY(event, RVPMU_CDELEG_PMU_FORMAT_ATTR),
&format_attr_firmware.attr,
+ &format_attr_counterid_mask.attr,
NULL,
};
@@ -974,23 +976,39 @@ static int rvpmu_deleg_find_ctrs(void)
return num_hw_ctr;
}
+/*
+ * The json file must correctly list counter 0 or counter 2 as available for
+ * cycle/instret events. Otherwise, the driver cannot tell that a fixed
+ * counter must be used and will pick a programmable counter if available.
+ */
static int get_deleg_fixed_hw_idx(struct cpu_hw_events *cpuc, struct perf_event *event)
{
- return -EINVAL;
+ if (!event->attr.config2)
+ return -EINVAL;
+
+ if (event->attr.config2 & RISCV_PMU_CYCLE_FIXED_CTR_MASK)
+ return 0; /* CY counter */
+ else if (event->attr.config2 & RISCV_PMU_INSTRUCTION_FIXED_CTR_MASK)
+ return 2; /* IR counter */
+ else
+ return -EINVAL;
}
static int get_deleg_next_hpm_hw_idx(struct cpu_hw_events *cpuc, struct perf_event *event)
{
unsigned long hw_ctr_mask = 0;
- /*
- * TODO: Treat every hpmcounter can monitor every event for now.
- * The event to counter mapping should come from the json file.
- * The mapping should also tell if sampling is supported or not.
- */
-
/* Select only hpmcounters */
hw_ctr_mask = cmask & (~0x7);
+
+ /*
+ * Mask off the counters that can't monitor this event (as specified via
+ * json). The counter mask for this event is set in config2 via the 'Counter'
+ * property in the json file or by configuring config2 manually. If config2
+ * is not set, it is assumed all available hpmcounters can monitor this event.
+ */
+ if (event->attr.config2)
+ hw_ctr_mask = hw_ctr_mask & event->attr.config2;
hw_ctr_mask &= ~(cpuc->used_hw_ctrs[0]);
return __ffs(hw_ctr_mask);
}
@@ -1020,10 +1038,6 @@ static int rvpmu_deleg_ctr_get_idx(struct perf_event *event)
uint64_t priv_filter;
int idx;
- /*
- * TODO: We should not rely on SBI Perf encoding to check if the event
- * is a fixed one or not.
- */
if (!is_sampling_event(event)) {
idx = get_deleg_fixed_hw_idx(cpuc, event);
if (idx == 0 || idx == 2) {
@@ -30,6 +30,8 @@
#define RISCV_PMU_CONFIG1_GUEST_EVENTS 0x1
#define RISCV_PMU_DELEG_RAW_EVENT_MASK GENMASK_ULL(55, 0)
+#define RISCV_PMU_CYCLE_FIXED_CTR_MASK 0x01
+#define RISCV_PMU_INSTRUCTION_FIXED_CTR_MASK 0x04
struct cpu_hw_events {
/* currently enabled events */
The counter restriction specified in the json file is passed to the driver
via the config2 parameter in the perf event attributes. This allows any
platform vendor to define a custom mapping between events and hpmcounters
without any rules defined in the ISA. However, the cycle and instruction
counters are fixed (0 and 2 respectively) by the ISA. The platform vendor
must specify these in the json file if they are intended to be used while
profiling. Otherwise, they can just specify the alternate hpmcounters that
may monitor and/or sample the cycle/instruction counts.

Signed-off-by: Atish Patra <atishp@rivosinc.com>
---
 drivers/perf/riscv_pmu_dev.c   | 36 +++++++++++++++++++++++-----------
 include/linux/perf/riscv_pmu.h |  2 ++
 2 files changed, 27 insertions(+), 11 deletions(-)
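
For illustration, here is a minimal userspace sketch (not part of this patch)
of how the counterid_mask/config2 plumbing could be exercised, assuming the
bit layout introduced above: bit 0 selects the fixed cycle counter, bit 2 the
fixed instret counter, and bits 3+ the corresponding hpmcounters. The event
type, the mask value, and the local perf_event_open() wrapper are illustrative
assumptions only:

  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/types.h>
  #include <sys/syscall.h>
  #include <linux/perf_event.h>

  /* Thin wrapper around the raw syscall; the name is local to this example. */
  static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                              int cpu, int group_fd, unsigned long flags)
  {
          return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
  }

  int main(void)
  {
          struct perf_event_attr attr;
          long fd;

          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          attr.type = PERF_TYPE_HARDWARE;
          attr.config = PERF_COUNT_HW_CPU_CYCLES;
          /*
           * Example counterid_mask (config2:0-31): allow the fixed cycle
           * counter (bit 0) or hpmcounters 3 and 4. A real mask would come
           * from the 'Counter' property in the vendor json file.
           */
          attr.config2 = 0x1 | (1UL << 3) | (1UL << 4);

          fd = perf_event_open(&attr, 0, -1, -1, 0);
          if (fd < 0) {
                  perror("perf_event_open");
                  return 1;
          }
          /* ... enable, run the workload, read(fd, ...) as usual ... */
          close(fd);
          return 0;
  }

From the perf tool, the same restriction should be reachable through the
counterid_mask format attribute added above (it maps onto config2:0-31); the
exact event string depends on the PMU name exposed on the platform, so no
specific command line is shown here.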