
[v2,5/5] arm64: perf: Support new DT compatibles

Message ID 6e5087621bd8112a35733054689d7c785b4bdde5.1582312530.git.robin.murphy@arm.com
State Mainlined
Commit 29cc4ceeac1274ab8363a11b81ebd99f3b023985
Series arm64 CPU DT binding updates

Commit Message

Robin Murphy Feb. 21, 2020, 7:35 p.m. UTC
Add support for matching the new PMUs. For now, this just wires them up
as generic PMUv3 such that people writing DTs for new SoCs can do the
right thing, and at least have architectural and raw events be usable.
We can come back and fill in event maps for sysfs and/or perf tools at
a later date.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
---

v2: define separate init functions to preserve the user ABI for naming
    (and perhaps more crucially, to simply avoid sysfs collisions on
     the inevitable A7[567] + A55 big.LITTLE systems)

 arch/arm64/kernel/perf_event.c | 56 ++++++++++++++++++++++++++++++++++
 1 file changed, 56 insertions(+)
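
For reference, a DT for a new SoC would then describe its PMU with one of these
compatible strings rather than only the generic "arm,armv8-pmuv3" one. A
minimal, illustrative node (the interrupt specifier here is hypothetical and
board-specific) might look like:

	pmu {
		compatible = "arm,cortex-a76-pmu";
		interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
	};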

Comments

Mark Rutland Feb. 28, 2020, 12:17 p.m. UTC | #1
On Fri, Feb 21, 2020 at 07:35:32PM +0000, Robin Murphy wrote:
> Add support for matching the new PMUs. For now, this just wires them up
> as generic PMUv3 such that people writing DTs for new SoCs can do the
> right thing, and at least have architectural and raw events be usable.
> We can come back and fill in event maps for sysfs and/or perf tools at
> a later date.
> 
> Signed-off-by: Robin Murphy <robin.murphy@arm.com>

Thanks for putting this together!

Acked-by: Mark Rutland <mark.rutland@arm.com>

Will, are you happy to queue this and the previous patch?

Thanks,
Mark.

Will Deacon Feb. 28, 2020, 12:24 p.m. UTC | #2
On Fri, Feb 28, 2020 at 12:17:13PM +0000, Mark Rutland wrote:
> On Fri, Feb 21, 2020 at 07:35:32PM +0000, Robin Murphy wrote:
> > Add support for matching the new PMUs. For now, this just wires them up
> > as generic PMUv3 such that people writing DTs for new SoCs can do the
> > right thing, and at least have architectural and raw events be usable.
> > We can come back and fill in event maps for sysfs and/or perf tools at
> > a later date.
> > 
> > Signed-off-by: Robin Murphy <robin.murphy@arm.com>
> 
> Thanks for putting this together!
> 
> Acked-by: Mark Rutland <mark.rutland@arm.com>
> 
> Will, are you happy to queue this and the previous patch?

Sure thing. I haven't queued anything for 5.7 yet, but I'll flag these
two so I don't forget.

Will
Will Deacon March 2, 2020, 11:54 a.m. UTC | #3
On Fri, Feb 28, 2020 at 12:17:13PM +0000, Mark Rutland wrote:
> On Fri, Feb 21, 2020 at 07:35:32PM +0000, Robin Murphy wrote:
> > Add support for matching the new PMUs. For now, this just wires them up
> > as generic PMUv3 such that people writing DTs for new SoCs can do the
> > right thing, and at least have architectural and raw events be usable.
> > We can come back and fill in event maps for sysfs and/or perf tools at
> > a later date.
> > 
> > Signed-off-by: Robin Murphy <robin.murphy@arm.com>
> 
> Thanks for putting this together!
> 
> Acked-by: Mark Rutland <mark.rutland@arm.com>
> 
> Will, are you happy to queue this and the previous patch?

Yup, I'll pick these two up shortly.

Will

Patch

diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 1e0b04da2f3a..726cd8bda025 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -991,6 +991,12 @@ static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
 			      armv8_pmuv3_map_event, NULL, NULL);
 }
 
+static int armv8_a34_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	return armv8_pmu_init(cpu_pmu, "armv8_cortex_a34",
+			      armv8_pmuv3_map_event, NULL, NULL);
+}
+
 static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	return armv8_pmu_init(cpu_pmu, "armv8_cortex_a35",
@@ -1003,12 +1009,24 @@ static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
 			      armv8_a53_map_event, NULL, NULL);
 }
 
+static int armv8_a55_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	return armv8_pmu_init(cpu_pmu, "armv8_cortex_a55",
+			      armv8_pmuv3_map_event, NULL, NULL);
+}
+
 static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	return armv8_pmu_init(cpu_pmu, "armv8_cortex_a57",
 			      armv8_a57_map_event, NULL, NULL);
 }
 
+static int armv8_a65_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	return armv8_pmu_init(cpu_pmu, "armv8_cortex_a65",
+			      armv8_pmuv3_map_event, NULL, NULL);
+}
+
 static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	return armv8_pmu_init(cpu_pmu, "armv8_cortex_a72",
@@ -1021,6 +1039,36 @@ static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
 			      armv8_a73_map_event, NULL, NULL);
 }
 
+static int armv8_a75_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	return armv8_pmu_init(cpu_pmu, "armv8_cortex_a75",
+			      armv8_pmuv3_map_event, NULL, NULL);
+}
+
+static int armv8_a76_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	return armv8_pmu_init(cpu_pmu, "armv8_cortex_a76",
+			      armv8_pmuv3_map_event, NULL, NULL);
+}
+
+static int armv8_a77_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	return armv8_pmu_init(cpu_pmu, "armv8_cortex_a77",
+			      armv8_pmuv3_map_event, NULL, NULL);
+}
+
+static int armv8_e1_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	return armv8_pmu_init(cpu_pmu, "armv8_neoverse_e1",
+			      armv8_pmuv3_map_event, NULL, NULL);
+}
+
+static int armv8_n1_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	return armv8_pmu_init(cpu_pmu, "armv8_neoverse_n1",
+			      armv8_pmuv3_map_event, NULL, NULL);
+}
+
 static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	return armv8_pmu_init(cpu_pmu, "armv8_cavium_thunder",
@@ -1035,11 +1083,19 @@ static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
 
 static const struct of_device_id armv8_pmu_of_device_ids[] = {
 	{.compatible = "arm,armv8-pmuv3",	.data = armv8_pmuv3_init},
+	{.compatible = "arm,cortex-a34-pmu",	.data = armv8_a34_pmu_init},
 	{.compatible = "arm,cortex-a35-pmu",	.data = armv8_a35_pmu_init},
 	{.compatible = "arm,cortex-a53-pmu",	.data = armv8_a53_pmu_init},
+	{.compatible = "arm,cortex-a55-pmu",	.data = armv8_a55_pmu_init},
 	{.compatible = "arm,cortex-a57-pmu",	.data = armv8_a57_pmu_init},
+	{.compatible = "arm,cortex-a65-pmu",	.data = armv8_a65_pmu_init},
 	{.compatible = "arm,cortex-a72-pmu",	.data = armv8_a72_pmu_init},
 	{.compatible = "arm,cortex-a73-pmu",	.data = armv8_a73_pmu_init},
+	{.compatible = "arm,cortex-a75-pmu",	.data = armv8_a75_pmu_init},
+	{.compatible = "arm,cortex-a76-pmu",	.data = armv8_a76_pmu_init},
+	{.compatible = "arm,cortex-a77-pmu",	.data = armv8_a77_pmu_init},
+	{.compatible = "arm,neoverse-e1-pmu",	.data = armv8_e1_pmu_init},
+	{.compatible = "arm,neoverse-n1-pmu",	.data = armv8_n1_pmu_init},
 	{.compatible = "cavium,thunder-pmu",	.data = armv8_thunder_pmu_init},
 	{.compatible = "brcm,vulcan-pmu",	.data = armv8_vulcan_pmu_init},
 	{},
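
A side effect worth noting from the v2 changelog: each new init function gives
its PMU a distinct name under /sys/bus/event_source/devices/ (e.g.
armv8_cortex_a75 alongside armv8_cortex_a55 on a big.LITTLE system), which is
what avoids the sysfs collisions mentioned there. As a rough usage sketch,
assuming the standard arm_pmu "event" sysfs format field and using the
architectural CPU_CYCLES event (0x11) as a raw event number:

	# count cycles on each cluster of a hypothetical A75+A55 system
	perf stat -a -e armv8_cortex_a75/event=0x11/ -e armv8_cortex_a55/event=0x11/ sleep 1

Until CPU-specific event maps are filled in, only architectural PMUv3 events
and raw event numbers are usable this way, as the commit message notes.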