[v4,3/6] perf: hisi: Add support for HiSilicon SoC L3C PMU driver

Message ID 1500984642-204676-4-git-send-email-zhangshaokun@hisilicon.com (mailing list archive)
State New, archived

Commit Message

Shaokun Zhang July 25, 2017, 12:10 p.m. UTC
This patch adds support for the L3C PMU driver in the HiSilicon SoC chip.
Each L3C has its own control, counter and interrupt registers and is a
separate PMU. Each L3C PMU has 8 programmable counters and supports 0x60
events; the event code is 8 bits and every counter is free-running.
An interrupt is supported to handle counter (48-bit) overflow.

Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Shaokun Zhang <zhangshaokun@hisilicon.com>
Signed-off-by: Anurup M <anurup.m@huawei.com>
---
 drivers/perf/hisilicon/Makefile              |   2 +-
 drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c | 538 +++++++++++++++++++++++++++
 include/linux/cpuhotplug.h                   |   1 +
 3 files changed, 540 insertions(+), 1 deletion(-)
 create mode 100644 drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c

Comments

Mark Rutland Aug. 15, 2017, 10:41 a.m. UTC | #1
On Tue, Jul 25, 2017 at 08:10:39PM +0800, Shaokun Zhang wrote:
> This patch adds support for the L3C PMU driver in the HiSilicon SoC chip.
> Each L3C has its own control, counter and interrupt registers and is a
> separate PMU. Each L3C PMU has 8 programmable counters and supports 0x60
> events; the event code is 8 bits and every counter is free-running.
> An interrupt is supported to handle counter (48-bit) overflow.

[...]

> +/* L3C register definition */
> +#define L3C_PERF_CTRL		0x0408
> +#define L3C_INT_MASK		0x0800
> +#define L3C_INT_STATUS		0x0808
> +#define L3C_INT_CLEAR		0x080c
> +#define L3C_EVENT_CTRL	        0x1c00
> +#define L3C_EVENT_TYPE0		0x1d00
> +#define L3C_CNTR0_LOWER		0x1e00

Why does this have a _LOWER suffix?

How big is this register? Is it part of a pair of registers?

> +
> +/* L3C has 8 counters and supports 0x60 events */
> +#define L3C_NR_COUNTERS		0x8
> +#define L3C_NR_EVENTS		0x60

What exactly is meant by "supports 0x60 events"?

e.g. does that mean event IDs 0-0x5f are valid?

> +static irqreturn_t hisi_l3c_pmu_isr(int irq, void *dev_id)
> +{
> +	struct hisi_pmu *l3c_pmu = dev_id;
> +	struct perf_event *event;
> +	unsigned long overflown;
> +	u32 status;
> +	int idx;
> +
> +	/* Read L3C_INT_STATUS register */
> +	status = readl(l3c_pmu->base + L3C_INT_STATUS);
> +	if (!status)
> +		return IRQ_NONE;
> +	overflown = status;

You don't need the temporary u32 value here; you can assign directly to
an unsigned long and do all the manipulation on that.
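
i.e. something like (untested):

	unsigned long overflown;

	overflown = readl(l3c_pmu->base + L3C_INT_STATUS);
	if (!overflown)
		return IRQ_NONE;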

[...]

> +/* Check if the CPU belongs to the SCCL and CCL of PMU */
> +static bool hisi_l3c_is_cpu_in_ccl(struct hisi_pmu *l3c_pmu)
> +{
> +	u32 ccl_id, sccl_id;
> +
> +	hisi_read_sccl_and_ccl_id(&sccl_id, &ccl_id);
> +
> +	if (sccl_id != l3c_pmu->sccl_id)
> +		return false;
> +
> +	if (ccl_id != l3c_pmu->ccl_id)
> +		return false;
> +
> +	/* Return true if matched */
> +	return true;
> +}

The conditionals would be simpler as:

	return (sccl_id == l3c_pmu->sccl_id &&
	        ccl_id == l3c_pmu->ccl_id);

> +
> +static int hisi_l3c_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
> +{
> +	struct hisi_pmu *l3c_pmu;
> +
> +	l3c_pmu = hlist_entry_safe(node, struct hisi_pmu, node);
> +
> +	/* Proceed only when CPU belongs to the same SCCL and CCL */
> +	if (!hisi_l3c_is_cpu_in_ccl(l3c_pmu))
> +		return 0;

Surely you have a mask of CPUs that you can check instead? You'll need
that for the offline path.

[...]

> +static int hisi_l3c_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
> +{
> +	struct hisi_pmu *l3c_pmu;
> +	cpumask_t ccl_online_cpus;
> +	unsigned int target;
> +
> +	l3c_pmu = hlist_entry_safe(node, struct hisi_pmu, node);
> +
> +	/* Proceed only when CPU belongs to the same SCCL and CCL */
> +	if (!hisi_l3c_is_cpu_in_ccl(l3c_pmu))
> +		return 0;

Again, surely you can check a pre-computed mask?

> +
> +	/* Proceed if this CPU is used for event counting */
> +	if (!cpumask_test_cpu(cpu, &l3c_pmu->cpus))
> +		return 0;

You need to set up the CPU state regardless of whether there are active
events currently. Otherwise the cpumask can be stale, pointing at an
offline CPU, leaving the PMU unusable.

> +
> +	/* Give up ownership of CCL */
> +	cpumask_test_and_clear_cpu(cpu, &l3c_pmu->cpus);
> +
> +	/* Any other CPU for this CCL which is still online */
> +	cpumask_and(&ccl_online_cpus, cpu_coregroup_mask(cpu),
> +		    cpu_online_mask);

What is cpu_coregroup_mask? I do not think you can rely on that
happening to align with the physical CCL mask.

Instead, please:

* Keep track of which CPU(s) this PMU can be used from, with a cpumask.
  Either initialise that at probe time, or add CPUs to that in the
  hotplug callback.

* Use only that mask to determine which CPU to move the PMU context to.

* Use an int to hold the current CPU; there's no need to use a mask to
  hold what should be a single CPU ID.
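
For example, a rough sketch (the 'associated_cpus' and 'on_cpu' members
are illustrative names, not something that exists today):

	static int hisi_l3c_pmu_offline_cpu(unsigned int cpu,
					    struct hlist_node *node)
	{
		struct hisi_pmu *l3c_pmu;
		cpumask_t online_ccl_cpus;
		unsigned int target;

		l3c_pmu = hlist_entry_safe(node, struct hisi_pmu, node);

		/* Nothing to do unless this CPU owns the PMU context */
		if (l3c_pmu->on_cpu != cpu)
			return 0;

		/* Give up ownership of the PMU */
		l3c_pmu->on_cpu = -1;

		/* Choose a new owner among the remaining online CCL CPUs */
		cpumask_and(&online_ccl_cpus, &l3c_pmu->associated_cpus,
			    cpu_online_mask);
		target = cpumask_any_but(&online_ccl_cpus, cpu);
		if (target >= nr_cpu_ids)
			return 0;

		perf_pmu_migrate_context(&l3c_pmu->pmu, cpu, target);
		l3c_pmu->on_cpu = target;
		WARN_ON(irq_set_affinity(l3c_pmu->irq, cpumask_of(target)));

		return 0;
	}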

[...]

> +	/* Get the L3C SCCL ID */
> +	if (device_property_read_u32(dev, "hisilicon,scl-id",
> +				     &l3c_pmu->sccl_id)) {
> +		dev_err(dev, "Can not read l3c sccl-id!\n");
> +		return -EINVAL;
> +	}
> +
> +	/* Get the L3C CPU cluster(CCL) ID */
> +	if (device_property_read_u32(dev, "hisilicon,ccl-id",
> +				     &l3c_pmu->ccl_id)) {
> +		dev_err(dev, "Can not read l3c ccl-id!\n");
> +		return -EINVAL;
> +	}

Previously, you expect that these happen to match particular bits of the
MPIDR, which vary for multi-threaded cores. Please document this.
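
i.e. something like the following above hisi_l3c_pmu_init_data()
(assuming the affinity fields hisi_read_sccl_and_ccl_id() uses):

	/*
	 * The "hisilicon,scl-id" and "hisilicon,ccl-id" properties are
	 * expected to match the SCCL/CCL fields read from MPIDR_EL1
	 * (Aff2/Aff1 for single-threaded parts, Aff3/Aff2 when the MT
	 * bit is set).
	 */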

> +static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev,
> +				  struct hisi_pmu *l3c_pmu)
> +{
> +	struct device *dev = &pdev->dev;
> +	int ret;
> +
> +	ret = hisi_l3c_pmu_init_data(pdev, l3c_pmu);
> +	if (ret)
> +		return ret;
> +
> +	ret = hisi_l3c_pmu_init_irq(l3c_pmu, pdev);
> +	if (ret)
> +		return ret;
> +
> +	l3c_pmu->name = devm_kasprintf(dev, GFP_KERNEL, "hisi_l3c%u_%u",
> +				       l3c_pmu->l3c_tag_id, l3c_pmu->sccl_id);

As mentioned on the documentation patch, it would be nicer for the name
to be hierarchical, i.e. placing the SCCL ID first.

Thanks,
Mark.
Shaokun Zhang Aug. 17, 2017, 3:31 a.m. UTC | #2
Hi Mark,

On 2017/8/15 18:41, Mark Rutland wrote:
> On Tue, Jul 25, 2017 at 08:10:39PM +0800, Shaokun Zhang wrote:
>> This patch adds support for the L3C PMU driver in the HiSilicon SoC chip.
>> Each L3C has its own control, counter and interrupt registers and is a
>> separate PMU. Each L3C PMU has 8 programmable counters and supports 0x60
>> events; the event code is 8 bits and every counter is free-running.
>> An interrupt is supported to handle counter (48-bit) overflow.
> 
> [...]
> 
>> +/* L3C register definition */
>> +#define L3C_PERF_CTRL		0x0408
>> +#define L3C_INT_MASK		0x0800
>> +#define L3C_INT_STATUS		0x0808
>> +#define L3C_INT_CLEAR		0x080c
>> +#define L3C_EVENT_CTRL	        0x1c00
>> +#define L3C_EVENT_TYPE0		0x1d00
>> +#define L3C_CNTR0_LOWER		0x1e00
> 
> Why does this have a _LOWER suffix?
> 
> How big is this register? Is it part of a pair of registers?
> 

Each counter is 48 bits. For counter 0, the lower 32 bits are at register
offset 0x1e00 and the upper 16 bits are at 0x1e04 (the top 16 bits of
0x1e04 are reserved: RAZ and WI). The other counters follow the same layout.
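
So for counter 0 the value could equivalently be assembled from two
32-bit reads, e.g. (illustrative only; the driver uses a single readq()):

	u64 lo = readl(l3c_pmu->base + L3C_CNTR0_LOWER);
	u64 hi = readl(l3c_pmu->base + L3C_CNTR0_LOWER + 4) & 0xffff;
	u64 val = (hi << 32) | lo;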

>> +
>> +/* L3C has 8 counters and supports 0x60 events */
>> +#define L3C_NR_COUNTERS		0x8
>> +#define L3C_NR_EVENTS		0x60
> 
> What exactly is meant by "supports 0x60 events"?
> 
> e.g. does that mean event IDs 0-0x5f are valid?
> 

Yes, it means event IDs 0-0x5f are valid; my apologies for describing it vaguely.

>> +static irqreturn_t hisi_l3c_pmu_isr(int irq, void *dev_id)
>> +{
>> +	struct hisi_pmu *l3c_pmu = dev_id;
>> +	struct perf_event *event;
>> +	unsigned long overflown;
>> +	u32 status;
>> +	int idx;
>> +
>> +	/* Read L3C_INT_STATUS register */
>> +	status = readl(l3c_pmu->base + L3C_INT_STATUS);
>> +	if (!status)
>> +		return IRQ_NONE;
>> +	overflown = status;
> 
> You don't need the temporary u32 value here; you can assign directly to
> an unsigned long and do all the manipulation on that.
> 

Ok.

> [...]
> 
>> +/* Check if the CPU belongs to the SCCL and CCL of PMU */
>> +static bool hisi_l3c_is_cpu_in_ccl(struct hisi_pmu *l3c_pmu)
>> +{
>> +	u32 ccl_id, sccl_id;
>> +
>> +	hisi_read_sccl_and_ccl_id(&sccl_id, &ccl_id);
>> +
>> +	if (sccl_id != l3c_pmu->sccl_id)
>> +		return false;
>> +
>> +	if (ccl_id != l3c_pmu->ccl_id)
>> +		return false;
>> +
>> +	/* Return true if matched */
>> +	return true;
>> +}
> 
> The conditionals would be simpler as:
> 
> 	return (sccl_id == l3c_pmu->sccl_id &&
> 	        ccl_id == l3c_pmu->ccl_id);
> 

Ok.

>> +
>> +static int hisi_l3c_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
>> +{
>> +	struct hisi_pmu *l3c_pmu;
>> +
>> +	l3c_pmu = hlist_entry_safe(node, struct hisi_pmu, node);
>> +
>> +	/* Proceed only when CPU belongs to the same SCCL and CCL */
>> +	if (!hisi_l3c_is_cpu_in_ccl(l3c_pmu))
>> +		return 0;
> 
> Surely you have a mask of CPUs that you can check instead? You'll need
> that for the offline path.
> 

Ok, shall create the cpumask and update it in the CPU hotplug callbacks.
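
Roughly like this, I think (a sketch; assumes new associated_cpus and
on_cpu members in struct hisi_pmu, with on_cpu initialised to -1 at
probe time):

	static int hisi_l3c_pmu_online_cpu(unsigned int cpu,
					   struct hlist_node *node)
	{
		struct hisi_pmu *l3c_pmu;

		l3c_pmu = hlist_entry_safe(node, struct hisi_pmu, node);

		if (!hisi_l3c_is_cpu_in_ccl(l3c_pmu))
			return 0;

		/* Remember that this CPU belongs to the PMU's CCL */
		cpumask_set_cpu(cpu, &l3c_pmu->associated_cpus);

		/* If another CPU already manages the PMU, we are done */
		if (l3c_pmu->on_cpu != -1)
			return 0;

		l3c_pmu->on_cpu = cpu;
		WARN_ON(irq_set_affinity(l3c_pmu->irq, cpumask_of(cpu)));

		return 0;
	}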

> [...]
> 
>> +static int hisi_l3c_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
>> +{
>> +	struct hisi_pmu *l3c_pmu;
>> +	cpumask_t ccl_online_cpus;
>> +	unsigned int target;
>> +
>> +	l3c_pmu = hlist_entry_safe(node, struct hisi_pmu, node);
>> +
>> +	/* Proceed only when CPU belongs to the same SCCL and CCL */
>> +	if (!hisi_l3c_is_cpu_in_ccl(l3c_pmu))
>> +		return 0;
> 
> Again, surely you can check a pre-computed mask?
> 

Ok.

>> +
>> +	/* Proceed if this CPU is used for event counting */
>> +	if (!cpumask_test_cpu(cpu, &l3c_pmu->cpus))
>> +		return 0;
> 
> You need to set up the CPU state regardless of whether there are active
> events currently. Otherwise the cpumask can be stale, pointing at an
> offline CPU, leaving the PMU unusable.
>

Ok. Shall update the cpumask regardless, and also initialise a CPU field
in hisi_pmu to hold the next online CPU.

>> +
>> +	/* Give up ownership of CCL */
>> +	cpumask_test_and_clear_cpu(cpu, &l3c_pmu->cpus);
>> +
>> +	/* Any other CPU for this CCL which is still online */
>> +	cpumask_and(&ccl_online_cpus, cpu_coregroup_mask(cpu),
>> +		    cpu_online_mask);
> 
> What is cpu_coregroup_mask? I do not think you can rely on that
> happening to align with the physical CCL mask.
> 

cpu_coregroup_mask() returns the CPU cores within the cluster, so we used it.

> Instead, please:
> 
> * Keep track of which CPU(s) this PMU can be used from, with a cpumask.
>   Either initialise that at probe time, or add CPUs to that in the
>   hotplug callback.
> 
> * Use only that mask to determine which CPU to move the PMU context to.
> 
> * Use an int to hold the current CPU; there's no need to use a mask to
>   hold what should be a single CPU ID.
> 

Shall modify as suggested.

> [...]
> 
>> +	/* Get the L3C SCCL ID */
>> +	if (device_property_read_u32(dev, "hisilicon,scl-id",
>> +				     &l3c_pmu->sccl_id)) {
>> +		dev_err(dev, "Can not read l3c sccl-id!\n");
>> +		return -EINVAL;
>> +	}
>> +
>> +	/* Get the L3C CPU cluster(CCL) ID */
>> +	if (device_property_read_u32(dev, "hisilicon,ccl-id",
>> +				     &l3c_pmu->ccl_id)) {
>> +		dev_err(dev, "Can not read l3c ccl-id!\n");
>> +		return -EINVAL;
>> +	}
> 
> Previously, you expect that these happen to match particular bits of the
> MPIDR, which vary for multi-threaded cores. Please document this.
> 

Ok.

>> +static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev,
>> +				  struct hisi_pmu *l3c_pmu)
>> +{
>> +	struct device *dev = &pdev->dev;
>> +	int ret;
>> +
>> +	ret = hisi_l3c_pmu_init_data(pdev, l3c_pmu);
>> +	if (ret)
>> +		return ret;
>> +
>> +	ret = hisi_l3c_pmu_init_irq(l3c_pmu, pdev);
>> +	if (ret)
>> +		return ret;
>> +
>> +	l3c_pmu->name = devm_kasprintf(dev, GFP_KERNEL, "hisi_l3c%u_%u",
>> +				       l3c_pmu->l3c_tag_id, l3c_pmu->sccl_id);
> 
> As mentioned on the documentation patch, it would be nicer for the name
> to be hierarchical, i.e. placing the SCCL ID first.
> 

Surely.

Thanks.
Shaokun

> Thanks,
> Mark.

Patch

diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile
index 2783bb3..4a3d3e6 100644
--- a/drivers/perf/hisilicon/Makefile
+++ b/drivers/perf/hisilicon/Makefile
@@ -1 +1 @@ 
-obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o
+obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
new file mode 100644
index 0000000..33146bb
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -0,0 +1,538 @@ 
+/*
+ * HiSilicon SoC L3C uncore Hardware event counters support
+ *
+ * Copyright (C) 2017 Hisilicon Limited
+ * Author: Anurup M <anurup.m@huawei.com>
+ *         Shaokun Zhang <zhangshaokun@hisilicon.com>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/acpi.h>
+#include <linux/bug.h>
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/topology.h>
+#include "hisi_uncore_pmu.h"
+
+/* L3C register definition */
+#define L3C_PERF_CTRL		0x0408
+#define L3C_INT_MASK		0x0800
+#define L3C_INT_STATUS		0x0808
+#define L3C_INT_CLEAR		0x080c
+#define L3C_EVENT_CTRL	        0x1c00
+#define L3C_EVENT_TYPE0		0x1d00
+#define L3C_CNTR0_LOWER		0x1e00
+
+/* L3C has 8 counters and supports 0x60 events */
+#define L3C_NR_COUNTERS		0x8
+#define L3C_NR_EVENTS		0x60
+
+#define L3C_PERF_CTRL_EN	0x20000
+#define L3C_EVTYPE_NONE		0xff
+
+/*
+ * Select the counter register offset using the counter index;
+ * every counter is 48 bits and bits [48:63] are reserved.
+ */
+static u32 get_counter_reg_off(int cntr_idx)
+{
+	return (L3C_CNTR0_LOWER + (cntr_idx * 8));
+}
+
+static u64 hisi_l3c_pmu_read_counter(struct hisi_pmu *l3c_pmu,
+				     struct hw_perf_event *hwc)
+{
+	u32 idx = hwc->idx;
+	u32 reg;
+
+	if (!hisi_uncore_pmu_counter_valid(l3c_pmu, idx)) {
+		dev_err(l3c_pmu->dev, "Unsupported event index:%d!\n", idx);
+		return 0;
+	}
+
+	reg = get_counter_reg_off(idx);
+
+	/* Read 64 bits; the upper 16 bits are Read-As-Zero */
+	return readq(l3c_pmu->base + reg);
+}
+
+static void hisi_l3c_pmu_write_counter(struct hisi_pmu *l3c_pmu,
+				       struct hw_perf_event *hwc, u64 val)
+{
+	u32 idx = hwc->idx;
+	u32 reg;
+
+	if (!hisi_uncore_pmu_counter_valid(l3c_pmu, idx)) {
+		dev_err(l3c_pmu->dev, "Unsupported event index:%d!\n", idx);
+		return;
+	}
+
+	reg = get_counter_reg_off(idx);
+	/* Write 64 bits; the upper 16 bits are Write-Ignored */
+	writeq(val, l3c_pmu->base + reg);
+}
+
+static void hisi_l3c_pmu_write_evtype(struct hisi_pmu *l3c_pmu, int idx,
+				      u32 type)
+{
+	u32 reg, reg_idx, shift, val;
+
+	/*
+	 * Select the appropriate event select register (L3C_EVENT_TYPE0/1).
+	 * There are 2 event select registers for the 8 hardware counters.
+	 * The event code is 8 bits: counters 0-3 use L3C_EVENT_TYPE0 and
+	 * counters 4-7 use L3C_EVENT_TYPE1.
+	 */
+	reg = L3C_EVENT_TYPE0 + (idx / 4) * 4;
+	reg_idx = idx % 4;
+	shift = 8 * reg_idx;
+
+	/* Write event code to L3C_EVENT_TYPEx Register */
+	val = readl(l3c_pmu->base + reg);
+	val &= ~(L3C_EVTYPE_NONE << shift);
+	val |= (type << shift);
+	writel(val, l3c_pmu->base + reg);
+}
+
+static void hisi_l3c_pmu_start_counters(struct hisi_pmu *l3c_pmu)
+{
+	u32 val;
+
+	/*
+	 * Set perf_enable bit in L3C_PERF_CTRL register to start counting
+	 * for all enabled counters.
+	 */
+	val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+	val |= L3C_PERF_CTRL_EN;
+	writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+}
+
+static void hisi_l3c_pmu_stop_counters(struct hisi_pmu *l3c_pmu)
+{
+	u32 val;
+
+	/*
+	 * Clear perf_enable bit in L3C_PERF_CTRL register to stop counting
+	 * for all enabled counters.
+	 */
+	val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+	val &= ~(L3C_PERF_CTRL_EN);
+	writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+}
+
+static void hisi_l3c_pmu_enable_counter(struct hisi_pmu *l3c_pmu,
+					struct hw_perf_event *hwc)
+{
+	u32 val;
+
+	/* Enable counter index in L3C_EVENT_CTRL register */
+	val = readl(l3c_pmu->base + L3C_EVENT_CTRL);
+	val |= (1 << hwc->idx);
+	writel(val, l3c_pmu->base + L3C_EVENT_CTRL);
+}
+
+static void hisi_l3c_pmu_disable_counter(struct hisi_pmu *l3c_pmu,
+					 struct hw_perf_event *hwc)
+{
+	u32 val;
+
+	/* Clear counter index in L3C_EVENT_CTRL register */
+	val = readl(l3c_pmu->base + L3C_EVENT_CTRL);
+	val &= ~(1 << hwc->idx);
+	writel(val, l3c_pmu->base + L3C_EVENT_CTRL);
+}
+
+static void hisi_l3c_pmu_enable_counter_int(struct hisi_pmu *l3c_pmu,
+					    struct hw_perf_event *hwc)
+{
+	u32 val;
+
+	val = readl(l3c_pmu->base + L3C_INT_MASK);
+	/* Write 0 to enable interrupt */
+	val &= ~(1 << hwc->idx);
+	writel(val, l3c_pmu->base + L3C_INT_MASK);
+}
+
+static void hisi_l3c_pmu_disable_counter_int(struct hisi_pmu *l3c_pmu,
+					     struct hw_perf_event *hwc)
+{
+	u32 val;
+
+	val = readl(l3c_pmu->base + L3C_INT_MASK);
+	/* Write 1 to mask interrupt */
+	val |= (1 << hwc->idx);
+	writel(val, l3c_pmu->base + L3C_INT_MASK);
+}
+
+static irqreturn_t hisi_l3c_pmu_isr(int irq, void *dev_id)
+{
+	struct hisi_pmu *l3c_pmu = dev_id;
+	struct perf_event *event;
+	unsigned long overflown;
+	u32 status;
+	int idx;
+
+	/* Read L3C_INT_STATUS register */
+	status = readl(l3c_pmu->base + L3C_INT_STATUS);
+	if (!status)
+		return IRQ_NONE;
+	overflown = status;
+
+	/*
+	 * Find the counter index which overflowed if the bit was set
+	 * and handle it.
+	 */
+	for_each_set_bit(idx, &overflown, L3C_NR_COUNTERS) {
+		/* Write 1 to clear the IRQ status flag */
+		writel((1 << idx), l3c_pmu->base + L3C_INT_CLEAR);
+
+		/* Get the corresponding event struct */
+		event = l3c_pmu->pmu_events.hw_events[idx];
+		if (!event)
+			continue;
+
+		hisi_uncore_pmu_event_update(event);
+		hisi_uncore_pmu_set_event_period(event);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int hisi_l3c_pmu_init_irq(struct hisi_pmu *l3c_pmu,
+				 struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int irq, ret;
+
+	/* Read and init IRQ */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "irq init: fail L3C overflow interrupt\n");
+		return irq;
+	}
+
+	ret = devm_request_irq(dev, irq, hisi_l3c_pmu_isr,
+			       IRQF_NOBALANCING | IRQF_NO_THREAD,
+			       dev_name(dev), l3c_pmu);
+	if (ret < 0) {
+		dev_err(dev, "Fail to request IRQ:%d ret:%d\n", irq, ret);
+		return ret;
+	}
+
+	l3c_pmu->irq = irq;
+
+	return 0;
+}
+
+/* Check if the CPU belongs to the SCCL and CCL of PMU */
+static bool hisi_l3c_is_cpu_in_ccl(struct hisi_pmu *l3c_pmu)
+{
+	u32 ccl_id, sccl_id;
+
+	hisi_read_sccl_and_ccl_id(&sccl_id, &ccl_id);
+
+	if (sccl_id != l3c_pmu->sccl_id)
+		return false;
+
+	if (ccl_id != l3c_pmu->ccl_id)
+		return false;
+
+	/* Return true if matched */
+	return true;
+}
+
+static int hisi_l3c_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
+{
+	struct hisi_pmu *l3c_pmu;
+
+	l3c_pmu = hlist_entry_safe(node, struct hisi_pmu, node);
+
+	/* Proceed only when CPU belongs to the same SCCL and CCL */
+	if (!hisi_l3c_is_cpu_in_ccl(l3c_pmu))
+		return 0;
+
+	/* If another CPU is already managing the CPU cluster, simply return */
+	if (!cpumask_empty(&l3c_pmu->cpus))
+		return 0;
+
+	/* Use this CPU for event counting in the CCL */
+	cpumask_set_cpu(cpu, &l3c_pmu->cpus);
+
+	/* Overflow interrupt also should use the same CPU */
+	WARN_ON(irq_set_affinity(l3c_pmu->irq, &l3c_pmu->cpus));
+
+	return 0;
+}
+
+static int hisi_l3c_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+	struct hisi_pmu *l3c_pmu;
+	cpumask_t ccl_online_cpus;
+	unsigned int target;
+
+	l3c_pmu = hlist_entry_safe(node, struct hisi_pmu, node);
+
+	/* Proceed only when CPU belongs to the same SCCL and CCL */
+	if (!hisi_l3c_is_cpu_in_ccl(l3c_pmu))
+		return 0;
+
+	/* Proceed if this CPU is used for event counting */
+	if (!cpumask_test_cpu(cpu, &l3c_pmu->cpus))
+		return 0;
+
+	/* Give up ownership of CCL */
+	cpumask_test_and_clear_cpu(cpu, &l3c_pmu->cpus);
+
+	/* Any other CPU for this CCL which is still online */
+	cpumask_and(&ccl_online_cpus, cpu_coregroup_mask(cpu),
+		    cpu_online_mask);
+	target = cpumask_any_but(&ccl_online_cpus, cpu);
+	if (target >= nr_cpu_ids)
+		return 0;
+
+	perf_pmu_migrate_context(&l3c_pmu->pmu, cpu, target);
+	/* Use this CPU for event counting in the CCL */
+	cpumask_set_cpu(target, &l3c_pmu->cpus);
+
+	return 0;
+}
+
+static const struct acpi_device_id hisi_l3c_pmu_acpi_match[] = {
+	{ "HISI0213", },
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, hisi_l3c_pmu_acpi_match);
+
+static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
+				  struct hisi_pmu *l3c_pmu)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	unsigned long long id;
+	acpi_status status;
+
+	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_UID", NULL, &id);
+	if (ACPI_FAILURE(status))
+		return -EINVAL;
+
+	l3c_pmu->l3c_tag_id = id;
+
+	/* Get the L3C SCCL ID */
+	if (device_property_read_u32(dev, "hisilicon,scl-id",
+				     &l3c_pmu->sccl_id)) {
+		dev_err(dev, "Can not read l3c sccl-id!\n");
+		return -EINVAL;
+	}
+
+	/* Get the L3C CPU cluster(CCL) ID */
+	if (device_property_read_u32(dev, "hisilicon,ccl-id",
+				     &l3c_pmu->ccl_id)) {
+		dev_err(dev, "Can not read l3c ccl-id!\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	l3c_pmu->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(l3c_pmu->base)) {
+		dev_err(dev, "ioremap failed for l3c_pmu resource\n");
+		return PTR_ERR(l3c_pmu->base);
+	}
+
+	return 0;
+}
+
+static struct attribute *hisi_l3c_pmu_format_attr[] = {
+	HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+	NULL,
+};
+
+static const struct attribute_group hisi_l3c_pmu_format_group = {
+	.name = "format",
+	.attrs = hisi_l3c_pmu_format_attr,
+};
+
+static struct attribute *hisi_l3c_pmu_events_attr[] = {
+	HISI_PMU_EVENT_ATTR(rd_cpipe,		0x00),
+	HISI_PMU_EVENT_ATTR(wr_cpipe,		0x01),
+	HISI_PMU_EVENT_ATTR(rd_hit_cpipe,	0x02),
+	HISI_PMU_EVENT_ATTR(wr_hit_cpipe,	0x03),
+	HISI_PMU_EVENT_ATTR(victim_num,		0x04),
+	HISI_PMU_EVENT_ATTR(rd_spipe,		0x20),
+	HISI_PMU_EVENT_ATTR(wr_spipe,		0x21),
+	HISI_PMU_EVENT_ATTR(rd_hit_spipe,	0x22),
+	HISI_PMU_EVENT_ATTR(wr_hit_spipe,	0x23),
+	HISI_PMU_EVENT_ATTR(back_invalid,	0x29),
+	HISI_PMU_EVENT_ATTR(retry_cpu,		0x40),
+	HISI_PMU_EVENT_ATTR(retry_ring,		0x41),
+	HISI_PMU_EVENT_ATTR(prefetch_drop,	0x42),
+	NULL,
+};
+
+static const struct attribute_group hisi_l3c_pmu_events_group = {
+	.name = "events",
+	.attrs = hisi_l3c_pmu_events_attr,
+};
+
+static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
+
+static struct attribute *hisi_l3c_pmu_cpumask_attrs[] = {
+	&dev_attr_cpumask.attr,
+	NULL,
+};
+
+static const struct attribute_group hisi_l3c_pmu_cpumask_attr_group = {
+	.attrs = hisi_l3c_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *hisi_l3c_pmu_attr_groups[] = {
+	&hisi_l3c_pmu_format_group,
+	&hisi_l3c_pmu_events_group,
+	&hisi_l3c_pmu_cpumask_attr_group,
+	NULL,
+};
+
+static const struct hisi_uncore_ops hisi_uncore_l3c_ops = {
+	.write_evtype		= hisi_l3c_pmu_write_evtype,
+	.get_event_idx		= hisi_uncore_pmu_get_event_idx,
+	.start_counters		= hisi_l3c_pmu_start_counters,
+	.stop_counters		= hisi_l3c_pmu_stop_counters,
+	.enable_counter		= hisi_l3c_pmu_enable_counter,
+	.disable_counter	= hisi_l3c_pmu_disable_counter,
+	.enable_counter_int	= hisi_l3c_pmu_enable_counter_int,
+	.disable_counter_int	= hisi_l3c_pmu_disable_counter_int,
+	.write_counter		= hisi_l3c_pmu_write_counter,
+	.read_counter		= hisi_l3c_pmu_read_counter,
+};
+
+static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev,
+				  struct hisi_pmu *l3c_pmu)
+{
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	ret = hisi_l3c_pmu_init_data(pdev, l3c_pmu);
+	if (ret)
+		return ret;
+
+	ret = hisi_l3c_pmu_init_irq(l3c_pmu, pdev);
+	if (ret)
+		return ret;
+
+	l3c_pmu->name = devm_kasprintf(dev, GFP_KERNEL, "hisi_l3c%u_%u",
+				       l3c_pmu->l3c_tag_id, l3c_pmu->sccl_id);
+	l3c_pmu->num_events = L3C_NR_EVENTS;
+	l3c_pmu->num_counters = L3C_NR_COUNTERS;
+	l3c_pmu->counter_bits = 48;
+	l3c_pmu->ops = &hisi_uncore_l3c_ops;
+	l3c_pmu->dev = dev;
+
+	return 0;
+}
+
+static int hisi_l3c_pmu_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct hisi_pmu *l3c_pmu;
+	int ret;
+
+	l3c_pmu = hisi_pmu_alloc(dev, L3C_NR_COUNTERS);
+	if (!l3c_pmu)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, l3c_pmu);
+
+	ret = hisi_l3c_pmu_dev_probe(pdev, l3c_pmu);
+	if (ret)
+		return ret;
+
+	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
+				       &l3c_pmu->node);
+	if (ret) {
+		dev_err(dev, "Error %d registering hotplug\n", ret);
+		return ret;
+	}
+
+	l3c_pmu->pmu = (struct pmu) {
+		.name		= l3c_pmu->name,
+		.task_ctx_nr	= perf_invalid_context,
+		.event_init	= hisi_uncore_pmu_event_init,
+		.pmu_enable	= hisi_uncore_pmu_enable,
+		.pmu_disable	= hisi_uncore_pmu_disable,
+		.add		= hisi_uncore_pmu_add,
+		.del		= hisi_uncore_pmu_del,
+		.start		= hisi_uncore_pmu_start,
+		.stop		= hisi_uncore_pmu_stop,
+		.read		= hisi_uncore_pmu_read,
+		.attr_groups	= hisi_l3c_pmu_attr_groups,
+	};
+
+	ret = perf_pmu_register(&l3c_pmu->pmu, l3c_pmu->name, -1);
+	if (ret) {
+		dev_err(l3c_pmu->dev, "l3c_pmu register failed!\n");
+		cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
+					    &l3c_pmu->node);
+	}
+
+	return ret;
+}
+
+static int hisi_l3c_pmu_remove(struct platform_device *pdev)
+{
+	struct hisi_pmu *l3c_pmu = platform_get_drvdata(pdev);
+
+	perf_pmu_unregister(&l3c_pmu->pmu);
+	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
+				    &l3c_pmu->node);
+
+	return 0;
+}
+
+static struct platform_driver hisi_l3c_pmu_driver = {
+	.driver = {
+		.name = "hisi_l3c_pmu",
+		.acpi_match_table = ACPI_PTR(hisi_l3c_pmu_acpi_match),
+	},
+	.probe = hisi_l3c_pmu_probe,
+	.remove = hisi_l3c_pmu_remove,
+};
+
+static int __init hisi_l3c_pmu_module_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
+				      "AP_PERF_ARM_HISI_L3_ONLINE",
+				      hisi_l3c_pmu_online_cpu,
+				      hisi_l3c_pmu_offline_cpu);
+	if (ret) {
+		pr_err("l3c_pmu_module_init: Error setup hotplug, ret=%d", ret);
+		return ret;
+	}
+
+	ret = platform_driver_register(&hisi_l3c_pmu_driver);
+	if (ret)
+		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE);
+
+	return ret;
+}
+module_init(hisi_l3c_pmu_module_init);
+
+static void __exit hisi_l3c_pmu_module_exit(void)
+{
+	platform_driver_unregister(&hisi_l3c_pmu_driver);
+	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE);
+}
+module_exit(hisi_l3c_pmu_module_exit);
+
+MODULE_DESCRIPTION("HiSilicon SoC L3C uncore PMU driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");
+MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index b56573b..2eab157 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -136,6 +136,7 @@  enum cpuhp_state {
 	CPUHP_AP_PERF_S390_SF_ONLINE,
 	CPUHP_AP_PERF_ARM_CCI_ONLINE,
 	CPUHP_AP_PERF_ARM_CCN_ONLINE,
+	CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
 	CPUHP_AP_PERF_ARM_L2X0_ONLINE,
 	CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
 	CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,