[v9,3/8] perf: imx_perf: let the driver manage the counter usage rather than the user

Message ID 20240415020353.3833367-3-xu.yang_2@nxp.com (mailing list archive)
State New, archived
Series: [v9,1/8] dt-bindings: perf: fsl-imx-ddr: Add i.MX95 compatible

Commit Message

Xu Yang April 15, 2024, 2:03 a.m. UTC
In the current design, the user of the perf tool needs to input a counter
ID to count events. However, this is not user-friendly since the user has
to look up the map table to find the counter. Instead of letting the user
input the counter, let the driver manage the counters in this patch.

This is implemented as follows:
 1. Allocate counter 0 for the cycle event.
 2. Find an unused counter from 1-10 for reference events.
 3. Allocate the specific counter for counter-specific events.

In this patch, the counter attribute is removed too. To mark
counter-specific events, the counter ID is encoded into
perf_pmu_events_attr.id.
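
For illustration, a minimal sketch of the resulting config encoding
(paraphrased from the hunks below, with parentheses added around the macro
arguments; eddrtq_pm_rd_trans_filt and its counter/event numbers are taken
from the counter2 event list and serve only as an example):

	/* config layout: bits [7:0] = event ID, bits [15:8] = counter (0 = auto-allocate) */
	#define CONFIG_COUNTER_OFFSET	8
	#define ID(counter, id)		(((counter) << CONFIG_COUNTER_OFFSET) | (id))

	/*
	 * eddrtq_pm_rd_trans_filt is tied to counter 2 and event 73, so its
	 * sysfs alias expands to ID(2, 73) == (2 << 8) | 73 == 0x249,
	 * i.e. event=0x249.
	 */

The user therefore only selects an event by name; the driver decodes the
counter field from event->attr.config and allocates the hardware counter
itself.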

Reviewed-by: Frank Li <Frank.Li@nxp.com>
Signed-off-by: Xu Yang <xu.yang_2@nxp.com>

---
Changes in v6:
 - new patch
Changes in v7:
 - no changes
Changes in v8:
 - add Rb tag
Changes in v9:
 - keep 'counter' attr
---
 drivers/perf/fsl_imx9_ddr_perf.c | 164 ++++++++++++++++++-------------
 1 file changed, 98 insertions(+), 66 deletions(-)

Comments

Frank Li April 15, 2024, 2:46 a.m. UTC | #1
On Mon, Apr 15, 2024 at 10:03:48AM +0800, Xu Yang wrote:
> In the current design, the user of the perf tool needs to input a counter
> ID to count events. However, this is not user-friendly since the user has
> to look up the map table to find the counter. Instead of letting the user
> input the counter, let the driver manage the counters in this patch.
> 
> This is implemented as follows:
>  1. Allocate counter 0 for the cycle event.
>  2. Find an unused counter from 1-10 for reference events.
>  3. Allocate the specific counter for counter-specific events.
> 
> In this patch, the counter attribute is removed too. To mark
> counter-specific events, the counter ID is encoded into
> perf_pmu_events_attr.id.

In the changelog you said you keep the 'counter' attr. You should mention
that this keeps user compatibility but that any value passed down via
counter=<n> is ignored.

Xu Yang April 15, 2024, 2:57 a.m. UTC | #2
On Sun, Apr 14, 2024 at 10:46:21PM -0400, Frank Li wrote:
> On Mon, Apr 15, 2024 at 10:03:48AM +0800, Xu Yang wrote:
> > In the current design, the user of the perf tool needs to input a counter
> > ID to count events. However, this is not user-friendly since the user has
> > to look up the map table to find the counter. Instead of letting the user
> > input the counter, let the driver manage the counters in this patch.
> > 
> > This is implemented as follows:
> >  1. Allocate counter 0 for the cycle event.
> >  2. Find an unused counter from 1-10 for reference events.
> >  3. Allocate the specific counter for counter-specific events.
> > 
> > In this patch, the counter attribute is removed too. To mark
> > counter-specific events, the counter ID is encoded into
> > perf_pmu_events_attr.id.
> 
> In the changelog you said you keep the 'counter' attr. You should mention
> that this keeps user compatibility but that any value passed down via
> counter=<n> is ignored.
> 

Oh, sorry, I need to modify the commit message too. Thanks for the reminder.

Thanks,
Xu Yang

Patch

diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c
index 0017f2c9ef91..b00cbd920c0d 100644
--- a/drivers/perf/fsl_imx9_ddr_perf.c
+++ b/drivers/perf/fsl_imx9_ddr_perf.c
@@ -41,9 +41,11 @@ 
 
 #define NUM_COUNTERS		11
 #define CYCLES_COUNTER		0
+#define CYCLES_EVENT_ID		0
 
 #define CONFIG_EVENT		GENMASK(7, 0)
 #define CONFIG_COUNTER		GENMASK(15, 8)
+#define CONFIG_COUNTER_OFFSET	8
 
 #define to_ddr_pmu(p)		container_of(p, struct ddr_pmu, pmu)
 
@@ -130,6 +132,8 @@  static ssize_t ddr_pmu_event_show(struct device *dev,
 	return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
 }
 
+#define ID(counter, id) ((counter << CONFIG_COUNTER_OFFSET) | id)
+
 #define IMX9_DDR_PMU_EVENT_ATTR(_name, _id)				\
 	(&((struct perf_pmu_events_attr[]) {				\
 		{ .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\
@@ -162,81 +166,81 @@  static struct attribute *ddr_perf_events_attrs[] = {
 	IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_29, 63),
 
 	/* counter1 specific events */
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_0, 64),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_1, 65),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_2, 66),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_3, 67),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_4, 68),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_5, 69),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_6, 70),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_7, 71),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_0, ID(1, 64)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_1, ID(1, 65)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_2, ID(1, 66)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_3, ID(1, 67)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_4, ID(1, 68)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_5, ID(1, 69)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_6, ID(1, 70)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_7, ID(1, 71)),
 
 	/* counter2 specific events */
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_0, 64),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_1, 65),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_2, 66),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_3, 67),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_4, 68),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_5, 69),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_6, 70),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_7, 71),
-	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_empty, 72),
-	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_trans_filt, 73),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_0, ID(2, 64)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_1, ID(2, 65)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_2, ID(2, 66)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_3, ID(2, 67)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_4, ID(2, 68)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_5, ID(2, 69)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_6, ID(2, 70)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_7, ID(2, 71)),
+	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_empty, ID(2, 72)),
+	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_trans_filt, ID(2, 73)),
 
 	/* counter3 specific events */
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_0, 64),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_1, 65),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_2, 66),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_3, 67),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_4, 68),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_5, 69),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_6, 70),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_7, 71),
-	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_full, 72),
-	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_wr_trans_filt, 73),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_0, ID(3, 64)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_1, ID(3, 65)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_2, ID(3, 66)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_3, ID(3, 67)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_4, ID(3, 68)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_5, ID(3, 69)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_6, ID(3, 70)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_7, ID(3, 71)),
+	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_full, ID(3, 72)),
+	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_wr_trans_filt, ID(3, 73)),
 
 	/* counter4 specific events */
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_0, 64),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_1, 65),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_2, 66),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_3, 67),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_4, 68),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_5, 69),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_6, 70),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_7, 71),
-	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2_rmw, 72),
-	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt, 73),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_0, ID(4, 64)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_1, ID(4, 65)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_2, ID(4, 66)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_3, ID(4, 67)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_4, ID(4, 68)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_5, ID(4, 69)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_6, ID(4, 70)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_7, ID(4, 71)),
+	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2_rmw, ID(4, 72)),
+	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt, ID(4, 73)),
 
 	/* counter5 specific events */
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_0, 64),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_1, 65),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_2, 66),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_3, 67),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_4, 68),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_5, 69),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_6, 70),
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_7, 71),
-	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq1, 72),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_0, ID(5, 64)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_1, ID(5, 65)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_2, ID(5, 66)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_3, ID(5, 67)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_4, ID(5, 68)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_5, ID(5, 69)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_6, ID(5, 70)),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_7, ID(5, 71)),
+	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq1, ID(5, 72)),
 
 	/* counter6 specific events */
-	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_end_0, 64),
-	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2, 72),
+	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_end_0, ID(6, 64)),
+	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2, ID(6, 72)),
 
 	/* counter7 specific events */
-	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_2_full, 64),
-	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq0, 65),
+	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_2_full, ID(7, 64)),
+	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq0, ID(7, 65)),
 
 	/* counter8 specific events */
-	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_bias_switched, 64),
-	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_4_full, 65),
+	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_bias_switched, ID(8, 64)),
+	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_4_full, ID(8, 65)),
 
 	/* counter9 specific events */
-	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq1, 65),
-	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_3_4_full, 66),
+	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq1, ID(9, 65)),
+	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_3_4_full, ID(9, 66)),
 
 	/* counter10 specific events */
-	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_misc_mrk, 65),
-	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq0, 66),
+	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_misc_mrk, ID(10, 65)),
+	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq0, ID(10, 66)),
 	NULL,
 };
 
@@ -366,13 +370,10 @@  static void ddr_perf_counter_local_config(struct ddr_pmu *pmu, int config,
 	}
 }
 
-static void ddr_perf_monitor_config(struct ddr_pmu *pmu, int cfg, int cfg1, int cfg2)
+static void ddr_perf_monitor_config(struct ddr_pmu *pmu, int event,
+				    int counter, int axi_id, int axi_mask)
 {
 	u32 pmcfg1, pmcfg2;
-	int event, counter;
-
-	event = FIELD_GET(CONFIG_EVENT, cfg);
-	counter = FIELD_GET(CONFIG_COUNTER, cfg);
 
 	pmcfg1 = readl_relaxed(pmu->base + PMCFG1);
 
@@ -392,12 +393,12 @@  static void ddr_perf_monitor_config(struct ddr_pmu *pmu, int cfg, int cfg1, int
 		pmcfg1 &= ~PMCFG1_RD_BT_FILT_EN;
 
 	pmcfg1 &= ~FIELD_PREP(PMCFG1_ID_MASK, 0x3FFFF);
-	pmcfg1 |= FIELD_PREP(PMCFG1_ID_MASK, cfg2);
+	pmcfg1 |= FIELD_PREP(PMCFG1_ID_MASK, axi_mask);
 	writel(pmcfg1, pmu->base + PMCFG1);
 
 	pmcfg2 = readl_relaxed(pmu->base + PMCFG2);
 	pmcfg2 &= ~FIELD_PREP(PMCFG2_ID, 0x3FFFF);
-	pmcfg2 |= FIELD_PREP(PMCFG2_ID, cfg1);
+	pmcfg2 |= FIELD_PREP(PMCFG2_ID, axi_id);
 	writel(pmcfg2, pmu->base + PMCFG2);
 }
 
@@ -465,6 +466,28 @@  static void ddr_perf_event_start(struct perf_event *event, int flags)
 	hwc->state = 0;
 }
 
+static int ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event, int counter)
+{
+	int i;
+
+	if (event == CYCLES_EVENT_ID) {
+		// The cycles counter is dedicated to the cycle event.
+		if (pmu->events[CYCLES_COUNTER] == NULL)
+			return CYCLES_COUNTER;
+	} else if (counter != 0) {
+		// A counter-specific event uses its designated counter.
+		if (pmu->events[counter] == NULL)
+			return counter;
+	} else {
+		// Auto-allocate a counter for a reference event.
+		for (i = 1; i < NUM_COUNTERS; i++)
+			if (pmu->events[i] == NULL)
+				return i;
+	}
+
+	return -ENOENT;
+}
+
 static int ddr_perf_event_add(struct perf_event *event, int flags)
 {
 	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
@@ -472,10 +495,17 @@  static int ddr_perf_event_add(struct perf_event *event, int flags)
 	int cfg = event->attr.config;
 	int cfg1 = event->attr.config1;
 	int cfg2 = event->attr.config2;
-	int counter;
+	int event_id, counter;
 
+	event_id = FIELD_GET(CONFIG_EVENT, cfg);
 	counter = FIELD_GET(CONFIG_COUNTER, cfg);
 
+	counter = ddr_perf_alloc_counter(pmu, event_id, counter);
+	if (counter < 0) {
+		dev_dbg(pmu->dev, "There are not enough counters\n");
+		return -EOPNOTSUPP;
+	}
+
 	pmu->events[counter] = event;
 	pmu->active_events++;
 	hwc->idx = counter;
@@ -485,7 +515,7 @@  static int ddr_perf_event_add(struct perf_event *event, int flags)
 		ddr_perf_event_start(event, flags);
 
 	/* read trans, write trans, read beat */
-	ddr_perf_monitor_config(pmu, cfg, cfg1, cfg2);
+	ddr_perf_monitor_config(pmu, event_id, counter, cfg1, cfg2);
 
 	return 0;
 }
@@ -506,9 +536,11 @@  static void ddr_perf_event_del(struct perf_event *event, int flags)
 {
 	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
+	int counter = hwc->idx;
 
 	ddr_perf_event_stop(event, PERF_EF_UPDATE);
 
+	pmu->events[counter] = NULL;
 	pmu->active_events--;
 	hwc->idx = -1;
 }