[v8,5/6] KVM: arm/arm64: remove pmc->bitmask

Message ID 20190522153019.18645-6-andrew.murray@arm.com
State New, archived
Series KVM: arm/arm64: add support for chained counters

Commit Message

Andrew Murray May 22, 2019, 3:30 p.m. UTC
We currently use pmc->bitmask to determine the width of the pmc - however
it's superfluous, as the pmc index already indicates whether the pmc is a
cycle counter or an event counter. The architecture clearly defines the
widths of these counters.

Let's remove the bitmask to simplify the code.

Signed-off-by: Andrew Murray <andrew.murray@arm.com>
---
 include/kvm/arm_pmu.h |  1 -
 virt/kvm/arm/pmu.c    | 15 +++++----------
 2 files changed, 5 insertions(+), 11 deletions(-)
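
For reference, the patch's premise is that a counter's width is a pure
function of its index. A minimal sketch of the idea (kvm_pmu_counter_mask()
is a hypothetical helper for illustration, not something this patch adds;
as the review below notes, the cycle counter's width also depends on
PMCR.LC):

	static u64 kvm_pmu_counter_mask(u64 select_idx)
	{
		/* The cycle counter is 64 bits; event counters are 32 bits. */
		return (select_idx == ARMV8_PMU_CYCLE_IDX) ?
			GENMASK_ULL(63, 0) : GENMASK_ULL(31, 0);
	}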

Comments

Marc Zyngier May 22, 2019, 4:07 p.m. UTC | #1
On 22/05/2019 16:30, Andrew Murray wrote:
> We currently use pmc->bitmask to determine the width of the pmc - however
> it's superfluous, as the pmc index already indicates whether the pmc is a
> cycle counter or an event counter. The architecture clearly defines the
> widths of these counters.
> 
> Let's remove the bitmask to simplify the code.
> 
> Signed-off-by: Andrew Murray <andrew.murray@arm.com>
> ---
>  include/kvm/arm_pmu.h |  1 -
>  virt/kvm/arm/pmu.c    | 15 +++++----------
>  2 files changed, 5 insertions(+), 11 deletions(-)
> 
...
> @@ -420,7 +415,7 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
>  
>  	counter = kvm_pmu_get_counter_value(vcpu, select_idx);
>  	/* The initial sample period (overflow count) of an event. */
> -	attr.sample_period = (-counter) & pmc->bitmask;
> +	attr.sample_period = (-counter) & GENMASK(31, 0);

Isn't this the one case where the bitmask actually matters? If we're
dealing with the cycle counter, it shouldn't be truncated, right?

>  
>  	event = perf_event_create_kernel_counter(&attr, -1, current,
>  						 kvm_pmu_perf_overflow, pmc);
> 

Thanks,

	M.
Andrew Murray May 22, 2019, 4:26 p.m. UTC | #2
On Wed, May 22, 2019 at 05:07:31PM +0100, Marc Zyngier wrote:
> On 22/05/2019 16:30, Andrew Murray wrote:
...
> >  	counter = kvm_pmu_get_counter_value(vcpu, select_idx);
> >  	/* The initial sample period (overflow count) of an event. */
> > -	attr.sample_period = (-counter) & pmc->bitmask;
> > +	attr.sample_period = (-counter) & GENMASK(31, 0);
> 
> Isn't this the one case where the bitmask actually matters? If we're
> dealing with the cycle counter, it shouldn't be truncated, right?

Ah yes, that should be conditional on idx as well. 

Thanks,

Andrew Murray
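
A concrete sketch of what "conditional on idx" could look like here
(illustration only, not necessarily the form the series finally adopts;
and, as Suzuki notes below, PMCR.LC complicates the cycle-counter case):

	counter = kvm_pmu_get_counter_value(vcpu, select_idx);
	/* The initial sample period (overflow count) of an event. */
	if (select_idx == ARMV8_PMU_CYCLE_IDX)
		attr.sample_period = (-counter);
	else
		attr.sample_period = (-counter) & GENMASK(31, 0);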

Suzuki K Poulose June 10, 2019, 12:54 p.m. UTC | #3
On 22/05/2019 17:26, Andrew Murray wrote:
> On Wed, May 22, 2019 at 05:07:31PM +0100, Marc Zyngier wrote:
>> On 22/05/2019 16:30, Andrew Murray wrote:
...

>>> -
>>> -	if (val & ARMV8_PMU_PMCR_LC) {
>>> -		pmc = &pmu->pmc[ARMV8_PMU_CYCLE_IDX];
>>> -		pmc->bitmask = 0xffffffffffffffffUL;
>>> -	}
>>>   }

...

>>>   
>>>   static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
>>> @@ -420,7 +415,7 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
>>>   
>>>   	counter = kvm_pmu_get_counter_value(vcpu, select_idx);
>>>   	/* The initial sample period (overflow count) of an event. */
>>> -	attr.sample_period = (-counter) & pmc->bitmask;
>>> +	attr.sample_period = (-counter) & GENMASK(31, 0);
>>
>> Isn't this the one case where the bitmask actually matters? If we're
>> dealing with the cycle counter, it shouldn't be truncated, right?
> 
> Ah yes, that should be conditional on idx as well.

The mask for the cycle counter also depends on the PMCR.LC field set by
the guest, doesn't it? So unless we correlate that with the idx, we could
be passing in wrong results?

Suzuki
Andrew Murray June 12, 2019, 2:30 p.m. UTC | #4
On Mon, Jun 10, 2019 at 01:54:35PM +0100, Suzuki K Poulose wrote:
> 
> 
> On 22/05/2019 17:26, Andrew Murray wrote:
> > On Wed, May 22, 2019 at 05:07:31PM +0100, Marc Zyngier wrote:
> > > On 22/05/2019 16:30, Andrew Murray wrote:
...
> > > >   static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
> > > > @@ -420,7 +415,7 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
> > > >   	counter = kvm_pmu_get_counter_value(vcpu, select_idx);
> > > >   	/* The initial sample period (overflow count) of an event. */
> > > > -	attr.sample_period = (-counter) & pmc->bitmask;
> > > > +	attr.sample_period = (-counter) & GENMASK(31, 0);
> > > 
> > > Isn't this the one case where the bitmask actually matters? If we're
> > > dealing with the cycle counter, it shouldn't be truncated, right?
> > 
> > Ah yes, that should be conditional on idx as well.
> 
> The mask for the cycle counter also depends on the PMCR.LC field set by
> the guest, doesn't it? So unless we correlate that with the idx, we could
> be passing in wrong results?

Yes, that's right - I misread the description of LC. It's RES1 for
AArch64, but of course that doesn't stop a 32-bit host kernel. I'll
update this.

Thanks,

Andrew Murray
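
A sketch of a mask helper honouring PMCR.LC along the lines Suzuki
suggests (illustration only: kvm_pmu_counter_mask() is a hypothetical
name, and it assumes the __vcpu_sys_reg() accessor and ARMV8_PMU_PMCR_LC
definition present in the kernel at this time):

	static u64 kvm_pmu_counter_mask(struct kvm_vcpu *vcpu, u64 select_idx)
	{
		u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);

		/*
		 * Only the cycle counter is 64 bits wide, and only when the
		 * guest has set PMCR.LC (RES1 for AArch64, but a 32-bit host
		 * may leave it clear).
		 */
		if (select_idx == ARMV8_PMU_CYCLE_IDX &&
		    (pmcr & ARMV8_PMU_PMCR_LC))
			return GENMASK_ULL(63, 0);

		return GENMASK_ULL(31, 0);
	}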


Patch

diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index b73f31baca52..2f0e28dc5a9e 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -28,7 +28,6 @@ 
 struct kvm_pmc {
 	u8 idx;	/* index into the pmu->pmc array */
 	struct perf_event *perf_event;
-	u64 bitmask;
 };
 
 struct kvm_pmu {
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index ae1e886d4a1a..c4e2bc213617 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -47,7 +47,10 @@  u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 		counter += perf_event_read_value(pmc->perf_event, &enabled,
 						 &running);
 
-	return counter & pmc->bitmask;
+	if (select_idx != ARMV8_PMU_CYCLE_IDX)
+		counter = lower_32_bits(counter);
+
+	return counter;
 }
 
 /**
@@ -113,7 +116,6 @@  void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
 		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
 		pmu->pmc[i].idx = i;
-		pmu->pmc[i].bitmask = 0xffffffffUL;
 	}
 }
 
@@ -348,8 +350,6 @@  void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
  */
 void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 {
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
-	struct kvm_pmc *pmc;
 	u64 mask;
 	int i;
 
@@ -368,11 +368,6 @@  void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 		for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
 			kvm_pmu_set_counter_value(vcpu, i, 0);
 	}
-
-	if (val & ARMV8_PMU_PMCR_LC) {
-		pmc = &pmu->pmc[ARMV8_PMU_CYCLE_IDX];
-		pmc->bitmask = 0xffffffffffffffffUL;
-	}
 }
 
 static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
@@ -420,7 +415,7 @@  static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 
 	counter = kvm_pmu_get_counter_value(vcpu, select_idx);
 	/* The initial sample period (overflow count) of an event. */
-	attr.sample_period = (-counter) & pmc->bitmask;
+	attr.sample_period = (-counter) & GENMASK(31, 0);
 
 	event = perf_event_create_kernel_counter(&attr, -1, current,
 						 kvm_pmu_perf_overflow, pmc);