
[v3,4/5] KVM: arm64: Mask out filtered events in PMCEID{0,1}_EL0

Message ID 20200908075830.1161921-5-maz@kernel.org
State New, archived
Series KVM: arm64: Filtering PMU events

Commit Message

Marc Zyngier Sept. 8, 2020, 7:58 a.m. UTC
As we can now hide events from the guest, let's also adjust its view of
PMCEID{0,1}_EL0 so that it can figure out why some common events are not
counting as they should.

The astute user can still look into the TRM for their CPU and find out
they've been cheated, though. Nobody's perfect.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/kvm/pmu-emul.c | 29 +++++++++++++++++++++++++++++
 arch/arm64/kvm/sys_regs.c |  5 +----
 include/kvm/arm_pmu.h     |  5 +++++
 3 files changed, 35 insertions(+), 4 deletions(-)
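
For background, the 64-bit PMCEID{0,1}_EL0 registers advertise which common
events the PMU implements: the low half of PMCEID0_EL0 describes event numbers
0x0000-0x001F, the low half of PMCEID1_EL0 describes 0x0020-0x003F, and the
high halves describe the extended range starting at 0x4000. Below is a minimal
userspace sketch of the masking this patch performs, not the kernel code
itself: filter_get_byte() and the byte-granular 'filter' array are
hypothetical stand-ins for bitmap_get_value8() and the kvm->arch.pmu_filter
bitmap.

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical stand-in for bitmap_get_value8(): return the 8 filter
 * bits starting at bit index 'start' (always byte-aligned here).
 */
static uint64_t filter_get_byte(const uint8_t *filter, unsigned int start)
{
	return filter[start / 8];
}

/*
 * Userspace model of the masking in kvm_pmu_get_pmceid(): 'val' is the
 * raw PMCEID{0,1}_EL0 value, 'filter' the per-event allow bitmap.
 */
static uint64_t mask_pmceid(const uint8_t *filter, uint64_t val, bool pmceid1)
{
	unsigned int base = pmceid1 ? 32 : 0;	/* PMCEID1 covers events 0x20.. */
	uint64_t mask = 0;
	int i;

	for (i = 0; i < 32; i += 8) {
		/* low half: common events base .. base + 31 */
		mask |= filter_get_byte(filter, base + i) << i;
		/* high half: extended events from 0x4000 + base onwards */
		mask |= filter_get_byte(filter, 0x4000 + base + i) << (32 + i);
	}

	return val & mask;
}

A set bit in 'mask' means the corresponding event is allowed by the filter, so
ANDing it with the hardware value hides filtered events without ever
advertising an event the PMU does not implement.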

Comments

Eric Auger Sept. 9, 2020, 5:43 p.m. UTC | #1
Hi Marc,

On 9/8/20 9:58 AM, Marc Zyngier wrote:
> As we can now hide events from the guest, let's also adjust its view of
> PMCEID{0,1}_EL0 so that it can figure out why some common events are not
> counting as they should.
Referring to my previous comment, should we filter the cycle counter out?
> 
> The astute user can still look into the TRM for their CPU and find out
> they've been cheated, though. Nobody's perfect.
> 
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  arch/arm64/kvm/pmu-emul.c | 29 +++++++++++++++++++++++++++++
>  arch/arm64/kvm/sys_regs.c |  5 +----
>  include/kvm/arm_pmu.h     |  5 +++++
>  3 files changed, 35 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
> index 67a731bafbc9..0458860bade2 100644
> --- a/arch/arm64/kvm/pmu-emul.c
> +++ b/arch/arm64/kvm/pmu-emul.c
> @@ -765,6 +765,35 @@ static int kvm_pmu_probe_pmuver(void)
>  	return pmuver;
>  }
>  
> +u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
> +{
> +	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
> +	u64 val, mask = 0;
> +	int base, i;
> +
> +	if (!pmceid1) {
> +		val = read_sysreg(pmceid0_el0);
> +		base = 0;
> +	} else {
> +		val = read_sysreg(pmceid1_el0);
> +		base = 32;
> +	}
> +
> +	if (!bmap)
> +		return val;
> +
> +	for (i = 0; i < 32; i += 8) {
s/32/4?

Thanks

Eric
> +		u64 byte;
> +
> +		byte = bitmap_get_value8(bmap, base + i);
> +		mask |= byte << i;
> +		byte = bitmap_get_value8(bmap, 0x4000 + base + i);
> +		mask |= byte << (32 + i);
> +	}
> +
> +	return val & mask;
> +}
> +
>  bool kvm_arm_support_pmu_v3(void)
>  {
>  	/*
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index 077293b5115f..20ab2a7d37ca 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -769,10 +769,7 @@ static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
>  	if (pmu_access_el0_disabled(vcpu))
>  		return false;
>  
> -	if (!(p->Op2 & 1))
> -		pmceid = read_sysreg(pmceid0_el0);
> -	else
> -		pmceid = read_sysreg(pmceid1_el0);
> +	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
>  
>  	p->regval = pmceid;
>  
> diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
> index 6db030439e29..98cbfe885a53 100644
> --- a/include/kvm/arm_pmu.h
> +++ b/include/kvm/arm_pmu.h
> @@ -34,6 +34,7 @@ struct kvm_pmu {
>  u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
>  void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
>  u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
> +u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
>  void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
>  void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
>  void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
> @@ -108,6 +109,10 @@ static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
>  {
>  	return 0;
>  }
> +static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
> +{
> +	return 0;
> +}
>  #endif
>  
>  #endif
>
Marc Zyngier Sept. 9, 2020, 5:50 p.m. UTC | #2
Hi Eric,

On 2020-09-09 18:43, Auger Eric wrote:
> Hi Marc,
> 
> On 9/8/20 9:58 AM, Marc Zyngier wrote:
>> As we can now hide events from the guest, let's also adjust its view of
>> PMCEID{0,1}_EL0 so that it can figure out why some common events are not
>> counting as they should.
> Referring to my previous comment, should we filter the cycle counter out?
>> 
>> The astute user can still look into the TRM for their CPU and find out
>> they've been cheated, though. Nobody's perfect.
>> 
>> Signed-off-by: Marc Zyngier <maz@kernel.org>
>> ---
>>  arch/arm64/kvm/pmu-emul.c | 29 +++++++++++++++++++++++++++++
>>  arch/arm64/kvm/sys_regs.c |  5 +----
>>  include/kvm/arm_pmu.h     |  5 +++++
>>  3 files changed, 35 insertions(+), 4 deletions(-)
>> 
>> diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
>> index 67a731bafbc9..0458860bade2 100644
>> --- a/arch/arm64/kvm/pmu-emul.c
>> +++ b/arch/arm64/kvm/pmu-emul.c
>> @@ -765,6 +765,35 @@ static int kvm_pmu_probe_pmuver(void)
>>  	return pmuver;
>>  }
>> 
>> +u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
>> +{
>> +	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
>> +	u64 val, mask = 0;
>> +	int base, i;
>> +
>> +	if (!pmceid1) {
>> +		val = read_sysreg(pmceid0_el0);
>> +		base = 0;
>> +	} else {
>> +		val = read_sysreg(pmceid1_el0);
>> +		base = 32;
>> +	}
>> +
>> +	if (!bmap)
>> +		return val;
>> +
>> +	for (i = 0; i < 32; i += 8) {
> s/32/4?

I don't think so, see below.

> 
> Thanks
> 
> Eric
>> +		u64 byte;
>> +
>> +		byte = bitmap_get_value8(bmap, base + i);
>> +		mask |= byte << i;

For each iteration of the loop, we read a byte from the bitmap
(hence the += 8 above), and orr it into the mask. This makes 4
iterations of the loop.

Or am I missing your point entirely?

Thanks,

         M.
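
To make the byte-stepping above concrete, here is a small standalone sketch
under the same assumption as before (a byte-granular 'filter' array standing
in for the kernel's unsigned long bitmap). With only event 0x11 (CPU_CYCLES)
allowed, the four reads at i = 0, 8, 16 and 24 yield a low-half mask with just
bit 17 set:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical filter: one bit per event number, all denied */
	static uint8_t filter[0x10000 / 8];
	uint32_t mask = 0;
	int i;

	/* allow only event 0x11 (CPU_CYCLES) */
	filter[0x11 / 8] |= 1u << (0x11 % 8);

	/* same shape as the kernel loop: i = 0, 8, 16, 24 -> four byte reads */
	for (i = 0; i < 32; i += 8)
		mask |= (uint32_t)filter[i / 8] << i;

	printf("PMCEID0 low-half mask: 0x%08x\n", mask);	/* 0x00020000 */
	return 0;
}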
Eric Auger Sept. 9, 2020, 6:07 p.m. UTC | #3
Hi Marc,

On 9/9/20 7:50 PM, Marc Zyngier wrote:
> Hi Eric,
> 
> On 2020-09-09 18:43, Auger Eric wrote:
>> Hi Marc,
>>
>> On 9/8/20 9:58 AM, Marc Zyngier wrote:
>>> As we can now hide events from the guest, let's also adjust its view of
>>> PMCEID{0,1}_EL0 so that it can figure out why some common events are not
>>> counting as they should.
>> Referring to my previous comment, should we filter the cycle counter out?
>>>
>>> The astute user can still look into the TRM for their CPU and find out
>>> they've been cheated, though. Nobody's perfect.
>>>
>>> Signed-off-by: Marc Zyngier <maz@kernel.org>
>>> ---
>>>  arch/arm64/kvm/pmu-emul.c | 29 +++++++++++++++++++++++++++++
>>>  arch/arm64/kvm/sys_regs.c |  5 +----
>>>  include/kvm/arm_pmu.h     |  5 +++++
>>>  3 files changed, 35 insertions(+), 4 deletions(-)
>>>
>>> diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
>>> index 67a731bafbc9..0458860bade2 100644
>>> --- a/arch/arm64/kvm/pmu-emul.c
>>> +++ b/arch/arm64/kvm/pmu-emul.c
>>> @@ -765,6 +765,35 @@ static int kvm_pmu_probe_pmuver(void)
>>>      return pmuver;
>>>  }
>>>
>>> +u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
>>> +{
>>> +    unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
>>> +    u64 val, mask = 0;
>>> +    int base, i;
>>> +
>>> +    if (!pmceid1) {
>>> +        val = read_sysreg(pmceid0_el0);
>>> +        base = 0;
>>> +    } else {
>>> +        val = read_sysreg(pmceid1_el0);
>>> +        base = 32;
>>> +    }
>>> +
>>> +    if (!bmap)
>>> +        return val;
>>> +
>>> +    for (i = 0; i < 32; i += 8) {
>> s/32/4?
> 
> I don't think so, see below.
> 
>>
>> Thanks
>>
>> Eric
>>> +        u64 byte;
>>> +
>>> +        byte = bitmap_get_value8(bmap, base + i);
>>> +        mask |= byte << i;
> 
> For each iteration of the loop, we read a byte from the bitmap
> (hence the += 8 above), and orr it into the mask. This makes 4
> iterations of the loop.
> 
> Or am I missing your point entirely?

Hum, no, you're right. Sorry for the noise.

Looks good to me:

Reviewed-by: Eric Auger <eric.auger@redhat.com>

Eric


> 
> Thanks,
> 
>         M.

Patch

diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 67a731bafbc9..0458860bade2 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -765,6 +765,35 @@ static int kvm_pmu_probe_pmuver(void)
 	return pmuver;
 }
 
+u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
+{
+	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
+	u64 val, mask = 0;
+	int base, i;
+
+	if (!pmceid1) {
+		val = read_sysreg(pmceid0_el0);
+		base = 0;
+	} else {
+		val = read_sysreg(pmceid1_el0);
+		base = 32;
+	}
+
+	if (!bmap)
+		return val;
+
+	for (i = 0; i < 32; i += 8) {
+		u64 byte;
+
+		byte = bitmap_get_value8(bmap, base + i);
+		mask |= byte << i;
+		byte = bitmap_get_value8(bmap, 0x4000 + base + i);
+		mask |= byte << (32 + i);
+	}
+
+	return val & mask;
+}
+
 bool kvm_arm_support_pmu_v3(void)
 {
 	/*
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 077293b5115f..20ab2a7d37ca 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -769,10 +769,7 @@ static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	if (pmu_access_el0_disabled(vcpu))
 		return false;
 
-	if (!(p->Op2 & 1))
-		pmceid = read_sysreg(pmceid0_el0);
-	else
-		pmceid = read_sysreg(pmceid1_el0);
+	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
 
 	p->regval = pmceid;
 
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 6db030439e29..98cbfe885a53 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -34,6 +34,7 @@ struct kvm_pmu {
 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
 u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
+u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
 void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
@@ -108,6 +109,10 @@ static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 {
 	return 0;
 }
+static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
+{
+	return 0;
+}
 #endif
 
 #endif