diff mbox series

[5/8] KVM: arm64: Remove PMU RAZ/WI handling

Message ID 20201113182602.471776-6-maz@kernel.org (mailing list archive)
State New, archived
Headers show
Series KVM: arm64: Disabled PMU handling | expand

Commit Message

Marc Zyngier Nov. 13, 2020, 6:25 p.m. UTC
There is no RAZ/WI handling allowed for the PMU registers in the
ARMv8 architecture. Nobody can remember how we came to the conclusion
that we could do this, but the ARMv8 ARM is pretty clear that we cannot.

Remove the RAZ/WI handling of the PMU system registers when it is
not configured.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/kvm/sys_regs.c | 30 ------------------------------
 1 file changed, 30 deletions(-)

Comments

Alexandru Elisei Nov. 26, 2020, 3:06 p.m. UTC | #1
Hi Marc,

This patch looks correct to me, I checked in the Arm ARM DDI 0487F.b and indeed
all accesses to the PMU registers are UNDEFINED if the PMU is not present.

I checked all the accessors and now all the PMU registers that KVM emulates will
inject an undefined exception if the VCPU feature isn't set. There's one register
that we don't emulate, PMMIR_EL1, I suppose that's because it's part of PMU
ARMv8.4 and KVM advertises ARMv8.1; if the guest tries to access it, it will get
an undefined exception and KVM will print a warning in emulate_sys_reg().

Reviewed-by: Alexandru Elisei <alexandru.elisei@arm.com>

On 11/13/20 6:25 PM, Marc Zyngier wrote:
> There is no RAZ/WI handling allowed for the PMU registers in the
> ARMv8 architecture. Nobody can remember how we came to the conclusion
> that we could do this, but the ARMv8 ARM is pretty clear that we cannot.
>
> Remove the RAZ/WI handling of the PMU system registers when it is
> not configured.
>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  arch/arm64/kvm/sys_regs.c | 30 ------------------------------
>  1 file changed, 30 deletions(-)
>
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index b098d667bb42..3bd4cc40536b 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -643,9 +643,6 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
>  {
>  	u64 val;
>  
> -	if (!kvm_arm_pmu_v3_ready(vcpu))
> -		return trap_raz_wi(vcpu, p, r);
> -
>  	if (pmu_access_el0_disabled(vcpu))
>  		return false;
>  
> @@ -672,9 +669,6 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
>  static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
>  			  const struct sys_reg_desc *r)
>  {
> -	if (!kvm_arm_pmu_v3_ready(vcpu))
> -		return trap_raz_wi(vcpu, p, r);
> -
>  	if (pmu_access_event_counter_el0_disabled(vcpu))
>  		return false;
>  
> @@ -693,9 +687,6 @@ static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
>  {
>  	u64 pmceid;
>  
> -	if (!kvm_arm_pmu_v3_ready(vcpu))
> -		return trap_raz_wi(vcpu, p, r);
> -
>  	BUG_ON(p->is_write);
>  
>  	if (pmu_access_el0_disabled(vcpu))
> @@ -728,9 +719,6 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
>  {
>  	u64 idx;
>  
> -	if (!kvm_arm_pmu_v3_ready(vcpu))
> -		return trap_raz_wi(vcpu, p, r);
> -
>  	if (r->CRn == 9 && r->CRm == 13) {
>  		if (r->Op2 == 2) {
>  			/* PMXEVCNTR_EL0 */
> @@ -784,9 +772,6 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
>  {
>  	u64 idx, reg;
>  
> -	if (!kvm_arm_pmu_v3_ready(vcpu))
> -		return trap_raz_wi(vcpu, p, r);
> -
>  	if (pmu_access_el0_disabled(vcpu))
>  		return false;
>  
> @@ -824,9 +809,6 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
>  {
>  	u64 val, mask;
>  
> -	if (!kvm_arm_pmu_v3_ready(vcpu))
> -		return trap_raz_wi(vcpu, p, r);
> -
>  	if (pmu_access_el0_disabled(vcpu))
>  		return false;
>  
> @@ -855,9 +837,6 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
>  {
>  	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
>  
> -	if (!kvm_arm_pmu_v3_ready(vcpu))
> -		return trap_raz_wi(vcpu, p, r);
> -
>  	if (check_pmu_access_disabled(vcpu, 0))
>  		return false;
>  
> @@ -882,9 +861,6 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
>  {
>  	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
>  
> -	if (!kvm_arm_pmu_v3_ready(vcpu))
> -		return trap_raz_wi(vcpu, p, r);
> -
>  	if (pmu_access_el0_disabled(vcpu))
>  		return false;
>  
> @@ -907,9 +883,6 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
>  {
>  	u64 mask;
>  
> -	if (!kvm_arm_pmu_v3_ready(vcpu))
> -		return trap_raz_wi(vcpu, p, r);
> -
>  	if (!p->is_write)
>  		return read_from_write_only(vcpu, p, r);
>  
> @@ -924,9 +897,6 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
>  static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
>  			     const struct sys_reg_desc *r)
>  {
> -	if (!kvm_arm_pmu_v3_ready(vcpu))
> -		return trap_raz_wi(vcpu, p, r);
> -
>  	if (!kvm_vcpu_has_pmu(vcpu)) {
>  		kvm_inject_undefined(vcpu);
>  		return false;
Marc Zyngier Nov. 27, 2020, 8:50 a.m. UTC | #2
On 2020-11-26 15:06, Alexandru Elisei wrote:
> Hi Marc,
> 
> This patch looks correct to me, I checked in the Arm ARM DDI 0487F.b 
> and indeed
> all accesses to the PMU registers are UNDEFINED if the PMU is not 
> present.
> 
> I checked all the accessors and now all the PMU registers that KVM 
> emulates will
> inject an undefined exception if the VCPU feature isn't set. There's
> one register
> that we don't emulate, PMMIR_EL1, I suppose that's because it's part of 
> PMU
> ARMv8.4 and KVM advertises ARMv8.1; if the guest tries to access it, it 
> will get
> an undefined exception and KVM will print a warning in 
> emulate_sys_reg().

Funny that. I wrote a patch for that a long while ago, and obviously
never did anything with it [1]... Actually, the whole series was 
silently
dropped. I guess I had other things to think about at the time!

Let me pick that up again.

> Reviewed-by: Alexandru Elisei <alexandru.elisei@arm.com>

Thanks!

         M.

[1] 
https://lore.kernel.org/kvmarm/20200216185324.32596-6-maz@kernel.org/
diff mbox series

Patch

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index b098d667bb42..3bd4cc40536b 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -643,9 +643,6 @@  static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 {
 	u64 val;
 
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
 	if (pmu_access_el0_disabled(vcpu))
 		return false;
 
@@ -672,9 +669,6 @@  static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			  const struct sys_reg_desc *r)
 {
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
 	if (pmu_access_event_counter_el0_disabled(vcpu))
 		return false;
 
@@ -693,9 +687,6 @@  static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 {
 	u64 pmceid;
 
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
 	BUG_ON(p->is_write);
 
 	if (pmu_access_el0_disabled(vcpu))
@@ -728,9 +719,6 @@  static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 {
 	u64 idx;
 
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
 	if (r->CRn == 9 && r->CRm == 13) {
 		if (r->Op2 == 2) {
 			/* PMXEVCNTR_EL0 */
@@ -784,9 +772,6 @@  static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 {
 	u64 idx, reg;
 
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
 	if (pmu_access_el0_disabled(vcpu))
 		return false;
 
@@ -824,9 +809,6 @@  static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 {
 	u64 val, mask;
 
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
 	if (pmu_access_el0_disabled(vcpu))
 		return false;
 
@@ -855,9 +837,6 @@  static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 {
 	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
 
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
 	if (check_pmu_access_disabled(vcpu, 0))
 		return false;
 
@@ -882,9 +861,6 @@  static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 {
 	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
 
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
 	if (pmu_access_el0_disabled(vcpu))
 		return false;
 
@@ -907,9 +883,6 @@  static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 {
 	u64 mask;
 
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p, r);
 
@@ -924,9 +897,6 @@  static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			     const struct sys_reg_desc *r)
 {
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
 	if (!kvm_vcpu_has_pmu(vcpu)) {
 		kvm_inject_undefined(vcpu);
 		return false;