[v10,2/5] KVM: arm64: Use per guest ID register for ID_AA64PFR0_EL1.[CSV2|CSV3]

Message ID 20230522221835.957419-3-jingzhangos@google.com (mailing list archive)
State New, archived
Series: Support writable CPU ID registers from userspace

Commit Message

Jing Zhang May 22, 2023, 10:18 p.m. UTC
With per guest ID registers, ID_AA64PFR0_EL1.[CSV2|CSV3] settings from
userspace can be stored in the corresponding ID register.

The setting of the CSV bits for protected VMs is removed, following the
discussion with Fuad below:
https://lore.kernel.org/all/CA+EHjTwXA9TprX4jeG+-D+c8v9XG+oFdU1o6TSkvVye145_OvA@mail.gmail.com

Besides the removal of the CSV bit setting for protected VMs, no other
functional change is intended.

Signed-off-by: Jing Zhang <jingzhangos@google.com>
---
 arch/arm64/include/asm/kvm_host.h |  2 --
 arch/arm64/kvm/arm.c              | 17 ---------
 arch/arm64/kvm/sys_regs.c         | 58 +++++++++++++++++++++++++------
 3 files changed, 47 insertions(+), 30 deletions(-)
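
For context, the per-VM ID register storage this patch builds on, introduced
in an earlier patch of this series, is roughly the following (a sketch; the
exact macro and field names from patch 1/5 may differ):

	/* One u64 slot per ID register, shared by all vCPUs of a VM. */
	struct kvm_arch {
		/* ... */
		u64 id_regs[KVM_ARM_ID_REG_NUM];
	};

	/* Map an ID register encoding to its slot in the array. */
	#define IDREG_IDX(id)	(((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
	#define IDREG(kvm, id)	((kvm)->arch.id_regs[IDREG_IDX(id)])

With this in place, the CSV2/CSV3 values live in the ID_AA64PFR0_EL1 slot
rather than in the dedicated pfr0_csv2/pfr0_csv3 fields removed below.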

Comments

Marc Zyngier May 28, 2023, 10:29 a.m. UTC | #1
On Mon, 22 May 2023 23:18:32 +0100,
Jing Zhang <jingzhangos@google.com> wrote:
> 
> With per guest ID registers, ID_AA64PFR0_EL1.[CSV2|CSV3] settings from
> userspace can be stored in the corresponding ID register.
> 
> The setting of the CSV bits for protected VMs is removed, following the
> discussion with Fuad below:
> https://lore.kernel.org/all/CA+EHjTwXA9TprX4jeG+-D+c8v9XG+oFdU1o6TSkvVye145_OvA@mail.gmail.com
> 
> Besides the removal of the CSV bit setting for protected VMs, no other
> functional change is intended.

One thing that you don't mention is the addition of some locking,
which is a pretty significant change.

> 
> Signed-off-by: Jing Zhang <jingzhangos@google.com>
> ---
>  arch/arm64/include/asm/kvm_host.h |  2 --
>  arch/arm64/kvm/arm.c              | 17 ---------
>  arch/arm64/kvm/sys_regs.c         | 58 +++++++++++++++++++++++++------
>  3 files changed, 47 insertions(+), 30 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 069606170c82..8a2fde6c04c4 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -257,8 +257,6 @@ struct kvm_arch {
>  
>  	cpumask_var_t supported_cpus;
>  
> -	u8 pfr0_csv2;
> -	u8 pfr0_csv3;
>  	struct {
>  		u8 imp:4;
>  		u8 unimp:4;
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 774656a0718d..5114521ace60 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -102,22 +102,6 @@ static int kvm_arm_default_max_vcpus(void)
>  	return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
>  }
>  
> -static void set_default_spectre(struct kvm *kvm)
> -{
> -	/*
> -	 * The default is to expose CSV2 == 1 if the HW isn't affected.
> -	 * Although this is a per-CPU feature, we make it global because
> -	 * asymmetric systems are just a nuisance.
> -	 *
> -	 * Userspace can override this as long as it doesn't promise
> -	 * the impossible.
> -	 */
> -	if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
> -		kvm->arch.pfr0_csv2 = 1;
> -	if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED)
> -		kvm->arch.pfr0_csv3 = 1;
> -}
> -
>  /**
>   * kvm_arch_init_vm - initializes a VM data structure
>   * @kvm:	pointer to the KVM struct
> @@ -161,7 +145,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
>  	/* The maximum number of VCPUs is limited by the host's GIC model */
>  	kvm->max_vcpus = kvm_arm_default_max_vcpus();
>  
> -	set_default_spectre(kvm);
>  	kvm_arm_init_hypercalls(kvm);
>  	kvm_arm_init_id_regs(kvm);
>  
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index d2ee3a1c7f03..9fb1c2f8f5a5 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -1218,10 +1218,6 @@ static u64 kvm_arm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
>  		if (!vcpu_has_sve(vcpu))
>  			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
>  		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
> -		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
> -		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
> -		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
> -		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
>  		if (kvm_vgic_global_state.type == VGIC_V3) {
>  			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
>  			val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
> @@ -1359,7 +1355,11 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
>  			       const struct sys_reg_desc *rd,
>  			       u64 val)
>  {
> +	struct kvm_arch *arch = &vcpu->kvm->arch;

The use of kvm_arch as an anchor is very non-idiomatic. Use the kvm
pointer for this if you must, but I'd rather you spell the whole thing
out.
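
That is, something along the lines of:

	mutex_lock(&vcpu->kvm->arch.config_lock);
	/* ... */
	mutex_unlock(&vcpu->kvm->arch.config_lock);

which is also the form the suggested diff below ends up using.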

> +	u64 old_val = read_id_reg(vcpu, rd);
> +	u64 new_val = val;
>  	u8 csv2, csv3;
> +	int ret = 0;
>  
>  	/*
>  	 * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
> @@ -1377,17 +1377,26 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
>  	    (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
>  		return -EINVAL;
>  
> +	mutex_lock(&arch->config_lock);
>  	/* We can only differ with CSV[23], and anything else is an error */
> -	val ^= read_id_reg(vcpu, rd);
> +	val ^= old_val;
>  	val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
>  		 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
> -	if (val)
> -		return -EINVAL;
> -
> -	vcpu->kvm->arch.pfr0_csv2 = csv2;
> -	vcpu->kvm->arch.pfr0_csv3 = csv3;
> +	if (val) {
> +		ret = -EINVAL;
> +		goto out;
> +	}
>  
> -	return 0;
> +	/* Only allow userspace to change the idregs before VM running */
> +	if (kvm_vm_has_ran_once(vcpu->kvm)) {
> +		if (new_val != old_val)
> +			ret = -EBUSY;

This sort of check should be done exactly once in a central spot. For
similar reasons, the config_lock should be taken in a single location
so that we can actually reason about this globally rather than at a
microscopic level.

Something like this (which applies to the full series):

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index b3eacfc592eb..e184b9350166 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1534,7 +1534,6 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
 			       const struct sys_reg_desc *rd,
 			       u64 val)
 {
-	struct kvm_arch *arch = &vcpu->kvm->arch;
 	u8 pmuver, host_pmuver;
 	bool valid_pmu;
 	int ret = 0;
@@ -1557,14 +1556,6 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
 	if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
 		return -EINVAL;
 
-	mutex_lock(&arch->config_lock);
-	/* Only allow userspace to change the idregs before VM running */
-	if (kvm_vm_has_ran_once(vcpu->kvm)) {
-		if (val != read_id_reg(vcpu, rd))
-			ret = -EBUSY;
-		goto out;
-	}
-
 	if (!valid_pmu) {
 		/*
 		 * Ignore the PMUVer field in @val. The PMUVer would be determined
@@ -1592,7 +1583,6 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
 			   pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
 
 out:
-	mutex_unlock(&arch->config_lock);
 	return ret;
 }
 
@@ -1617,7 +1607,6 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
 			   const struct sys_reg_desc *rd,
 			   u64 val)
 {
-	struct kvm_arch *arch = &vcpu->kvm->arch;
 	u8 perfmon, host_perfmon;
 	bool valid_pmu;
 	int ret = 0;
@@ -1641,14 +1630,6 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
 	if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
 		return -EINVAL;
 
-	mutex_lock(&arch->config_lock);
-	/* Only allow userspace to change the idregs before VM running */
-	if (kvm_vm_has_ran_once(vcpu->kvm)) {
-		if (val != read_id_reg(vcpu, rd))
-			ret = -EBUSY;
-		goto out;
-	}
-
 	if (!valid_pmu) {
 		/*
 		 * Ignore the PerfMon field in @val. The PerfMon would be determined
@@ -1676,7 +1657,6 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
 			   perfmon == ID_DFR0_EL1_PerfMon_IMPDEF);
 
 out:
-	mutex_unlock(&arch->config_lock);
 	return ret;
 }
 
@@ -1690,11 +1670,7 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
 static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		      u64 *val)
 {
-	struct kvm_arch *arch = &vcpu->kvm->arch;
-
-	mutex_lock(&arch->config_lock);
 	*val = read_id_reg(vcpu, rd);
-	mutex_unlock(&arch->config_lock);
 
 	return 0;
 }
@@ -1702,21 +1678,12 @@ static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		      u64 val)
 {
-	struct kvm_arch *arch = &vcpu->kvm->arch;
 	u32 id = reg_to_encoding(rd);
 	int ret = 0;
 
-	mutex_lock(&arch->config_lock);
-	/* Only allow userspace to change the idregs before VM running */
-	if (kvm_vm_has_ran_once(vcpu->kvm)) {
-		if (val != read_id_reg(vcpu, rd))
-			ret = -EBUSY;
-	} else {
-		ret = arm64_check_features(vcpu, rd, val);
-		if (!ret)
-			IDREG(vcpu->kvm, id) = val;
-	}
-	mutex_unlock(&arch->config_lock);
+	ret = arm64_check_features(vcpu, rd, val);
+	if (!ret)
+		IDREG(vcpu->kvm, id) = val;
 
 	return ret;
 }
@@ -3438,6 +3405,9 @@ int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
 	if (!r || sysreg_hidden_user(vcpu, r))
 		return -ENOENT;
 
+	if (is_id_reg(reg_to_encoding(r)))
+		mutex_lock(&vcpu->kvm->arch.config_lock);
+
 	if (r->get_user) {
 		ret = (r->get_user)(vcpu, r, &val);
 	} else {
@@ -3445,6 +3415,9 @@ int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
 		ret = 0;
 	}
 
+	if (is_id_reg(reg_to_encoding(r)))
+		mutex_unlock(&vcpu->kvm->arch.config_lock);
+
 	if (!ret)
 		ret = put_user(val, uaddr);
 
@@ -3482,9 +3455,21 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
 	if (!r || sysreg_hidden_user(vcpu, r))
 		return -ENOENT;
 
+	/* Only allow userspace to change the idregs before VM running */
+	if (is_id_reg(reg_to_encoding(r)) &&
+	    kvm_vm_has_ran_once(vcpu->kvm)) {
+		if (val == read_id_reg(vcpu, r))
+			return 0;
+		return -EBUSY;
+	}
+
 	if (sysreg_user_write_ignore(vcpu, r))
 		return 0;
 
+	/* ID regs are global to the VM and cannot be updated concurrently */
+	if (is_id_reg(reg_to_encoding(r)))
+		mutex_lock(&vcpu->kvm->arch.config_lock);
+
 	if (r->set_user) {
 		ret = (r->set_user)(vcpu, r, val);
 	} else {
@@ -3492,6 +3477,9 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
 		ret = 0;
 	}
 
+	if (is_id_reg(reg_to_encoding(r)))
+		mutex_unlock(&vcpu->kvm->arch.config_lock);
+
 	return ret;
 }

and you can then restore the code to its original shape, as there is
no need to change the control flow anymore.
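
For illustration, the resulting userspace-visible behaviour is that an ID
register write goes through until the first vCPU has run, and fails with
EBUSY afterwards unless the written value matches the current one. A minimal
sketch, assuming the usual ARM64_SYS_REG() encoding from the arm64 uapi
headers:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int set_id_aa64pfr0(int vcpu_fd, __u64 val)
	{
		struct kvm_one_reg reg = {
			/* ID_AA64PFR0_EL1: Op0=3, Op1=0, CRn=0, CRm=4, Op2=0 */
			.id	= ARM64_SYS_REG(3, 0, 0, 4, 0),
			.addr	= (__u64)&val,
		};

		/* Returns 0 before the first KVM_RUN; fails with errno
		 * set to EBUSY after that, unless val matches the
		 * register's current value. */
		return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
	}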

> +	} else {
> +		IDREG(vcpu->kvm, reg_to_encoding(rd)) = new_val;
> +	}
> +out:
> +	mutex_unlock(&arch->config_lock);
> +	return ret;
>  }
>  
>  static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
> @@ -1479,7 +1488,12 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
>  static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
>  		      u64 *val)
>  {

Right above this function is a comment that says the idregs are
immutable. Time to revisit it?
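
Something along these lines, for example (just a sketch):

	/*
	 * cpufeature ID register user accessors
	 *
	 * ID registers are writable from userspace until the first vCPU
	 * has run; after that, a write must match the current value.
	 */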

> +	struct kvm_arch *arch = &vcpu->kvm->arch;
> +
> +	mutex_lock(&arch->config_lock);
>  	*val = read_id_reg(vcpu, rd);
> +	mutex_unlock(&arch->config_lock);
> +
>  	return 0;
>  }
>  
> @@ -3364,6 +3378,7 @@ void kvm_arm_init_id_regs(struct kvm *kvm)
>  {
>  	const struct sys_reg_desc *idreg;
>  	struct sys_reg_params params;
> +	u64 val;
>  	u32 id;
>  
>  	/* Find the first idreg (SYS_ID_PFR0_EL1) in sys_reg_descs. */
> @@ -3386,6 +3401,27 @@ void kvm_arm_init_id_regs(struct kvm *kvm)
>  		idreg++;
>  		id = reg_to_encoding(idreg);
>  	}
> +
> +	/*
> +	 * The default is to expose CSV2 == 1 if the HW isn't affected.
> +	 * Although this is a per-CPU feature, we make it global because
> +	 * asymmetric systems are just a nuisance.
> +	 *
> +	 * Userspace can override this as long as it doesn't promise
> +	 * the impossible.
> +	 */
> +	val = IDREG(kvm, SYS_ID_AA64PFR0_EL1);
> +
> +	if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
> +		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
> +		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), 1);
> +	}
> +	if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
> +		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
> +		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), 1);
> +	}
> +
> +	IDREG(kvm, SYS_ID_AA64PFR0_EL1) = val;
>  }
>  
>  int __init kvm_sys_reg_table_init(void)

Thanks,

	M.
Jing Zhang May 30, 2023, 6:32 p.m. UTC | #2
On Sun, May 28, 2023 at 3:29 AM Marc Zyngier <maz@kernel.org> wrote:
>
> On Mon, 22 May 2023 23:18:32 +0100,
> Jing Zhang <jingzhangos@google.com> wrote:
> >
> > With per guest ID registers, ID_AA64PFR0_EL1.[CSV2|CSV3] settings from
> > userspace can be stored in the corresponding ID register.
> >
> > The setting of the CSV bits for protected VMs is removed, following the
> > discussion with Fuad below:
> > https://lore.kernel.org/all/CA+EHjTwXA9TprX4jeG+-D+c8v9XG+oFdU1o6TSkvVye145_OvA@mail.gmail.com
> >
> > Besides the removal of the CSV bit setting for protected VMs, no other
> > functional change is intended.
>
> One thing that you don't mention is the addition of some locking,
> which is a pretty significant change.
Will update the commit message.
>
> >
> > Signed-off-by: Jing Zhang <jingzhangos@google.com>
> > ---
> >  arch/arm64/include/asm/kvm_host.h |  2 --
> >  arch/arm64/kvm/arm.c              | 17 ---------
> >  arch/arm64/kvm/sys_regs.c         | 58 +++++++++++++++++++++++++------
> >  3 files changed, 47 insertions(+), 30 deletions(-)
> >
> > diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> > index 069606170c82..8a2fde6c04c4 100644
> > --- a/arch/arm64/include/asm/kvm_host.h
> > +++ b/arch/arm64/include/asm/kvm_host.h
> > @@ -257,8 +257,6 @@ struct kvm_arch {
> >
> >       cpumask_var_t supported_cpus;
> >
> > -     u8 pfr0_csv2;
> > -     u8 pfr0_csv3;
> >       struct {
> >               u8 imp:4;
> >               u8 unimp:4;
> > diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> > index 774656a0718d..5114521ace60 100644
> > --- a/arch/arm64/kvm/arm.c
> > +++ b/arch/arm64/kvm/arm.c
> > @@ -102,22 +102,6 @@ static int kvm_arm_default_max_vcpus(void)
> >       return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
> >  }
> >
> > -static void set_default_spectre(struct kvm *kvm)
> > -{
> > -     /*
> > -      * The default is to expose CSV2 == 1 if the HW isn't affected.
> > -      * Although this is a per-CPU feature, we make it global because
> > -      * asymmetric systems are just a nuisance.
> > -      *
> > -      * Userspace can override this as long as it doesn't promise
> > -      * the impossible.
> > -      */
> > -     if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
> > -             kvm->arch.pfr0_csv2 = 1;
> > -     if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED)
> > -             kvm->arch.pfr0_csv3 = 1;
> > -}
> > -
> >  /**
> >   * kvm_arch_init_vm - initializes a VM data structure
> >   * @kvm:     pointer to the KVM struct
> > @@ -161,7 +145,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
> >       /* The maximum number of VCPUs is limited by the host's GIC model */
> >       kvm->max_vcpus = kvm_arm_default_max_vcpus();
> >
> > -     set_default_spectre(kvm);
> >       kvm_arm_init_hypercalls(kvm);
> >       kvm_arm_init_id_regs(kvm);
> >
> > diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> > index d2ee3a1c7f03..9fb1c2f8f5a5 100644
> > --- a/arch/arm64/kvm/sys_regs.c
> > +++ b/arch/arm64/kvm/sys_regs.c
> > @@ -1218,10 +1218,6 @@ static u64 kvm_arm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
> >               if (!vcpu_has_sve(vcpu))
> >                       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
> >               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
> > -             val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
> > -             val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
> > -             val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
> > -             val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
> >               if (kvm_vgic_global_state.type == VGIC_V3) {
> >                       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
> >                       val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
> > @@ -1359,7 +1355,11 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
> >                              const struct sys_reg_desc *rd,
> >                              u64 val)
> >  {
> > +     struct kvm_arch *arch = &vcpu->kvm->arch;
>
> The use of kvm_arch as an anchor is very non-idiomatic. Use the kvm
> pointer for this if you must, but I'd rather you spell the whole thing
> out.
Sure, will spell the whole thing out.
>
> > +     u64 old_val = read_id_reg(vcpu, rd);
> > +     u64 new_val = val;
> >       u8 csv2, csv3;
> > +     int ret = 0;
> >
> >       /*
> >        * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
> > @@ -1377,17 +1377,26 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
> >           (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
> >               return -EINVAL;
> >
> > +     mutex_lock(&arch->config_lock);
> >       /* We can only differ with CSV[23], and anything else is an error */
> > -     val ^= read_id_reg(vcpu, rd);
> > +     val ^= old_val;
> >       val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
> >                ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
> > -     if (val)
> > -             return -EINVAL;
> > -
> > -     vcpu->kvm->arch.pfr0_csv2 = csv2;
> > -     vcpu->kvm->arch.pfr0_csv3 = csv3;
> > +     if (val) {
> > +             ret = -EINVAL;
> > +             goto out;
> > +     }
> >
> > -     return 0;
> > +     /* Only allow userspace to change the idregs before VM running */
> > +     if (kvm_vm_has_ran_once(vcpu->kvm)) {
> > +             if (new_val != old_val)
> > +                     ret = -EBUSY;
>
> This sort of check should be done exactly once in a central spot. For
> similar reasons, the config_lock should be taken in a single location
> so that we can actually reason about this globally rather than at a
> microscopic level.
>
> Something like this (which applies to the full series):
>
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index b3eacfc592eb..e184b9350166 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -1534,7 +1534,6 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
>                                const struct sys_reg_desc *rd,
>                                u64 val)
>  {
> -       struct kvm_arch *arch = &vcpu->kvm->arch;
>         u8 pmuver, host_pmuver;
>         bool valid_pmu;
>         int ret = 0;
> @@ -1557,14 +1556,6 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
>         if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
>                 return -EINVAL;
>
> -       mutex_lock(&arch->config_lock);
> -       /* Only allow userspace to change the idregs before VM running */
> -       if (kvm_vm_has_ran_once(vcpu->kvm)) {
> -               if (val != read_id_reg(vcpu, rd))
> -                       ret = -EBUSY;
> -               goto out;
> -       }
> -
>         if (!valid_pmu) {
>                 /*
>                  * Ignore the PMUVer field in @val. The PMUVer would be determined
> @@ -1592,7 +1583,6 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
>                            pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
>
>  out:
> -       mutex_unlock(&arch->config_lock);
>         return ret;
>  }
>
> @@ -1617,7 +1607,6 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
>                            const struct sys_reg_desc *rd,
>                            u64 val)
>  {
> -       struct kvm_arch *arch = &vcpu->kvm->arch;
>         u8 perfmon, host_perfmon;
>         bool valid_pmu;
>         int ret = 0;
> @@ -1641,14 +1630,6 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
>         if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
>                 return -EINVAL;
>
> -       mutex_lock(&arch->config_lock);
> -       /* Only allow userspace to change the idregs before VM running */
> -       if (kvm_vm_has_ran_once(vcpu->kvm)) {
> -               if (val != read_id_reg(vcpu, rd))
> -                       ret = -EBUSY;
> -               goto out;
> -       }
> -
>         if (!valid_pmu) {
>                 /*
>                  * Ignore the PerfMon field in @val. The PerfMon would be determined
> @@ -1676,7 +1657,6 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
>                            perfmon == ID_DFR0_EL1_PerfMon_IMPDEF);
>
>  out:
> -       mutex_unlock(&arch->config_lock);
>         return ret;
>  }
>
> @@ -1690,11 +1670,7 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
>  static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
>                       u64 *val)
>  {
> -       struct kvm_arch *arch = &vcpu->kvm->arch;
> -
> -       mutex_lock(&arch->config_lock);
>         *val = read_id_reg(vcpu, rd);
> -       mutex_unlock(&arch->config_lock);
>
>         return 0;
>  }
> @@ -1702,21 +1678,12 @@ static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
>  static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
>                       u64 val)
>  {
> -       struct kvm_arch *arch = &vcpu->kvm->arch;
>         u32 id = reg_to_encoding(rd);
>         int ret = 0;
>
> -       mutex_lock(&arch->config_lock);
> -       /* Only allow userspace to change the idregs before VM running */
> -       if (kvm_vm_has_ran_once(vcpu->kvm)) {
> -               if (val != read_id_reg(vcpu, rd))
> -                       ret = -EBUSY;
> -       } else {
> -               ret = arm64_check_features(vcpu, rd, val);
> -               if (!ret)
> -                       IDREG(vcpu->kvm, id) = val;
> -       }
> -       mutex_unlock(&arch->config_lock);
> +       ret = arm64_check_features(vcpu, rd, val);
> +       if (!ret)
> +               IDREG(vcpu->kvm, id) = val;
>
>         return ret;
>  }
> @@ -3438,6 +3405,9 @@ int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
>         if (!r || sysreg_hidden_user(vcpu, r))
>                 return -ENOENT;
>
> +       if (is_id_reg(reg_to_encoding(r)))
> +               mutex_lock(&vcpu->kvm->arch.config_lock);
> +
>         if (r->get_user) {
>                 ret = (r->get_user)(vcpu, r, &val);
>         } else {
> @@ -3445,6 +3415,9 @@ int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
>                 ret = 0;
>         }
>
> +       if (is_id_reg(reg_to_encoding(r)))
> +               mutex_unlock(&vcpu->kvm->arch.config_lock);
> +
>         if (!ret)
>                 ret = put_user(val, uaddr);
>
> @@ -3482,9 +3455,21 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
>         if (!r || sysreg_hidden_user(vcpu, r))
>                 return -ENOENT;
>
> +       /* Only allow userspace to change the idregs before VM running */
> +       if (is_id_reg(reg_to_encoding(r)) &&
> +           kvm_vm_has_ran_once(vcpu->kvm)) {
> +               if (val == read_id_reg(vcpu, r))
> +                       return 0;
> +               return -EBUSY;
> +       }
> +
>         if (sysreg_user_write_ignore(vcpu, r))
>                 return 0;
>
> +       /* ID regs are global to the VM and cannot be updated concurrently */
> +       if (is_id_reg(reg_to_encoding(r)))
> +               mutex_lock(&vcpu->kvm->arch.config_lock);
> +
>         if (r->set_user) {
>                 ret = (r->set_user)(vcpu, r, val);
>         } else {
> @@ -3492,6 +3477,9 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
>                 ret = 0;
>         }
>
> +       if (is_id_reg(reg_to_encoding(r)))
> +               mutex_unlock(&vcpu->kvm->arch.config_lock);
> +
>         return ret;
>  }
>
> and you can then restore the code to its original shape, as there is
> no need to change the control flow anymore.
Thanks. Will do as you suggested.
>
> > +     } else {
> > +             IDREG(vcpu->kvm, reg_to_encoding(rd)) = new_val;
> > +     }
> > +out:
> > +     mutex_unlock(&arch->config_lock);
> > +     return ret;
> >  }
> >
> >  static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
> > @@ -1479,7 +1488,12 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
> >  static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
> >                     u64 *val)
> >  {
>
> Right above this function is a comment that says the idregs are
> immutable. Time to revisit it?
Will update the comment.
>
> > +     struct kvm_arch *arch = &vcpu->kvm->arch;
> > +
> > +     mutex_lock(&arch->config_lock);
> >       *val = read_id_reg(vcpu, rd);
> > +     mutex_unlock(&arch->config_lock);
> > +
> >       return 0;
> >  }
> >
> > @@ -3364,6 +3378,7 @@ void kvm_arm_init_id_regs(struct kvm *kvm)
> >  {
> >       const struct sys_reg_desc *idreg;
> >       struct sys_reg_params params;
> > +     u64 val;
> >       u32 id;
> >
> >       /* Find the first idreg (SYS_ID_PFR0_EL1) in sys_reg_descs. */
> > @@ -3386,6 +3401,27 @@ void kvm_arm_init_id_regs(struct kvm *kvm)
> >               idreg++;
> >               id = reg_to_encoding(idreg);
> >       }
> > +
> > +     /*
> > +      * The default is to expose CSV2 == 1 if the HW isn't affected.
> > +      * Although this is a per-CPU feature, we make it global because
> > +      * asymmetric systems are just a nuisance.
> > +      *
> > +      * Userspace can override this as long as it doesn't promise
> > +      * the impossible.
> > +      */
> > +     val = IDREG(kvm, SYS_ID_AA64PFR0_EL1);
> > +
> > +     if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
> > +             val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
> > +             val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), 1);
> > +     }
> > +     if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
> > +             val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
> > +             val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), 1);
> > +     }
> > +
> > +     IDREG(kvm, SYS_ID_AA64PFR0_EL1) = val;
> >  }
> >
> >  int __init kvm_sys_reg_table_init(void)
>
> Thanks,
>
>         M.
>
> --
> Without deviation from the norm, progress is not possible.
Thanks,
Jing

Patch

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 069606170c82..8a2fde6c04c4 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -257,8 +257,6 @@ struct kvm_arch {
 
 	cpumask_var_t supported_cpus;
 
-	u8 pfr0_csv2;
-	u8 pfr0_csv3;
 	struct {
 		u8 imp:4;
 		u8 unimp:4;
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 774656a0718d..5114521ace60 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -102,22 +102,6 @@ static int kvm_arm_default_max_vcpus(void)
 	return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
 }
 
-static void set_default_spectre(struct kvm *kvm)
-{
-	/*
-	 * The default is to expose CSV2 == 1 if the HW isn't affected.
-	 * Although this is a per-CPU feature, we make it global because
-	 * asymmetric systems are just a nuisance.
-	 *
-	 * Userspace can override this as long as it doesn't promise
-	 * the impossible.
-	 */
-	if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
-		kvm->arch.pfr0_csv2 = 1;
-	if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED)
-		kvm->arch.pfr0_csv3 = 1;
-}
-
 /**
  * kvm_arch_init_vm - initializes a VM data structure
  * @kvm:	pointer to the KVM struct
@@ -161,7 +145,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	/* The maximum number of VCPUs is limited by the host's GIC model */
 	kvm->max_vcpus = kvm_arm_default_max_vcpus();
 
-	set_default_spectre(kvm);
 	kvm_arm_init_hypercalls(kvm);
 	kvm_arm_init_id_regs(kvm);
 
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index d2ee3a1c7f03..9fb1c2f8f5a5 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1218,10 +1218,6 @@ static u64 kvm_arm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
 		if (!vcpu_has_sve(vcpu))
 			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
 		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
-		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
-		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
-		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
-		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
 		if (kvm_vgic_global_state.type == VGIC_V3) {
 			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
 			val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
@@ -1359,7 +1355,11 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
 			       const struct sys_reg_desc *rd,
 			       u64 val)
 {
+	struct kvm_arch *arch = &vcpu->kvm->arch;
+	u64 old_val = read_id_reg(vcpu, rd);
+	u64 new_val = val;
 	u8 csv2, csv3;
+	int ret = 0;
 
 	/*
 	 * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
@@ -1377,17 +1377,26 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
 	    (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
 		return -EINVAL;
 
+	mutex_lock(&arch->config_lock);
 	/* We can only differ with CSV[23], and anything else is an error */
-	val ^= read_id_reg(vcpu, rd);
+	val ^= old_val;
 	val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
 		 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
-	if (val)
-		return -EINVAL;
-
-	vcpu->kvm->arch.pfr0_csv2 = csv2;
-	vcpu->kvm->arch.pfr0_csv3 = csv3;
+	if (val) {
+		ret = -EINVAL;
+		goto out;
+	}
 
-	return 0;
+	/* Only allow userspace to change the idregs before VM running */
+	if (kvm_vm_has_ran_once(vcpu->kvm)) {
+		if (new_val != old_val)
+			ret = -EBUSY;
+	} else {
+		IDREG(vcpu->kvm, reg_to_encoding(rd)) = new_val;
+	}
+out:
+	mutex_unlock(&arch->config_lock);
+	return ret;
 }
 
 static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
@@ -1479,7 +1488,12 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
 static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		      u64 *val)
 {
+	struct kvm_arch *arch = &vcpu->kvm->arch;
+
+	mutex_lock(&arch->config_lock);
 	*val = read_id_reg(vcpu, rd);
+	mutex_unlock(&arch->config_lock);
+
 	return 0;
 }
 
@@ -3364,6 +3378,7 @@ void kvm_arm_init_id_regs(struct kvm *kvm)
 {
 	const struct sys_reg_desc *idreg;
 	struct sys_reg_params params;
+	u64 val;
 	u32 id;
 
 	/* Find the first idreg (SYS_ID_PFR0_EL1) in sys_reg_descs. */
@@ -3386,6 +3401,27 @@ void kvm_arm_init_id_regs(struct kvm *kvm)
 		idreg++;
 		id = reg_to_encoding(idreg);
 	}
+
+	/*
+	 * The default is to expose CSV2 == 1 if the HW isn't affected.
+	 * Although this is a per-CPU feature, we make it global because
+	 * asymmetric systems are just a nuisance.
+	 *
+	 * Userspace can override this as long as it doesn't promise
+	 * the impossible.
+	 */
+	val = IDREG(kvm, SYS_ID_AA64PFR0_EL1);
+
+	if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
+		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
+		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), 1);
+	}
+	if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
+		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
+		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), 1);
+	}
+
+	IDREG(kvm, SYS_ID_AA64PFR0_EL1) = val;
 }
 
 int __init kvm_sys_reg_table_init(void)