
[1/4] KVM: arm64: Fix read-side race on updates to vcpu reset state

Message ID: 20210818085047.1005285-2-oupton@google.com
State: New, archived
Series: KVM: arm64: Fix some races in CPU_ON PSCI call

Commit Message

Oliver Upton Aug. 18, 2021, 8:50 a.m. UTC
KVM correctly serializes writes to a vCPU's reset state; however, since
we do not take the KVM lock on the read side, it is entirely possible to
read state from two different reset requests.

Cure the race for now by taking the KVM lock when reading the
reset_state structure.

Fixes: 358b28f09f0a ("arm/arm64: KVM: Allow a VCPU to fully reset itself")
Signed-off-by: Oliver Upton <oupton@google.com>
---
 arch/arm64/kvm/reset.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
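
For illustration, here is a minimal, stand-alone C sketch of the read-side
pattern the patch adopts: snapshot the shared reset state while holding the
lock, clear the pending flag, and consume only the local copy afterwards.
The struct fields and the pthread mutex below are stand-ins for
vcpu->arch.reset_state and kvm->lock, not the kernel's definitions.

/*
 * Illustrative sketch only: a user-space analogue of "snapshot under the
 * lock, consume the copy afterwards". Field names and the mutex are
 * stand-ins, not kernel code.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct reset_state {
	unsigned long pc;
	unsigned long r0;
	bool be;
	bool reset;
};

static struct reset_state shared_state;
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void consume_reset(void)
{
	struct reset_state snap;

	/* Take a consistent snapshot and clear the request while locked. */
	pthread_mutex_lock(&state_lock);
	snap = shared_state;
	shared_state.reset = false;
	pthread_mutex_unlock(&state_lock);

	/* Every later decision uses the snapshot, never the shared copy. */
	if (snap.reset)
		printf("resetting to pc=%#lx r0=%#lx be=%d\n",
		       snap.pc, snap.r0, snap.be);
}

int main(void)
{
	/* Writer side (the analogue of PSCI CPU_ON) publishes under the lock. */
	pthread_mutex_lock(&state_lock);
	shared_state = (struct reset_state){ .pc = 0x80080000, .r0 = 1, .reset = true };
	pthread_mutex_unlock(&state_lock);

	consume_reset();
	return 0;
}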

Comments

Marc Zyngier Aug. 18, 2021, 10:06 a.m. UTC | #1
On Wed, 18 Aug 2021 09:50:44 +0100,
Oliver Upton <oupton@google.com> wrote:
> 
> KVM correctly serializes writes to a vCPU's reset state; however, since
> we do not take the KVM lock on the read side, it is entirely possible to
> read state from two different reset requests.
> 
> Cure the race for now by taking the KVM lock when reading the
> reset_state structure.
> 
> Fixes: 358b28f09f0a ("arm/arm64: KVM: Allow a VCPU to fully reset itself")
> Signed-off-by: Oliver Upton <oupton@google.com>
> ---
>  arch/arm64/kvm/reset.c | 16 ++++++++++------
>  1 file changed, 10 insertions(+), 6 deletions(-)
> 
> diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
> index 18ffc6ad67b8..3507e64ff8ad 100644
> --- a/arch/arm64/kvm/reset.c
> +++ b/arch/arm64/kvm/reset.c
> @@ -210,10 +210,16 @@ static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
>   */
>  int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
>  {
> +	struct vcpu_reset_state reset_state;
>  	int ret;
>  	bool loaded;
>  	u32 pstate;
>  
> +	mutex_lock(&vcpu->kvm->lock);
> +	memcpy(&reset_state, &vcpu->arch.reset_state, sizeof(reset_state));

nit: "reset_state = vcpu->arch.reset_state;" should do the trick.

> +	vcpu->arch.reset_state.reset = false;

We should probably upgrade this to a WRITE_ONCE() to match the
PSCI side.
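
A hedged sketch of the pairing being suggested: the PSCI side publishes the
flag with WRITE_ONCE(), so the clear on the read side would use WRITE_ONCE()
as well. The macro definitions below are simplified stand-ins so the fragment
builds on its own; they are not the kernel's real accessors.

#include <stdbool.h>

/* Simplified stand-ins for the kernel's ONCE accessors, only so this
 * fragment is self-contained; the real macros do more. */
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(const volatile __typeof__(x) *)&(x))

static struct { bool reset; } reset_state;

/* Writer (the PSCI CPU_ON path in the kernel) publishes the request... */
static void request_reset(void)
{
	WRITE_ONCE(reset_state.reset, true);
}

/* ...and the reset path snapshots and clears it; the suggestion is that
 * this clear be a WRITE_ONCE() so it pairs with the publish above. */
static bool consume_reset(void)
{
	bool pending = READ_ONCE(reset_state.reset);

	WRITE_ONCE(reset_state.reset, false);
	return pending;
}

int main(void)
{
	request_reset();
	return consume_reset() ? 0 : 1;
}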

> +	mutex_unlock(&vcpu->kvm->lock);
> +
>  	/* Reset PMU outside of the non-preemptible section */
>  	kvm_pmu_vcpu_reset(vcpu);
>  
> @@ -276,8 +282,8 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
>  	 * Additional reset state handling that PSCI may have imposed on us.
>  	 * Must be done after all the sys_reg reset.
>  	 */
> -	if (vcpu->arch.reset_state.reset) {
> -		unsigned long target_pc = vcpu->arch.reset_state.pc;
> +	if (reset_state.reset) {
> +		unsigned long target_pc = reset_state.pc;
>  
>  		/* Gracefully handle Thumb2 entry point */
>  		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
> @@ -286,13 +292,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
>  		}
>  
>  		/* Propagate caller endianness */
> -		if (vcpu->arch.reset_state.be)
> +		if (reset_state.be)
>  			kvm_vcpu_set_be(vcpu);
>  
>  		*vcpu_pc(vcpu) = target_pc;
> -		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
> -
> -		vcpu->arch.reset_state.reset = false;
> +		vcpu_set_reg(vcpu, 0, reset_state.r0);
>  	}
>  
>  	/* Reset timer */

Thanks,

	M.

Patch

diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 18ffc6ad67b8..3507e64ff8ad 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -210,10 +210,16 @@  static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
  */
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_reset_state reset_state;
 	int ret;
 	bool loaded;
 	u32 pstate;
 
+	mutex_lock(&vcpu->kvm->lock);
+	memcpy(&reset_state, &vcpu->arch.reset_state, sizeof(reset_state));
+	vcpu->arch.reset_state.reset = false;
+	mutex_unlock(&vcpu->kvm->lock);
+
 	/* Reset PMU outside of the non-preemptible section */
 	kvm_pmu_vcpu_reset(vcpu);
 
@@ -276,8 +282,8 @@  int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	 * Additional reset state handling that PSCI may have imposed on us.
 	 * Must be done after all the sys_reg reset.
 	 */
-	if (vcpu->arch.reset_state.reset) {
-		unsigned long target_pc = vcpu->arch.reset_state.pc;
+	if (reset_state.reset) {
+		unsigned long target_pc = reset_state.pc;
 
 		/* Gracefully handle Thumb2 entry point */
 		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
@@ -286,13 +292,11 @@  int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 		}
 
 		/* Propagate caller endianness */
-		if (vcpu->arch.reset_state.be)
+		if (reset_state.be)
 			kvm_vcpu_set_be(vcpu);
 
 		*vcpu_pc(vcpu) = target_pc;
-		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
-
-		vcpu->arch.reset_state.reset = false;
+		vcpu_set_reg(vcpu, 0, reset_state.r0);
 	}
 
 	/* Reset timer */