
KVM: x86: block KVM_CAP_SYNC_REGS if guest state is protected

Message ID: 20250306202923.646075-1-pbonzini@redhat.com
State: New

Commit Message

Paolo Bonzini March 6, 2025, 8:29 p.m. UTC
KVM_CAP_SYNC_REGS does not make sense for VMs with protected guest state,
since the register values cannot actually be written.  Return 0
when using the VM-level KVM_CHECK_EXTENSION ioctl, and accordingly
return -EINVAL from KVM_RUN if the valid/dirty fields are nonzero.

However, on exit from KVM_RUN userspace could have placed a nonzero
value into kvm_run->kvm_valid_regs, so check guest_state_protected
again and skip store_regs() in that case.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/x86.c | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)
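
With this change, userspace is expected to probe the capability on the VM file descriptor before touching the sync-regs fields, since the answer now depends on the VM type. Below is a minimal userspace sketch of that probe (the helper name is illustrative, error handling trimmed). Note that kvm_sync_valid_fields() also covers the system-level KVM_CHECK_EXTENSION call, where kvm is NULL and the full bitmask is still reported:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/*
 * Probe KVM_CAP_SYNC_REGS on the VM fd rather than on /dev/kvm:
 * with this patch the result is per-VM.  A VM with protected guest
 * state (SEV-ES/SNP) reports 0; otherwise the returned bitmask is
 * what may be set in kvm_run->kvm_valid_regs and ->kvm_dirty_regs.
 */
static int probe_sync_regs(int vm_fd)
{
	int fields = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_REGS);

	/* Capability absent, error, or protected guest state: skip sync regs. */
	if (fields <= 0)
		return 0;

	/* e.g. KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS | KVM_SYNC_X86_EVENTS */
	return fields;
}

A VMM that caches this value at VM creation time can then fall back to KVM_GET_REGS/KVM_SET_REGS, or skip register syncing entirely, when the probe reads back 0.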

Comments

Gupta, Pankaj March 7, 2025, 2:20 p.m. UTC | #1
On 3/6/2025 9:29 PM, Paolo Bonzini wrote:
> KVM_CAP_SYNC_REGS does not make sense for VMs with protected guest state,
> since the register values cannot actually be written.  Return 0
> when using the VM-level KVM_CHECK_EXTENSION ioctl, and accordingly
> return -EINVAL from KVM_RUN if the valid/dirty fields are nonzero.
> 
> However, on exit from KVM_RUN userspace could have placed a nonzero
> value into kvm_run->kvm_valid_regs, so check guest_state_protected
> again and skip store_regs() in that case.
> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Reviewed-by: Pankaj Gupta <pankaj.gupta@amd.com>

Also, boot-tested an SNP guest on a 6.14-rc5 host with the patch applied.

Thanks,
Pankaj
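
The SNP test above can be narrowed down to a quick capability check. A hedged sketch, assuming an SNP-capable host where KVM_X86_SNP_VM is a supported VM type (a full guest launch needs the usual SEV-SNP setup, omitted here):

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	if (kvm_fd < 0)
		return 1;

	/* SVM sets kvm->arch.has_protected_state for SEV-ES and SNP VM types. */
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_X86_SNP_VM);
	if (vm_fd < 0)
		return 1;

	/* With the patch applied this prints 0 instead of the full bitmask. */
	printf("KVM_CAP_SYNC_REGS: %d\n",
	       ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_REGS));
	return 0;
}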

Patch

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index aaa067b79095..b416eec5c167 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4586,6 +4586,11 @@ static bool kvm_is_vm_type_supported(unsigned long type)
 	return type < 32 && (kvm_caps.supported_vm_types & BIT(type));
 }
 
+static inline u32 kvm_sync_valid_fields(struct kvm *kvm)
+{
+	return kvm && kvm->arch.has_protected_state ? 0 : KVM_SYNC_X86_VALID_FIELDS;
+}
+
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 {
 	int r = 0;
@@ -4694,7 +4699,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		break;
 #endif
 	case KVM_CAP_SYNC_REGS:
-		r = KVM_SYNC_X86_VALID_FIELDS;
+		r = kvm_sync_valid_fields(kvm);
 		break;
 	case KVM_CAP_ADJUST_CLOCK:
 		r = KVM_CLOCK_VALID_FLAGS;
@@ -11503,6 +11508,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
 	struct kvm_queued_exception *ex = &vcpu->arch.exception;
 	struct kvm_run *kvm_run = vcpu->run;
+	u32 sync_valid_fields;
 	int r;
 
 	r = kvm_mmu_post_init_vm(vcpu->kvm);
@@ -11548,8 +11554,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
-	if ((kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) ||
-	    (kvm_run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)) {
+	sync_valid_fields = kvm_sync_valid_fields(vcpu->kvm);
+	if ((kvm_run->kvm_valid_regs & ~sync_valid_fields) ||
+	    (kvm_run->kvm_dirty_regs & ~sync_valid_fields)) {
 		r = -EINVAL;
 		goto out;
 	}
@@ -11607,7 +11614,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 
 out:
 	kvm_put_guest_fpu(vcpu);
-	if (kvm_run->kvm_valid_regs)
+	if (kvm_run->kvm_valid_regs && likely(!vcpu->arch.guest_state_protected))
 		store_regs(vcpu);
 	post_kvm_run_save(vcpu);
 	kvm_vcpu_srcu_read_unlock(vcpu);
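
The new -EINVAL path is easy to exercise: any nonzero kvm_valid_regs or kvm_dirty_regs on a vCPU of a protected-state VM now fails KVM_RUN up front instead of being silently accepted. A minimal sketch, assuming the vCPU fd and the mmap'd kvm_run come from the usual KVM_CREATE_VCPU plus KVM_GET_VCPU_MMAP_SIZE setup (the helper name is illustrative):

#include <assert.h>
#include <errno.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>

/*
 * On a VM with protected guest state, KVM_RUN must now reject
 * nonzero sync-regs fields with -EINVAL, matching the 0 reported
 * by KVM_CHECK_EXTENSION(KVM_CAP_SYNC_REGS) on the VM fd.
 */
static void check_sync_regs_rejected(int vcpu_fd, struct kvm_run *run)
{
	run->kvm_valid_regs = KVM_SYNC_X86_REGS;

	int r = ioctl(vcpu_fd, KVM_RUN, 0);
	assert(r == -1 && errno == EINVAL);

	/* Clear the field before running the guest for real. */
	run->kvm_valid_regs = 0;
}

The guest_state_protected check in the out path covers the remaining window: userspace can still write to kvm_valid_regs while KVM_RUN is in flight, so store_regs() has to be skipped again on exit.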