
KVM: VMX: Intercept FS/GS_BASE MSR accesses for 32-bit KVM

Message ID 20210422023831.3473491-1-seanjc@google.com
State New, archived
Series KVM: VMX: Intercept FS/GS_BASE MSR accesses for 32-bit KVM

Commit Message

Sean Christopherson April 22, 2021, 2:38 a.m. UTC
Disable pass-through of the FS and GS base MSRs for 32-bit KVM.  Intel's
SDM unequivocally states that the MSRs exist if and only if the CPU
supports x86-64.  FS_BASE and GS_BASE are mostly a non-issue; a clever
guest could opportunistically use the MSRs without issue.  KERNEL_GS_BASE
is a bigger problem, as a clever guest would subtly be broken if it were
migrated, as KVM disallows software access to the MSRs, and unlike the
direct variants, KERNEL_GS_BASE needs to be explicitly migrated as it's
not captured in the VMCS.
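
(Illustration, not part of the patch: a minimal sketch of the explicit
save step a userspace VMM needs for KERNEL_GS_BASE, assuming the standard
KVM_GET_MSRS flow; the helper name and vcpu_fd are hypothetical.  Because
32-bit KVM refuses software access to the MSR, a value the guest stashed
there via the passed-through WRMSR cannot be carried across a migration.)

#include <sys/ioctl.h>
#include <linux/kvm.h>

#define MSR_KERNEL_GS_BASE 0xc0000102

/* Hypothetical helper: read the guest's KERNEL_GS_BASE so it can be
 * restored on the destination after migration. */
static int save_kernel_gs_base(int vcpu_fd, __u64 *val)
{
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} msrs = {
		.hdr.nmsrs   = 1,
		.entry.index = MSR_KERNEL_GS_BASE,
	};

	/* KVM_GET_MSRS returns the number of MSRs successfully read;
	 * on 32-bit KVM this MSR is not handled, so the read fails. */
	if (ioctl(vcpu_fd, KVM_GET_MSRS, &msrs) != 1)
		return -1;

	*val = msrs.entry.data;
	return 0;
}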

Fixes: 25c5f225beda ("KVM: VMX: Enable MSR Bitmap feature")
Signed-off-by: Sean Christopherson <seanjc@google.com>
---

Note, this breaks kvm-unit-tests on 32-bit KVM VMX due to the boot code
using WRMSR(MSR_GS_BASE).  But the tests are already broken on SVM, and
always have been, which is honestly the main reason I didn't just turn a
blind eye.  :-)  I'll post the fix shortly.
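
(Also for illustration only: roughly the pattern of 32-bit boot code that
trips over this, assuming a typical wrmsr() wrapper and a hypothetical
per-CPU pointer.  With pass-through the write lands on the hardware MSR,
which exists because the physical CPU is x86-64; once intercepted, 32-bit
KVM's emulated MSR path rejects it and the guest takes a #GP.)

#define MSR_GS_BASE 0xc0000101

static inline void wrmsr(unsigned int msr, unsigned long long val)
{
	unsigned int lo = val, hi = val >> 32;

	/* WRMSR: MSR index in ECX, value in EDX:EAX. */
	asm volatile("wrmsr" : : "c"(msr), "a"(lo), "d"(hi) : "memory");
}

static void setup_percpu_base(void *percpu)	/* hypothetical boot-time caller */
{
	wrmsr(MSR_GS_BASE, (unsigned int)(unsigned long)percpu);
}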

 arch/x86/kvm/vmx/nested.c | 2 ++
 arch/x86/kvm/vmx/vmx.c    | 4 ++++
 2 files changed, 6 insertions(+)

Comments

Paolo Bonzini April 22, 2021, 6:47 a.m. UTC | #1
On 22/04/21 04:38, Sean Christopherson wrote:
> Disable pass-through of the FS and GS base MSRs for 32-bit KVM.  Intel's
> SDM unequivocally states that the MSRs exist if and only if the CPU
> supports x86-64.  FS_BASE and GS_BASE are mostly a non-issue; a clever
> guest could opportunistically use the MSRs without issue.  KERNEL_GS_BASE
> is a bigger problem, as a clever guest would subtly be broken if it were
> migrated, as KVM disallows software access to the MSRs, and unlike the
> direct variants, KERNEL_GS_BASE needs to be explicitly migrated as it's
> not captured in the VMCS.
> 
> Fixes: 25c5f225beda ("KVM: VMX: Enable MSR Bitmap feature")
> Signed-off-by: Sean Christopherson <seanjc@google.com>

I added an explicit note that this is not for stable kernels.  The 
clever guest breaking after migration is the clever guest's problem.

> ---
> 
> Note, this breaks kvm-unit-tests on 32-bit KVM VMX due to the boot code
> using WRMSR(MSR_GS_BASE).  But the tests are already broken on SVM, and
> always have been, which is honestly the main reason I didn't just turn a
> blind eye.  :-)  I'll post the fix shortly.

Fair enough.  Queued, thanks.

>   arch/x86/kvm/vmx/nested.c | 2 ++
>   arch/x86/kvm/vmx/vmx.c    | 4 ++++
>   2 files changed, 6 insertions(+)
> 
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index 8b111682fe5c..0f8c118ebc35 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -614,6 +614,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
>   	}
>   
>   	/* KVM unconditionally exposes the FS/GS base MSRs to L1. */
> +#ifdef CONFIG_X86_64
>   	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
>   					     MSR_FS_BASE, MSR_TYPE_RW);
>   
> @@ -622,6 +623,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
>   
>   	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
>   					     MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
> +#endif
>   
>   	/*
>   	 * Checking the L0->L1 bitmap is trying to verify two things:
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 6501d66167b8..b58dc2d454f1 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -157,9 +157,11 @@ static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
>   	MSR_IA32_SPEC_CTRL,
>   	MSR_IA32_PRED_CMD,
>   	MSR_IA32_TSC,
> +#ifdef CONFIG_X86_64
>   	MSR_FS_BASE,
>   	MSR_GS_BASE,
>   	MSR_KERNEL_GS_BASE,
> +#endif
>   	MSR_IA32_SYSENTER_CS,
>   	MSR_IA32_SYSENTER_ESP,
>   	MSR_IA32_SYSENTER_EIP,
> @@ -6969,9 +6971,11 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
>   	bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
>   
>   	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
> +#ifdef CONFIG_X86_64
>   	vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
>   	vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
>   	vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
> +#endif
>   	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
>   	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
>   	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
>

Patch

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 8b111682fe5c..0f8c118ebc35 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -614,6 +614,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
 	}
 
 	/* KVM unconditionally exposes the FS/GS base MSRs to L1. */
+#ifdef CONFIG_X86_64
 	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
 					     MSR_FS_BASE, MSR_TYPE_RW);
 
@@ -622,6 +623,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
 
 	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
 					     MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+#endif
 
 	/*
 	 * Checking the L0->L1 bitmap is trying to verify two things:
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 6501d66167b8..b58dc2d454f1 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -157,9 +157,11 @@ static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
 	MSR_IA32_SPEC_CTRL,
 	MSR_IA32_PRED_CMD,
 	MSR_IA32_TSC,
+#ifdef CONFIG_X86_64
 	MSR_FS_BASE,
 	MSR_GS_BASE,
 	MSR_KERNEL_GS_BASE,
+#endif
 	MSR_IA32_SYSENTER_CS,
 	MSR_IA32_SYSENTER_ESP,
 	MSR_IA32_SYSENTER_EIP,
@@ -6969,9 +6971,11 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
 	bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
 
 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
+#ifdef CONFIG_X86_64
 	vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
 	vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
 	vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+#endif
 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);