[v3,2/4] KVM/X86: Intel MPX vmx and msr handle

Message ID DE8DF0795D48FD4CA783C40EC82923350149E31D@SHSMSX101.ccr.corp.intel.com (mailing list archive)
State New, archived

Commit Message

Liu, Jinsong Jan. 21, 2014, 7:01 p.m. UTC
From 31e68d752ac395dc6b65e6adf45be5324e92cdc8 Mon Sep 17 00:00:00 2001
From: Liu Jinsong <jinsong.liu@intel.com>
Date: Fri, 13 Dec 2013 02:32:43 +0800
Subject: [PATCH v3 2/4] KVM/X86: Intel MPX vmx and msr handle

This patch handles the VMX controls and the MSR of the Intel MPX feature.

Signed-off-by: Xudong Hao <xudong.hao@intel.com>
Signed-off-by: Liu Jinsong <jinsong.liu@intel.com>
---
 arch/x86/include/asm/vmx.h            |    2 ++
 arch/x86/include/uapi/asm/msr-index.h |    1 +
 arch/x86/kvm/vmx.c                    |   12 ++++++++++--
 3 files changed, 13 insertions(+), 2 deletions(-)

Comments

Paolo Bonzini Jan. 22, 2014, 11:38 a.m. UTC | #1
On 21/01/2014 20:01, Liu, Jinsong wrote:
> From 31e68d752ac395dc6b65e6adf45be5324e92cdc8 Mon Sep 17 00:00:00 2001
> From: Liu Jinsong <jinsong.liu@intel.com>
> Date: Fri, 13 Dec 2013 02:32:43 +0800
> Subject: [PATCH v3 2/4] KVM/X86: Intel MPX vmx and msr handle
>
> This patch handle vmx and msr of Intel MPX feature.
>
> Signed-off-by: Xudong Hao <xudong.hao@intel.com>
> Signed-off-by: Liu Jinsong <jinsong.liu@intel.com>
> ---
>  arch/x86/include/asm/vmx.h            |    2 ++
>  arch/x86/include/uapi/asm/msr-index.h |    1 +
>  arch/x86/kvm/vmx.c                    |   12 ++++++++++--
>  3 files changed, 13 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
> index 966502d..1bf4681 100644
> --- a/arch/x86/include/asm/vmx.h
> +++ b/arch/x86/include/asm/vmx.h
> @@ -85,6 +85,7 @@
>  #define VM_EXIT_SAVE_IA32_EFER                  0x00100000
>  #define VM_EXIT_LOAD_IA32_EFER                  0x00200000
>  #define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER       0x00400000
> +#define VM_EXIT_CLEAR_BNDCFGS                   0x00800000
>
>  #define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR	0x00036dff
>
> @@ -95,6 +96,7 @@
>  #define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL     0x00002000
>  #define VM_ENTRY_LOAD_IA32_PAT			0x00004000
>  #define VM_ENTRY_LOAD_IA32_EFER                 0x00008000
> +#define VM_ENTRY_LOAD_BNDCFGS                   0x00010000
>
>  #define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR	0x000011ff
>
> diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
> index 37813b5..2a418c4 100644
> --- a/arch/x86/include/uapi/asm/msr-index.h
> +++ b/arch/x86/include/uapi/asm/msr-index.h
> @@ -294,6 +294,7 @@
>  #define MSR_SMI_COUNT			0x00000034
>  #define MSR_IA32_FEATURE_CONTROL        0x0000003a
>  #define MSR_IA32_TSC_ADJUST             0x0000003b
> +#define MSR_IA32_BNDCFGS		0x00000d90
>
>  #define FEATURE_CONTROL_LOCKED				(1<<0)
>  #define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX	(1<<1)
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index b2fe1c2..6d7d9ad 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -439,6 +439,7 @@ struct vcpu_vmx {
>  #endif
>  		int           gs_ldt_reload_needed;
>  		int           fs_reload_needed;
> +		u64           msr_host_bndcfgs;
>  	} host_state;
>  	struct {
>  		int vm86_active;
> @@ -1647,6 +1648,8 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
>  	if (is_long_mode(&vmx->vcpu))
>  		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
>  #endif
> +	if (boot_cpu_has(X86_FEATURE_MPX))
> +		rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
>  	for (i = 0; i < vmx->save_nmsrs; ++i)
>  		kvm_set_shared_msr(vmx->guest_msrs[i].index,
>  				   vmx->guest_msrs[i].data,
> @@ -1684,6 +1687,8 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
>  #ifdef CONFIG_X86_64
>  	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
>  #endif
> +	if (vmx->host_state.msr_host_bndcfgs)
> +		wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
>  	/*
>  	 * If the FPU is not active (through the host task or
>  	 * the guest vcpu), then restore the cr0.TS bit.
> @@ -2800,7 +2805,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
>  	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
>  #endif
>  	opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
> -		VM_EXIT_ACK_INTR_ON_EXIT;
> +		VM_EXIT_ACK_INTR_ON_EXIT | VM_EXIT_CLEAR_BNDCFGS;
>  	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
>  				&_vmexit_control) < 0)
>  		return -EIO;
> @@ -2817,7 +2822,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
>  		_pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
>
>  	min = 0;
> -	opt = VM_ENTRY_LOAD_IA32_PAT;
> +	opt = VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS;
>  	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
>  				&_vmentry_control) < 0)
>  		return -EIO;

You need to disable MPX in the guest if the two controls are not 
available.  You can do this, for example, in vmx_cpuid_update. 
Otherwise, nested VMX is broken.
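
A rough sketch of how that check could look in vmx_cpuid_update (not part of
this patch; the hunk below is illustrative only and assumes the usual vmx.c
context, reusing the existing INVPCID-masking pattern):

static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	/* ... existing vmx_cpuid_update body unchanged ... */

	/* Hide MPX from the guest if either VMCS control is unavailable. */
	if (!(vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) ||
	    !(vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS)) {
		best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
		if (best)
			best->ebx &= ~bit(X86_FEATURE_MPX);
	}
}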

> @@ -8636,6 +8641,9 @@ static int __init vmx_init(void)
>  	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
>  	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
>  	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
> +	if (boot_cpu_has(X86_FEATURE_MPX))
> +		vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);

This needs to be done unconditionally.  Otherwise, reading/writing 
BNDCFGS will access a nonexistent VMCS field.
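
A minimal sketch of the unconditional version (illustrative only, mirroring
the surrounding call sites in vmx_init):

	/* Always pass BNDCFGS through the MSR bitmap, independent of host MPX. */
	vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);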

Paolo

>  	memcpy(vmx_msr_bitmap_legacy_x2apic,
>  			vmx_msr_bitmap_legacy, PAGE_SIZE);
>  	memcpy(vmx_msr_bitmap_longmode_x2apic,
>

Paolo Bonzini Jan. 22, 2014, 11:48 a.m. UTC | #2
On 22/01/2014 12:38, Paolo Bonzini wrote:
> On 21/01/2014 20:01, Liu, Jinsong wrote:
>> From 31e68d752ac395dc6b65e6adf45be5324e92cdc8 Mon Sep 17 00:00:00 2001
>> From: Liu Jinsong <jinsong.liu@intel.com>
>> Date: Fri, 13 Dec 2013 02:32:43 +0800
>> Subject: [PATCH v3 2/4] KVM/X86: Intel MPX vmx and msr handle
>>
>> This patch handle vmx and msr of Intel MPX feature.
>>
>> Signed-off-by: Xudong Hao <xudong.hao@intel.com>
>> Signed-off-by: Liu Jinsong <jinsong.liu@intel.com>
>> ---
>>  arch/x86/include/asm/vmx.h            |    2 ++
>>  arch/x86/include/uapi/asm/msr-index.h |    1 +
>>  arch/x86/kvm/vmx.c                    |   12 ++++++++++--
>>  3 files changed, 13 insertions(+), 2 deletions(-)
>>
>> diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
>> index 966502d..1bf4681 100644
>> --- a/arch/x86/include/asm/vmx.h
>> +++ b/arch/x86/include/asm/vmx.h
>> @@ -85,6 +85,7 @@
>>  #define VM_EXIT_SAVE_IA32_EFER                  0x00100000
>>  #define VM_EXIT_LOAD_IA32_EFER                  0x00200000
>>  #define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER       0x00400000
>> +#define VM_EXIT_CLEAR_BNDCFGS                   0x00800000
>>
>>  #define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR    0x00036dff
>>
>> @@ -95,6 +96,7 @@
>>  #define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL     0x00002000
>>  #define VM_ENTRY_LOAD_IA32_PAT            0x00004000
>>  #define VM_ENTRY_LOAD_IA32_EFER                 0x00008000
>> +#define VM_ENTRY_LOAD_BNDCFGS                   0x00010000
>>
>>  #define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR    0x000011ff
>>
>> diff --git a/arch/x86/include/uapi/asm/msr-index.h
>> b/arch/x86/include/uapi/asm/msr-index.h
>> index 37813b5..2a418c4 100644
>> --- a/arch/x86/include/uapi/asm/msr-index.h
>> +++ b/arch/x86/include/uapi/asm/msr-index.h
>> @@ -294,6 +294,7 @@
>>  #define MSR_SMI_COUNT            0x00000034
>>  #define MSR_IA32_FEATURE_CONTROL        0x0000003a
>>  #define MSR_IA32_TSC_ADJUST             0x0000003b
>> +#define MSR_IA32_BNDCFGS        0x00000d90
>>
>>  #define FEATURE_CONTROL_LOCKED                (1<<0)
>>  #define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX    (1<<1)
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> index b2fe1c2..6d7d9ad 100644
>> --- a/arch/x86/kvm/vmx.c
>> +++ b/arch/x86/kvm/vmx.c
>> @@ -439,6 +439,7 @@ struct vcpu_vmx {
>>  #endif
>>          int           gs_ldt_reload_needed;
>>          int           fs_reload_needed;
>> +        u64           msr_host_bndcfgs;
>>      } host_state;
>>      struct {
>>          int vm86_active;
>> @@ -1647,6 +1648,8 @@ static void vmx_save_host_state(struct kvm_vcpu
>> *vcpu)
>>      if (is_long_mode(&vmx->vcpu))
>>          wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
>>  #endif
>> +    if (boot_cpu_has(X86_FEATURE_MPX))
>> +        rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
>>      for (i = 0; i < vmx->save_nmsrs; ++i)
>>          kvm_set_shared_msr(vmx->guest_msrs[i].index,
>>                     vmx->guest_msrs[i].data,
>> @@ -1684,6 +1687,8 @@ static void __vmx_load_host_state(struct
>> vcpu_vmx *vmx)
>>  #ifdef CONFIG_X86_64
>>      wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
>>  #endif
>> +    if (vmx->host_state.msr_host_bndcfgs)
>> +        wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
>>      /*
>>       * If the FPU is not active (through the host task or
>>       * the guest vcpu), then restore the cr0.TS bit.
>> @@ -2800,7 +2805,7 @@ static __init int setup_vmcs_config(struct
>> vmcs_config *vmcs_conf)
>>      min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
>>  #endif
>>      opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
>> -        VM_EXIT_ACK_INTR_ON_EXIT;
>> +        VM_EXIT_ACK_INTR_ON_EXIT | VM_EXIT_CLEAR_BNDCFGS;
>>      if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
>>                  &_vmexit_control) < 0)
>>          return -EIO;
>> @@ -2817,7 +2822,7 @@ static __init int setup_vmcs_config(struct
>> vmcs_config *vmcs_conf)
>>          _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
>>
>>      min = 0;
>> -    opt = VM_ENTRY_LOAD_IA32_PAT;
>> +    opt = VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS;
>>      if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
>>                  &_vmentry_control) < 0)
>>          return -EIO;
>
> You need to disable MPX in the guest if the two controls are not
> available.  You can do this, for example, in vmx_cpuid_update.

Better: add an mpx_supported field to struct kvm_x86_ops.  You can use 
invpcid_supported as a model.
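
Modeled loosely on invpcid_supported, the suggested hook could look roughly
like this (a sketch, not final code; the field name and its wiring into
struct kvm_x86_ops are assumptions):

static bool vmx_mpx_supported(void)
{
	return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) &&
	       (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS);
}

static struct kvm_x86_ops vmx_x86_ops = {
	/* ... existing callbacks ... */
	.invpcid_supported = vmx_invpcid_supported,
	.mpx_supported = vmx_mpx_supported,	/* assumed new kvm_x86_ops field */
};

Common CPUID code could then consult kvm_x86_ops->mpx_supported() instead of
open-coding the control checks in vmx_cpuid_update.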

> Otherwise, nested VMX is broken.

>> @@ -8636,6 +8641,9 @@ static int __init vmx_init(void)
>>      vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
>>      vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
>>      vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
>> +    if (boot_cpu_has(X86_FEATURE_MPX))
>> +        vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
>
> This needs to be done unconditionally.  Otherwise, reading/writing
> BNDCFGS will access a nonexistent VMCS field.
>
> Paolo
>
>>      memcpy(vmx_msr_bitmap_legacy_x2apic,
>>              vmx_msr_bitmap_legacy, PAGE_SIZE);
>>      memcpy(vmx_msr_bitmap_longmode_x2apic,
>>
>


Patch

diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 966502d..1bf4681 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -85,6 +85,7 @@ 
 #define VM_EXIT_SAVE_IA32_EFER                  0x00100000
 #define VM_EXIT_LOAD_IA32_EFER                  0x00200000
 #define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER       0x00400000
+#define VM_EXIT_CLEAR_BNDCFGS                   0x00800000
 
 #define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR	0x00036dff
 
@@ -95,6 +96,7 @@ 
 #define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL     0x00002000
 #define VM_ENTRY_LOAD_IA32_PAT			0x00004000
 #define VM_ENTRY_LOAD_IA32_EFER                 0x00008000
+#define VM_ENTRY_LOAD_BNDCFGS                   0x00010000
 
 #define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR	0x000011ff
 
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 37813b5..2a418c4 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -294,6 +294,7 @@ 
 #define MSR_SMI_COUNT			0x00000034
 #define MSR_IA32_FEATURE_CONTROL        0x0000003a
 #define MSR_IA32_TSC_ADJUST             0x0000003b
+#define MSR_IA32_BNDCFGS		0x00000d90
 
 #define FEATURE_CONTROL_LOCKED				(1<<0)
 #define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX	(1<<1)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index b2fe1c2..6d7d9ad 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -439,6 +439,7 @@  struct vcpu_vmx {
 #endif
 		int           gs_ldt_reload_needed;
 		int           fs_reload_needed;
+		u64           msr_host_bndcfgs;
 	} host_state;
 	struct {
 		int vm86_active;
@@ -1647,6 +1648,8 @@  static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 	if (is_long_mode(&vmx->vcpu))
 		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 #endif
+	if (boot_cpu_has(X86_FEATURE_MPX))
+		rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
 	for (i = 0; i < vmx->save_nmsrs; ++i)
 		kvm_set_shared_msr(vmx->guest_msrs[i].index,
 				   vmx->guest_msrs[i].data,
@@ -1684,6 +1687,8 @@  static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 #ifdef CONFIG_X86_64
 	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
 #endif
+	if (vmx->host_state.msr_host_bndcfgs)
+		wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
 	/*
 	 * If the FPU is not active (through the host task or
 	 * the guest vcpu), then restore the cr0.TS bit.
@@ -2800,7 +2805,7 @@  static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
 #endif
 	opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
-		VM_EXIT_ACK_INTR_ON_EXIT;
+		VM_EXIT_ACK_INTR_ON_EXIT | VM_EXIT_CLEAR_BNDCFGS;
 	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
 				&_vmexit_control) < 0)
 		return -EIO;
@@ -2817,7 +2822,7 @@  static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 		_pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
 
 	min = 0;
-	opt = VM_ENTRY_LOAD_IA32_PAT;
+	opt = VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS;
 	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
 				&_vmentry_control) < 0)
 		return -EIO;
@@ -8636,6 +8641,9 @@  static int __init vmx_init(void)
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
+	if (boot_cpu_has(X86_FEATURE_MPX))
+		vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
+
 	memcpy(vmx_msr_bitmap_legacy_x2apic,
 			vmx_msr_bitmap_legacy, PAGE_SIZE);
 	memcpy(vmx_msr_bitmap_longmode_x2apic,