[4/4] KVM: SVM: Enable Virtual VMLOAD VMSAVE feature

Message ID 9d287324be66db8b4a14d75b16f53cfbd0c5703f.1499265201.git.Janakarajan.Natarajan@amd.com (mailing list archive)
State New, archived

Commit Message

Janakarajan Natarajan July 5, 2017, 4:48 p.m. UTC
Enable the Virtual VMLOAD VMSAVE feature. This is done by setting bit 1
at position B8h in the vmcb.

The processor must have nested paging enabled, be in 64-bit mode and
have support for the Virtual VMLOAD VMSAVE feature for the bit to be set
in the vmcb.

Signed-off-by: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
---
 arch/x86/include/asm/svm.h |  1 +
 arch/x86/kvm/svm.c         | 21 +++++++++++++++++++++
 2 files changed, 22 insertions(+)
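
For readers who want to check the prerequisite on a given machine before loading kvm-amd, the CPU support mentioned in the commit message can be probed from userspace. The sketch below is not part of the patch; the bit position used (EDX bit 15 of CPUID leaf 8000_000Ah, presumably what the X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE flag added earlier in this series maps to) is recalled from the AMD APM and should be verified against the manual.

#include <stdio.h>
#include <cpuid.h>	/* GCC/clang helper for the CPUID instruction */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID Fn8000_000Ah reports the SVM revision and feature bits. */
	if (!__get_cpuid(0x8000000a, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 8000_000Ah not available (no SVM?)");
		return 1;
	}

	/* Assumption: V_VMSAVE_VMLOAD is advertised in EDX bit 15. */
	printf("Virtual VMLOAD VMSAVE: %ssupported\n",
	       (edx & (1u << 15)) ? "" : "not ");
	return 0;
}

Whether KVM actually sets the bit also depends on nested paging being enabled and on a 64-bit kernel, as the commit message and the hunk in svm_hardware_setup() below spell out.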

Comments

Paolo Bonzini July 5, 2017, 4:53 p.m. UTC | #1
On 05/07/2017 18:48, Janakarajan Natarajan wrote:
> Enable the Virtual VMLOAD VMSAVE feature. This is done by setting bit 1
> at position B8h in the vmcb.
> 
> The processor must have nested paging enabled, be in 64-bit mode and
> have support for the Virtual VMLOAD VMSAVE feature for the bit to be set
> in the vmcb.
> 
> Signed-off-by: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
> ---
>  arch/x86/include/asm/svm.h |  1 +
>  arch/x86/kvm/svm.c         | 21 +++++++++++++++++++++
>  2 files changed, 22 insertions(+)
> 
> diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
> index 74d1393..58fffe7 100644
> --- a/arch/x86/include/asm/svm.h
> +++ b/arch/x86/include/asm/svm.h
> @@ -120,6 +120,7 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
>  #define AVIC_ENABLE_MASK (1 << AVIC_ENABLE_SHIFT)
>  
>  #define LBR_CTL_ENABLE_MASK BIT_ULL(0)
> +#define VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK BIT_ULL(1)
>  
>  #define SVM_INTERRUPT_SHADOW_MASK 1
>  
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index eadecee..ae73e7c 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -275,6 +275,9 @@ static int avic;
>  module_param(avic, int, S_IRUGO);
>  #endif
>  
> +/* enable/disable Virtual VMLOAD VMSAVE */
> +static bool has_vls = false;

Please make this a module parameter.  Initialize the variable to true...

>  /* AVIC VM ID bit masks and lock */
>  static DECLARE_BITMAP(avic_vm_id_bitmap, AVIC_VM_ID_NR);
>  static DEFINE_SPINLOCK(avic_vm_id_lock);
> @@ -1079,6 +1082,14 @@ static __init int svm_hardware_setup(void)
>  		}
>  	}
>  
> +	if (npt_enabled) {
> +		if (boot_cpu_has(X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE) &&
> +		    IS_ENABLED(CONFIG_X86_64)) {

... and reset it here if !npt_enabled ||
!boot_cpu_has(X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE) ||
!IS_ENABLED(CONFIG_X86_64).

Paolo

> +			pr_info("Virtual VMLOAD VMSAVE supported\n");
> +			has_vls = true;
> +		}
> +	}
> +
>  	return 0;
>  
>  err:
> @@ -1266,6 +1277,16 @@ static void init_vmcb(struct vcpu_svm *svm)
>  	if (avic)
>  		avic_init_vmcb(svm);
>  
> +	/*
> +	 * If hardware supports Virtual VMLOAD VMSAVE then enable it
> +	 * in VMCB and clear intercepts to avoid #VMEXIT.
> +	 */
> +	if (has_vls) {
> +		clr_intercept(svm, INTERCEPT_VMLOAD);
> +		clr_intercept(svm, INTERCEPT_VMSAVE);
> +		svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
> +	}
> +
>  	mark_all_dirty(svm->vmcb);
>  
>  	enable_gif(svm);
>
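
A rough sketch of the restructuring Paolo asks for above might look like the following (the parameter name vls, the 0444 permissions, and the exact placement are guesses made here for illustration, not something spelled out in the thread):

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

	/*
	 * In svm_hardware_setup(): instead of opting in, clear the
	 * parameter when a prerequisite is missing.
	 */
	if (vls) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE) ||
		    !IS_ENABLED(CONFIG_X86_64)) {
			vls = false;
		} else {
			pr_info("Virtual VMLOAD VMSAVE supported\n");
		}
	}

	/* ... and init_vmcb() keys off the parameter instead of has_vls: */
	if (vls) {
		clr_intercept(svm, INTERCEPT_VMLOAD);
		clr_intercept(svm, INTERCEPT_VMSAVE);
		svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
	}

With such a parameter, an administrator could turn the feature off explicitly (e.g. "modprobe kvm-amd vls=0"), while it stays enabled by default whenever the hardware and kernel configuration allow it.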

Patch

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 74d1393..58fffe7 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -120,6 +120,7 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 #define AVIC_ENABLE_MASK (1 << AVIC_ENABLE_SHIFT)
 
 #define LBR_CTL_ENABLE_MASK BIT_ULL(0)
+#define VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK BIT_ULL(1)
 
 #define SVM_INTERRUPT_SHADOW_MASK 1
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index eadecee..ae73e7c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -275,6 +275,9 @@ static int avic;
 module_param(avic, int, S_IRUGO);
 #endif
 
+/* enable/disable Virtual VMLOAD VMSAVE */
+static bool has_vls = false;
+
 /* AVIC VM ID bit masks and lock */
 static DECLARE_BITMAP(avic_vm_id_bitmap, AVIC_VM_ID_NR);
 static DEFINE_SPINLOCK(avic_vm_id_lock);
@@ -1079,6 +1082,14 @@ static __init int svm_hardware_setup(void)
 		}
 	}
 
+	if (npt_enabled) {
+		if (boot_cpu_has(X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE) &&
+		    IS_ENABLED(CONFIG_X86_64)) {
+			pr_info("Virtual VMLOAD VMSAVE supported\n");
+			has_vls = true;
+		}
+	}
+
 	return 0;
 
 err:
@@ -1266,6 +1277,16 @@ static void init_vmcb(struct vcpu_svm *svm)
 	if (avic)
 		avic_init_vmcb(svm);
 
+	/*
+	 * If hardware supports Virtual VMLOAD VMSAVE then enable it
+	 * in VMCB and clear intercepts to avoid #VMEXIT.
+	 */
+	if (has_vls) {
+		clr_intercept(svm, INTERCEPT_VMLOAD);
+		clr_intercept(svm, INTERCEPT_VMSAVE);
+		svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
+	}
+
 	mark_all_dirty(svm->vmcb);
 
 	enable_gif(svm);