diff mbox series

[11/13] KVM: SVM: Add support for IBS virtualization for SEV-ES guests

Message ID 20230904095347.14994-12-manali.shukla@amd.com (mailing list archive)
State New, archived
Headers show
Series Implement support for IBS virtualization | expand

Commit Message

Manali Shukla Sept. 4, 2023, 9:53 a.m. UTC
Since the IBS state is swap type C, the hypervisor is responsible for
saving its own IBS state before VMRUN and restoring it after VMEXIT.
It is also responsible for disabling IBS before VMRUN and re-enabling
it after VMEXIT. For a SEV-ES guest with IBS virtualization enabled,
a VMEXIT_INVALID will happen if IBS is found to be enabled on VMRUN
[1].

The IBS virtualization feature for SEV-ES guests is not enabled in this
patch. Later patches enable IBS virtualization for SEV-ES guests.

[1]: https://bugzilla.kernel.org/attachment.cgi?id=304653
     AMD64 Architecture Programmer's Manual, Vol 2, Section 15.38
     Instruction-Based Sampling Virtualization.

Signed-off-by: Manali Shukla <manali.shukla@amd.com>
---
 arch/x86/include/asm/svm.h | 14 +++++++++++++-
 arch/x86/kvm/svm/sev.c     |  7 +++++++
 arch/x86/kvm/svm/svm.c     | 11 +++++------
 3 files changed, 25 insertions(+), 7 deletions(-)

Comments

Tom Lendacky Sept. 5, 2023, 3:43 p.m. UTC | #1
On 9/4/23 04:53, Manali Shukla wrote:
> Since the IBS state is swap type C, the hypervisor is responsible for
> saving its own IBS state before VMRUN and restoring it after VMEXIT.
> It is also responsible for disabling IBS before VMRUN and re-enabling
> it after VMEXIT. For a SEV-ES guest with IBS virtualization enabled,
> a VMEXIT_INVALID will happen if IBS is found to be enabled on VMRUN
> [1].
> 
> The IBS virtualization feature for SEV-ES guests is not enabled in this
> patch. Later patches enable IBS virtualization for SEV-ES guests.
> 
> [1]: https://bugzilla.kernel.org/attachment.cgi?id=304653
>       AMD64 Architecture Programmer's Manual, Vol 2, Section 15.38
>       Instruction-Based Sampling Virtualization.
> 
> Signed-off-by: Manali Shukla <manali.shukla@amd.com>
> ---
>   arch/x86/include/asm/svm.h | 14 +++++++++++++-
>   arch/x86/kvm/svm/sev.c     |  7 +++++++
>   arch/x86/kvm/svm/svm.c     | 11 +++++------
>   3 files changed, 25 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
> index 4096d2f68770..58b60842a3b7 100644
> --- a/arch/x86/include/asm/svm.h
> +++ b/arch/x86/include/asm/svm.h
> @@ -469,6 +469,18 @@ struct sev_es_save_area {
>   	u8 fpreg_x87[80];
>   	u8 fpreg_xmm[256];
>   	u8 fpreg_ymm[256];
> +	u8 lbr_stack_from_to[256];
> +	u64 lbr_select;
> +	u64 ibs_fetch_ctl;
> +	u64 ibs_fetch_linear_addr;
> +	u64 ibs_op_ctl;
> +	u64 ibs_op_rip;
> +	u64 ibs_op_data;
> +	u64 ibs_op_data2;
> +	u64 ibs_op_data3;
> +	u64 ibs_dc_linear_addr;
> +	u64 ibs_br_target;
> +	u64 ibs_fetch_extd_ctl;
>   } __packed;
>   
>   struct ghcb_save_area {
> @@ -527,7 +539,7 @@ struct ghcb {
>   
>   #define EXPECTED_VMCB_SAVE_AREA_SIZE		1992
>   #define EXPECTED_GHCB_SAVE_AREA_SIZE		1032
> -#define EXPECTED_SEV_ES_SAVE_AREA_SIZE		1648
> +#define EXPECTED_SEV_ES_SAVE_AREA_SIZE		1992
>   #define EXPECTED_VMCB_CONTROL_AREA_SIZE		1024
>   #define EXPECTED_GHCB_SIZE			PAGE_SIZE
>   
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index d3aec1f2cad2..41706335cedd 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -59,6 +59,7 @@ module_param_named(sev_es, sev_es_enabled, bool, 0444);
>   #define sev_es_enabled false
>   #endif /* CONFIG_KVM_AMD_SEV */
>   
> +static bool sev_es_vibs_enabled;
>   static u8 sev_enc_bit;
>   static DECLARE_RWSEM(sev_deactivate_lock);
>   static DEFINE_MUTEX(sev_bitmap_lock);
> @@ -2256,6 +2257,9 @@ void __init sev_hardware_setup(void)
>   
>   	sev_enabled = sev_supported;
>   	sev_es_enabled = sev_es_supported;
> +
> +	if (!sev_es_enabled || !cpu_feature_enabled(X86_FEATURE_SEV_ES_VIBS))
> +		sev_es_vibs_enabled = false;

sev_es_vibs_enabled = sev_es_enabled && cpu_feature_enabled(X86_FEATURE_SEV_ES_VIBS);

But won't this require VNMI support, too? So should that also be checked
along with vibs from svm.c (since AVIC isn't supported with SEV).

>   #endif
>   }
>   
> @@ -2993,6 +2997,9 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
>   		if (guest_cpuid_has(&svm->vcpu, X86_FEATURE_RDTSCP))
>   			svm_clr_intercept(svm, INTERCEPT_RDTSCP);
>   	}
> +
> +	if (sev_es_vibs_enabled && svm->ibs_enabled)
> +		svm_ibs_msr_interception(svm, false);

I might be missing something here...  if svm->ibs_enabled is true, then
this intercept change will already have been done. Shouldn't this be
doing the reverse?  But, it looks like svm->ibs_enabled is set in the
init_vmcb_after_set_cpuid() function, which is called after
sev_es_init_vmcb() is called...  so it can never be true here, right?

Thanks,
Tom

>   }
>   
>   void sev_init_vmcb(struct vcpu_svm *svm)
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 6f566ed93f4c..0cfe23bb144a 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -4194,16 +4194,15 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
>   	guest_state_enter_irqoff();
>   
>   	amd_clear_divider();
> +	restore_mask = svm_save_swap_type_c(vcpu);
>   
> -	if (sev_es_guest(vcpu->kvm)) {
> +	if (sev_es_guest(vcpu->kvm))
>   		__svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
> -	} else {
> -		restore_mask = svm_save_swap_type_c(vcpu);
> +	else
>   		__svm_vcpu_run(svm, spec_ctrl_intercepted);
>   
> -		if (restore_mask)
> -			svm_restore_swap_type_c(vcpu, restore_mask);
> -	}
> +	if (restore_mask)
> +		svm_restore_swap_type_c(vcpu, restore_mask);
>   
>   	guest_state_exit_irqoff();
>   }
diff mbox series

Patch

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 4096d2f68770..58b60842a3b7 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -469,6 +469,18 @@  struct sev_es_save_area {
 	u8 fpreg_x87[80];
 	u8 fpreg_xmm[256];
 	u8 fpreg_ymm[256];
+	u8 lbr_stack_from_to[256];
+	u64 lbr_select;
+	u64 ibs_fetch_ctl;
+	u64 ibs_fetch_linear_addr;
+	u64 ibs_op_ctl;
+	u64 ibs_op_rip;
+	u64 ibs_op_data;
+	u64 ibs_op_data2;
+	u64 ibs_op_data3;
+	u64 ibs_dc_linear_addr;
+	u64 ibs_br_target;
+	u64 ibs_fetch_extd_ctl;
 } __packed;
 
 struct ghcb_save_area {
@@ -527,7 +539,7 @@  struct ghcb {
 
 #define EXPECTED_VMCB_SAVE_AREA_SIZE		1992
 #define EXPECTED_GHCB_SAVE_AREA_SIZE		1032
-#define EXPECTED_SEV_ES_SAVE_AREA_SIZE		1648
+#define EXPECTED_SEV_ES_SAVE_AREA_SIZE		1992
 #define EXPECTED_VMCB_CONTROL_AREA_SIZE		1024
 #define EXPECTED_GHCB_SIZE			PAGE_SIZE
 
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index d3aec1f2cad2..41706335cedd 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -59,6 +59,7 @@  module_param_named(sev_es, sev_es_enabled, bool, 0444);
 #define sev_es_enabled false
 #endif /* CONFIG_KVM_AMD_SEV */
 
+static bool sev_es_vibs_enabled;
 static u8 sev_enc_bit;
 static DECLARE_RWSEM(sev_deactivate_lock);
 static DEFINE_MUTEX(sev_bitmap_lock);
@@ -2256,6 +2257,9 @@  void __init sev_hardware_setup(void)
 
 	sev_enabled = sev_supported;
 	sev_es_enabled = sev_es_supported;
+
+	if (!sev_es_enabled || !cpu_feature_enabled(X86_FEATURE_SEV_ES_VIBS))
+		sev_es_vibs_enabled = false;
 #endif
 }
 
@@ -2993,6 +2997,9 @@  static void sev_es_init_vmcb(struct vcpu_svm *svm)
 		if (guest_cpuid_has(&svm->vcpu, X86_FEATURE_RDTSCP))
 			svm_clr_intercept(svm, INTERCEPT_RDTSCP);
 	}
+
+	if (sev_es_vibs_enabled && svm->ibs_enabled)
+		svm_ibs_msr_interception(svm, false);
 }
 
 void sev_init_vmcb(struct vcpu_svm *svm)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 6f566ed93f4c..0cfe23bb144a 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4194,16 +4194,15 @@  static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
 	guest_state_enter_irqoff();
 
 	amd_clear_divider();
+	restore_mask = svm_save_swap_type_c(vcpu);
 
-	if (sev_es_guest(vcpu->kvm)) {
+	if (sev_es_guest(vcpu->kvm))
 		__svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
-	} else {
-		restore_mask = svm_save_swap_type_c(vcpu);
+	else
 		__svm_vcpu_run(svm, spec_ctrl_intercepted);
 
-		if (restore_mask)
-			svm_restore_swap_type_c(vcpu, restore_mask);
-	}
+	if (restore_mask)
+		svm_restore_swap_type_c(vcpu, restore_mask);
 
 	guest_state_exit_irqoff();
 }