Message ID | 20250310064522.14100-2-nikunj@amd.com (mailing list archive) |
---|---|
State | New |
Series | Enable Secure TSC for SEV-SNP |
On 3/10/25 01:45, Nikunj A Dadhania wrote:
> Introduce the read-only MSR GUEST_TSC_FREQ (0xc0010134) that returns the
> guest's effective TSC frequency in MHz when Secure TSC is enabled for SNP
> guests. Disable interception of this MSR when Secure TSC is enabled. Note
> that the GUEST_TSC_FREQ MSR is accessible only to the guest and not from
> the hypervisor context.
>
> Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>

Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>

> ---
>  arch/x86/include/asm/svm.h |  1 +
>  arch/x86/kvm/svm/sev.c     |  3 +++
>  arch/x86/kvm/svm/svm.c     |  1 +
>  arch/x86/kvm/svm/svm.h     | 11 ++++++++++-
>  4 files changed, 15 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
> index 9b7fa99ae951..6ab66b80e751 100644
> --- a/arch/x86/include/asm/svm.h
> +++ b/arch/x86/include/asm/svm.h
> @@ -290,6 +290,7 @@ static_assert((X2AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_
>  #define SVM_SEV_FEAT_RESTRICTED_INJECTION	BIT(3)
>  #define SVM_SEV_FEAT_ALTERNATE_INJECTION	BIT(4)
>  #define SVM_SEV_FEAT_DEBUG_SWAP		BIT(5)
> +#define SVM_SEV_FEAT_SECURE_TSC		BIT(9)
>  
>  struct vmcb_seg {
>  	u16 selector;
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index 0bc708ee2788..50263b473f95 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -4504,6 +4504,9 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
>  	/* Clear intercepts on selected MSRs */
>  	set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
>  	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
> +
> +	if (snp_secure_tsc_enabled(vcpu->kvm))
> +		set_msr_interception(vcpu, svm->msrpm, MSR_AMD64_GUEST_TSC_FREQ, 1, 1);
>  }
>  
>  void sev_init_vmcb(struct vcpu_svm *svm)
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 8abeab91d329..e65721db1f81 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -143,6 +143,7 @@ static const struct svm_direct_access_msrs {
> 	{ .index = X2APIC_MSR(APIC_TMICT),		.always = false },
> 	{ .index = X2APIC_MSR(APIC_TMCCT),		.always = false },
> 	{ .index = X2APIC_MSR(APIC_TDCR),		.always = false },
> +	{ .index = MSR_AMD64_GUEST_TSC_FREQ,		.always = false },
> 	{ .index = MSR_INVALID,				.always = false },
>  };
>  
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index d4490eaed55d..711e21b7a3d0 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -44,7 +44,7 @@ static inline struct page *__sme_pa_to_page(unsigned long pa)
>  #define IOPM_SIZE PAGE_SIZE * 3
>  #define MSRPM_SIZE PAGE_SIZE * 2
>  
> -#define MAX_DIRECT_ACCESS_MSRS	48
> +#define MAX_DIRECT_ACCESS_MSRS	49
>  #define MSRPM_OFFSETS	32
>  extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
>  extern bool npt_enabled;
> @@ -377,10 +377,19 @@ static __always_inline bool sev_snp_guest(struct kvm *kvm)
>  	return (sev->vmsa_features & SVM_SEV_FEAT_SNP_ACTIVE) &&
>  	       !WARN_ON_ONCE(!sev_es_guest(kvm));
>  }
> +
> +static inline bool snp_secure_tsc_enabled(struct kvm *kvm)
> +{
> +	struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
> +
> +	return (sev->vmsa_features & SVM_SEV_FEAT_SECURE_TSC) &&
> +	       !WARN_ON_ONCE(!sev_snp_guest(kvm));
> +}
>  #else
>  #define sev_guest(kvm) false
>  #define sev_es_guest(kvm) false
>  #define sev_snp_guest(kvm) false
> +#define snp_secure_tsc_enabled(kvm) false
>  #endif
>  
>  static inline bool ghcb_gpa_is_registered(struct vcpu_svm *svm, u64 val)
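For reference, a standalone sketch (not part of the patch) of where MSR_AMD64_GUEST_TSC_FREQ lands in the SVM MSR permission map. Per the AMD APM, the MSRPM has three 2 KB regions covering the MSR ranges 0x0-0x1fff, 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff, with two bits per MSR (even bit = read intercept, odd bit = write intercept; a set bit means intercept). Clearing both bits, which is what the "Clear intercepts on selected MSRs" hunk above requests for this MSR, lets the guest read it without being intercepted. The helper name below is made up for illustration.

/*
 * Illustrative only: compute where an MSR lands in the SVM MSR
 * permission map (MSRPM), following the layout described above.
 */
#include <stdint.h>
#include <stdio.h>

#define MSR_AMD64_GUEST_TSC_FREQ	0xc0010134U

static int msrpm_locate(uint32_t msr, unsigned int *byte, unsigned int *read_bit)
{
	unsigned int base, idx = msr & 0x1fff;

	if (msr <= 0x1fff)
		base = 0x000;				/* first 2K region  */
	else if (msr >= 0xc0000000U && msr <= 0xc0001fffU)
		base = 0x800;				/* second 2K region */
	else if (msr >= 0xc0010000U && msr <= 0xc0011fffU)
		base = 0x1000;				/* third 2K region  */
	else
		return -1;				/* not covered      */

	*byte = base + (idx * 2) / 8;
	*read_bit = (idx * 2) % 8;			/* write bit is +1  */
	return 0;
}

int main(void)
{
	unsigned int byte, bit;

	if (!msrpm_locate(MSR_AMD64_GUEST_TSC_FREQ, &byte, &bit))
		printf("GUEST_TSC_FREQ: MSRPM byte 0x%x, read bit %u, write bit %u\n",
		       byte, bit, bit + 1);
	return 0;
}

Running this should print byte offset 0x104d with read bit 0 and write bit 1, i.e. the MSR sits in the third MSRPM region reserved for the 0xc0010000 range.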
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 9b7fa99ae951..6ab66b80e751 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -290,6 +290,7 @@ static_assert((X2AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_
 #define SVM_SEV_FEAT_RESTRICTED_INJECTION	BIT(3)
 #define SVM_SEV_FEAT_ALTERNATE_INJECTION	BIT(4)
 #define SVM_SEV_FEAT_DEBUG_SWAP		BIT(5)
+#define SVM_SEV_FEAT_SECURE_TSC		BIT(9)
 
 struct vmcb_seg {
 	u16 selector;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 0bc708ee2788..50263b473f95 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -4504,6 +4504,9 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
 	/* Clear intercepts on selected MSRs */
 	set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
 	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
+
+	if (snp_secure_tsc_enabled(vcpu->kvm))
+		set_msr_interception(vcpu, svm->msrpm, MSR_AMD64_GUEST_TSC_FREQ, 1, 1);
 }
 
 void sev_init_vmcb(struct vcpu_svm *svm)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 8abeab91d329..e65721db1f81 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -143,6 +143,7 @@ static const struct svm_direct_access_msrs {
 	{ .index = X2APIC_MSR(APIC_TMICT),		.always = false },
 	{ .index = X2APIC_MSR(APIC_TMCCT),		.always = false },
 	{ .index = X2APIC_MSR(APIC_TDCR),		.always = false },
+	{ .index = MSR_AMD64_GUEST_TSC_FREQ,		.always = false },
 	{ .index = MSR_INVALID,				.always = false },
 };
 
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index d4490eaed55d..711e21b7a3d0 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -44,7 +44,7 @@ static inline struct page *__sme_pa_to_page(unsigned long pa)
 #define IOPM_SIZE PAGE_SIZE * 3
 #define MSRPM_SIZE PAGE_SIZE * 2
 
-#define MAX_DIRECT_ACCESS_MSRS	48
+#define MAX_DIRECT_ACCESS_MSRS	49
 #define MSRPM_OFFSETS	32
 extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
 extern bool npt_enabled;
@@ -377,10 +377,19 @@ static __always_inline bool sev_snp_guest(struct kvm *kvm)
 	return (sev->vmsa_features & SVM_SEV_FEAT_SNP_ACTIVE) &&
 	       !WARN_ON_ONCE(!sev_es_guest(kvm));
 }
+
+static inline bool snp_secure_tsc_enabled(struct kvm *kvm)
+{
+	struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
+
+	return (sev->vmsa_features & SVM_SEV_FEAT_SECURE_TSC) &&
+	       !WARN_ON_ONCE(!sev_snp_guest(kvm));
+}
 #else
 #define sev_guest(kvm) false
 #define sev_es_guest(kvm) false
 #define sev_snp_guest(kvm) false
+#define snp_secure_tsc_enabled(kvm) false
 #endif
 
 static inline bool ghcb_gpa_is_registered(struct vcpu_svm *svm, u64 val)
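The new snp_secure_tsc_enabled() helper follows the same pattern as sev_snp_guest(): a vmsa_features bit test plus a sanity check that the more specific feature implies the more general one (the kernel version additionally WARNs on the invalid combination). A self-contained sketch of that relationship, assuming SVM_SEV_FEAT_SNP_ACTIVE is BIT(0) as defined in svm.h; the sample features value and the standalone function below are made up for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SVM_SEV_FEAT_SNP_ACTIVE		(1ULL << 0)
#define SVM_SEV_FEAT_SECURE_TSC		(1ULL << 9)

/* Mirrors the helper's logic: Secure TSC is only valid for an SNP guest. */
static bool secure_tsc_enabled(uint64_t vmsa_features)
{
	return (vmsa_features & SVM_SEV_FEAT_SECURE_TSC) &&
	       (vmsa_features & SVM_SEV_FEAT_SNP_ACTIVE);
}

int main(void)
{
	uint64_t features = SVM_SEV_FEAT_SNP_ACTIVE | SVM_SEV_FEAT_SECURE_TSC;

	printf("Secure TSC enabled: %s\n",
	       secure_tsc_enabled(features) ? "yes" : "no");
	printf("Secure TSC without SNP (invalid): %s\n",
	       secure_tsc_enabled(SVM_SEV_FEAT_SECURE_TSC) ? "yes" : "no");
	return 0;
}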
Introduce the read-only MSR GUEST_TSC_FREQ (0xc0010134) that returns the
guest's effective TSC frequency in MHz when Secure TSC is enabled for SNP
guests. Disable interception of this MSR when Secure TSC is enabled. Note
that the GUEST_TSC_FREQ MSR is accessible only to the guest and not from
the hypervisor context.

Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
---
 arch/x86/include/asm/svm.h |  1 +
 arch/x86/kvm/svm/sev.c     |  3 +++
 arch/x86/kvm/svm/svm.c     |  1 +
 arch/x86/kvm/svm/svm.h     | 11 ++++++++++-
 4 files changed, 15 insertions(+), 1 deletion(-)
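With interception cleared, the MSR can be read from inside the guest; from guest userspace the standard msr driver interface works as well. A minimal sketch, assuming an SNP guest with Secure TSC enabled, the msr module loaded and root privileges (outside that environment the read will simply fail):

#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_AMD64_GUEST_TSC_FREQ	0xc0010134

int main(void)
{
	uint64_t mhz;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr (msr module loaded?)");
		return 1;
	}
	/* The msr driver uses the file offset as the MSR index. */
	if (pread(fd, &mhz, sizeof(mhz), MSR_AMD64_GUEST_TSC_FREQ) != sizeof(mhz)) {
		perror("read GUEST_TSC_FREQ");
		close(fd);
		return 1;
	}
	printf("Guest effective TSC frequency: %" PRIu64 " MHz\n", mhz);
	close(fd);
	return 0;
}

In guest kernel context the same value would typically be read with rdmsrl(MSR_AMD64_GUEST_TSC_FREQ, val) rather than through the character device.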