[v2,13/15] KVM: x86/vmx: Save/Restore guest Arch LBR Ctrl msr at SMM entry/exit

Message ID 20221125040604.5051-14-weijiang.yang@intel.com (mailing list archive)
State New, archived
Series: Introduce Architectural LBR for vPMU

Commit Message

Yang, Weijiang Nov. 25, 2022, 4:06 a.m. UTC
Per SDM 3B Chapter 18, "IA32_LBR_CTL.LBREn is saved and cleared on #SMI,
and restored on RSM". Accordingly, save the guest's IA32_LBR_CTL to SMRAM
and clear LBREn in the VMCS on SMM entry, and do the reverse on SMM exit.

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
---
 arch/x86/kvm/smm.c     |  1 +
 arch/x86/kvm/smm.h     |  3 ++-
 arch/x86/kvm/vmx/vmx.c | 22 ++++++++++++++++++++++
 3 files changed, 25 insertions(+), 1 deletion(-)

Comments

Sean Christopherson Jan. 27, 2023, 10:11 p.m. UTC | #1
On Thu, Nov 24, 2022, Yang Weijiang wrote:
> Per SDM 3B Chapter 18, "IA32_LBR_CTL.LBREn is saved and cleared on #SMI,
> and restored on RSM". Accordingly, save the guest's IA32_LBR_CTL to SMRAM
> and clear LBREn in the VMCS on SMM entry, and do the reverse on SMM exit.
> 
> Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
> Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
> ---
>  arch/x86/kvm/smm.c     |  1 +
>  arch/x86/kvm/smm.h     |  3 ++-
>  arch/x86/kvm/vmx/vmx.c | 22 ++++++++++++++++++++++
>  3 files changed, 25 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
> index a9c1c2af8d94..5987090b440f 100644
> --- a/arch/x86/kvm/smm.c
> +++ b/arch/x86/kvm/smm.c
> @@ -86,6 +86,7 @@ static void check_smram_offsets(void)
>  	CHECK_SMRAM64_OFFSET(smm_revison,		0xFEFC);
>  	CHECK_SMRAM64_OFFSET(smbase,			0xFF00);
>  	CHECK_SMRAM64_OFFSET(reserved4,			0xFF04);
> +	CHECK_SMRAM64_OFFSET(arch_lbr_ctl,		0xFF10);
>  	CHECK_SMRAM64_OFFSET(ssp,			0xFF18);
>  	CHECK_SMRAM64_OFFSET(svm_guest_pat,		0xFF20);
>  	CHECK_SMRAM64_OFFSET(svm_host_efer,		0xFF28);
> diff --git a/arch/x86/kvm/smm.h b/arch/x86/kvm/smm.h
> index a1cf2ac5bd78..5a6479205d91 100644
> --- a/arch/x86/kvm/smm.h
> +++ b/arch/x86/kvm/smm.h
> @@ -114,7 +114,8 @@ struct kvm_smram_state_64 {
>  	u32 reserved3[3];
>  	u32 smm_revison;
>  	u32 smbase;
> -	u32 reserved4[5];
> +	u32 reserved4[3];
> +	u64 arch_lbr_ctl;
>  
>  	/* ssp and svm_* fields below are not implemented by KVM */
>  	u64 ssp;
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 6ad765ea4059..cc782233c075 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -8006,11 +8006,21 @@ static int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
>  	vmx->nested.smm.vmxon = vmx->nested.vmxon;
>  	vmx->nested.vmxon = false;
>  	vmx_clear_hlt(vcpu);
> +
> +	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
> +	    guest_cpuid_has(vcpu, X86_FEATURE_LM)) {

Uh, so this arbitrary dependency on 64-bit vCPUs needs to be factored into the
enabling.  And KVM should WARN if arch LBRs get enabled for a 32-bit vCPU.
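
E.g. something along these lines (an illustrative sketch only; the helper
name below is made up and is not code from this series):

	/*
	 * Sketch: arch LBRs are architecturally 64-bit only, so refuse to
	 * use them for a vCPU without long mode, and warn, since getting
	 * here would mean the enabling logic let a bad config through.
	 */
	static bool guest_can_use_arch_lbr(struct kvm_vcpu *vcpu)
	{
		if (!kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR))
			return false;

		if (WARN_ON_ONCE(!guest_cpuid_has(vcpu, X86_FEATURE_LM)))
			return false;

		return true;
	}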

> +		u64 ctl = vmcs_read64(GUEST_IA32_LBR_CTL);
> +
> +		smram->smram64.arch_lbr_ctl = ctl;
> +		vmcs_write64(GUEST_IA32_LBR_CTL, ctl & ~ARCH_LBR_CTL_LBREN);
> +	}
> +
>  	return 0;
>  }
>  
>  static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
>  {
> +	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
>  	int ret;
>  
> @@ -8027,6 +8037,18 @@ static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
>  		vmx->nested.nested_run_pending = 1;
>  		vmx->nested.smm.guest_mode = false;
>  	}
> +
> +	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
> +	    guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
> +		u64 ctl = smram->smram64.arch_lbr_ctl;
> +
> +		vmcs_write64(GUEST_IA32_LBR_CTL, ctl & ARCH_LBR_CTL_LBREN);

IIUC, this should set only LBREn and preserve all other bits, not clobber the
entire MSR.
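
E.g. restore just LBREn from SMRAM and leave the other bits of the VMCS
field untouched, something like (sketch, not the code that was posted):

	u64 ctl = vmcs_read64(GUEST_IA32_LBR_CTL);

	/* Only LBREn is saved/cleared on #SMI and restored on RSM. */
	if (smram->smram64.arch_lbr_ctl & ARCH_LBR_CTL_LBREN)
		vmcs_write64(GUEST_IA32_LBR_CTL, ctl | ARCH_LBR_CTL_LBREN);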

> +
> +		if (intel_pmu_lbr_is_enabled(vcpu) &&
> +		    (ctl & ARCH_LBR_CTL_LBREN) && !lbr_desc->event)
> +			intel_pmu_create_guest_lbr_event(vcpu);
> +	}
> +
>  	return 0;
>  }
>  
> -- 
> 2.27.0
>
Yang, Weijiang Jan. 30, 2023, 12:50 p.m. UTC | #2
On 1/28/2023 6:11 AM, Sean Christopherson wrote:
> On Thu, Nov 24, 2022, Yang Weijiang wrote:
>> Per SDM 3B Chapter 18, "IA32_LBR_CTL.LBREn is saved and cleared on #SMI,
>> and restored on RSM". Accordingly, save the guest's IA32_LBR_CTL to SMRAM
>> and clear LBREn in the VMCS on SMM entry, and do the reverse on SMM exit.

[...]


>> @@ -8006,11 +8006,21 @@ static int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
>>   	vmx->nested.smm.vmxon = vmx->nested.vmxon;
>>   	vmx->nested.vmxon = false;
>>   	vmx_clear_hlt(vcpu);
>> +
>> +	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
>> +	    guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
> Uh, so this arbitrary dependency on 64-bit vCPUs needs to be factored into the
> enabling.  And KVM should WARN if arch LBRs get enabled for a 32-bit vCPU.

OK, will add the check when creating the event.
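
Something along these lines in intel_pmu_create_guest_lbr_event() (a sketch
only; the exact form and placement may differ in the next version):

	/* Arch LBR is 64-bit only; bail and warn for a 32-bit vCPU. */
	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
	    WARN_ON_ONCE(!guest_cpuid_has(vcpu, X86_FEATURE_LM)))
		return;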

>
>> +		u64 ctl = vmcs_read64(GUEST_IA32_LBR_CTL);
>> +
>> +		smram->smram64.arch_lbr_ctl = ctl;
>> +		vmcs_write64(GUEST_IA32_LBR_CTL, ctl & ~ARCH_LBR_CTL_LBREN);
>> +	}
>> +
>>   	return 0;
>>   }
>>   
>>   static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
>>   {
>> +	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
>>   	struct vcpu_vmx *vmx = to_vmx(vcpu);
>>   	int ret;
>>   
>> @@ -8027,6 +8037,18 @@ static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
>>   		vmx->nested.nested_run_pending = 1;
>>   		vmx->nested.smm.guest_mode = false;
>>   	}
>> +
>> +	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
>> +	    guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
>> +		u64 ctl = smram->smram64.arch_lbr_ctl;
>> +
>> +		vmcs_write64(GUEST_IA32_LBR_CTL, ctl & ARCH_LBR_CTL_LBREN);
> IIUC, this should set only LBREn and preserve all other bits, not clobber the
> entire MSR.

Oops, it's a typo, thanks!

>
>> +
>> +		if (intel_pmu_lbr_is_enabled(vcpu) &&
>> +		    (ctl & ARCH_LBR_CTL_LBREN) && !lbr_desc->event)
>> +			intel_pmu_create_guest_lbr_event(vcpu);
>> +	}
>> +
>>   	return 0;
>>   }
>>   
>> -- 
>> 2.27.0
>>

Patch

diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
index a9c1c2af8d94..5987090b440f 100644
--- a/arch/x86/kvm/smm.c
+++ b/arch/x86/kvm/smm.c
@@ -86,6 +86,7 @@ static void check_smram_offsets(void)
 	CHECK_SMRAM64_OFFSET(smm_revison,		0xFEFC);
 	CHECK_SMRAM64_OFFSET(smbase,			0xFF00);
 	CHECK_SMRAM64_OFFSET(reserved4,			0xFF04);
+	CHECK_SMRAM64_OFFSET(arch_lbr_ctl,		0xFF10);
 	CHECK_SMRAM64_OFFSET(ssp,			0xFF18);
 	CHECK_SMRAM64_OFFSET(svm_guest_pat,		0xFF20);
 	CHECK_SMRAM64_OFFSET(svm_host_efer,		0xFF28);
diff --git a/arch/x86/kvm/smm.h b/arch/x86/kvm/smm.h
index a1cf2ac5bd78..5a6479205d91 100644
--- a/arch/x86/kvm/smm.h
+++ b/arch/x86/kvm/smm.h
@@ -114,7 +114,8 @@ struct kvm_smram_state_64 {
 	u32 reserved3[3];
 	u32 smm_revison;
 	u32 smbase;
-	u32 reserved4[5];
+	u32 reserved4[3];
+	u64 arch_lbr_ctl;
 
 	/* ssp and svm_* fields below are not implemented by KVM */
 	u64 ssp;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 6ad765ea4059..cc782233c075 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -8006,11 +8006,21 @@ static int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
 	vmx->nested.smm.vmxon = vmx->nested.vmxon;
 	vmx->nested.vmxon = false;
 	vmx_clear_hlt(vcpu);
+
+	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
+	    guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+		u64 ctl = vmcs_read64(GUEST_IA32_LBR_CTL);
+
+		smram->smram64.arch_lbr_ctl = ctl;
+		vmcs_write64(GUEST_IA32_LBR_CTL, ctl & ~ARCH_LBR_CTL_LBREN);
+	}
+
 	return 0;
 }
 
 static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
 {
+	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int ret;
 
@@ -8027,6 +8037,18 @@ static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
 		vmx->nested.nested_run_pending = 1;
 		vmx->nested.smm.guest_mode = false;
 	}
+
+	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
+	    guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+		u64 ctl = smram->smram64.arch_lbr_ctl;
+
+		vmcs_write64(GUEST_IA32_LBR_CTL, ctl & ARCH_LBR_CTL_LBREN);
+
+		if (intel_pmu_lbr_is_enabled(vcpu) &&
+		    (ctl & ARCH_LBR_CTL_LBREN) && !lbr_desc->event)
+			intel_pmu_create_guest_lbr_event(vcpu);
+	}
+
 	return 0;
 }