
[v10,15/16] KVM: x86: Add Arch LBR data MSR access interface

Message ID 20220422075509.353942-16-weijiang.yang@intel.com (mailing list archive)
State New, archived
Series Introduce Architectural LBR for vPMU

Commit Message

Yang, Weijiang April 22, 2022, 7:55 a.m. UTC
Arch LBR MSRs are xsave-supported, but the PMU code manages them as an
"independent" xsave feature, i.e., during thread/process context switch
the MSRs are saved/restored by PMU-specific code instead of by the
generic kernel FPU XSAVES/XRSTORS operation. When the vcpu guest/host
FPU state swap happens, the Arch LBR MSRs won't be touched, so access
them directly.

Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
---
 arch/x86/kvm/vmx/pmu_intel.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

Comments

Liang, Kan April 28, 2022, 3:05 p.m. UTC | #1
On 4/22/2022 3:55 AM, Yang Weijiang wrote:
> Arch LBR MSRs are xsave-supported, but the PMU code manages them as an
> "independent" xsave feature, i.e., during thread/process context switch
> the MSRs are saved/restored by PMU-specific code instead of by the
> generic kernel FPU XSAVES/XRSTORS operation.

During thread/process context switch, Linux perf still uses the 
XSAVES/XRSTORS operation to save/restore the LBR MSRs.

Linux perf manipulates these MSRs directly only when the xsave feature
is not supported.

Thanks,
Kan

> When the vcpu guest/host FPU state swap
> happens, the Arch LBR MSRs won't be touched, so access them directly.
> 
> Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
> ---
>   arch/x86/kvm/vmx/pmu_intel.c | 10 ++++++++++
>   1 file changed, 10 insertions(+)
> 
> diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
> index 79eecbffa07b..5f81644c4612 100644
> --- a/arch/x86/kvm/vmx/pmu_intel.c
> +++ b/arch/x86/kvm/vmx/pmu_intel.c
> @@ -431,6 +431,11 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>   	case MSR_ARCH_LBR_CTL:
>   		msr_info->data = vmcs_read64(GUEST_IA32_LBR_CTL);
>   		return 0;
> +	case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
> +	case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
> +	case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
> +		rdmsrl(msr_info->index, msr_info->data);
> +		return 0;
>   	default:
>   		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
>   		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
> @@ -512,6 +517,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>   		    (data & ARCH_LBR_CTL_LBREN))
>   			intel_pmu_create_guest_lbr_event(vcpu);
>   		return 0;
> +	case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
> +	case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
> +	case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
> +		wrmsrl(msr_info->index, msr_info->data);
> +		return 0;
>   	default:
>   		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
>   		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
Yang, Weijiang April 29, 2022, 6:34 a.m. UTC | #2
On 4/28/2022 11:05 PM, Liang, Kan wrote:
>
> On 4/22/2022 3:55 AM, Yang Weijiang wrote:
>> Arch LBR MSRs are xsave-supported, but the PMU code manages them as an
>> "independent" xsave feature, i.e., during thread/process context switch
>> the MSRs are saved/restored by PMU-specific code instead of by the
>> generic kernel FPU XSAVES/XRSTORS operation.
> During thread/process context switch, Linux perf still uses the
> XSAVES/XRSTORS operation to save/restore the LBR MSRs.

I meant the Arch LBR MSRs are switched with perf_event_task_sched_out()/
perf_event_task_sched_in() instead of save_fpregs_to_fpstate()/
restore_fpregs_from_fpstate().

Sorry for the confusion, will modify the wording a bit.
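
For illustration, that PMU-specific fallback amounts to a per-entry loop
over the stacked FROM/TO/INFO MSRs, the same layout the case ranges in
this patch rely on. A minimal sketch, not the actual perf code: the
lbr_entry layout, lbr_nr, and rdmsrl_stub() accessor are assumptions for
illustration; the MSR indices match arch/x86/include/asm/msr-index.h.

#include <stdint.h>

#define MSR_ARCH_LBR_INFO_0  0x00001200
#define MSR_ARCH_LBR_FROM_0  0x00001500
#define MSR_ARCH_LBR_TO_0    0x00001600

struct lbr_entry { uint64_t from, to, info; };

/* Stand-in for the kernel's rdmsrl(); returns the MSR value. */
extern uint64_t rdmsrl_stub(uint32_t msr);

/* Save lbr_nr records the way PMU-specific code would when XSAVES
 * cannot manage the Arch LBR component: one read per stacked MSR. */
static void arch_lbr_save_by_msr(struct lbr_entry *ent, int lbr_nr)
{
	int i;

	for (i = 0; i < lbr_nr; i++) {
		ent[i].from = rdmsrl_stub(MSR_ARCH_LBR_FROM_0 + i);
		ent[i].to   = rdmsrl_stub(MSR_ARCH_LBR_TO_0 + i);
		ent[i].info = rdmsrl_stub(MSR_ARCH_LBR_INFO_0 + i);
	}
}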

>
> Linux perf manipulates these MSRs directly only when the xsave feature
> is not supported.
Exactly.
>
> Thanks,
> Kan
>
>> When the vcpu guest/host FPU state swap
>> happens, the Arch LBR MSRs won't be touched, so access them directly.
>>
>> Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
>> ---
>>    arch/x86/kvm/vmx/pmu_intel.c | 10 ++++++++++
>>    1 file changed, 10 insertions(+)
>>
>> diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
>> index 79eecbffa07b..5f81644c4612 100644
>> --- a/arch/x86/kvm/vmx/pmu_intel.c
>> +++ b/arch/x86/kvm/vmx/pmu_intel.c
>> @@ -431,6 +431,11 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>>    	case MSR_ARCH_LBR_CTL:
>>    		msr_info->data = vmcs_read64(GUEST_IA32_LBR_CTL);
>>    		return 0;
>> +	case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
>> +	case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
>> +	case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
>> +		rdmsrl(msr_info->index, msr_info->data);
>> +		return 0;
>>    	default:
>>    		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
>>    		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
>> @@ -512,6 +517,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>>    		    (data & ARCH_LBR_CTL_LBREN))
>>    			intel_pmu_create_guest_lbr_event(vcpu);
>>    		return 0;
>> +	case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
>> +	case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
>> +	case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
>> +		wrmsrl(msr_info->index, msr_info->data);
>> +		return 0;
>>    	default:
>>    		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
>>    		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {

Patch

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 79eecbffa07b..5f81644c4612 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -431,6 +431,11 @@  static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_ARCH_LBR_CTL:
 		msr_info->data = vmcs_read64(GUEST_IA32_LBR_CTL);
 		return 0;
+	case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
+	case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
+	case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
+		rdmsrl(msr_info->index, msr_info->data);
+		return 0;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
@@ -512,6 +517,11 @@  static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		    (data & ARCH_LBR_CTL_LBREN))
 			intel_pmu_create_guest_lbr_event(vcpu);
 		return 0;
+	case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
+	case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
+	case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
+		wrmsrl(msr_info->index, msr_info->data);
+		return 0;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
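
For completeness, a usage sketch of the interface this patch adds, seen
from userspace: reading the first LBR record with KVM_GET_MSRS. This is
illustrative only; it assumes a vcpu_fd obtained via KVM_CREATE_VCPU and
a guest already set up for Arch LBR by the earlier patches in this
series. The MSR indices follow arch/x86/include/asm/msr-index.h.

#include <linux/kvm.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

#define MSR_ARCH_LBR_INFO_0  0x00001200
#define MSR_ARCH_LBR_FROM_0  0x00001500
#define MSR_ARCH_LBR_TO_0    0x00001600

static void read_first_lbr_record(int vcpu_fd)
{
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entries[3];
	} msrs = { .hdr = { .nmsrs = 3 } };

	msrs.entries[0].index = MSR_ARCH_LBR_FROM_0;
	msrs.entries[1].index = MSR_ARCH_LBR_TO_0;
	msrs.entries[2].index = MSR_ARCH_LBR_INFO_0;

	/* KVM_GET_MSRS returns the number of MSRs read; with this patch
	 * the three indices land in intel_pmu_get_msr()'s new cases. */
	if (ioctl(vcpu_fd, KVM_GET_MSRS, &msrs) != 3) {
		perror("KVM_GET_MSRS");
		exit(1);
	}

	printf("LBR0: from=%#llx to=%#llx info=%#llx\n",
	       (unsigned long long)msrs.entries[0].data,
	       (unsigned long long)msrs.entries[1].data,
	       (unsigned long long)msrs.entries[2].data);
}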