Message ID | 20220506033305.5135-16-weijiang.yang@intel.com (mailing list archive)
---|---
State | New, archived |
Series | Introduce Architectural LBR for vPMU
On 5/5/2022 11:33 PM, Yang Weijiang wrote:
> Arch LBR MSRs are xsave-supported, but they are managed as an "independent"
> xsave feature by the PMU code, i.e., during thread/process context switch,
> the MSRs are saved/restored by perf_event_task_sched_{in|out} instead of the
> generic kernel FPU switch code, i.e., save_fpregs_to_fpstate() and
> restore_fpregs_from_fpstate(). When the vCPU's guest/host FPU state swap
> happens, the Arch LBR MSRs are therefore retained, so they can be accessed
> directly.
>
> Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>

Reviewed-by: Kan Liang <kan.liang@linux.intel.com>

> ---
>  arch/x86/kvm/vmx/pmu_intel.c | 10 ++++++++++
>  1 file changed, 10 insertions(+)
>
> diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
> index 3adc8f28d142..c2eab6272b35 100644
> --- a/arch/x86/kvm/vmx/pmu_intel.c
> +++ b/arch/x86/kvm/vmx/pmu_intel.c
> @@ -431,6 +431,11 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  	case MSR_ARCH_LBR_CTL:
>  		msr_info->data = vmcs_read64(GUEST_IA32_LBR_CTL);
>  		return 0;
> +	case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
> +	case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
> +	case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
> +		rdmsrl(msr_info->index, msr_info->data);
> +		return 0;
>  	default:
>  		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
>  		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
> @@ -512,6 +517,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  		    (data & ARCH_LBR_CTL_LBREN))
>  			intel_pmu_create_guest_lbr_event(vcpu);
>  		return 0;
> +	case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
> +	case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
> +	case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
> +		wrmsrl(msr_info->index, msr_info->data);
> +		return 0;
>  	default:
>  		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
>  		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 3adc8f28d142..c2eab6272b35 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -431,6 +431,11 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_ARCH_LBR_CTL:
 		msr_info->data = vmcs_read64(GUEST_IA32_LBR_CTL);
 		return 0;
+	case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
+	case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
+	case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
+		rdmsrl(msr_info->index, msr_info->data);
+		return 0;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
@@ -512,6 +517,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		    (data & ARCH_LBR_CTL_LBREN))
 			intel_pmu_create_guest_lbr_event(vcpu);
 		return 0;
+	case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
+	case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
+	case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
+		wrmsrl(msr_info->index, msr_info->data);
+		return 0;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
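The `case lo ... hi:` lines in the hunks above use the GCC/Clang case-range extension, so each three-case group covers 32 consecutive MSR indices per bank (FROM_IP/TO_IP/INFO). Below is a minimal, self-contained sketch of the same dispatch pattern; the numeric MSR bases are my assumption from the SDM/msr-index.h (INFO at 0x1200, FROM_IP at 0x1500, TO_IP at 0x1600), not values taken from this patch, and serve only to illustrate how an index maps to an LBR bank and entry number.

```c
/*
 * Illustrative sketch (not part of the patch): dispatch an MSR index to an
 * Arch LBR bank and entry number using case ranges, mirroring the pattern
 * in intel_pmu_get_msr()/intel_pmu_set_msr() above.
 */
#include <stdio.h>

#define MSR_ARCH_LBR_INFO_0  0x00001200U	/* assumed base, per SDM */
#define MSR_ARCH_LBR_FROM_0  0x00001500U	/* assumed base, per SDM */
#define MSR_ARCH_LBR_TO_0    0x00001600U	/* assumed base, per SDM */

static void classify(unsigned int msr)
{
	switch (msr) {
	case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
		printf("0x%x -> FROM_IP, entry %u\n", msr, msr - MSR_ARCH_LBR_FROM_0);
		break;
	case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
		printf("0x%x -> TO_IP, entry %u\n", msr, msr - MSR_ARCH_LBR_TO_0);
		break;
	case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
		printf("0x%x -> INFO, entry %u\n", msr, msr - MSR_ARCH_LBR_INFO_0);
		break;
	default:
		printf("0x%x -> not an Arch LBR record MSR\n", msr);
		break;
	}
}

int main(void)
{
	classify(0x1500);	/* FROM_IP of entry 0 */
	classify(0x161f);	/* TO_IP of entry 31 */
	classify(0x1220);	/* one past the INFO range -> default */
	return 0;
}
```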
Arch LBR MSRs are xsave-supported, but they are managed as an "independent"
xsave feature by the PMU code, i.e., during thread/process context switch,
the MSRs are saved/restored by perf_event_task_sched_{in|out} instead of the
generic kernel FPU switch code, i.e., save_fpregs_to_fpstate() and
restore_fpregs_from_fpstate(). When the vCPU's guest/host FPU state swap
happens, the Arch LBR MSRs are therefore retained, so they can be accessed
directly.

Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
---
 arch/x86/kvm/vmx/pmu_intel.c | 10 ++++++++++
 1 file changed, 10 insertions(+)
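For context on the "xsave-supported" wording: to my understanding (per the SDM), Arch LBR state is XSAVE state component 15, a supervisor component controlled via IA32_XSS rather than XCR0, which the kernel treats as "independent" state that perf saves/restores itself. The sketch below only checks whether the CPU enumerates that component from user space; the CPUID sub-leaf and bit position are my assumptions, not something this patch defines.

```c
/*
 * Illustrative sketch (not from the patch): check whether the CPU enumerates
 * the Arch LBR XSAVE component. Assumptions: CPUID.(EAX=0xD,ECX=1).ECX reports
 * the low 32 supported IA32_XSS bits, and bit 15 is Arch LBR.
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(0x0d, 1, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0xD not supported");
		return 1;
	}

	if (ecx & (1u << 15))
		puts("Arch LBR enumerated as an IA32_XSS (supervisor) XSAVE component");
	else
		puts("Arch LBR XSAVE component not enumerated");

	return 0;
}
```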