@@ -431,6 +431,11 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_ARCH_LBR_CTL:
msr_info->data = vmcs_read64(GUEST_IA32_LBR_CTL);
return 0;
+ case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
+ case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
+ case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
+ kvm_get_xsave_msr(msr_info);
+ return 0;
default:
if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
(pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
@@ -511,6 +516,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
(data & ARCH_LBR_CTL_LBREN))
intel_pmu_create_guest_lbr_event(vcpu);
return 0;
+ case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
+ case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
+ case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
+ kvm_set_xsave_msr(msr_info);
+ return 0;
default:
if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
(pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
@@ -4105,7 +4105,14 @@ EXPORT_SYMBOL_GPL(kvm_set_xsave_msr);
*/
static bool is_xsaves_msr(u32 index)
{
- return false;
+ bool xsaves_msr = (index >= MSR_ARCH_LBR_FROM_0 &&
+ index <= MSR_ARCH_LBR_FROM_0 + 31) ||
+ (index >= MSR_ARCH_LBR_TO_0 &&
+ index <= MSR_ARCH_LBR_TO_0 + 31) ||
+ (index >= MSR_ARCH_LBR_INFO_0 &&
+ index <= MSR_ARCH_LBR_INFO_0 + 31);
+
+ return xsaves_msr;
}
/*
When userspace wants to access guest Arch LBR data MSRs, these MSRs actually reside in the guest FPU area, so we need to load them into hardware before RDMSR and save them back into the FPU area after WRMSR. Signed-off-by: Yang Weijiang <weijiang.yang@intel.com> --- arch/x86/kvm/vmx/pmu_intel.c | 10 ++++++++++ arch/x86/kvm/x86.c | 9 ++++++++- 2 files changed, 18 insertions(+), 1 deletion(-)