Message ID | 20220506033305.5135-9-weijiang.yang@intel.com (mailing list archive) |
---|---|
State | New, archived |
Series | Introduce Architectural LBR for vPMU |
On 5/5/2022 11:32 PM, Yang Weijiang wrote:
> Take account of Arch LBR when doing sanity checks before programming
> the vPMU for the guest. Pass through Arch LBR recording MSRs to the
> guest to gain better performance. Note, Arch LBR and Legacy LBR support
> are mutually exclusive, i.e., they're not both available on one
> platform.
>
> Co-developed-by: Like Xu <like.xu@linux.intel.com>
> Signed-off-by: Like Xu <like.xu@linux.intel.com>
> Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
> ---
>  arch/x86/kvm/vmx/pmu_intel.c | 40 ++++++++++++++++++++++++++++--------
>  arch/x86/kvm/vmx/vmx.c       |  3 +++
>  2 files changed, 35 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
> index aa36d2072b91..bd4ddf63ba8f 100644
> --- a/arch/x86/kvm/vmx/pmu_intel.c
> +++ b/arch/x86/kvm/vmx/pmu_intel.c
> @@ -170,12 +170,16 @@ static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
>
>  bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu)
>  {
> +	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR))
> +		return guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR);
> +
>  	/*
>  	 * As a first step, a guest could only enable LBR feature if its
>  	 * cpu model is the same as the host because the LBR registers
>  	 * would be pass-through to the guest and they're model specific.
>  	 */
> -	return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
> +	return !boot_cpu_has(X86_FEATURE_ARCH_LBR) &&
> +		boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
>  }
>
>  bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
> @@ -199,12 +203,20 @@ static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
>  		return ret;
>  	}
>
> -	ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
> -		(index >= records->from && index < records->from + records->nr) ||
> -		(index >= records->to && index < records->to + records->nr);
> +	if (!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
> +		ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS);
> +

Shouldn't we return immediately if (ret == true)?
Continuing to check if (!ret) looks uncommon.

Actually we probably don't need the ret in this function.

	if (!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
	    ((index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS)))
		return true;

> +	if (!ret) {
> +		ret = (index >= records->from &&
> +		       index < records->from + records->nr) ||
> +		      (index >= records->to &&
> +		       index < records->to + records->nr);
> +	}

	if ((index >= records->from &&
	     index < records->from + records->nr) ||
	    (index >= records->to &&
	     index < records->to + records->nr))
		return true;

>
> -	if (!ret && records->info)
> -		ret = (index >= records->info && index < records->info + records->nr);
> +	if (!ret && records->info) {
> +		ret = (index >= records->info &&
> +		       index < records->info + records->nr);
> +	}

	if (records->info &&
	    (index >= records->info && index < records->info + records->nr))
		return true;

	return false;

Sorry, I didn't notice it in the previous review.

Thanks,
Kan

>
>  	return ret;
>  }
> @@ -742,6 +754,9 @@ static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
>  		vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set);
>  	}
>
> +	if (guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
> +		return;
> +
>  	vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set);
>  	vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set);
>  }
> @@ -782,10 +797,13 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
>  {
>  	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
>  	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
> +	bool lbr_enable = guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) ?
> +		(vmcs_read64(GUEST_IA32_LBR_CTL) & ARCH_LBR_CTL_LBREN) :
> +		(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR);
>
>  	if (!lbr_desc->event) {
>  		vmx_disable_lbr_msrs_passthrough(vcpu);
> -		if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
> +		if (lbr_enable)
>  			goto warn;
>  		if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
>  			goto warn;
> @@ -802,13 +820,19 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
>  		return;
>
>  warn:
> +	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR))
> +		wrmsrl(MSR_ARCH_LBR_DEPTH, lbr_desc->records.nr);
>  	pr_warn_ratelimited("kvm: vcpu-%d: fail to passthrough LBR.\n",
>  		vcpu->vcpu_id);
>  }
>
>  static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
>  {
> -	if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
> +	bool lbr_enable = guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) ?
> +		(vmcs_read64(GUEST_IA32_LBR_CTL) & ARCH_LBR_CTL_LBREN) :
> +		(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR);
> +
> +	if (!lbr_enable)
>  		intel_pmu_release_guest_lbr_event(vcpu);
>  }
>
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index b6bc7d97e4b4..98e56a909c01 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -573,6 +573,9 @@ static bool is_valid_passthrough_msr(u32 msr)
>  	case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 31:
>  	case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
>  	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
> +	case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
> +	case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
> +	case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
>  		/* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
>  		return true;
>  	}
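Assembling Kan's fragments above into one place, the early-return structure he is suggesting for intel_pmu_is_valid_lbr_msr() would look roughly like the sketch below. This is only an illustration drawn from the review comments; the v2 patch posted in the follow-up reply is the authoritative version.

```c
/* Sketch assembled from the review fragments above, not the posted patch. */
static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
{
	struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);

	if (!intel_pmu_lbr_is_enabled(vcpu))
		return false;

	/* The Arch LBR control MSRs are only valid with Arch LBR support. */
	if (index == MSR_ARCH_LBR_DEPTH || index == MSR_ARCH_LBR_CTL)
		return kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
		       guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR);

	/* The legacy LBR control MSRs are only valid without Arch LBR. */
	if (!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
	    (index == MSR_LBR_SELECT || index == MSR_LBR_TOS))
		return true;

	/* The from/to/info record MSR ranges are common to both flavors. */
	if ((index >= records->from && index < records->from + records->nr) ||
	    (index >= records->to && index < records->to + records->nr))
		return true;

	if (records->info && index >= records->info &&
	    index < records->info + records->nr)
		return true;

	return false;
}
```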
On 5/6/2022 11:03 PM, Liang, Kan wrote:
> On 5/5/2022 11:32 PM, Yang Weijiang wrote:
>
> bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
> @@ -199,12 +203,20 @@ static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
>  		return ret;
>  	}
>
> -	ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
> -		(index >= records->from && index < records->from + records->nr) ||
> -		(index >= records->to && index < records->to + records->nr);
> +	if (!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
> +		ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS);
> +
> Shouldn't we return immediately if (ret == true)?
> Continuing to check if (!ret) looks uncommon.
>
> Actually we probably don't need the ret in this function.
>
> 	if (!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
> 	    ((index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS)))
> 		return true;
>
>> +	if (!ret) {
>> +		ret = (index >= records->from &&
>> +		       index < records->from + records->nr) ||
>> +		      (index >= records->to &&
>> +		       index < records->to + records->nr);
>> +	}
> 	if ((index >= records->from &&
> 	     index < records->from + records->nr) ||
> 	    (index >= records->to &&
> 	     index < records->to + records->nr))
> 		return true;
>
>>
>> -	if (!ret && records->info)
>> -		ret = (index >= records->info && index < records->info + records->nr);
>> +	if (!ret && records->info) {
>> +		ret = (index >= records->info &&
>> +		       index < records->info + records->nr);
>> +	}
> 	if (records->info &&
> 	    (index >= records->info && index < records->info + records->nr))
> 		return true;
>
> 	return false;
> Sorry, I didn't notice it in the previous review.

Thanks Kan, so I'll modify this function as below (keeping the other parts unchanged):

From 642d5e05e8a8578e75531632d714cec5976ab9ac Mon Sep 17 00:00:00 2001
From: Yang Weijiang <weijiang.yang@intel.com>
Date: Thu, 8 Jul 2021 23:51:02 +0800
Subject: [PATCH] KVM: x86/pmu: Refactor code to support guest Arch LBR

Take account of Arch LBR when doing sanity checks before programming
the vPMU for the guest. Pass through Arch LBR recording MSRs to the
guest to gain better performance. Note, Arch LBR and Legacy LBR support
are mutually exclusive, i.e., they're not both available on one
platform.

Co-developed-by: Like Xu <like.xu@linux.intel.com>
Signed-off-by: Like Xu <like.xu@linux.intel.com>
Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
---
 arch/x86/kvm/vmx/pmu_intel.c | 47 +++++++++++++++++++++++++-----------
 arch/x86/kvm/vmx/vmx.c       |  3 +++
 2 files changed, 36 insertions(+), 14 deletions(-)

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index aa36d2072b91..306ce7ac9934 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -170,12 +170,16 @@ static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
 
 bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu)
 {
+	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR))
+		return guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR);
+
 	/*
 	 * As a first step, a guest could only enable LBR feature if its
 	 * cpu model is the same as the host because the LBR registers
 	 * would be pass-through to the guest and they're model specific.
 	 */
-	return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
+	return !boot_cpu_has(X86_FEATURE_ARCH_LBR) &&
+		boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
 }
 
 bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
@@ -188,25 +192,28 @@ bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
 static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
 {
 	struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
-	bool ret = false;
 
 	if (!intel_pmu_lbr_is_enabled(vcpu))
-		return ret;
+		return false;
 
 	if (index == MSR_ARCH_LBR_DEPTH || index == MSR_ARCH_LBR_CTL) {
-		if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR))
-			ret = guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR);
-		return ret;
+		return kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
+			guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR);
 	}
 
-	ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
-		(index >= records->from && index < records->from + records->nr) ||
-		(index >= records->to && index < records->to + records->nr);
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
+	    (index == MSR_LBR_SELECT || index == MSR_LBR_TOS))
+		return true;
 
-	if (!ret && records->info)
-		ret = (index >= records->info && index < records->info + records->nr);
+	if ((index >= records->from && index < records->from + records->nr) ||
+	    (index >= records->to && index < records->to + records->nr))
+		return true;
 
-	return ret;
+	if (records->info && index >= records->info &&
+	    index < records->info + records->nr)
+		return true;
+
+	return false;
 }
 
 static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
@@ -742,6 +749,9 @@ static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
 		vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set);
 	}
 
+	if (guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
+		return;
+
 	vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set);
 	vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set);
 }
@@ -782,10 +792,13 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+	bool lbr_enable = guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) ?
+		(vmcs_read64(GUEST_IA32_LBR_CTL) & ARCH_LBR_CTL_LBREN) :
+		(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR);
 
 	if (!lbr_desc->event) {
 		vmx_disable_lbr_msrs_passthrough(vcpu);
-		if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
+		if (lbr_enable)
 			goto warn;
 		if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
 			goto warn;
@@ -802,13 +815,19 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
 		return;
 
 warn:
+	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR))
+		wrmsrl(MSR_ARCH_LBR_DEPTH, lbr_desc->records.nr);
 	pr_warn_ratelimited("kvm: vcpu-%d: fail to passthrough LBR.\n",
 		vcpu->vcpu_id);
 }
 
 static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
 {
-	if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
+	bool lbr_enable = guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) ?
+		(vmcs_read64(GUEST_IA32_LBR_CTL) & ARCH_LBR_CTL_LBREN) :
+		(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR);
+
+	if (!lbr_enable)
 		intel_pmu_release_guest_lbr_event(vcpu);
 }
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index b6bc7d97e4b4..98e56a909c01 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -573,6 +573,9 @@ static bool is_valid_passthrough_msr(u32 msr)
 	case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 31:
 	case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
 	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
+	case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
+	case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
+	case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
 		/* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
 		return true;
 	}
--
2.27.0

> Thanks,
> Kan
>
On 5/6/2022 10:32 PM, Yang, Weijiang wrote:
>
> On 5/6/2022 11:03 PM, Liang, Kan wrote:
>> On 5/5/2022 11:32 PM, Yang Weijiang wrote:
>>
>> bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
>> @@ -199,12 +203,20 @@ static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
>>  		return ret;
>>  	}
>> -	ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
>> -		(index >= records->from && index < records->from + records->nr) ||
>> -		(index >= records->to && index < records->to + records->nr);
>> +	if (!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
>> +		ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS);
>> +
>> Shouldn't we return immediately if (ret == true)?
>> Continuing to check if (!ret) looks uncommon.
>>
>> Actually we probably don't need the ret in this function.
>>
>> 	if (!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
>> 	    ((index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS)))
>> 		return true;
>>
>>> +	if (!ret) {
>>> +		ret = (index >= records->from &&
>>> +		       index < records->from + records->nr) ||
>>> +		      (index >= records->to &&
>>> +		       index < records->to + records->nr);
>>> +	}
>> 	if ((index >= records->from &&
>> 	     index < records->from + records->nr) ||
>> 	    (index >= records->to &&
>> 	     index < records->to + records->nr))
>> 		return true;
>>
>>> -	if (!ret && records->info)
>>> -		ret = (index >= records->info && index < records->info + records->nr);
>>> +	if (!ret && records->info) {
>>> +		ret = (index >= records->info &&
>>> +		       index < records->info + records->nr);
>>> +	}
>> 	if (records->info &&
>> 	    (index >= records->info && index < records->info + records->nr))
>> 		return true;
>>
>> 	return false;
>> Sorry, I didn't notice it in the previous review.
>
> Thanks Kan, so I'll modify this function as below (keeping the other parts
> unchanged):
>
> From 642d5e05e8a8578e75531632d714cec5976ab9ac Mon Sep 17 00:00:00 2001
> From: Yang Weijiang <weijiang.yang@intel.com>
> Date: Thu, 8 Jul 2021 23:51:02 +0800
> Subject: [PATCH] KVM: x86/pmu: Refactor code to support guest Arch LBR
>
> Take account of Arch LBR when doing sanity checks before programming
> the vPMU for the guest. Pass through Arch LBR recording MSRs to the
> guest to gain better performance. Note, Arch LBR and Legacy LBR support
> are mutually exclusive, i.e., they're not both available on one
> platform.
>
> Co-developed-by: Like Xu <like.xu@linux.intel.com>
> Signed-off-by: Like Xu <like.xu@linux.intel.com>
> Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
> ---

This one looks good to me.

Reviewed-by: Kan Liang <kan.liang@linux.intel.com>

Thanks,
Kan

>  arch/x86/kvm/vmx/pmu_intel.c | 47 +++++++++++++++++++++++++-----------
>  arch/x86/kvm/vmx/vmx.c       |  3 +++
>  2 files changed, 36 insertions(+), 14 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
> index aa36d2072b91..306ce7ac9934 100644
> --- a/arch/x86/kvm/vmx/pmu_intel.c
> +++ b/arch/x86/kvm/vmx/pmu_intel.c
> @@ -170,12 +170,16 @@ static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
>
>  bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu)
>  {
> +	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR))
> +		return guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR);
> +
>  	/*
>  	 * As a first step, a guest could only enable LBR feature if its
>  	 * cpu model is the same as the host because the LBR registers
>  	 * would be pass-through to the guest and they're model specific.
>  	 */
> -	return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
> +	return !boot_cpu_has(X86_FEATURE_ARCH_LBR) &&
> +		boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
>  }
>
>  bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
> @@ -188,25 +192,28 @@ bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
>  static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
>  {
>  	struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
> -	bool ret = false;
>
>  	if (!intel_pmu_lbr_is_enabled(vcpu))
> -		return ret;
> +		return false;
>
>  	if (index == MSR_ARCH_LBR_DEPTH || index == MSR_ARCH_LBR_CTL) {
> -		if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR))
> -			ret = guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR);
> -		return ret;
> +		return kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
> +			guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR);
>  	}
>
> -	ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
> -		(index >= records->from && index < records->from + records->nr) ||
> -		(index >= records->to && index < records->to + records->nr);
> +	if (!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
> +	    (index == MSR_LBR_SELECT || index == MSR_LBR_TOS))
> +		return true;
>
> -	if (!ret && records->info)
> -		ret = (index >= records->info && index < records->info + records->nr);
> +	if ((index >= records->from && index < records->from + records->nr) ||
> +	    (index >= records->to && index < records->to + records->nr))
> +		return true;
>
> -	return ret;
> +	if (records->info && index >= records->info &&
> +	    index < records->info + records->nr)
> +		return true;
> +
> +	return false;
>  }
>
>  static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
> @@ -742,6 +749,9 @@ static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
>  		vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set);
>  	}
>
> +	if (guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
> +		return;
> +
>  	vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set);
>  	vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set);
>  }
> @@ -782,10 +792,13 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
>  {
>  	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
>  	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
> +	bool lbr_enable = guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) ?
> +		(vmcs_read64(GUEST_IA32_LBR_CTL) & ARCH_LBR_CTL_LBREN) :
> +		(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR);
>
>  	if (!lbr_desc->event) {
>  		vmx_disable_lbr_msrs_passthrough(vcpu);
> -		if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
> +		if (lbr_enable)
>  			goto warn;
>  		if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
>  			goto warn;
> @@ -802,13 +815,19 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
>  		return;
>
>  warn:
> +	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR))
> +		wrmsrl(MSR_ARCH_LBR_DEPTH, lbr_desc->records.nr);
>  	pr_warn_ratelimited("kvm: vcpu-%d: fail to passthrough LBR.\n",
>  		vcpu->vcpu_id);
>  }
>
>  static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
>  {
> -	if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
> +	bool lbr_enable = guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) ?
> +		(vmcs_read64(GUEST_IA32_LBR_CTL) & ARCH_LBR_CTL_LBREN) :
> +		(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR);
> +
> +	if (!lbr_enable)
>  		intel_pmu_release_guest_lbr_event(vcpu);
>  }
>
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index b6bc7d97e4b4..98e56a909c01 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -573,6 +573,9 @@ static bool is_valid_passthrough_msr(u32 msr)
>  	case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 31:
>  	case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
>  	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
> +	case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
> +	case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
> +	case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
>  		/* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
>  		return true;
>  	}
> --
> 2.27.0
>
>> Thanks,
>> Kan
>>
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index aa36d2072b91..bd4ddf63ba8f 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -170,12 +170,16 @@ static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
 
 bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu)
 {
+	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR))
+		return guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR);
+
 	/*
 	 * As a first step, a guest could only enable LBR feature if its
 	 * cpu model is the same as the host because the LBR registers
 	 * would be pass-through to the guest and they're model specific.
 	 */
-	return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
+	return !boot_cpu_has(X86_FEATURE_ARCH_LBR) &&
+		boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
 }
 
 bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
@@ -199,12 +203,20 @@ static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
 		return ret;
 	}
 
-	ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
-		(index >= records->from && index < records->from + records->nr) ||
-		(index >= records->to && index < records->to + records->nr);
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
+		ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS);
+
+	if (!ret) {
+		ret = (index >= records->from &&
+		       index < records->from + records->nr) ||
+		      (index >= records->to &&
+		       index < records->to + records->nr);
+	}
 
-	if (!ret && records->info)
-		ret = (index >= records->info && index < records->info + records->nr);
+	if (!ret && records->info) {
+		ret = (index >= records->info &&
+		       index < records->info + records->nr);
+	}
 
 	return ret;
 }
@@ -742,6 +754,9 @@ static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
 		vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set);
 	}
 
+	if (guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
+		return;
+
 	vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set);
 	vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set);
 }
@@ -782,10 +797,13 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+	bool lbr_enable = guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) ?
+		(vmcs_read64(GUEST_IA32_LBR_CTL) & ARCH_LBR_CTL_LBREN) :
+		(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR);
 
 	if (!lbr_desc->event) {
 		vmx_disable_lbr_msrs_passthrough(vcpu);
-		if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
+		if (lbr_enable)
 			goto warn;
 		if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
 			goto warn;
@@ -802,13 +820,19 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
 		return;
 
 warn:
+	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR))
+		wrmsrl(MSR_ARCH_LBR_DEPTH, lbr_desc->records.nr);
 	pr_warn_ratelimited("kvm: vcpu-%d: fail to passthrough LBR.\n",
 		vcpu->vcpu_id);
 }
 
 static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
 {
-	if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
+	bool lbr_enable = guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) ?
+		(vmcs_read64(GUEST_IA32_LBR_CTL) & ARCH_LBR_CTL_LBREN) :
+		(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR);
+
+	if (!lbr_enable)
 		intel_pmu_release_guest_lbr_event(vcpu);
 }
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index b6bc7d97e4b4..98e56a909c01 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -573,6 +573,9 @@ static bool is_valid_passthrough_msr(u32 msr)
 	case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 31:
 	case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
 	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
+	case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
+	case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
+	case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
 		/* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
 		return true;
 	}
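One small observation on the patch above: vmx_passthrough_lbr_msrs() and intel_pmu_cleanup() open-code the same Arch-LBR-versus-legacy enable check. If that ever becomes worth factoring out, a helper along the following lines would capture the logic; the helper and its name are hypothetical and are not part of the posted series.

```c
/*
 * Hypothetical helper (not in the posted patch): report whether the guest
 * currently has LBRs enabled, using the Arch LBR control MSR when the guest
 * CPUID advertises Arch LBR and the legacy DEBUGCTL bit otherwise.
 */
static inline bool guest_lbr_is_enabled(struct kvm_vcpu *vcpu)
{
	if (guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
		return vmcs_read64(GUEST_IA32_LBR_CTL) & ARCH_LBR_CTL_LBREN;

	return vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR;
}
```

Both call sites in the diff could then test guest_lbr_is_enabled(vcpu) instead of repeating the ternary expression.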