Message ID | 20210104131542.495413-4-like.xu@linux.intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | KVM: x86/pmu: Add support to enable Guest PEBS via DS | expand |
On Mon, Jan 04, 2021 at 09:15:28PM +0800, Like Xu wrote: > @@ -327,6 +328,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) > pmu->counter_bitmask[KVM_PMC_FIXED] = 0; > pmu->version = 0; > pmu->reserved_bits = 0xffffffff00200000ull; > + pmu->fixed_ctr_ctrl_mask = ~0ull; All 1s > > entry = kvm_find_cpuid_entry(vcpu, 0xa, 0); > if (!entry) > @@ -358,6 +360,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) > ((u64)1 << edx.split.bit_width_fixed) - 1; > } > > + for (i = 0; i < pmu->nr_arch_fixed_counters; i++) > + pmu->fixed_ctr_ctrl_mask |= (0xbull << (i * 4)); With some extra 1s on top > + pmu->fixed_ctr_ctrl_mask = ~pmu->fixed_ctr_ctrl_mask; Inverted is all 0s, always. > pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) | > (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED); > pmu->global_ctrl_mask = ~pmu->global_ctrl; > -- > 2.29.2 >
On 2021/1/14 2:06, Peter Zijlstra wrote: > On Mon, Jan 04, 2021 at 09:15:28PM +0800, Like Xu wrote: >> @@ -327,6 +328,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) >> pmu->counter_bitmask[KVM_PMC_FIXED] = 0; >> pmu->version = 0; >> pmu->reserved_bits = 0xffffffff00200000ull; >> + pmu->fixed_ctr_ctrl_mask = ~0ull; > All 1s > >> >> entry = kvm_find_cpuid_entry(vcpu, 0xa, 0); >> if (!entry) >> @@ -358,6 +360,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) >> ((u64)1 << edx.split.bit_width_fixed) - 1; >> } >> >> + for (i = 0; i < pmu->nr_arch_fixed_counters; i++) >> + pmu->fixed_ctr_ctrl_mask |= (0xbull << (i * 4)); > With some extra 1s on top You're right, I think it should be: pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4)); without inversion, and I will fix it in the next version. > >> + pmu->fixed_ctr_ctrl_mask = ~pmu->fixed_ctr_ctrl_mask; > Inverted is all 0s, always. > >> pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) | >> (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED); >> pmu->global_ctrl_mask = ~pmu->global_ctrl; >> -- >> 2.29.2 >>
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 39707e72b062..94c8bfee4a82 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -433,6 +433,7 @@ struct kvm_pmu { unsigned nr_arch_fixed_counters; unsigned available_event_types; u64 fixed_ctr_ctrl; + u64 fixed_ctr_ctrl_mask; u64 global_ctrl; u64 global_status; u64 global_ovf_ctrl; diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 7c18c85328da..50047114c298 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -253,7 +253,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_CORE_PERF_FIXED_CTR_CTRL: if (pmu->fixed_ctr_ctrl == data) return 0; - if (!(data & 0xfffffffffffff444ull)) { + if (!(data & pmu->fixed_ctr_ctrl_mask)) { reprogram_fixed_counters(pmu, data); return 0; } @@ -320,6 +320,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) struct kvm_cpuid_entry2 *entry; union cpuid10_eax eax; union cpuid10_edx edx; + int i; pmu->nr_arch_gp_counters = 0; pmu->nr_arch_fixed_counters = 0; @@ -327,6 +328,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) pmu->counter_bitmask[KVM_PMC_FIXED] = 0; pmu->version = 0; pmu->reserved_bits = 0xffffffff00200000ull; + pmu->fixed_ctr_ctrl_mask = ~0ull; entry = kvm_find_cpuid_entry(vcpu, 0xa, 0); if (!entry) @@ -358,6 +360,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) ((u64)1 << edx.split.bit_width_fixed) - 1; } + for (i = 0; i < pmu->nr_arch_fixed_counters; i++) + pmu->fixed_ctr_ctrl_mask |= (0xbull << (i * 4)); + pmu->fixed_ctr_ctrl_mask = ~pmu->fixed_ctr_ctrl_mask; pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) | (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED); pmu->global_ctrl_mask = ~pmu->global_ctrl;