@@ -843,16 +843,6 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
return;

kvm_pmu_call(refresh)(vcpu);
-
- /*
- * At RESET, both Intel and AMD CPUs set all enable bits for general
- * purpose counters in IA32_PERF_GLOBAL_CTRL (so that software that
- * was written for v1 PMUs don't unknowingly leave GP counters disabled
- * in the global controls). Emulate that behavior when refreshing the
- * PMU so that userspace doesn't need to manually set PERF_GLOBAL_CTRL.
- */
- if (kvm_pmu_has_perf_global_ctrl(pmu) && pmu->nr_arch_gp_counters)
- pmu->global_ctrl = GENMASK_ULL(pmu->nr_arch_gp_counters - 1, 0);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
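The ten deleted lines above are the common x86 PMU code that emulated the architectural RESET value of IA32_PERF_GLOBAL_CTRL; the hunks below reintroduce that behavior in the vendor refresh callbacks (svm/pmu.c for AMD, vmx/pmu_intel.c for Intel). The mask idiom changes along the way: the common code used GENMASK_ULL(nr - 1, 0) behind a non-zero-counter guard (GENMASK_ULL is undefined for nr == 0), while the vendor code uses BIT_ULL(nr) - 1, which yields 0 for nr == 0 without a guard. A small standalone user-space sketch, not part of the patch, checking that the two idioms agree for every non-zero counter count:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        for (unsigned int nr = 1; nr < 64; nr++) {
            uint64_t genmask = ~0ULL >> (64 - nr);  /* GENMASK_ULL(nr - 1, 0) */
            uint64_t bitmask = (1ULL << nr) - 1;    /* BIT_ULL(nr) - 1 */

            if (genmask != bitmask)
                printf("mismatch at nr = %u\n", nr);
        }
        printf("GENMASK_ULL(nr - 1, 0) == BIT_ULL(nr) - 1 for nr = 1..63\n");
        return 0;
    }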
@@ -198,12 +198,20 @@ static void __amd_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->nr_arch_gp_counters = min_t(unsigned int, pmu->nr_arch_gp_counters,
kvm_pmu_cap.num_counters_gp);

- if (pmu->version > 1) {
- pmu->global_ctrl_rsvd = ~((1ull << pmu->nr_arch_gp_counters) - 1);
+ if (kvm_pmu_cap.version > 1) {
+ /*
+ * At RESET, AMD CPUs set all enable bits for general purpose counters in
+ * IA32_PERF_GLOBAL_CTRL (so that software written for v1 PMUs doesn't
+ * unknowingly leave GP counters disabled in the global controls).
+ * Emulate that behavior when refreshing the PMU so that userspace doesn't
+ * need to manually set PERF_GLOBAL_CTRL.
+ */
+ pmu->global_ctrl = BIT_ULL(pmu->nr_arch_gp_counters) - 1;
+ pmu->global_ctrl_rsvd = ~pmu->global_ctrl;
pmu->global_status_rsvd = pmu->global_ctrl_rsvd;
}

- pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
+ pmu->counter_bitmask[KVM_PMC_GP] = BIT_ULL(48) - 1;
pmu->reserved_bits = 0xfffffff000280000ull;
pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
/* not applicable to AMD; but clean them to prevent any fall out */
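That ends the AMD hunk. Two details stand out: the guard now consults the host capability (kvm_pmu_cap.version) rather than the guest's pmu->version, and global_ctrl_rsvd is derived as the complement of the freshly initialized global_ctrl instead of being recomputed from the counter count. A standalone sketch of the resulting register values; the six-GP-counter configuration is an assumption for illustration (matching AMD's core PMU), not something taken from the patch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Assumption for illustration: 6 GP counters (AMD core PMU). */
        unsigned int nr_gp = 6;

        uint64_t global_ctrl = (1ULL << nr_gp) - 1;       /* RESET: all GP counters enabled */
        uint64_t global_ctrl_rsvd = ~global_ctrl;         /* bits with no counter behind them */
        uint64_t global_status_rsvd = global_ctrl_rsvd;   /* same reserved set on AMD */

        printf("global_ctrl        = %#018llx\n", (unsigned long long)global_ctrl);
        printf("global_ctrl_rsvd   = %#018llx\n", (unsigned long long)global_ctrl_rsvd);
        printf("global_status_rsvd = %#018llx\n", (unsigned long long)global_status_rsvd);
        return 0;
    }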
@@ -466,7 +466,6 @@ static void __intel_pmu_refresh(struct kvm_vcpu *vcpu)
union cpuid10_eax eax;
union cpuid10_edx edx;
u64 perf_capabilities;
- u64 counter_rsvd;

memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));

@@ -493,11 +492,10 @@ static void __intel_pmu_refresh(struct kvm_vcpu *vcpu)
kvm_pmu_cap.num_counters_gp);
eax.split.bit_width = min_t(int, eax.split.bit_width,
kvm_pmu_cap.bit_width_gp);
- pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
+ pmu->counter_bitmask[KVM_PMC_GP] = BIT_ULL(eax.split.bit_width) - 1;
eax.split.mask_length = min_t(int, eax.split.mask_length,
kvm_pmu_cap.events_mask_len);
- pmu->available_event_types = ~entry->ebx &
- ((1ull << eax.split.mask_length) - 1);
+ pmu->available_event_types = ~entry->ebx & (BIT_ULL(eax.split.mask_length) - 1);

if (pmu->version == 1) {
pmu->nr_arch_fixed_counters = 0;
@@ -506,29 +504,34 @@ static void __intel_pmu_refresh(struct kvm_vcpu *vcpu)
kvm_pmu_cap.num_counters_fixed);
edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed,
kvm_pmu_cap.bit_width_fixed);
- pmu->counter_bitmask[KVM_PMC_FIXED] =
- ((u64)1 << edx.split.bit_width_fixed) - 1;
+ pmu->counter_bitmask[KVM_PMC_FIXED] = BIT_ULL(edx.split.bit_width_fixed) - 1;
}

intel_pmu_enable_fixed_counter_bits(pmu, INTEL_FIXED_0_KERNEL |
INTEL_FIXED_0_USER |
INTEL_FIXED_0_ENABLE_PMI);

- counter_rsvd = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
- (((1ull << pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX));
- pmu->global_ctrl_rsvd = counter_rsvd;
+ if (kvm_pmu_has_perf_global_ctrl(pmu)) {
+ /*
+ * At RESET, Intel CPUs set all enable bits for general purpose counters
+ * in IA32_PERF_GLOBAL_CTRL. Emulate that behavior when refreshing the
+ * PMU so that userspace doesn't need to manually set PERF_GLOBAL_CTRL.
+ */
+ pmu->global_ctrl = BIT_ULL(pmu->nr_arch_gp_counters) - 1;
+ pmu->global_ctrl_rsvd = ~((BIT_ULL(pmu->nr_arch_gp_counters) - 1) |
+ ((BIT_ULL(pmu->nr_arch_fixed_counters) - 1) <<
+ KVM_FIXED_PMC_BASE_IDX));

- /*
- * GLOBAL_STATUS and GLOBAL_OVF_CONTROL (a.k.a. GLOBAL_STATUS_RESET)
- * share reserved bit definitions. The kernel just happens to use
- * OVF_CTRL for the names.
- */
- pmu->global_status_rsvd = pmu->global_ctrl_rsvd
- & ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
- MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
- if (vmx_pt_mode_is_host_guest())
- pmu->global_status_rsvd &=
- ~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
+ /*
+ * GLOBAL_STATUS and GLOBAL_OVF_CONTROL (a.k.a. GLOBAL_STATUS_RESET)
+ * share reserved bit definitions. The kernel just happens to use
+ * OVF_CTRL for the names.
+ */
+ pmu->global_status_rsvd = pmu->global_ctrl_rsvd &
+ ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
+ MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
+ if (vmx_pt_mode_is_host_guest())
+ pmu->global_status_rsvd &= ~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
+ }

entry = kvm_find_cpuid_entry_index(vcpu, 7, 0);
if (entry &&
@@ -538,10 +541,9 @@ static void __intel_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
}

- bitmap_set(pmu->all_valid_pmc_idx,
- 0, pmu->nr_arch_gp_counters);
- bitmap_set(pmu->all_valid_pmc_idx,
- INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
+ bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
+ bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_MAX_GENERIC,
+ pmu->nr_arch_fixed_counters);

perf_capabilities = vcpu_get_perf_capabilities(vcpu);
if (cpuid_model_is_consistent(vcpu) &&
@@ -555,13 +557,12 @@ static void __intel_pmu_refresh(struct kvm_vcpu *vcpu)

if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
- pmu->pebs_enable_rsvd = counter_rsvd;
+ pmu->pebs_enable_rsvd = pmu->global_ctrl_rsvd;
pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
pmu->pebs_data_cfg_rsvd = ~0xff00000full;
intel_pmu_enable_fixed_counter_bits(pmu, ICL_FIXED_0_ADAPTIVE);
} else {
- pmu->pebs_enable_rsvd =
- ~((1ull << pmu->nr_arch_gp_counters) - 1);
+ pmu->pebs_enable_rsvd = ~(BIT_ULL(pmu->nr_arch_gp_counters) - 1);
}
}
}
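On the Intel side, the reserved-mask computation now lives inside the kvm_pmu_has_perf_global_ctrl() guard, with GP counters in the low bits and fixed counters starting at KVM_FIXED_PMC_BASE_IDX; that is also why the local counter_rsvd variable could be dropped, as the PEBS-baseline path now reuses pmu->global_ctrl_rsvd directly. The reuse appears to rely on PEBS support implying a guest PMU with PERF_GLOBAL_CTRL, since global_ctrl_rsvd is otherwise left at zero. A standalone sketch of the mask layout; the counts (8 GP, 3 fixed) and the base index value of 32 (the upstream KVM_FIXED_PMC_BASE_IDX) are assumptions for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for KVM_FIXED_PMC_BASE_IDX (32 upstream). */
    #define FIXED_BASE_IDX 32

    int main(void)
    {
        /* Assumption for illustration: 8 GP and 3 fixed counters. */
        unsigned int nr_gp = 8, nr_fixed = 3;

        uint64_t gp_mask    = (1ULL << nr_gp) - 1;
        uint64_t fixed_mask = ((1ULL << nr_fixed) - 1) << FIXED_BASE_IDX;

        uint64_t global_ctrl      = gp_mask;                 /* RESET: GP counters only */
        uint64_t global_ctrl_rsvd = ~(gp_mask | fixed_mask); /* no counter behind these bits */

        printf("global_ctrl      = %#018llx\n", (unsigned long long)global_ctrl);      /* 0x00000000000000ff */
        printf("global_ctrl_rsvd = %#018llx\n", (unsigned long long)global_ctrl_rsvd); /* 0xfffffff8ffffff00 */
        return 0;
    }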