diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2359,7 +2359,7 @@ static inline void intel_pmu_ack_status(u64 ack)
static inline bool event_is_checkpointed(struct perf_event *event)
{
- return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
+ return unlikely(event->hw.config & INTEL_HSW_IN_TX_CHECKPOINTED) != 0;
}
static inline void intel_set_masks(struct perf_event *event, int idx)
@@ -2717,8 +2717,8 @@ static void intel_pmu_enable_fixed(struct perf_event *event)
mask = 0xfULL << (idx * 4);
if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
- bits |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
- mask |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
+ bits |= INTEL_ICL_FIXED_0_ADAPTIVE << (idx * 4);
+ mask |= INTEL_ICL_FIXED_0_ADAPTIVE << (idx * 4);
}
rdmsrl(hwc->config_base, ctrl_val);
@@ -4000,14 +4000,14 @@ static int hsw_hw_config(struct perf_event *event)
return ret;
if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
return 0;
- event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
+ event->hw.config |= event->attr.config & (INTEL_HSW_IN_TX|INTEL_HSW_IN_TX_CHECKPOINTED);
/*
* IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
* PEBS or in ANY thread mode. Since the results are non-sensical forbid
* this combination.
*/
- if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
+ if ((event->hw.config & (INTEL_HSW_IN_TX|INTEL_HSW_IN_TX_CHECKPOINTED)) &&
((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
event->attr.precise_ip > 0))
return -EOPNOTSUPP;
@@ -4050,7 +4050,7 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
c = intel_get_event_constraints(cpuc, idx, event);
/* Handle special quirk on in_tx_checkpointed only in counter 2 */
- if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
+ if (event->hw.config & INTEL_HSW_IN_TX_CHECKPOINTED) {
if (c->idxmsk64 & (1U << 2))
return &counter2_constraint;
return &emptyconstraint;
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1225,7 +1225,7 @@ void intel_pmu_pebs_enable(struct perf_event *event)
cpuc->pebs_enabled |= 1ULL << 63;
if (x86_pmu.intel_cap.pebs_baseline) {
- hwc->config |= ICL_EVENTSEL_ADAPTIVE;
+ hwc->config |= INTEL_ICL_EVENTSEL_ADAPTIVE;
if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -410,7 +410,7 @@ struct cpu_hw_events {
* The other filters are supported by fixed counters.
* The any-thread option is supported starting with v3.
*/
-#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
+#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|INTEL_HSW_IN_TX|INTEL_HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -30,10 +30,10 @@
#define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL
-#define HSW_IN_TX (1ULL << 32)
-#define HSW_IN_TX_CHECKPOINTED (1ULL << 33)
-#define ICL_EVENTSEL_ADAPTIVE (1ULL << 34)
-#define ICL_FIXED_0_ADAPTIVE (1ULL << 32)
+#define INTEL_HSW_IN_TX (1ULL << 32)
+#define INTEL_HSW_IN_TX_CHECKPOINTED (1ULL << 33)
+#define INTEL_ICL_EVENTSEL_ADAPTIVE (1ULL << 34)
+#define INTEL_ICL_FIXED_0_ADAPTIVE (1ULL << 32)
#define AMD64_EVENTSEL_INT_CORE_ENABLE (1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY (1ULL << 40)
@@ -79,8 +79,8 @@
ARCH_PERFMON_EVENTSEL_CMASK | \
ARCH_PERFMON_EVENTSEL_ANY | \
ARCH_PERFMON_EVENTSEL_PIN_CONTROL | \
- HSW_IN_TX | \
- HSW_IN_TX_CHECKPOINTED)
+ INTEL_HSW_IN_TX | \
+ INTEL_HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK \
(X86_RAW_EVENT_MASK | \
AMD64_EVENTSEL_EVENT)
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -117,15 +117,15 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
attr.sample_period = get_sample_period(pmc, pmc->counter);
if (in_tx)
- attr.config |= HSW_IN_TX;
+ attr.config |= INTEL_HSW_IN_TX;
if (in_tx_cp) {
/*
- * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
+ * INTEL_HSW_IN_TX_CHECKPOINTED is not supported with nonzero
* period. Just clear the sample period so at least
* allocating the counter doesn't fail.
*/
attr.sample_period = 0;
- attr.config |= HSW_IN_TX_CHECKPOINTED;
+ attr.config |= INTEL_HSW_IN_TX_CHECKPOINTED;
}
event = perf_event_create_kernel_counter(&attr, -1, current,
@@ -213,8 +213,8 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
ARCH_PERFMON_EVENTSEL_INV |
ARCH_PERFMON_EVENTSEL_CMASK |
- HSW_IN_TX |
- HSW_IN_TX_CHECKPOINTED))) {
+ INTEL_HSW_IN_TX |
+ INTEL_HSW_IN_TX_CHECKPOINTED))) {
config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
if (config != PERF_COUNT_HW_MAX)
type = PERF_TYPE_HARDWARE;
@@ -233,8 +233,8 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
!(eventsel & ARCH_PERFMON_EVENTSEL_USR),
!(eventsel & ARCH_PERFMON_EVENTSEL_OS),
eventsel & ARCH_PERFMON_EVENTSEL_INT,
- (eventsel & HSW_IN_TX),
- (eventsel & HSW_IN_TX_CHECKPOINTED));
+ (eventsel & INTEL_HSW_IN_TX),
+ (eventsel & INTEL_HSW_IN_TX_CHECKPOINTED));
}
EXPORT_SYMBOL_GPL(reprogram_gp_counter);
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -534,7 +534,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
if (entry &&
(boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
(entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
- pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
+ pmu->reserved_bits ^= INTEL_HSW_IN_TX|INTEL_HSW_IN_TX_CHECKPOINTED;
bitmap_set(pmu->all_valid_pmc_idx,
0, pmu->nr_arch_gp_counters);

Replace: s/HSW_IN_TX/INTEL_HSW_IN_TX/
         s/HSW_IN_TX_CHECKPOINTED/INTEL_HSW_IN_TX_CHECKPOINTED/
         s/ICL_EVENTSEL_ADAPTIVE/INTEL_ICL_EVENTSEL_ADAPTIVE/
         s/ICL_FIXED_0_ADAPTIVE/INTEL_ICL_FIXED_0_ADAPTIVE/

No functionality changes.

Signed-off-by: Ravi Bangoria <ravi.bangoria@amd.com>
---
 arch/x86/events/intel/core.c      | 12 ++++++------
 arch/x86/events/intel/ds.c        |  2 +-
 arch/x86/events/perf_event.h      |  2 +-
 arch/x86/include/asm/perf_event.h | 12 ++++++------
 arch/x86/kvm/pmu.c                | 14 +++++++-------
 arch/x86/kvm/vmx/pmu_intel.c      |  2 +-
 6 files changed, 22 insertions(+), 22 deletions(-)
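
For context, a minimal userspace sketch (not part of this patch) of how the
IN_TX filter encoded by INTEL_HSW_IN_TX (bit 32 of a raw event config) can be
requested through perf_event_open(). The 0x01c2 event/umask value is only an
illustrative placeholder, and precise_ip is left at 0 because hsw_hw_config()
rejects the IN_TX filters combined with PEBS:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Thin wrapper; glibc does not provide perf_event_open() itself. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	/* Placeholder event/umask; bit 32 is the IN_TX filter (INTEL_HSW_IN_TX). */
	attr.config = 0x01c2ULL | (1ULL << 32);
	attr.disabled = 1;
	attr.exclude_kernel = 1;
	/* precise_ip stays 0: IN_TX filters plus PEBS is rejected by hsw_hw_config(). */

	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... code that may run inside RTM/HLE transactions ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("in-TX count: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}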