@@ -86,6 +86,13 @@
#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10)
#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11)
+#define MSR_LBR_NHM_FROM 0x00000680
+#define MSR_LBR_NHM_TO 0x000006c0
+#define MSR_LBR_CORE_FROM 0x00000040
+#define MSR_LBR_CORE_TO 0x00000060
+#define MSR_LBR_TOS 0x000001c9
+#define MSR_LBR_SELECT 0x000001c8
+
#define MSR_IA32_MC0_CTL 0x00000400
#define MSR_IA32_MC0_STATUS 0x00000401
#define MSR_IA32_MC0_ADDR 0x00000402
new file mode 100644
@@ -0,0 +1 @@
+#include "pmu.h"
new file mode 100644
@@ -0,0 +1,100 @@
+#ifndef _X86_PMU_H_
+#define _X86_PMU_H_
+
+#include "processor.h"
+#include "libcflat.h"
+
+#define FIXED_CNT_INDEX 32
+#define MAX_NUM_LBR_ENTRY 32
+
+/* Performance Counter Vector for the LVT PC Register */
+#define PMI_VECTOR 32
+
+#define DEBUGCTLMSR_LBR (1UL << 0)
+
+#define PMU_CAP_LBR_FMT 0x3f
+#define PMU_CAP_FW_WRITES (1ULL << 13)
+
+#define EVNSEL_EVENT_SHIFT 0
+#define EVNTSEL_UMASK_SHIFT 8
+#define EVNTSEL_USR_SHIFT 16
+#define EVNTSEL_OS_SHIFT 17
+#define EVNTSEL_EDGE_SHIFT 18
+#define EVNTSEL_PC_SHIFT 19
+#define EVNTSEL_INT_SHIFT 20
+#define EVNTSEL_EN_SHIF 22
+#define EVNTSEL_INV_SHIF 23
+#define EVNTSEL_CMASK_SHIFT 24
+
+#define EVNTSEL_EN (1 << EVNTSEL_EN_SHIF)
+#define EVNTSEL_USR (1 << EVNTSEL_USR_SHIFT)
+#define EVNTSEL_OS (1 << EVNTSEL_OS_SHIFT)
+#define EVNTSEL_PC (1 << EVNTSEL_PC_SHIFT)
+#define EVNTSEL_INT (1 << EVNTSEL_INT_SHIFT)
+#define EVNTSEL_INV (1 << EVNTSEL_INV_SHIF)
+
+static inline u8 pmu_version(void)
+{
+ return cpuid(10).a & 0xff;
+}
+
+static inline bool this_cpu_has_pmu(void)
+{
+ return !!pmu_version();
+}
+
+static inline bool this_cpu_has_perf_global_ctrl(void)
+{
+ return pmu_version() > 1;
+}
+
+static inline u8 pmu_nr_gp_counters(void)
+{
+ return (cpuid(10).a >> 8) & 0xff;
+}
+
+static inline u8 pmu_gp_counter_width(void)
+{
+ return (cpuid(10).a >> 16) & 0xff;
+}
+
+static inline u8 pmu_gp_counter_mask_length(void)
+{
+ return (cpuid(10).a >> 24) & 0xff;
+}
+
+static inline u8 pmu_nr_fixed_counters(void)
+{
+ struct cpuid id = cpuid(10);
+
+ if ((id.a & 0xff) > 1)
+ return id.d & 0x1f;
+ else
+ return 0;
+}
+
+static inline u8 pmu_fixed_counter_width(void)
+{
+ struct cpuid id = cpuid(10);
+
+ if ((id.a & 0xff) > 1)
+ return (id.d >> 5) & 0xff;
+ else
+ return 0;
+}
+
+static inline bool pmu_gp_counter_is_available(int i)
+{
+	/* CPUID.0xA.EBX bit is '1' if the counter is NOT available. */
+ return !(cpuid(10).b & BIT(i));
+}
+
+static inline u64 this_cpu_perf_capabilities(void)
+{
+ if (!this_cpu_has(X86_FEATURE_PDCM))
+ return 0;
+
+ return rdmsr(MSR_IA32_PERF_CAPABILITIES);
+}
+
+#endif /* _X86_PMU_H_ */
@@ -806,68 +806,4 @@ static inline void flush_tlb(void)
write_cr4(cr4);
}
-static inline u8 pmu_version(void)
-{
- return cpuid(10).a & 0xff;
-}
-
-static inline bool this_cpu_has_pmu(void)
-{
- return !!pmu_version();
-}
-
-static inline bool this_cpu_has_perf_global_ctrl(void)
-{
- return pmu_version() > 1;
-}
-
-static inline u8 pmu_nr_gp_counters(void)
-{
- return (cpuid(10).a >> 8) & 0xff;
-}
-
-static inline u8 pmu_gp_counter_width(void)
-{
- return (cpuid(10).a >> 16) & 0xff;
-}
-
-static inline u8 pmu_gp_counter_mask_length(void)
-{
- return (cpuid(10).a >> 24) & 0xff;
-}
-
-static inline u8 pmu_nr_fixed_counters(void)
-{
- struct cpuid id = cpuid(10);
-
- if ((id.a & 0xff) > 1)
- return id.d & 0x1f;
- else
- return 0;
-}
-
-static inline u8 pmu_fixed_counter_width(void)
-{
- struct cpuid id = cpuid(10);
-
- if ((id.a & 0xff) > 1)
- return (id.d >> 5) & 0xff;
- else
- return 0;
-}
-
-static inline bool pmu_gp_counter_is_available(int i)
-{
- /* CPUID.0xA.EBX bit is '1 if they counter is NOT available. */
- return !(cpuid(10).b & BIT(i));
-}
-
-static inline u64 this_cpu_perf_capabilities(void)
-{
- if (!this_cpu_has(X86_FEATURE_PDCM))
- return 0;
-
- return rdmsr(MSR_IA32_PERF_CAPABILITIES);
-}
-
#endif
@@ -22,6 +22,7 @@ cflatobjs += lib/x86/acpi.o
cflatobjs += lib/x86/stack.o
cflatobjs += lib/x86/fault_test.o
cflatobjs += lib/x86/delay.o
+cflatobjs += lib/x86/pmu.o
ifeq ($(CONFIG_EFI),y)
cflatobjs += lib/x86/amd_sev.o
cflatobjs += lib/efi.o
@@ -1,6 +1,7 @@
#include "x86/msr.h"
#include "x86/processor.h"
+#include "x86/pmu.h"
#include "x86/apic-defs.h"
#include "x86/apic.h"
#include "x86/desc.h"
@@ -10,29 +11,6 @@
#include "libcflat.h"
#include <stdint.h>
-#define FIXED_CNT_INDEX 32
-
-/* Performance Counter Vector for the LVT PC Register */
-#define PMI_VECTOR 32
-
-#define EVNSEL_EVENT_SHIFT 0
-#define EVNTSEL_UMASK_SHIFT 8
-#define EVNTSEL_USR_SHIFT 16
-#define EVNTSEL_OS_SHIFT 17
-#define EVNTSEL_EDGE_SHIFT 18
-#define EVNTSEL_PC_SHIFT 19
-#define EVNTSEL_INT_SHIFT 20
-#define EVNTSEL_EN_SHIF 22
-#define EVNTSEL_INV_SHIF 23
-#define EVNTSEL_CMASK_SHIFT 24
-
-#define EVNTSEL_EN (1 << EVNTSEL_EN_SHIF)
-#define EVNTSEL_USR (1 << EVNTSEL_USR_SHIFT)
-#define EVNTSEL_OS (1 << EVNTSEL_OS_SHIFT)
-#define EVNTSEL_PC (1 << EVNTSEL_PC_SHIFT)
-#define EVNTSEL_INT (1 << EVNTSEL_INT_SHIFT)
-#define EVNTSEL_INV (1 << EVNTSEL_INV_SHIF)
-
#define N 1000000
// These values match the number of instructions and branches in the
@@ -66,7 +44,6 @@ struct pmu_event {
{"fixed 3", MSR_CORE_PERF_FIXED_CTR0 + 2, 0.1*N, 30*N}
};
-#define PMU_CAP_FW_WRITES (1ULL << 13)
static u64 gp_counter_base = MSR_IA32_PERFCTR0;
char *buf;
@@ -1,18 +1,9 @@
#include "x86/msr.h"
#include "x86/processor.h"
+#include "x86/pmu.h"
#include "x86/desc.h"
#define N 1000000
-#define MAX_NUM_LBR_ENTRY 32
-#define DEBUGCTLMSR_LBR (1UL << 0)
-#define PMU_CAP_LBR_FMT 0x3f
-
-#define MSR_LBR_NHM_FROM 0x00000680
-#define MSR_LBR_NHM_TO 0x000006c0
-#define MSR_LBR_CORE_FROM 0x00000040
-#define MSR_LBR_CORE_TO 0x00000060
-#define MSR_LBR_TOS 0x000001c9
-#define MSR_LBR_SELECT 0x000001c8
volatile int count;
u32 lbr_from, lbr_to;
@@ -9,6 +9,7 @@
#include "vmx.h"
#include "msr.h"
#include "processor.h"
+#include "pmu.h"
#include "vm.h"
#include "pci.h"
#include "fwcfg.h"