@@ -281,7 +281,7 @@ static int test_events(const struct evlist_test *events, int cnt)
int test__hybrid(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
- if (!perf_pmus__has_hybrid())
+ if (perf_pmus__num_core_pmus() == 1)
return TEST_SKIP;
return test_events(test__hybrid_events, ARRAY_SIZE(test__hybrid_events));
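Across every hunk below the conversion is mechanical: perf_pmus__has_hybrid() was true exactly when more than one core PMU is registered, so !perf_pmus__has_hybrid() becomes perf_pmus__num_core_pmus() == 1 and the positive check becomes > 1. A hypothetical wrapper, named here purely to spell out the equivalence (it is not part of the change):

    static inline bool has_multiple_core_pmus(void)
    {
        /* What perf_pmus__has_hybrid() used to report. */
        return perf_pmus__num_core_pmus() > 1;
    }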
@@ -18,7 +18,7 @@ static int ___evlist__add_default_attrs(struct evlist *evlist,
for (i = 0; i < nr_attrs; i++)
event_attr_init(attrs + i);
- if (!perf_pmus__has_hybrid())
+ if (perf_pmus__num_core_pmus() == 1)
return evlist__add_attrs(evlist, attrs, nr_attrs);
for (i = 0; i < nr_attrs; i++) {
@@ -292,7 +292,7 @@ uint64_t arch__intr_reg_mask(void)
*/
attr.sample_period = 1;
- if (perf_pmus__has_hybrid()) {
+ if (perf_pmus__num_core_pmus() > 1) {
struct perf_pmu *pmu = NULL;
__u64 type = PERF_TYPE_RAW;
@@ -1294,7 +1294,7 @@ static int record__open(struct record *rec)
* of waiting or event synthesis.
*/
if (opts->target.initial_delay || target__has_cpu(&opts->target) ||
- perf_pmus__has_hybrid()) {
+ perf_pmus__num_core_pmus() > 1) {
pos = evlist__get_tracking_event(evlist);
if (!evsel__is_dummy_event(pos)) {
/* Set up dummy event. */
@@ -2193,7 +2193,7 @@ static void record__uniquify_name(struct record *rec)
char *new_name;
int ret;
- if (!perf_pmus__has_hybrid())
+ if (perf_pmus__num_core_pmus() == 1)
return;
evlist__for_each_entry(evlist, pos) {
@@ -185,8 +185,15 @@ static int test__attr(struct test_suite *test __maybe_unused, int subtest __mayb
char path_dir[PATH_MAX];
char *exec_path;
- if (perf_pmus__has_hybrid())
+ if (perf_pmus__num_core_pmus() > 1) {
+ /*
+ * TODO: Attribute tests hard code the PMU type. If there are >1
+ * core PMU then each PMU will have a different type which
+ * requires additional support.
+ */
+ pr_debug("Skip test on hybrid systems");
return TEST_SKIP;
+ }
/* First try development tree tests. */
if (!lstat("./tests", &st))
@@ -302,11 +302,8 @@ static int test__parse_metric(struct test_suite *test __maybe_unused, int subtes
TEST_ASSERT_VAL("DCache_L2 failed", test_dcache_l2() == 0);
TEST_ASSERT_VAL("recursion fail failed", test_recursion_fail() == 0);
TEST_ASSERT_VAL("Memory bandwidth", test_memory_bandwidth() == 0);
-
- if (!perf_pmus__has_hybrid()) {
- TEST_ASSERT_VAL("cache_miss_cycles failed", test_cache_miss_cycles() == 0);
- TEST_ASSERT_VAL("test metric group", test_metric_group() == 0);
- }
+ TEST_ASSERT_VAL("cache_miss_cycles failed", test_cache_miss_cycles() == 0);
+ TEST_ASSERT_VAL("test metric group", test_metric_group() == 0);
return 0;
}
@@ -375,17 +375,7 @@ static int test__switch_tracking(struct test_suite *test __maybe_unused, int sub
cpu_clocks_evsel = evlist__last(evlist);
/* Second event */
- if (perf_pmus__has_hybrid()) {
- cycles = "cpu_core/cycles/u";
- err = parse_event(evlist, cycles);
- if (err) {
- cycles = "cpu_atom/cycles/u";
- pr_debug("Trying %s\n", cycles);
- err = parse_event(evlist, cycles);
- }
- } else {
- err = parse_event(evlist, cycles);
- }
+ err = parse_event(evlist, cycles);
if (err) {
pr_debug("Failed to parse event %s\n", cycles);
goto out_err;
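The per-PMU fallback can go because a plain legacy event such as "cycles:u" is now expanded at parse time into one evsel per core PMU (the wildcard-PMU parsing behaviour this series relies on). A sketch using the test's parse_event() helper with an illustrative literal:

    /* One parse call; on hybrid x86 the evlist ends up with both a
     * cpu_core and a cpu_atom cycles evsel. */
    err = parse_event(evlist, "cycles:u");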
@@ -41,18 +41,8 @@ static int session_write_header(char *path)
session = perf_session__new(&data, NULL);
TEST_ASSERT_VAL("can't get session", !IS_ERR(session));
- if (!perf_pmus__has_hybrid()) {
- session->evlist = evlist__new_default();
- TEST_ASSERT_VAL("can't get evlist", session->evlist);
- } else {
- struct parse_events_error err;
-
- session->evlist = evlist__new();
- TEST_ASSERT_VAL("can't get evlist", session->evlist);
- parse_events_error__init(&err);
- parse_events(session->evlist, "cpu_core/cycles/", &err);
- parse_events_error__exit(&err);
- }
+ session->evlist = evlist__new_default();
+ TEST_ASSERT_VAL("can't get evlist", session->evlist);
perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
perf_header__set_feat(&session->header, HEADER_NRCPUS);
@@ -472,15 +472,9 @@ struct hybrid_topology *hybrid_topology__new(void)
{
struct perf_pmu *pmu = NULL;
struct hybrid_topology *tp = NULL;
- u32 nr = 0, i = 0;
+ int nr = perf_pmus__num_core_pmus(), i = 0;
- if (!perf_pmus__has_hybrid())
- return NULL;
-
- while ((pmu = perf_pmus__scan_core(pmu)) != NULL)
- nr++;
-
- if (nr == 0)
+ if (nr <= 1)
return NULL;
tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0]) * nr);
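The nr <= 1 test folds the old non-hybrid early return and the nr == 0 check into one condition, with nr computed up front rather than by an open-coded scan. Callers rely only on the NULL convention; a sketch of the assumed call-site pattern (the error value and surrounding code are assumptions, not shown in this patch):

    struct hybrid_topology *tp = hybrid_topology__new();

    if (tp == NULL)
        return -ENOSYS;	/* non-hybrid: the feature is simply skipped */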
@@ -3140,7 +3140,7 @@ void evsel__zero_per_pkg(struct evsel *evsel)
*/
bool evsel__is_hybrid(const struct evsel *evsel)
{
- if (!perf_pmus__has_hybrid())
+ if (perf_pmus__num_core_pmus() == 1)
return false;
return evsel->core.is_pmu_core;
@@ -1589,7 +1589,7 @@ static int write_pmu_caps(struct feat_fd *ff,
* Write hybrid pmu caps first to maintain compatibility with
* older perf tool.
*/
- if (perf_pmus__has_hybrid()) {
+ if (perf_pmus__num_core_pmus() > 1) {
pmu = NULL;
while ((pmu = perf_pmus__scan_core(pmu))) {
ret = __write_pmu_caps(ff, pmu, true);
@@ -121,6 +121,7 @@ int perf_mem_events__init(void)
for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
struct perf_mem_event *e = perf_mem_events__ptr(j);
char sysfs_name[100];
+ struct perf_pmu *pmu = NULL;
/*
* If the event entry isn't valid, skip initialization
@@ -129,18 +130,9 @@ int perf_mem_events__init(void)
if (!e->tag)
continue;
- if (!perf_pmus__has_hybrid()) {
- scnprintf(sysfs_name, sizeof(sysfs_name),
- e->sysfs_name, "cpu");
- e->supported = perf_mem_event__supported(mnt, sysfs_name);
- } else {
- struct perf_pmu *pmu = NULL;
-
- while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
- scnprintf(sysfs_name, sizeof(sysfs_name),
- e->sysfs_name, pmu->name);
- e->supported |= perf_mem_event__supported(mnt, sysfs_name);
- }
+ while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
+ scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name, pmu->name);
+ e->supported |= perf_mem_event__supported(mnt, sysfs_name);
}
if (e->supported)
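The unified loop subsumes the old special case: on a non-hybrid system perf_pmus__scan_core() yields the single "cpu" PMU, so the |= accumulation reduces to the previous single-PMU check. For reference, a minimal standalone use of the iterator:

    struct perf_pmu *pmu = NULL;

    while ((pmu = perf_pmus__scan_core(pmu)) != NULL)
        pr_debug("core PMU: %s\n", pmu->name);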
@@ -196,7 +188,7 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
if (!e->record)
continue;
- if (!perf_pmus__has_hybrid()) {
+ if (perf_pmus__num_core_pmus() == 1) {
if (!e->supported) {
pr_err("failed: event '%s' not supported\n",
perf_mem_events__name(j, NULL));
@@ -274,7 +274,7 @@ static int setup_metric_events(const char *pmu, struct hashmap *ids,
const char *metric_id;
struct evsel *ev;
size_t ids_size, matched_events, i;
- bool all_pmus = !strcmp(pmu, "all") || !perf_pmus__has_hybrid() || !is_pmu_hybrid(pmu);
+ bool all_pmus = !strcmp(pmu, "all") || perf_pmus__num_core_pmus() == 1 || !is_pmu_core(pmu);
*out_metric_events = NULL;
ids_size = hashmap__size(ids);
@@ -464,24 +464,6 @@ bool perf_pmus__have_event(const char *pname, const char *name)
return pmu && perf_pmu__have_event(pmu, name);
}
-bool perf_pmus__has_hybrid(void)
-{
- static bool hybrid_scanned, has_hybrid;
-
- if (!hybrid_scanned) {
- struct perf_pmu *pmu = NULL;
-
- while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
- if (is_pmu_hybrid(pmu->name)) {
- has_hybrid = true;
- break;
- }
- }
- hybrid_scanned = true;
- }
- return has_hybrid;
-}
-
int perf_pmus__num_core_pmus(void)
{
static int count;
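The removed helper scanned the core PMUs for a name that is_pmu_hybrid() accepted and cached the answer. The surviving perf_pmus__num_core_pmus(), whose body is truncated above, can be assumed to cache a count from the same scan; a reconstruction consistent with the context lines (a sketch, not the patch's verbatim code):

    int perf_pmus__num_core_pmus(void)
    {
        static int count;

        if (!count) {
            struct perf_pmu *pmu = NULL;

            while ((pmu = perf_pmus__scan_core(pmu)) != NULL)
                count++;
        }
        return count;
    }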
@@ -18,7 +18,6 @@ const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str);
int perf_pmus__num_mem_pmus(void);
void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *print_state);
bool perf_pmus__have_event(const char *pname, const char *name);
-bool perf_pmus__has_hybrid(void);
int perf_pmus__num_core_pmus(void);
#endif /* __PMUS_H */
@@ -681,7 +681,7 @@ static bool evlist__has_hybrid(struct evlist *evlist)
{
struct evsel *evsel;
- if (!perf_pmus__has_hybrid())
+ if (perf_pmus__num_core_pmus() == 1)
return false;
evlist__for_each_entry(evlist, evsel) {