xref: /linux/tools/perf/arch/x86/util/evsel.c (revision 8e07e0e3964ca4e23ce7b68e2096fe660a888942)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <stdio.h>
3 #include <stdlib.h>
4 #include "util/evsel.h"
5 #include "util/env.h"
6 #include "util/pmu.h"
7 #include "util/pmus.h"
8 #include "linux/string.h"
9 #include "evsel.h"
10 #include "util/debug.h"
11 #include "env.h"
12 
/*
 * L3MissOnly filter bits in the raw attr->config of the AMD IBS PMUs;
 * checked in arch__post_evsel_config() below to emit a one-time warning.
 * Bit positions differ between the ibs_fetch and ibs_op PMUs.
 */
#define IBS_FETCH_L3MISSONLY   (1ULL << 59)
#define IBS_OP_L3MISSONLY      (1ULL << 16)
15 
/*
 * On x86, request the structured weight sample format
 * (PERF_SAMPLE_WEIGHT_STRUCT) rather than the plain weight.
 */
void arch_evsel__set_sample_weight(struct evsel *evsel)
{
	evsel__set_sample_bit(evsel, WEIGHT_STRUCT);
}
20 
21 /* Check whether the evsel's PMU supports the perf metrics */
22 bool evsel__sys_has_perf_metrics(const struct evsel *evsel)
23 {
24 	const char *pmu_name = evsel->pmu_name ? evsel->pmu_name : "cpu";
25 
26 	/*
27 	 * The PERF_TYPE_RAW type is the core PMU type, e.g., "cpu" PMU
28 	 * on a non-hybrid machine, "cpu_core" PMU on a hybrid machine.
29 	 * The slots event is only available for the core PMU, which
30 	 * supports the perf metrics feature.
31 	 * Checking both the PERF_TYPE_RAW type and the slots event
32 	 * should be good enough to detect the perf metrics feature.
33 	 */
34 	if ((evsel->core.attr.type == PERF_TYPE_RAW) &&
35 	    perf_pmus__have_event(pmu_name, "slots"))
36 		return true;
37 
38 	return false;
39 }
40 
41 bool arch_evsel__must_be_in_group(const struct evsel *evsel)
42 {
43 	if (!evsel__sys_has_perf_metrics(evsel) || !evsel->name ||
44 	    strcasestr(evsel->name, "uops_retired.slots"))
45 		return false;
46 
47 	return strcasestr(evsel->name, "topdown") || strcasestr(evsel->name, "slots");
48 }
49 
50 int arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
51 {
52 	u64 event = evsel->core.attr.config & PERF_HW_EVENT_MASK;
53 	u64 pmu = evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT;
54 	const char *event_name;
55 
56 	if (event < PERF_COUNT_HW_MAX && evsel__hw_names[event])
57 		event_name = evsel__hw_names[event];
58 	else
59 		event_name = "unknown-hardware";
60 
61 	/* The PMU type is not required for the non-hybrid platform. */
62 	if (!pmu)
63 		return  scnprintf(bf, size, "%s", event_name);
64 
65 	return scnprintf(bf, size, "%s/%s/",
66 			 evsel->pmu_name ? evsel->pmu_name : "cpu",
67 			 event_name);
68 }
69 
/* One-shot warning text for the AMD IBS L3MissOnly period-skew caveat. */
static void ibs_l3miss_warn(void)
{
	pr_warning(
"WARNING: Hw internally resets sampling period when L3 Miss Filtering is enabled\n"
"and tagged operation does not cause L3 Miss. This causes sampling period skew.\n");
}
76 
77 void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr)
78 {
79 	struct perf_pmu *evsel_pmu, *ibs_fetch_pmu, *ibs_op_pmu;
80 	static int warned_once;
81 
82 	if (warned_once || !x86__is_amd_cpu())
83 		return;
84 
85 	evsel_pmu = evsel__find_pmu(evsel);
86 	if (!evsel_pmu)
87 		return;
88 
89 	ibs_fetch_pmu = perf_pmus__find("ibs_fetch");
90 	ibs_op_pmu = perf_pmus__find("ibs_op");
91 
92 	if (ibs_fetch_pmu && ibs_fetch_pmu->type == evsel_pmu->type) {
93 		if (attr->config & IBS_FETCH_L3MISSONLY) {
94 			ibs_l3miss_warn();
95 			warned_once = 1;
96 		}
97 	} else if (ibs_op_pmu && ibs_op_pmu->type == evsel_pmu->type) {
98 		if (attr->config & IBS_OP_L3MISSONLY) {
99 			ibs_l3miss_warn();
100 			warned_once = 1;
101 		}
102 	}
103 }
104 
105 int arch_evsel__open_strerror(struct evsel *evsel, char *msg, size_t size)
106 {
107 	if (!x86__is_amd_cpu())
108 		return 0;
109 
110 	if (!evsel->core.attr.precise_ip &&
111 	    !(evsel->pmu_name && !strncmp(evsel->pmu_name, "ibs", 3)))
112 		return 0;
113 
114 	/* More verbose IBS errors. */
115 	if (evsel->core.attr.exclude_kernel || evsel->core.attr.exclude_user ||
116 	    evsel->core.attr.exclude_hv || evsel->core.attr.exclude_idle ||
117 	    evsel->core.attr.exclude_host || evsel->core.attr.exclude_guest) {
118 		return scnprintf(msg, size, "AMD IBS doesn't support privilege filtering. Try "
119 				 "again without the privilege modifiers (like 'k') at the end.");
120 	}
121 
122 	return 0;
123 }
124