// SPDX-License-Identifier: GPL-2.0
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "parse-events.h"
#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <api/fs/fs.h>
#include <subcmd/parse-options.h>
#include <perf/cpumap.h>
#include "cloexec.h"
#include "util/perf_api_probe.h"
#include "record.h"
#include "../perf-sys.h"

/*
 * evsel__config_leader_sampling() uses special rules for leader sampling.
 * However, if the leader is an AUX area event, then assume the event to
 * sample is the first non-leader member of the group.
 */
static struct evsel *evsel__read_sampler(struct evsel *evsel, struct evlist *evlist)
{
	struct evsel *leader = evsel->leader;

	if (evsel__is_aux_event(leader)) {
		evlist__for_each_entry(evlist, evsel) {
			if (evsel->leader == leader && evsel != evsel->leader)
				return evsel;
		}
	}

	return leader;
}

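/*
 * When leader sampling is used (leader->sample_read), only one event in
 * the group actually samples: disable sampling for the other members and
 * align their sample_type with the sampling event's.
 */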
static void evsel__config_leader_sampling(struct evsel *evsel, struct evlist *evlist)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	struct evsel *leader = evsel->leader;
	struct evsel *read_sampler;

	if (!leader->sample_read)
		return;

	read_sampler = evsel__read_sampler(evsel, evlist);

	if (evsel == read_sampler)
		return;

	/*
	 * Disable sampling for all group members other than the leader in
	 * case the leader 'leads' the sampling, except when the leader is an
	 * AUX area event, in which case the 2nd event in the group is the one
	 * that 'leads' the sampling.
	 */
	attr->freq           = 0;
	attr->sample_freq    = 0;
	attr->sample_period  = 0;
	attr->write_backward = 0;

	/*
	 * We don't get samples for slave events; they are synthesized when
	 * delivering the group leader sample.  Set the slave event to follow
	 * the master sample_type to ease up reporting.
	 * An AUX area event also has sample_type requirements, so also
	 * include the sample type bits from the leader's sample_type to
	 * cover that case.
	 */
	attr->sample_type = read_sampler->core.attr.sample_type |
			    leader->core.attr.sample_type;
}

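/*
 * Apply the record options to each event in the evlist and resolve the
 * settings that depend on the evlist as a whole: group leaders, leader
 * sampling and the sample ID layout.
 */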
void perf_evlist__config(struct evlist *evlist, struct record_opts *opts,
			 struct callchain_param *callchain)
{
	struct evsel *evsel;
	bool use_sample_identifier = false;
	bool use_comm_exec;
	bool sample_id = opts->sample_id;

	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		perf_evlist__set_leader(evlist);

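	/*
	 * A leading -1 in the CPU map means the events are per-thread, not
	 * bound to CPUs; the kernel does not allow mmap() of inherited
	 * per-task events, so inheritance has to be disabled here.
	 */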
	if (evlist->core.cpus->map[0] < 0)
		opts->no_inherit = true;

	use_comm_exec = perf_can_comm_exec();

	evlist__for_each_entry(evlist, evsel) {
		evsel__config(evsel, opts, callchain);
		if (evsel->tracking && use_comm_exec)
			evsel->core.attr.comm_exec = 1;
	}

	/* Configure leader sampling here now that the sample type is known */
	evlist__for_each_entry(evlist, evsel)
		evsel__config_leader_sampling(evsel, evlist);

	if (opts->full_auxtrace) {
		/*
		 * Need to be able to synthesize and parse selected events with
		 * arbitrary sample types, which requires always being able to
		 * match the id.
		 */
		use_sample_identifier = perf_can_sample_identifier();
		sample_id = true;
	} else if (evlist->core.nr_entries > 1) {
		struct evsel *first = evlist__first(evlist);

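		/*
		 * Events with differing sample_type layouts need a sample
		 * identifier to tell their samples apart.
		 */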
		evlist__for_each_entry(evlist, evsel) {
			if (evsel->core.attr.sample_type == first->core.attr.sample_type)
				continue;
			use_sample_identifier = perf_can_sample_identifier();
			break;
		}
		sample_id = true;
	}

	if (sample_id) {
		evlist__for_each_entry(evlist, evsel)
			evsel__set_sample_id(evsel, use_sample_identifier);
	}

	perf_evlist__set_id_pos(evlist);
}

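/* Read the current kernel/perf_event_max_sample_rate sysctl value. */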
static int get_max_rate(unsigned int *rate)
{
	return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
}

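/*
 * Resolve the sampling period or frequency from the record options and
 * clamp the frequency to the kernel's maximum sample rate.
 */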
static int record_opts__config_freq(struct record_opts *opts)
{
	bool user_freq = opts->user_freq != UINT_MAX;
	unsigned int max_rate;

	if (opts->user_interval != ULLONG_MAX)
		opts->default_interval = opts->user_interval;
	if (user_freq)
		opts->freq = opts->user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (opts->default_interval) {
		opts->freq = 0;
	} else if (opts->freq) {
		opts->default_interval = opts->freq;
	} else {
		pr_err("frequency and count are zero, aborting\n");
		return -1;
	}

	if (get_max_rate(&max_rate))
		return 0;

	/*
	 * User specified frequency is over current maximum.
	 */
	if (user_freq && (max_rate < opts->freq)) {
		if (opts->strict_freq) {
			pr_err("error: Maximum frequency rate (%'u Hz) exceeded.\n"
			       "       Please use -F freq option with a lower value or consider\n"
			       "       tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
			       max_rate);
			return -1;
		} else {
			pr_warning("warning: Maximum frequency rate (%'u Hz) exceeded, throttling from %'u Hz to %'u Hz.\n"
				   "         The limit can be raised via /proc/sys/kernel/perf_event_max_sample_rate.\n"
				   "         The kernel will lower it when perf's interrupts take too long.\n"
				   "         Use --strict-freq to disable this throttling, refusing to record.\n",
				   max_rate, opts->freq, max_rate);

			opts->freq = max_rate;
		}
	}

	/*
	 * Default frequency is over current maximum.
	 */
	if (max_rate < opts->freq) {
		pr_warning("Lowering default frequency rate to %u.\n"
			   "Please consider tweaking "
			   "/proc/sys/kernel/perf_event_max_sample_rate.\n",
			   max_rate);
		opts->freq = max_rate;
	}

	return 0;
}

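/*
 * Resolve record options that depend on the running system; currently
 * that is only the sampling frequency/period.
 */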
int record_opts__config(struct record_opts *opts)
{
	return record_opts__config_freq(opts);
}

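/*
 * Check that the event described by 'str' not only parses but can also be
 * opened on this system: parse it into a throwaway evlist, then do a trial
 * sys_perf_event_open() and close the fd again right away.
 */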
bool perf_evlist__can_select_event(struct evlist *evlist, const char *str)
{
	struct evlist *temp_evlist;
	struct evsel *evsel;
	int err, fd, cpu;
	bool ret = false;
	pid_t pid = -1;

	temp_evlist = evlist__new();
	if (!temp_evlist)
		return false;

	err = parse_events(temp_evlist, str, NULL);
	if (err)
		goto out_delete;

	evsel = evlist__last(temp_evlist);

	if (!evlist || perf_cpu_map__empty(evlist->core.cpus)) {
		struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);

		cpu = cpus ? cpus->map[0] : 0;
		perf_cpu_map__put(cpus);
	} else {
		cpu = evlist->core.cpus->map[0];
	}

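	/*
	 * If a system-wide open (pid == -1) fails with EACCES, e.g. because
	 * of perf_event_paranoid restrictions, retry once for the current
	 * process only (pid == 0).
	 */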
	while (1) {
		fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1,
					 perf_event_open_cloexec_flag());
		if (fd < 0) {
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);
	ret = true;

out_delete:
	evlist__delete(temp_evlist);
	return ret;
}

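/*
 * Option callback for -F/--freq: accepts a number or "max", the latter
 * resolving to the current kernel/perf_event_max_sample_rate value.  In
 * builtin-record.c it is wired up through an OPT_CALLBACK() entry along
 * these lines (illustrative; see builtin-record.c for the exact text):
 *
 *	OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
 *		     "profile at this frequency", record__parse_freq),
 */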
int record__parse_freq(const struct option *opt, const char *str, int unset __maybe_unused)
{
	unsigned int freq;
	struct record_opts *opts = opt->value;

	if (!str)
		return -EINVAL;

	if (strcasecmp(str, "max") == 0) {
		if (get_max_rate(&freq)) {
			pr_err("couldn't read /proc/sys/kernel/perf_event_max_sample_rate\n");
			return -1;
		}
		pr_info("info: Using a maximum frequency rate of %'u Hz\n", freq);
	} else {
		freq = atoi(str);
	}

	opts->user_freq = freq;
	return 0;
}
269