xref: /linux/tools/perf/util/record.c (revision c98be0c96db00e9b6b02d31e0fa7590c54cdaaac)
#include "evlist.h"
#include "evsel.h"
#include "cpumap.h"
#include "parse-events.h"
#include <api/fs/fs.h>
#include "util.h"

typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);

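/*
 * Probe one attribute tweak: open a throwaway event for @str on @cpu,
 * let @fn modify its attribute, then try to open it again.  Returns 0
 * if the kernel accepts the modified attribute and -EINVAL if it
 * rejects it; -EAGAIN means the probe event itself could not be set
 * up, so the caller may retry with a different event string.
 */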
static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
{
	struct perf_evlist *evlist;
	struct perf_evsel *evsel;
	int err = -EAGAIN, fd;

	evlist = perf_evlist__new();
	if (!evlist)
		return -ENOMEM;

	if (parse_events(evlist, str))
		goto out_delete;

	evsel = perf_evlist__first(evlist);

	fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, 0);
	if (fd < 0)
		goto out_delete;
	close(fd);

	fn(evsel);

	fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, 0);
	if (fd < 0) {
		if (errno == EINVAL)
			err = -EINVAL;
		goto out_delete;
	}
	close(fd);
	err = 0;

out_delete:
	perf_evlist__delete(evlist);
	return err;
}

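/*
 * Run @fn through perf_do_probe_api() on the first CPU in the system,
 * falling back through a short list of candidate events while the
 * probe event itself keeps failing (-EAGAIN).
 */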
static bool perf_probe_api(setup_probe_fn_t fn)
{
	const char *try[] = {"cycles:u", "instructions:u", "cpu-clock", NULL};
	struct cpu_map *cpus;
	int cpu, ret, i = 0;

	cpus = cpu_map__new(NULL);
	if (!cpus)
		return false;
	cpu = cpus->map[0];
	cpu_map__delete(cpus);

	do {
		ret = perf_do_probe_api(fn, cpu, try[i++]);
		if (!ret)
			return true;
	} while (ret == -EAGAIN && try[i]);

	return false;
}

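/* setup_probe_fn_t callback: ask for sample identifiers in the attribute. */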
static void perf_probe_sample_identifier(struct perf_evsel *evsel)
{
	evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
}

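/* True if the running kernel accepts PERF_SAMPLE_IDENTIFIER. */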
bool perf_can_sample_identifier(void)
{
	return perf_probe_api(perf_probe_sample_identifier);
}

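/*
 * Apply the record options to every evsel in the evlist.  When the
 * resulting events have differing sample_type layouts, switch them to
 * PERF_SAMPLE_IDENTIFIER (if the kernel supports it) so that samples
 * can still be matched back to their evsel.
 */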
void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts)
{
	struct perf_evsel *evsel;
	bool use_sample_identifier = false;

	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		perf_evlist__set_leader(evlist);

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	evlist__for_each(evlist, evsel)
		perf_evsel__config(evsel, opts);

	if (evlist->nr_entries > 1) {
		struct perf_evsel *first = perf_evlist__first(evlist);

		evlist__for_each(evlist, evsel) {
			if (evsel->attr.sample_type == first->attr.sample_type)
				continue;
			use_sample_identifier = perf_can_sample_identifier();
			break;
		}
		evlist__for_each(evlist, evsel)
			perf_evsel__set_sample_id(evsel, use_sample_identifier);
	}

	perf_evlist__set_id_pos(evlist);
}

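/* Read the current value of /proc/sys/kernel/perf_event_max_sample_rate. */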
static int get_max_rate(unsigned int *rate)
{
	char path[PATH_MAX];
	const char *procfs = procfs__mountpoint();

	if (!procfs)
		return -1;

	snprintf(path, PATH_MAX,
		 "%s/sys/kernel/perf_event_max_sample_rate", procfs);

	return filename__read_int(path, (int *) rate);
}

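/*
 * Reconcile the user-supplied sampling period/frequency with the
 * defaults and with the kernel's perf_event_max_sample_rate: fail if
 * an explicit -F value exceeds the limit, warn and lower the default
 * frequency otherwise.
 */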
static int record_opts__config_freq(struct record_opts *opts)
{
	bool user_freq = opts->user_freq != UINT_MAX;
	unsigned int max_rate;

	if (opts->user_interval != ULLONG_MAX)
		opts->default_interval = opts->user_interval;
	if (user_freq)
		opts->freq = opts->user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (opts->default_interval)
		opts->freq = 0;
	else if (opts->freq) {
		opts->default_interval = opts->freq;
	} else {
		pr_err("frequency and count are zero, aborting\n");
		return -1;
	}

	if (get_max_rate(&max_rate))
		return 0;

	/*
	 * User specified frequency is over current maximum.
	 */
	if (user_freq && (max_rate < opts->freq)) {
		pr_err("Maximum frequency rate (%u) reached.\n"
		   "Please use -F freq option with lower value or consider\n"
		   "tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
		   max_rate);
		return -1;
	}

	/*
	 * Default frequency is over current maximum.
	 */
	if (max_rate < opts->freq) {
		pr_warning("Lowering default frequency rate to %u.\n"
			   "Please consider tweaking "
			   "/proc/sys/kernel/perf_event_max_sample_rate.\n",
			   max_rate);
		opts->freq = max_rate;
	}

	return 0;
}

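/* Validate and fix up the record options; currently only the frequency/period setup. */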
int record_opts__config(struct record_opts *opts)
{
	return record_opts__config_freq(opts);
}

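/*
 * Check whether the event described by @str can actually be opened on
 * this system: parse it into a temporary evlist and test-open the last
 * parsed evsel on one CPU taken from @evlist (or from the default CPU
 * map when @evlist has no CPUs of its own).
 */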
bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
{
	struct perf_evlist *temp_evlist;
	struct perf_evsel *evsel;
	int err, fd, cpu;
	bool ret = false;

	temp_evlist = perf_evlist__new();
	if (!temp_evlist)
		return false;

	err = parse_events(temp_evlist, str);
	if (err)
		goto out_delete;

	evsel = perf_evlist__last(temp_evlist);

	if (!evlist || cpu_map__empty(evlist->cpus)) {
		struct cpu_map *cpus = cpu_map__new(NULL);

		cpu = cpus ? cpus->map[0] : 0;
		cpu_map__delete(cpus);
	} else {
		cpu = evlist->cpus->map[0];
	}

	fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, 0);
	if (fd >= 0) {
		close(fd);
		ret = true;
	}

out_delete:
	perf_evlist__delete(temp_evlist);
	return ret;
}