// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2021 Facebook */
/* Copyright (c) 2021 Google */
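
/*
 * BPF-based per-cgroup counters (bperf): instead of opening one perf
 * event per event and cgroup, a single set of events is opened per CPU
 * and a BPF program, run on every cgroup switch, accounts the deltas
 * to per-cgroup buckets.
 */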

#include <assert.h>
#include <limits.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <linux/perf_event.h>
#include <api/fs/fs.h>
#include <bpf/bpf.h>
#include <perf/bpf_perf.h>

#include "affinity.h"
#include "bpf_counter.h"
#include "cgroup.h"
#include "counts.h"
#include "debug.h"
#include "evsel.h"
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"

#include "bpf_skel/bperf_cgroup.h"
#include "bpf_skel/bperf_cgroup.skel.h"

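/*
 * Software event used as the trigger: it fires on every cgroup switch
 * (sample_period = 1) so the attached BPF program runs on each switch.
 */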
static struct perf_event_attr cgrp_switch_attr = {
	.type = PERF_TYPE_SOFTWARE,
	.config = PERF_COUNT_SW_CGROUP_SWITCHES,
	.size = sizeof(cgrp_switch_attr),
	.sample_period = 1,
	.disabled = 1,
};

static struct evsel *cgrp_switch;
static struct bperf_cgroup_bpf *skel;

/* fd of an event at a cpu map index (these events have a single thread, index 0) */
#define FD(evt, cpu) (*(int *)xyarray__entry(evt->core.fd, cpu, 0))

static void setup_rodata(struct bperf_cgroup_bpf *sk, int evlist_size)
{
	int map_size, total_cpus = cpu__max_cpu().cpu;

	/* the evlist is expected to repeat the same events once per cgroup */
	BUG_ON(evlist_size % nr_cgroups != 0);

	sk->rodata->num_cpus = total_cpus;
	sk->rodata->num_events = evlist_size / nr_cgroups;

	if (cgroup_is_v2("perf_event") > 0)
		sk->rodata->use_cgroup_v2 = 1;

	/* we need one copy of events per cpu for reading */
	map_size = total_cpus * evlist_size / nr_cgroups;
	bpf_map__set_max_entries(sk->maps.events, map_size);
	bpf_map__set_max_entries(sk->maps.cgrp_idx, nr_cgroups);
	/* previous result is saved in a per-cpu array */
	map_size = evlist_size / nr_cgroups;
	bpf_map__set_max_entries(sk->maps.prev_readings, map_size);
	/* cgroup result needs all events (per-cpu) */
	map_size = evlist_size;
	bpf_map__set_max_entries(sk->maps.cgrp_readings, map_size);
}
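
/*
 * Illustrative sizing (assumed numbers): with 8 possible CPUs, 2 cgroups
 * and 6 evsels in the evlist (i.e. 3 events per cgroup), the maps get:
 *
 *   events:        8 * 6 / 2 = 24 entries (one event fd per cpu)
 *   cgrp_idx:      2 entries (cgroup id -> cgroup index)
 *   prev_readings: 3 entries (per-cpu, one slot per event)
 *   cgrp_readings: 6 entries (per-cpu, events x cgroups)
 */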

static void test_max_events_program_load(void)
{
#ifndef NDEBUG
	/*
	 * Test that the program verifies with the maximum number of events.
	 * If this test fails, perf unfortunately needs to be recompiled with
	 * a lower BPERF_CGROUP__MAX_EVENTS to avoid BPF verifier issues.
	 */
	int err, max_events = BPERF_CGROUP__MAX_EVENTS * nr_cgroups;
	struct bperf_cgroup_bpf *test_skel = bperf_cgroup_bpf__open();

	if (!test_skel) {
		pr_err("Failed to open cgroup skeleton\n");
		return;
	}
	setup_rodata(test_skel, max_events);
	err = bperf_cgroup_bpf__load(test_skel);
	if (err) {
		pr_err("Failed to load cgroup skeleton with max events %d.\n",
		       BPERF_CGROUP__MAX_EVENTS);
	}
	bperf_cgroup_bpf__destroy(test_skel);
#endif
}
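
/*
 * The real load below sizes the skeleton for the actual evlist; the
 * debug-only check above guards against BPF verifier failures at the
 * BPERF_CGROUP__MAX_EVENTS upper bound.
 */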

static int bperf_load_program(struct evlist *evlist)
{
	struct bpf_link *link;
	struct evsel *evsel;
	struct cgroup *cgrp, *leader_cgrp;
	int i, j;
	struct perf_cpu cpu;
	int total_cpus = cpu__max_cpu().cpu;
	int map_fd, prog_fd, err;

	set_max_rlimit();

	test_max_events_program_load();

	skel = bperf_cgroup_bpf__open();
	if (!skel) {
		pr_err("Failed to open cgroup skeleton\n");
		return -1;
	}
	setup_rodata(skel, evlist->core.nr_entries);

	err = bperf_cgroup_bpf__load(skel);
	if (err) {
		pr_err("Failed to load cgroup skeleton\n");
		goto out;
	}

	err = -1;

	cgrp_switch = evsel__new(&cgrp_switch_attr);
	if (cgrp_switch == NULL ||
	    evsel__open_per_cpu(cgrp_switch, evlist->core.all_cpus, -1) < 0) {
		pr_err("Failed to open cgroup switches event\n");
		goto out;
	}

	perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
		link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
						      FD(cgrp_switch, i));
		if (IS_ERR(link)) {
			pr_err("Failed to attach cgroup program\n");
			err = PTR_ERR(link);
			goto out;
		}
	}
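
	/*
	 * The links are not stored: the programs stay attached until the
	 * cgroup-switch events are closed in bperf_cgrp__destroy().
	 */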

	/*
	 * Update cgrp_idx map from cgroup-id to event index.
	 */
	cgrp = NULL;
	i = 0;

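	/*
	 * For example (illustrative), "perf stat -e cycles,instructions
	 * --bpf-counters --for-each-cgroup A,B" builds an evlist of:
	 *
	 *   idx 0: cycles        (cgroup A)  - opened, fds in events map
	 *   idx 1: instructions  (cgroup A)  - opened, fds in events map
	 *   idx 2: cycles        (cgroup B)  - not opened
	 *   idx 3: instructions  (cgroup B)  - not opened
	 *
	 * cgrp_idx then maps A's cgroup id to 0 and B's to 1.
	 */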
	evlist__for_each_entry(evlist, evsel) {
		if (cgrp == NULL || evsel->cgrp == leader_cgrp) {
			leader_cgrp = evsel->cgrp;
			evsel->cgrp = NULL;

			/* open single copy of the events w/o cgroup */
			err = evsel__open_per_cpu(evsel, evsel->core.cpus, -1);
			if (err == 0)
				evsel->supported = true;

			map_fd = bpf_map__fd(skel->maps.events);
			perf_cpu_map__for_each_cpu(cpu, j, evsel->core.cpus) {
				int fd = FD(evsel, j);
				__u32 idx = evsel->core.idx * total_cpus + cpu.cpu;

				bpf_map_update_elem(map_fd, &idx, &fd, BPF_ANY);
			}

			evsel->cgrp = leader_cgrp;
		}

		if (evsel->cgrp == cgrp)
			continue;

		cgrp = evsel->cgrp;

		if (read_cgroup_id(cgrp) < 0) {
			pr_debug("Failed to get cgroup id for %s\n", cgrp->name);
			cgrp->id = 0;
		}

		map_fd = bpf_map__fd(skel->maps.cgrp_idx);
		err = bpf_map_update_elem(map_fd, &cgrp->id, &i, BPF_ANY);
		if (err < 0) {
			pr_err("Failed to update cgroup index map\n");
			goto out;
		}

		i++;
	}

	/*
	 * bperf uses BPF_PROG_TEST_RUN to get accurate readings. Check
	 * whether the kernel supports it.
	 */
	prog_fd = bpf_program__fd(skel->progs.trigger_read);
	err = bperf_trigger_reading(prog_fd, 0);
	if (err) {
		pr_warning("The kernel does not support test_run for raw_tp BPF programs.\n"
			   "Therefore, --for-each-cgroup might show inaccurate readings\n");
		err = 0;
	}
out:
	return err;
}
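
/*
 * From here on the kernel side is self-contained: on_cgrp_switch
 * updates the per-cgroup counts on every cgroup switch, and userspace
 * only triggers a final flush and reads the cgrp_readings map (see
 * bperf_cgrp__read() below).
 */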

static int bperf_cgrp__load(struct evsel *evsel,
			    struct target *target __maybe_unused)
{
	static bool bperf_loaded = false;

	evsel->bperf_leader_prog_fd = -1;
	evsel->bperf_leader_link_fd = -1;

	if (!bperf_loaded && bperf_load_program(evsel->evlist))
		return -1;

	bperf_loaded = true;
	/* just to bypass bpf_counter_skip() */
	evsel->follower_skel = (struct bperf_follower_bpf *)skel;

	return 0;
}

static int bperf_cgrp__install_pe(struct evsel *evsel __maybe_unused,
				  int cpu_map_idx __maybe_unused,
				  int fd __maybe_unused)
{
	/* nothing to do: the event fds were put in the events map at load time */
	return 0;
}

/*
 * Trigger the leader prog on each cpu, so the cgrp_readings map can get
 * the latest results.
 */
static int bperf_cgrp__sync_counters(struct evlist *evlist)
{
	struct perf_cpu cpu;
	int idx;
	int prog_fd = bpf_program__fd(skel->progs.trigger_read);

	perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.all_cpus)
		bperf_trigger_reading(prog_fd, cpu.cpu);

	return 0;
}
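
/*
 * For reference, bperf_trigger_reading() (from util/bpf_counter.c) runs
 * the given program on one CPU via BPF_PROG_TEST_RUN, roughly:
 *
 *	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
 *			    .flags = BPF_F_TEST_RUN_ON_CPU,
 *			    .cpu = cpu);
 *	return bpf_prog_test_run_opts(prog_fd, &opts);
 */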
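
/*
 * Only the evsel with idx 0 drives the shared skeleton: enable,
 * disable, read and destroy are global operations, so all other evsels
 * return early from the callbacks below.
 */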
static int bperf_cgrp__enable(struct evsel *evsel)
{
	if (evsel->core.idx)
		return 0;

	bperf_cgrp__sync_counters(evsel->evlist);

	skel->bss->enabled = 1;
	return 0;
}

static int bperf_cgrp__disable(struct evsel *evsel)
{
	if (evsel->core.idx)
		return 0;

	bperf_cgrp__sync_counters(evsel->evlist);

	skel->bss->enabled = 0;
	return 0;
}

static int bperf_cgrp__read(struct evsel *evsel)
{
	struct evlist *evlist = evsel->evlist;
	int total_cpus = cpu__max_cpu().cpu;
	struct perf_counts_values *counts;
	struct bpf_perf_event_value *values;
	int reading_map_fd, err = 0;

	if (evsel->core.idx)
		return 0;

	bperf_cgrp__sync_counters(evsel->evlist);

	values = calloc(total_cpus, sizeof(*values));
	if (values == NULL)
		return -ENOMEM;

	reading_map_fd = bpf_map__fd(skel->maps.cgrp_readings);

	evlist__for_each_entry(evlist, evsel) {
		__u32 idx = evsel->core.idx;
		int i;
		struct perf_cpu cpu;

		err = bpf_map_lookup_elem(reading_map_fd, &idx, values);
		if (err) {
			pr_err("bpf map lookup failed: idx=%u, event=%s, cgrp=%s\n",
			       idx, evsel__name(evsel), evsel->cgrp->name);
			goto out;
		}

		/*
		 * The per-cpu values are indexed by absolute cpu number,
		 * while counts use the evsel's cpu map index.
		 */
		perf_cpu_map__for_each_cpu(cpu, i, evsel->core.cpus) {
			counts = perf_counts(evsel->counts, i, 0);
			counts->val = values[cpu.cpu].counter;
			counts->ena = values[cpu.cpu].enabled;
			counts->run = values[cpu.cpu].running;
		}
	}

out:
	free(values);
	return err;
}
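
/*
 * The values copied above are per (event, cpu); aggregation across CPUs
 * and per-cgroup display are handled by the generic perf stat code, not
 * here.
 */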

static int bperf_cgrp__destroy(struct evsel *evsel)
{
	if (evsel->core.idx)
		return 0;

	bperf_cgroup_bpf__destroy(skel);
	/* closing the events also detaches the on_cgrp_switch progs */
	evsel__delete(cgrp_switch);

	return 0;
}

struct bpf_counter_ops bperf_cgrp_ops = {
	.load       = bperf_cgrp__load,
	.enable     = bperf_cgrp__enable,
	.disable    = bperf_cgrp__disable,
	.read       = bperf_cgrp__read,
	.install_pe = bperf_cgrp__install_pe,
	.destroy    = bperf_cgrp__destroy,
};
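
/*
 * These ops implement "perf stat --bpf-counters --for-each-cgroup ...";
 * bpf_counter.c selects bperf_cgrp_ops when the events have been
 * expanded per cgroup and BPF counters were requested.
 */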