// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2021 Facebook */
/* Copyright (c) 2021 Google */

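/*
 * bperf cgroup: count events for multiple cgroups using a single set
 * of per-cpu perf events opened without a cgroup filter.  A BPF
 * program, run at each cgroup switch, attributes the counts to the
 * current cgroup.
 */
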
#include <assert.h>
#include <limits.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <linux/perf_event.h>
#include <api/fs/fs.h>
#include <perf/bpf_perf.h>

#include "affinity.h"
#include "bpf_counter.h"
#include "cgroup.h"
#include "counts.h"
#include "debug.h"
#include "evsel.h"
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"

#include "bpf_skel/bperf_cgroup.skel.h"

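/*
 * A software event that counts every cgroup switch; with a sample
 * period of one, the BPF program attached to it runs on each switch.
 */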
static struct perf_event_attr cgrp_switch_attr = {
	.type = PERF_TYPE_SOFTWARE,
	.config = PERF_COUNT_SW_CGROUP_SWITCHES,
	.size = sizeof(cgrp_switch_attr),
	.sample_period = 1,
	.disabled = 1,
};

static struct evsel *cgrp_switch;
static struct bperf_cgroup_bpf *skel;

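/* Get the perf event fd of @evt at cpu-map index @cpu (thread 0). */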
#define FD(evt, cpu) (*(int *)xyarray__entry(evt->core.fd, cpu, 0))

static int bperf_load_program(struct evlist *evlist)
{
	struct bpf_link *link;
	struct evsel *evsel;
	struct cgroup *cgrp, *leader_cgrp;
	int i, j;
	struct perf_cpu cpu;
	int total_cpus = cpu__max_cpu().cpu;
	int map_size, map_fd;
	int prog_fd, err;

	skel = bperf_cgroup_bpf__open();
	if (!skel) {
		pr_err("Failed to open cgroup skeleton\n");
		return -1;
	}

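	/* the evlist contains one copy of each event per cgroup */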
	skel->rodata->num_cpus = total_cpus;
	skel->rodata->num_events = evlist->core.nr_entries / nr_cgroups;

	BUG_ON(evlist->core.nr_entries % nr_cgroups != 0);

	/* we need one copy of events per cpu for reading */
	map_size = total_cpus * evlist->core.nr_entries / nr_cgroups;
	bpf_map__set_max_entries(skel->maps.events, map_size);
	bpf_map__set_max_entries(skel->maps.cgrp_idx, nr_cgroups);
	/* previous result is saved in a per-cpu array */
	map_size = evlist->core.nr_entries / nr_cgroups;
	bpf_map__set_max_entries(skel->maps.prev_readings, map_size);
	/* cgroup result needs all events (per-cpu) */
	map_size = evlist->core.nr_entries;
	bpf_map__set_max_entries(skel->maps.cgrp_readings, map_size);

	set_max_rlimit();

	err = bperf_cgroup_bpf__load(skel);
	if (err) {
		pr_err("Failed to load cgroup skeleton\n");
		goto out;
	}

	if (cgroup_is_v2("perf_event") > 0)
		skel->bss->use_cgroup_v2 = 1;

	err = -1;

	cgrp_switch = evsel__new(&cgrp_switch_attr);
	if (cgrp_switch == NULL) {
		pr_err("Failed to create cgroup switches event\n");
		goto out;
	}

	if (evsel__open_per_cpu(cgrp_switch, evlist->core.all_cpus, -1) < 0) {
		pr_err("Failed to open cgroup switches event\n");
		goto out;
	}
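	/* attach the cgroup-switch BPF program to the event on each CPU */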
	perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
		link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
						      FD(cgrp_switch, i));
		if (IS_ERR(link)) {
			pr_err("Failed to attach cgroup program\n");
			err = PTR_ERR(link);
			goto out;
		}
	}

	/*
	 * Update cgrp_idx map from cgroup-id to cgroup index.
	 */
	cgrp = NULL;
	i = 0;

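	/*
	 * Open only one copy of each event (without a cgroup filter);
	 * the BPF program attributes count deltas to cgroups at each switch.
	 */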
	evlist__for_each_entry(evlist, evsel) {
		if (cgrp == NULL || evsel->cgrp == leader_cgrp) {
			leader_cgrp = evsel->cgrp;
			evsel->cgrp = NULL;

			/* open single copy of the events w/o cgroup */
			err = evsel__open_per_cpu(evsel, evsel->core.cpus, -1);
			if (err == 0)
				evsel->supported = true;

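			/* save the event fds so the BPF program can read the counters */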
			map_fd = bpf_map__fd(skel->maps.events);
			perf_cpu_map__for_each_cpu(cpu, j, evsel->core.cpus) {
				int fd = FD(evsel, j);
				__u32 idx = evsel->core.idx * total_cpus + cpu.cpu;

				bpf_map_update_elem(map_fd, &idx, &fd, BPF_ANY);
			}

			evsel->cgrp = leader_cgrp;
		}

		if (evsel->cgrp == cgrp)
			continue;

		cgrp = evsel->cgrp;

		if (read_cgroup_id(cgrp) < 0) {
			pr_debug("Failed to get cgroup id for %s\n", cgrp->name);
			cgrp->id = 0;
		}

		map_fd = bpf_map__fd(skel->maps.cgrp_idx);
		err = bpf_map_update_elem(map_fd, &cgrp->id, &i, BPF_ANY);
		if (err < 0) {
			pr_err("Failed to update cgroup index map\n");
			goto out;
		}

		i++;
	}

	/*
	 * bperf uses BPF_PROG_TEST_RUN to get accurate readings.  Check
	 * whether the kernel supports it.
	 */
	prog_fd = bpf_program__fd(skel->progs.trigger_read);
	err = bperf_trigger_reading(prog_fd, 0);
	if (err) {
		pr_warning("The kernel does not support test_run for raw_tp BPF programs.\n"
			   "Therefore, --for-each-cgroup might show inaccurate readings\n");
		err = 0;
	}

out:
	return err;
}

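/* The BPF program and maps are shared by all events; load them only once. */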
static int bperf_cgrp__load(struct evsel *evsel,
			    struct target *target __maybe_unused)
{
	static bool bperf_loaded = false;

	evsel->bperf_leader_prog_fd = -1;
	evsel->bperf_leader_link_fd = -1;

	if (!bperf_loaded && bperf_load_program(evsel->evlist))
		return -1;

	bperf_loaded = true;
	/* just to bypass bpf_counter_skip() */
	evsel->follower_skel = (struct bperf_follower_bpf *)skel;

	return 0;
}

static int bperf_cgrp__install_pe(struct evsel *evsel __maybe_unused,
				  int cpu __maybe_unused, int fd __maybe_unused)
{
	/* nothing to do */
	return 0;
}

/*
 * Trigger the leader prog on each cpu, so the cgrp_readings map can get
 * the latest results.
 */
static int bperf_cgrp__sync_counters(struct evlist *evlist)
{
	struct perf_cpu cpu;
	int idx;
	int prog_fd = bpf_program__fd(skel->progs.trigger_read);

	perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.all_cpus)
		bperf_trigger_reading(prog_fd, cpu.cpu);

	return 0;
}

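/* The enabled flag in BSS is global, so only the first event toggles it. */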
static int bperf_cgrp__enable(struct evsel *evsel)
{
	if (evsel->core.idx)
		return 0;

	bperf_cgrp__sync_counters(evsel->evlist);

	skel->bss->enabled = 1;
	return 0;
}

static int bperf_cgrp__disable(struct evsel *evsel)
{
	if (evsel->core.idx)
		return 0;

	bperf_cgrp__sync_counters(evsel->evlist);

	skel->bss->enabled = 0;
	return 0;
}

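/* Read counts of every event in the evlist when the first one is read. */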
static int bperf_cgrp__read(struct evsel *evsel)
{
	struct evlist *evlist = evsel->evlist;
	int total_cpus = cpu__max_cpu().cpu;
	struct perf_counts_values *counts;
	struct bpf_perf_event_value *values;
	int reading_map_fd, err = 0;

	if (evsel->core.idx)
		return 0;

	bperf_cgrp__sync_counters(evsel->evlist);

	values = calloc(total_cpus, sizeof(*values));
	if (values == NULL)
		return -ENOMEM;

	reading_map_fd = bpf_map__fd(skel->maps.cgrp_readings);

	evlist__for_each_entry(evlist, evsel) {
		__u32 idx = evsel->core.idx;
		int i;
		struct perf_cpu cpu;

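		/* one lookup fills in the values for every CPU (per-cpu map) */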
		err = bpf_map_lookup_elem(reading_map_fd, &idx, values);
		if (err) {
			pr_err("bpf map lookup failed: idx=%u, event=%s, cgrp=%s\n",
			       idx, evsel__name(evsel), evsel->cgrp->name);
			goto out;
		}

		perf_cpu_map__for_each_cpu(cpu, i, evsel->core.cpus) {
			counts = perf_counts(evsel->counts, i, 0);
			counts->val = values[cpu.cpu].counter;
			counts->ena = values[cpu.cpu].enabled;
			counts->run = values[cpu.cpu].running;
		}
	}

out:
	free(values);
	return err;
}

static int bperf_cgrp__destroy(struct evsel *evsel)
{
	if (evsel->core.idx)
		return 0;

	bperf_cgroup_bpf__destroy(skel);
	evsel__delete(cgrp_switch);  /* it'll destroy on_switch progs too */

	return 0;
}

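/* Counter callbacks for 'perf stat --bpf-counters --for-each-cgroup'. */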
struct bpf_counter_ops bperf_cgrp_ops = {
	.load       = bperf_cgrp__load,
	.enable     = bperf_cgrp__enable,
	.disable    = bperf_cgrp__disable,
	.read       = bperf_cgrp__read,
	.install_pe = bperf_cgrp__install_pe,
	.destroy    = bperf_cgrp__destroy,
};