/* SPDX-License-Identifier: GPL-2.0 */
/**
 * Generic event filter for sampling events in BPF.
 *
 * The BPF program is fixed; it reads filter expressions from the 'filters'
 * map and compares them against the sample data in order to reject samples
 * that don't match. Each filter expression contains a sample flag (term) to
 * compare, an operation (==, >=, and so on) and a value.
 *
 * Each entry holds an array of filter expressions and a sample is accepted
 * only when all of the expressions are satisfied. Logical OR is supported
 * through a GROUP operation, which is satisfied when any of its member
 * expressions evaluates to true. Nested GROUP operations are not allowed
 * for now.
 *
 * To support non-root users, the filters map can be loaded and pinned in the
 * BPF filesystem by root (perf record --setup-filter pin). Each user then
 * gets a new entry in the shared filters map to fill with filter expressions,
 * and the BPF program finds the filter using (task-id, event-id) as the key.
 *
 * The pinned BPF object (shared for regular users) has:
 *
 *                  event_hash                   |
 *                  |        |                   |
 *   event->id ---> |   id   | ---+   idx_hash   |     filters
 *                  |        |    |   |       |   |    |       |
 *                  |  ....  |    +-> |  idx  | --+--> | exprs | --->  perf_bpf_filter_entry[]
 *                                |   |       |   |    |       |              .op
 *   task id (tgid) -------------+    |  ...  |   |    |  ...  |              .term (+ part)
 *                                                |                           .value
 *                                                |
 *   ======= (root would skip this part) =========  (compares it in a loop)
 *
 * This setup is used for per-task use cases, while system-wide profiling
 * (normally done by root) uses a separate copy of the program and maps of its
 * own so that it can proceed even when many non-root users are using the
 * filters at the same time. In that case the filters map has a single entry
 * and there is no need to use the hash maps to get the index (key) of the
 * filters map (IOW it's always 0).
 *
 * The BPF program returns 1 to accept the sample or 0 to drop it.
 * The 'dropped' map counts how many samples were dropped by the filter;
 * they are reported as lost samples.
 */
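
/*
 * For illustration only (hypothetical filter string; the actual grammar is
 * defined in util/bpf-filter.l and util/bpf-filter.y): an expression like
 * 'period > 1000' would be parsed into a single perf_bpf_filter_entry with
 * .term = PBF_TERM_PERIOD, .op = PBF_OP_GT and .value = 1000, which the BPF
 * program then evaluates against every sample.
 */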
#include <stdlib.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/stat.h>

#include <bpf/bpf.h>
#include <linux/err.h>
#include <linux/list.h>
#include <api/fs/fs.h>
#include <internal/xyarray.h>
#include <perf/threadmap.h>

#include "util/cap.h"
#include "util/debug.h"
#include "util/evsel.h"
#include "util/target.h"

#include "util/bpf-filter.h"
#include <util/bpf-filter-flex.h>
#include <util/bpf-filter-bison.h>

#include "bpf_skel/sample-filter.h"
#include "bpf_skel/sample_filter.skel.h"

#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))

#define __PERF_SAMPLE_TYPE(tt, st, opt) { tt, #st, opt }
#define PERF_SAMPLE_TYPE(_st, opt) __PERF_SAMPLE_TYPE(PBF_TERM_##_st, PERF_SAMPLE_##_st, opt)
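
/*
 * PERF_SAMPLE_TYPE(IP, NULL) expands to { PBF_TERM_IP, "PERF_SAMPLE_IP", NULL }:
 * the filter term, the sample flag name used in error messages, and the
 * perf record option needed to enable that flag (if any).
 */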

/* Index in the pinned 'filters' map. Should be released after use. */
struct pinned_filter_idx {
        struct list_head list;
        struct evsel *evsel;
        u64 event_id;
        int hash_idx;
};

static LIST_HEAD(pinned_filters);

static const struct perf_sample_info {
        enum perf_bpf_filter_term type;
        const char *name;
        const char *option;
} sample_table[] = {
        /* default sample flags */
        PERF_SAMPLE_TYPE(IP, NULL),
        PERF_SAMPLE_TYPE(TID, NULL),
        PERF_SAMPLE_TYPE(PERIOD, NULL),
        /* flags mostly set by default, but still have options */
        PERF_SAMPLE_TYPE(ID, "--sample-identifier"),
        PERF_SAMPLE_TYPE(CPU, "--sample-cpu"),
        PERF_SAMPLE_TYPE(TIME, "-T"),
        /* optional sample flags */
        PERF_SAMPLE_TYPE(ADDR, "-d"),
        PERF_SAMPLE_TYPE(DATA_SRC, "-d"),
        PERF_SAMPLE_TYPE(PHYS_ADDR, "--phys-data"),
        PERF_SAMPLE_TYPE(WEIGHT, "-W"),
        PERF_SAMPLE_TYPE(WEIGHT_STRUCT, "-W"),
        PERF_SAMPLE_TYPE(TRANSACTION, "--transaction"),
        PERF_SAMPLE_TYPE(CODE_PAGE_SIZE, "--code-page-size"),
        PERF_SAMPLE_TYPE(DATA_PAGE_SIZE, "--data-page-size"),
        PERF_SAMPLE_TYPE(CGROUP, "--all-cgroups"),
};

static int get_pinned_fd(const char *name);

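/* Look up the sample flag info for a filter term, or NULL if it has none. */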
static const struct perf_sample_info *get_sample_info(enum perf_bpf_filter_term type)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(sample_table); i++) {
                if (sample_table[i].type == type)
                        return &sample_table[i];
        }
        return NULL;
}

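/*
 * Check that the evsel actually records the data needed by this expression:
 * sample-flag terms must be set in attr.sample_type, UID/GID are computed
 * from a BPF helper, and GROUP expressions are checked recursively.  On
 * failure, print an error and a hint about the missing perf record option.
 */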
static int check_sample_flags(struct evsel *evsel, struct perf_bpf_filter_expr *expr)
{
        const struct perf_sample_info *info;

        if (expr->term >= PBF_TERM_SAMPLE_START && expr->term <= PBF_TERM_SAMPLE_END &&
            (evsel->core.attr.sample_type & (1 << (expr->term - PBF_TERM_SAMPLE_START))))
                return 0;

        if (expr->term == PBF_TERM_UID || expr->term == PBF_TERM_GID) {
                /* Not dependent on the sample_type as computed from a BPF helper. */
                return 0;
        }

        if (expr->op == PBF_OP_GROUP_BEGIN) {
                struct perf_bpf_filter_expr *group;

                list_for_each_entry(group, &expr->groups, list) {
                        if (check_sample_flags(evsel, group) < 0)
                                return -1;
                }
                return 0;
        }

        info = get_sample_info(expr->term);
        if (info == NULL) {
                pr_err("Error: %s event does not have sample flags %d\n",
                       evsel__name(evsel), expr->term);
                return -1;
        }

        pr_err("Error: %s event does not have %s\n", evsel__name(evsel), info->name);
        if (info->option)
                pr_err(" Hint: please add %s option to perf record\n", info->option);
        return -1;
}

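/*
 * Flatten the evsel's filter expression list into an array of at most
 * MAX_FILTERS entries: group members follow their GROUP_BEGIN entry and are
 * closed with a GROUP_END entry, and a DONE entry terminates the array when
 * there is room left.
 */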
static int get_filter_entries(struct evsel *evsel, struct perf_bpf_filter_entry *entry)
{
        int i = 0;
        struct perf_bpf_filter_expr *expr;

        list_for_each_entry(expr, &evsel->bpf_filters, list) {
                if (check_sample_flags(evsel, expr) < 0)
                        return -EINVAL;

                if (i == MAX_FILTERS)
                        return -E2BIG;

                entry[i].op = expr->op;
                entry[i].part = expr->part;
                entry[i].term = expr->term;
                entry[i].value = expr->val;
                i++;

                if (expr->op == PBF_OP_GROUP_BEGIN) {
                        struct perf_bpf_filter_expr *group;

                        list_for_each_entry(group, &expr->groups, list) {
                                if (i == MAX_FILTERS)
                                        return -E2BIG;

                                entry[i].op = group->op;
                                entry[i].part = group->part;
                                entry[i].term = group->term;
                                entry[i].value = group->val;
                                i++;
                        }

                        if (i == MAX_FILTERS)
                                return -E2BIG;

                        entry[i].op = PBF_OP_GROUP_END;
                        i++;
                }
        }

        if (i < MAX_FILTERS) {
                /* to terminate the loop early */
                entry[i].op = PBF_OP_DONE;
                i++;
        }
        return 0;
}

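/* Read /proc/<tid>/status and return the value of its "Tgid:" line, or -1. */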
static int convert_to_tgid(int tid)
{
        char path[128];
        char *buf, *p, *q;
        int tgid;
        size_t len;

        scnprintf(path, sizeof(path), "%d/status", tid);
        if (procfs__read_str(path, &buf, &len) < 0)
                return -1;

        p = strstr(buf, "Tgid:");
        if (p == NULL) {
                free(buf);
                return -1;
        }

        tgid = strtol(p + 6, &q, 0);
        free(buf);
        if (*q != '\n')
                return -1;

        return tgid;
}

/*
 * The event might be closed already so we cannot get the list of ids using FD
 * like in create_event_hash() below, let's iterate the event_hash map and
 * delete all entries that have the event id as a key.
 */
static void destroy_event_hash(u64 event_id)
{
        int fd;
        u64 key, *prev_key = NULL;
        int num = 0, alloced = 32;
        u64 *ids = calloc(alloced, sizeof(*ids));

        if (ids == NULL)
                return;

        fd = get_pinned_fd("event_hash");
        if (fd < 0) {
                pr_debug("cannot get fd for 'event_hash' map\n");
                free(ids);
                return;
        }

        /* Iterate the whole map to collect keys for the event id. */
        while (!bpf_map_get_next_key(fd, prev_key, &key)) {
                u64 id;

                if (bpf_map_lookup_elem(fd, &key, &id) == 0 && id == event_id) {
                        if (num == alloced) {
                                void *tmp;

                                alloced *= 2;
                                tmp = realloc(ids, alloced * sizeof(*ids));
                                if (tmp == NULL)
                                        break;

                                ids = tmp;
                        }
                        ids[num++] = key;
                }

                prev_key = &key;
        }

        for (int i = 0; i < num; i++)
                bpf_map_delete_elem(fd, &ids[i]);

        free(ids);
        close(fd);
}

/*
 * Return a representative id if ok, or 0 for failures.
 *
 * The perf_event->id is good for this, but an evsel would have multiple
 * instances for CPUs and tasks. So pick up the first id and setup a hash
 * from id of each instance to the representative id (the first one).
 */
static u64 create_event_hash(struct evsel *evsel)
{
        int x, y, fd;
        u64 the_id = 0, id;

        fd = get_pinned_fd("event_hash");
        if (fd < 0) {
                pr_err("cannot get fd for 'event_hash' map\n");
                return 0;
        }

        for (x = 0; x < xyarray__max_x(evsel->core.fd); x++) {
                for (y = 0; y < xyarray__max_y(evsel->core.fd); y++) {
                        int ret = ioctl(FD(evsel, x, y), PERF_EVENT_IOC_ID, &id);

                        if (ret < 0) {
                                pr_err("Failed to get the event id\n");
                                if (the_id)
                                        destroy_event_hash(the_id);
                                close(fd);
                                return 0;
                        }

                        if (the_id == 0)
                                the_id = id;

                        bpf_map_update_elem(fd, &id, &the_id, BPF_ANY);
                }
        }

        close(fd);
        return the_id;
}

static void destroy_idx_hash(struct pinned_filter_idx *pfi)
{
        int fd, nr;
        struct perf_thread_map *threads;

        fd = get_pinned_fd("filters");
        bpf_map_delete_elem(fd, &pfi->hash_idx);
        close(fd);

        if (pfi->event_id)
                destroy_event_hash(pfi->event_id);

        threads = perf_evsel__threads(&pfi->evsel->core);
        if (threads == NULL)
                return;

        fd = get_pinned_fd("idx_hash");
        nr = perf_thread_map__nr(threads);
        for (int i = 0; i < nr; i++) {
                /* The target task might be dead already, just try the pid */
                struct idx_hash_key key = {
                        .evt_id = pfi->event_id,
                        .tgid = perf_thread_map__pid(threads, i),
                };

                bpf_map_delete_elem(fd, &key);
        }
        close(fd);
}

/* Maintain a hashmap from (tgid, event-id) to filter index */
static int create_idx_hash(struct evsel *evsel, struct perf_bpf_filter_entry *entry)
{
        int filter_idx;
        int fd, nr, last;
        u64 event_id = 0;
        struct pinned_filter_idx *pfi = NULL;
        struct perf_thread_map *threads;

        fd = get_pinned_fd("filters");
        if (fd < 0) {
                pr_err("cannot get fd for 'filters' map\n");
                return fd;
        }

        /* Find the first available entry in the filters map */
        for (filter_idx = 0; filter_idx < MAX_FILTERS; filter_idx++) {
                if (bpf_map_update_elem(fd, &filter_idx, entry, BPF_NOEXIST) == 0)
                        break;
        }
        close(fd);

        if (filter_idx == MAX_FILTERS) {
                pr_err("Too many users for the filter map\n");
                return -EBUSY;
        }

        pfi = zalloc(sizeof(*pfi));
        if (pfi == NULL) {
                pr_err("Cannot save pinned filter index\n");
                return -ENOMEM;
        }

        pfi->evsel = evsel;
        pfi->hash_idx = filter_idx;

        event_id = create_event_hash(evsel);
        if (event_id == 0) {
                pr_err("Cannot update the event hash\n");
                goto err;
        }

        pfi->event_id = event_id;

        threads = perf_evsel__threads(&evsel->core);
        if (threads == NULL) {
                pr_err("Cannot get the thread list of the event\n");
                goto err;
        }

        /* save the index to a hash map */
        fd = get_pinned_fd("idx_hash");
        if (fd < 0) {
                pr_err("cannot get fd for 'idx_hash' map\n");
                goto err;
        }

        last = -1;
        nr = perf_thread_map__nr(threads);
        for (int i = 0; i < nr; i++) {
                int pid = perf_thread_map__pid(threads, i);
                int tgid;
                struct idx_hash_key key = {
                        .evt_id = event_id,
                };

                /* it actually needs tgid, let's get tgid from /proc. */
                tgid = convert_to_tgid(pid);
                if (tgid < 0) {
                        /* the thread may be dead, ignore. */
                        continue;
                }

                if (tgid == last)
                        continue;
                last = tgid;
                key.tgid = tgid;

                if (bpf_map_update_elem(fd, &key, &filter_idx, BPF_ANY) < 0) {
                        pr_err("Failed to update the idx_hash\n");
                        close(fd);
                        goto err;
                }
                pr_debug("bpf-filter: idx_hash (task=%d,%s) -> %d\n",
                         tgid, evsel__name(evsel), filter_idx);
        }

        list_add(&pfi->list, &pinned_filters);
        close(fd);
        return filter_idx;

err:
        destroy_idx_hash(pfi);
        free(pfi);
        return -1;
}

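/*
 * Set up the sample filter for an evsel.  Unprivileged per-task sessions use
 * the objects pinned by root: they claim an entry in the shared 'filters' map
 * via create_idx_hash() and attach the pinned program with
 * PERF_EVENT_IOC_SET_BPF.  Otherwise a private copy of the skeleton is loaded,
 * its single-entry 'filters' map is filled and the program is attached to
 * each event FD directly.
 */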
int perf_bpf_filter__prepare(struct evsel *evsel, struct target *target)
{
        int i, x, y, fd, ret;
        struct sample_filter_bpf *skel = NULL;
        struct bpf_program *prog;
        struct bpf_link *link;
        struct perf_bpf_filter_entry *entry;
        bool needs_idx_hash = !target__has_cpu(target);
        DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts,
                            .dont_enable = true);

        entry = calloc(MAX_FILTERS, sizeof(*entry));
        if (entry == NULL)
                return -1;

        ret = get_filter_entries(evsel, entry);
        if (ret < 0) {
                pr_err("Failed to process filter entries\n");
                goto err;
        }

        if (needs_idx_hash && geteuid() != 0) {
                int zero = 0;

                /* The filters map is shared among other processes */
                ret = create_idx_hash(evsel, entry);
                if (ret < 0)
                        goto err;

                fd = get_pinned_fd("dropped");
                if (fd < 0) {
                        ret = fd;
                        goto err;
                }

                /* Reset the lost count */
                bpf_map_update_elem(fd, &ret, &zero, BPF_ANY);
                close(fd);

                fd = get_pinned_fd("perf_sample_filter");
                if (fd < 0) {
                        ret = fd;
                        goto err;
                }

                for (x = 0; x < xyarray__max_x(evsel->core.fd); x++) {
                        for (y = 0; y < xyarray__max_y(evsel->core.fd); y++) {
                                ret = ioctl(FD(evsel, x, y), PERF_EVENT_IOC_SET_BPF, fd);
                                if (ret < 0) {
                                        pr_err("Failed to attach perf sample-filter\n");
                                        close(fd);
                                        goto err;
                                }
                        }
                }

                close(fd);
                free(entry);
                return 0;
        }

        skel = sample_filter_bpf__open_and_load();
        if (!skel) {
                ret = -errno;
                pr_err("Failed to load perf sample-filter BPF skeleton\n");
                goto err;
        }

        i = 0;
        fd = bpf_map__fd(skel->maps.filters);

        /* The filters map has only one entry in this case */
        if (bpf_map_update_elem(fd, &i, entry, BPF_ANY) < 0) {
                ret = -errno;
                pr_err("Failed to update the filter map\n");
                goto err;
        }

        prog = skel->progs.perf_sample_filter;
        for (x = 0; x < xyarray__max_x(evsel->core.fd); x++) {
                for (y = 0; y < xyarray__max_y(evsel->core.fd); y++) {
                        link = bpf_program__attach_perf_event_opts(prog, FD(evsel, x, y),
                                                                   &pe_opts);
                        if (IS_ERR(link)) {
                                pr_err("Failed to attach perf sample-filter program\n");
                                ret = PTR_ERR(link);
                                goto err;
                        }
                }
        }
        free(entry);
        evsel->bpf_skel = skel;
        return 0;

err:
        free(entry);
        if (!list_empty(&pinned_filters)) {
                struct pinned_filter_idx *pfi, *tmp;

                list_for_each_entry_safe(pfi, tmp, &pinned_filters, list) {
                        destroy_idx_hash(pfi);
                        list_del(&pfi->list);
                        free(pfi);
                }
        }
        sample_filter_bpf__destroy(skel);
        return ret;
}

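/*
 * Free the filter expressions of this evsel, destroy its private skeleton if
 * any, and remove the entries this process claimed in the pinned maps.
 */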
int perf_bpf_filter__destroy(struct evsel *evsel)
{
        struct perf_bpf_filter_expr *expr, *tmp;
        struct pinned_filter_idx *pfi, *pos;

        list_for_each_entry_safe(expr, tmp, &evsel->bpf_filters, list) {
                list_del(&expr->list);
                free(expr);
        }
        sample_filter_bpf__destroy(evsel->bpf_skel);

        list_for_each_entry_safe(pfi, pos, &pinned_filters, list) {
                destroy_idx_hash(pfi);
                list_del(&pfi->list);
                free(pfi);
        }
        return 0;
}

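/*
 * Return the number of samples dropped by the filter for this evsel, read
 * from the 'dropped' map: at the evsel's index in the pinned object, or at
 * index 0 in a private skeleton.
 */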
u64 perf_bpf_filter__lost_count(struct evsel *evsel)
{
        int count = 0;

        if (list_empty(&evsel->bpf_filters))
                return 0;

        if (!list_empty(&pinned_filters)) {
                int fd = get_pinned_fd("dropped");
                struct pinned_filter_idx *pfi;

                if (fd < 0)
                        return 0;

                list_for_each_entry(pfi, &pinned_filters, list) {
                        if (pfi->evsel != evsel)
                                continue;

                        bpf_map_lookup_elem(fd, &pfi->hash_idx, &count);
                        break;
                }
                close(fd);
        } else if (evsel->bpf_skel) {
                struct sample_filter_bpf *skel = evsel->bpf_skel;
                int fd = bpf_map__fd(skel->maps.dropped);
                int idx = 0;

                bpf_map_lookup_elem(fd, &idx, &count);
        }

        return count;
}

struct perf_bpf_filter_expr *perf_bpf_filter_expr__new(enum perf_bpf_filter_term term,
                                                       int part,
                                                       enum perf_bpf_filter_op op,
                                                       unsigned long val)
{
        struct perf_bpf_filter_expr *expr;

        expr = malloc(sizeof(*expr));
        if (expr != NULL) {
                expr->term = term;
                expr->part = part;
                expr->op = op;
                expr->val = val;
                INIT_LIST_HEAD(&expr->groups);
        }
        return expr;
}

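/*
 * Using the BPF filter requires CAP_BPF (or root); unprivileged users are
 * allowed only when root has already pinned the filter objects in the BPF
 * filesystem.
 */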
static bool check_bpf_filter_capable(void)
{
        bool used_root;

        if (perf_cap__capable(CAP_BPF, &used_root))
                return true;

        if (!used_root) {
                /* Check if root already pinned the filter programs and maps */
                int fd = get_pinned_fd("filters");

                if (fd >= 0) {
                        close(fd);
                        return true;
                }
        }

        pr_err("Error: BPF filter only works for %s!\n"
               "\tPlease run 'perf record --setup-filter pin' as root first.\n",
               used_root ? "root" : "users with the CAP_BPF capability");

        return false;
}

int perf_bpf_filter__parse(struct list_head *expr_head, const char *str)
{
        YY_BUFFER_STATE buffer;
        int ret;

        if (!check_bpf_filter_capable())
                return -EPERM;

        buffer = perf_bpf_filter__scan_string(str);

        ret = perf_bpf_filter_parse(expr_head);

        perf_bpf_filter__flush_buffer(buffer);
        perf_bpf_filter__delete_buffer(buffer);
        perf_bpf_filter_lex_destroy();

        return ret;
}

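/*
 * 'perf record --setup-filter pin': load the skeleton with use_idx_hash set
 * and enlarged maps, pin the objects under the PERF_BPF_FILTER_PIN_PATH
 * directory in the BPF filesystem, and relax their permissions so that
 * regular users can attach the program and update the maps.
 */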
int perf_bpf_filter__pin(void)
{
        struct sample_filter_bpf *skel;
        char *path = NULL;
        int dir_fd, ret = -1;

        skel = sample_filter_bpf__open();
        if (!skel) {
                ret = -errno;
                pr_err("Failed to open perf sample-filter BPF skeleton\n");
                goto err;
        }

        /* pinned program will use pid-hash */
        bpf_map__set_max_entries(skel->maps.filters, MAX_FILTERS);
        bpf_map__set_max_entries(skel->maps.event_hash, MAX_EVT_HASH);
        bpf_map__set_max_entries(skel->maps.idx_hash, MAX_IDX_HASH);
        bpf_map__set_max_entries(skel->maps.dropped, MAX_FILTERS);
        skel->rodata->use_idx_hash = 1;

        if (sample_filter_bpf__load(skel) < 0) {
                ret = -errno;
                pr_err("Failed to load perf sample-filter BPF skeleton\n");
                goto err;
        }

        if (asprintf(&path, "%s/fs/bpf/%s", sysfs__mountpoint(),
                     PERF_BPF_FILTER_PIN_PATH) < 0) {
                ret = -errno;
                pr_err("Failed to allocate pathname in the BPF-fs\n");
                goto err;
        }

        ret = bpf_object__pin(skel->obj, path);
        if (ret < 0) {
                pr_err("Failed to pin BPF filter objects\n");
                goto err;
        }

        /* setup access permissions for the pinned objects */
        dir_fd = open(path, O_PATH);
        if (dir_fd < 0) {
                bpf_object__unpin(skel->obj, path);
                ret = dir_fd;
                goto err;
        }

        /* BPF-fs root has the sticky bit */
        if (fchmodat(dir_fd, "..", 01755, 0) < 0) {
                pr_debug("chmod for BPF-fs failed\n");
                ret = -errno;
                goto err_close;
        }

        /* perf_filter directory */
        if (fchmodat(dir_fd, ".", 0755, 0) < 0) {
                pr_debug("chmod for perf_filter directory failed?\n");
                ret = -errno;
                goto err_close;
        }

        /* programs need write permission for some reason */
        if (fchmodat(dir_fd, "perf_sample_filter", 0777, 0) < 0) {
                pr_debug("chmod for perf_sample_filter failed\n");
                ret = -errno;
        }
        /* maps */
        if (fchmodat(dir_fd, "filters", 0666, 0) < 0) {
                pr_debug("chmod for filters failed\n");
                ret = -errno;
        }
        if (fchmodat(dir_fd, "event_hash", 0666, 0) < 0) {
                pr_debug("chmod for event_hash failed\n");
                ret = -errno;
        }
        if (fchmodat(dir_fd, "idx_hash", 0666, 0) < 0) {
                pr_debug("chmod for idx_hash failed\n");
                ret = -errno;
        }
        if (fchmodat(dir_fd, "dropped", 0666, 0) < 0) {
                pr_debug("chmod for dropped failed\n");
                ret = -errno;
        }

err_close:
        close(dir_fd);

err:
        free(path);
        sample_filter_bpf__destroy(skel);
        return ret;
}

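/* Counterpart of perf_bpf_filter__pin(): unpin the filter objects from the BPF filesystem. */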
int perf_bpf_filter__unpin(void)
{
        struct sample_filter_bpf *skel;
        char *path = NULL;
        int ret = -1;

        skel = sample_filter_bpf__open_and_load();
        if (!skel) {
                ret = -errno;
                pr_err("Failed to open perf sample-filter BPF skeleton\n");
                goto err;
        }

        if (asprintf(&path, "%s/fs/bpf/%s", sysfs__mountpoint(),
                     PERF_BPF_FILTER_PIN_PATH) < 0) {
                ret = -errno;
                pr_err("Failed to allocate pathname in the BPF-fs\n");
                goto err;
        }

        ret = bpf_object__unpin(skel->obj, path);

err:
        free(path);
        sample_filter_bpf__destroy(skel);
        return ret;
}

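/* Open a pinned object (map or program) by name under the PERF_BPF_FILTER_PIN_PATH directory. */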
static int get_pinned_fd(const char *name)
{
        char *path = NULL;
        int fd;

        if (asprintf(&path, "%s/fs/bpf/%s/%s", sysfs__mountpoint(),
                     PERF_BPF_FILTER_PIN_PATH, name) < 0)
                return -1;

        fd = bpf_obj_get(path);

        free(path);
        return fd;
}