xref: /linux/tools/bpf/bpftool/common.c (revision 5e3992fe72748ed3892be876f09d4d990548b7af)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
3 
4 #ifndef _GNU_SOURCE
5 #define _GNU_SOURCE
6 #endif
7 #include <ctype.h>
8 #include <errno.h>
9 #include <fcntl.h>
10 #include <ftw.h>
11 #include <libgen.h>
12 #include <mntent.h>
13 #include <stdbool.h>
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <string.h>
17 #include <unistd.h>
18 #include <net/if.h>
19 #include <sys/mount.h>
20 #include <sys/resource.h>
21 #include <sys/stat.h>
22 #include <sys/vfs.h>
23 
24 #include <linux/filter.h>
25 #include <linux/limits.h>
26 #include <linux/magic.h>
27 #include <linux/unistd.h>
28 
29 #include <bpf/bpf.h>
30 #include <bpf/hashmap.h>
31 #include <bpf/libbpf.h> /* libbpf_num_possible_cpus */
32 #include <bpf/btf.h>
33 
34 #include "main.h"
35 
36 #ifndef BPF_FS_MAGIC
37 #define BPF_FS_MAGIC		0xcafe4a11
38 #endif
39 
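/* Print an error message. In JSON mode, emit it on the JSON stream as an
 * object with a single "error" attribute; otherwise, print it to stderr
 * with an "Error: " prefix and a trailing newline.
 */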
40 void p_err(const char *fmt, ...)
41 {
42 	va_list ap;
43 
44 	va_start(ap, fmt);
45 	if (json_output) {
46 		jsonw_start_object(json_wtr);
47 		jsonw_name(json_wtr, "error");
48 		jsonw_vprintf_enquote(json_wtr, fmt, ap);
49 		jsonw_end_object(json_wtr);
50 	} else {
51 		fprintf(stderr, "Error: ");
52 		vfprintf(stderr, fmt, ap);
53 		fprintf(stderr, "\n");
54 	}
55 	va_end(ap);
56 }
57 
58 void p_info(const char *fmt, ...)
59 {
60 	va_list ap;
61 
62 	if (json_output)
63 		return;
64 
65 	va_start(ap, fmt);
66 	vfprintf(stderr, fmt, ap);
67 	fprintf(stderr, "\n");
68 	va_end(ap);
69 }
70 
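/* Return true if the given path sits on a BPF virtual file system (bpffs)
 * mount, based on the f_type reported by statfs().
 */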
71 static bool is_bpffs(char *path)
72 {
73 	struct statfs st_fs;
74 
75 	if (statfs(path, &st_fs) < 0)
76 		return false;
77 
78 	return (unsigned long)st_fs.f_type == BPF_FS_MAGIC;
79 }
80 
81 /* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to
82  * memcg-based memory accounting for BPF maps and programs. This was done in
83  * commit 97306be45fbe ("Merge branch 'switch to memcg-based memory
84  * accounting'"), in Linux 5.11.
85  *
86  * Libbpf also offers to probe for memcg-based accounting vs rlimit, but does
87  * so by checking for the availability of a given BPF helper and this has
88  * failed on some kernels with backports in the past, see commit 6b4384ff1088
89  * ("Revert "bpftool: Use libbpf 1.0 API mode instead of RLIMIT_MEMLOCK"").
90  * Instead, we can probe by lowering the process-based rlimit to 0, trying to
91  * load a BPF object, and resetting the rlimit. If the load succeeds then
92  * memcg-based accounting is supported.
93  *
94  * This would be too dangerous to do in the library, because multithreaded
95  * applications might attempt to load items while the rlimit is at 0. Given
96  * that bpftool is single-threaded, this is fine to do here.
97  */
98 static bool known_to_need_rlimit(void)
99 {
100 	struct rlimit rlim_init, rlim_cur_zero = {};
101 	struct bpf_insn insns[] = {
102 		BPF_MOV64_IMM(BPF_REG_0, 0),
103 		BPF_EXIT_INSN(),
104 	};
105 	size_t insn_cnt = ARRAY_SIZE(insns);
106 	union bpf_attr attr;
107 	int prog_fd, err;
108 
109 	memset(&attr, 0, sizeof(attr));
110 	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
111 	attr.insns = ptr_to_u64(insns);
112 	attr.insn_cnt = insn_cnt;
113 	attr.license = ptr_to_u64("GPL");
114 
115 	if (getrlimit(RLIMIT_MEMLOCK, &rlim_init))
116 		return false;
117 
118 	/* Drop the soft limit to zero. We keep the hard limit at its

119 	 * current value, because lowering it would be a permanent operation
120 	 * for unprivileged users.
121 	 */
122 	rlim_cur_zero.rlim_max = rlim_init.rlim_max;
123 	if (setrlimit(RLIMIT_MEMLOCK, &rlim_cur_zero))
124 		return false;
125 
126 	/* Do not use bpf_prog_load() from libbpf here, because it calls
127 	 * bump_rlimit_memlock(), interfering with the current probe.
128 	 */
129 	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
130 	err = errno;
131 
132 	/* reset soft rlimit to its initial value */
133 	setrlimit(RLIMIT_MEMLOCK, &rlim_init);
134 
135 	if (prog_fd < 0)
136 		return err == EPERM;
137 
138 	close(prog_fd);
139 	return false;
140 }
141 
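/* Lift the memlock rlimit to infinity, but only on kernels that still
 * account BPF memory against RLIMIT_MEMLOCK (see the probe above). On
 * memcg-based kernels the limit is left untouched.
 */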
142 void set_max_rlimit(void)
143 {
144 	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
145 
146 	if (known_to_need_rlimit())
147 		setrlimit(RLIMIT_MEMLOCK, &rinf);
148 }
149 
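/* Mount a file system of the given type on target with mode=0700, making
 * the mount point private first (bind-mounting it onto itself if it is not
 * a mount point yet). On failure, write an error message into buff and
 * return -1.
 */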
150 static int
151 mnt_fs(const char *target, const char *type, char *buff, size_t bufflen)
152 {
153 	bool bind_done = false;
154 
155 	while (mount("", target, "none", MS_PRIVATE | MS_REC, NULL)) {
156 		if (errno != EINVAL || bind_done) {
157 			snprintf(buff, bufflen,
158 				 "mount --make-private %s failed: %s",
159 				 target, strerror(errno));
160 			return -1;
161 		}
162 
163 		if (mount(target, target, "none", MS_BIND, NULL)) {
164 			snprintf(buff, bufflen,
165 				 "mount --bind %s %s failed: %s",
166 				 target, target, strerror(errno));
167 			return -1;
168 		}
169 
170 		bind_done = true;
171 	}
172 
173 	if (mount(type, target, type, 0, "mode=0700")) {
174 		snprintf(buff, bufflen, "mount -t %s %s %s failed: %s",
175 			 type, type, target, strerror(errno));
176 		return -1;
177 	}
178 
179 	return 0;
180 }
181 
182 int mount_tracefs(const char *target)
183 {
184 	char err_str[ERR_MAX_LEN];
185 	int err;
186 
187 	err = mnt_fs(target, "tracefs", err_str, ERR_MAX_LEN);
188 	if (err) {
189 		err_str[ERR_MAX_LEN - 1] = '\0';
190 		p_err("can't mount tracefs: %s", err_str);
191 	}
192 
193 	return err;
194 }
195 
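/* Get a file descriptor for the BPF object pinned at path. Unless quiet is
 * set, print an error on failure, with a hint when the path does not
 * reside on a bpffs mount.
 */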
196 int open_obj_pinned(const char *path, bool quiet)
197 {
198 	char *pname;
199 	int fd = -1;
200 
201 	pname = strdup(path);
202 	if (!pname) {
203 		if (!quiet)
204 			p_err("mem alloc failed");
205 		goto out_ret;
206 	}
207 
208 	fd = bpf_obj_get(pname);
209 	if (fd < 0) {
210 		if (!quiet)
211 			p_err("bpf obj get (%s): %s", pname,
212 			      errno == EACCES && !is_bpffs(dirname(pname)) ?
213 			      "directory not in bpf file system (bpffs)" :
214 			      strerror(errno));
215 		goto out_free;
216 	}
217 
218 out_free:
219 	free(pname);
220 out_ret:
221 	return fd;
222 }
223 
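/* Open a pinned object and check that it is of the expected type (prog,
 * map or link); close the fd and fail with an error otherwise.
 */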
224 int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type)
225 {
226 	enum bpf_obj_type type;
227 	int fd;
228 
229 	fd = open_obj_pinned(path, false);
230 	if (fd < 0)
231 		return -1;
232 
233 	type = get_fd_type(fd);
234 	if (type < 0) {
235 		close(fd);
236 		return type;
237 	}
238 	if (type != exp_type) {
239 		p_err("incorrect object type: %s", get_fd_type_name(type));
240 		close(fd);
241 		return -1;
242 	}
243 
244 	return fd;
245 }
246 
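/* Make sure the directory in which "name" is to be pinned is backed by a
 * bpffs instance, mounting one there if needed, unless mounting was
 * disabled with the --nomount option (block_mount).
 */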
247 int mount_bpffs_for_pin(const char *name)
248 {
249 	char err_str[ERR_MAX_LEN];
250 	char *file;
251 	char *dir;
252 	int err = 0;
253 
254 	file = malloc(strlen(name) + 1);
255 	if (!file) {
256 		p_err("mem alloc failed");
257 		return -1;
258 	}
259 
260 	strcpy(file, name);
261 	dir = dirname(file);
262 
263 	if (is_bpffs(dir))
264 		/* nothing to do if already mounted */
265 		goto out_free;
266 
267 	if (block_mount) {
268 		p_err("no BPF file system found, not mounting it due to --nomount option");
269 		err = -1;
270 		goto out_free;
271 	}
272 
273 	err = mnt_fs(dir, "bpf", err_str, ERR_MAX_LEN);
274 	if (err) {
275 		err_str[ERR_MAX_LEN - 1] = '\0';
276 		p_err("can't mount BPF file system to pin the object (%s): %s",
277 		      name, err_str);
278 	}
279 
280 out_free:
281 	free(file);
282 	return err;
283 }
284 
285 int do_pin_fd(int fd, const char *name)
286 {
287 	int err;
288 
289 	err = mount_bpffs_for_pin(name);
290 	if (err)
291 		return err;
292 
293 	err = bpf_obj_pin(fd, name);
294 	if (err)
295 		p_err("can't pin the object (%s): %s", name, strerror(errno));
296 
297 	return err;
298 }
299 
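/* Shared implementation for "pin" subcommands: get an fd for the object
 * through the get_fd() callback, then pin it to the path left in argv.
 */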
300 int do_pin_any(int argc, char **argv, int (*get_fd)(int *, char ***))
301 {
302 	int err;
303 	int fd;
304 
305 	if (!REQ_ARGS(3))
306 		return -EINVAL;
307 
308 	fd = get_fd(&argc, &argv);
309 	if (fd < 0)
310 		return fd;
311 
312 	err = do_pin_fd(fd, *argv);
313 
314 	close(fd);
315 	return err;
316 }
317 
318 const char *get_fd_type_name(enum bpf_obj_type type)
319 {
320 	static const char * const names[] = {
321 		[BPF_OBJ_UNKNOWN]	= "unknown",
322 		[BPF_OBJ_PROG]		= "prog",
323 		[BPF_OBJ_MAP]		= "map",
324 		[BPF_OBJ_LINK]		= "link",
325 	};
326 
327 	if (type < 0 || type >= ARRAY_SIZE(names) || !names[type])
328 		return names[BPF_OBJ_UNKNOWN];
329 
330 	return names[type];
331 }
332 
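/* Copy the program name into name_buff. Since the kernel truncates names
 * stored in bpf_prog_info to BPF_OBJ_NAME_LEN - 1 characters, try to
 * recover the full name from the program's BTF function info when the
 * stored name looks truncated and the output buffer is large enough.
 */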
333 void get_prog_full_name(const struct bpf_prog_info *prog_info, int prog_fd,
334 			char *name_buff, size_t buff_len)
335 {
336 	const char *prog_name = prog_info->name;
337 	const struct btf_type *func_type;
338 	const struct bpf_func_info finfo = {};
339 	struct bpf_prog_info info = {};
340 	__u32 info_len = sizeof(info);
341 	struct btf *prog_btf = NULL;
342 
343 	if (buff_len <= BPF_OBJ_NAME_LEN ||
344 	    strlen(prog_info->name) < BPF_OBJ_NAME_LEN - 1)
345 		goto copy_name;
346 
347 	if (!prog_info->btf_id || prog_info->nr_func_info == 0)
348 		goto copy_name;
349 
350 	info.nr_func_info = 1;
351 	info.func_info_rec_size = prog_info->func_info_rec_size;
352 	if (info.func_info_rec_size > sizeof(finfo))
353 		info.func_info_rec_size = sizeof(finfo);
354 	info.func_info = ptr_to_u64(&finfo);
355 
356 	if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len))
357 		goto copy_name;
358 
359 	prog_btf = btf__load_from_kernel_by_id(info.btf_id);
360 	if (!prog_btf)
361 		goto copy_name;
362 
363 	func_type = btf__type_by_id(prog_btf, finfo.type_id);
364 	if (!func_type || !btf_is_func(func_type))
365 		goto copy_name;
366 
367 	prog_name = btf__name_by_offset(prog_btf, func_type->name_off);
368 
369 copy_name:
370 	snprintf(name_buff, buff_len, "%s", prog_name);
371 
372 	if (prog_btf)
373 		btf__free(prog_btf);
374 }
375 
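/* Determine the type of a BPF object from its fd, by reading the
 * /proc/self/fd/<fd> symlink and looking for the "bpf-map", "bpf-prog" or
 * "bpf-link" markers in its target.
 */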
376 int get_fd_type(int fd)
377 {
378 	char path[PATH_MAX];
379 	char buf[512];
380 	ssize_t n;
381 
382 	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
383 
384 	n = readlink(path, buf, sizeof(buf));
385 	if (n < 0) {
386 		p_err("can't read link type: %s", strerror(errno));
387 		return -1;
388 	}
389 	if (n == sizeof(buf)) {
390 		p_err("can't read link type: path too long!");
391 		return -1;
392 	}
393 
394 	if (strstr(buf, "bpf-map"))
395 		return BPF_OBJ_MAP;
396 	else if (strstr(buf, "bpf-prog"))
397 		return BPF_OBJ_PROG;
398 	else if (strstr(buf, "bpf-link"))
399 		return BPF_OBJ_LINK;
400 
401 	return BPF_OBJ_UNKNOWN;
402 }
403 
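/* Return the value associated with "key" in /proc/self/fdinfo/<fd>, as a
 * newly allocated string that the caller must free(), or NULL if the key
 * is not found.
 */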
404 char *get_fdinfo(int fd, const char *key)
405 {
406 	char path[PATH_MAX];
407 	char *line = NULL;
408 	size_t line_n = 0;
409 	ssize_t n;
410 	FILE *fdi;
411 
412 	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
413 
414 	fdi = fopen(path, "r");
415 	if (!fdi)
416 		return NULL;
417 
418 	while ((n = getline(&line, &line_n, fdi)) > 0) {
419 		char *value;
420 		int len;
421 
422 		if (!strstr(line, key))
423 			continue;
424 
425 		fclose(fdi);
426 
427 		value = strchr(line, '\t');
428 		if (!value || !value[1]) {
429 			free(line);
430 			return NULL;
431 		}
432 		value++;
433 
434 		len = strlen(value);
435 		memmove(line, value, len);
436 		line[len - 1] = '\0';
437 
438 		return line;
439 	}
440 
441 	free(line);
442 	fclose(fdi);
443 	return NULL;
444 }
445 
446 void print_data_json(uint8_t *data, size_t len)
447 {
448 	unsigned int i;
449 
450 	jsonw_start_array(json_wtr);
451 	for (i = 0; i < len; i++)
452 		jsonw_printf(json_wtr, "%d", data[i]);
453 	jsonw_end_array(json_wtr);
454 }
455 
456 void print_hex_data_json(uint8_t *data, size_t len)
457 {
458 	unsigned int i;
459 
460 	jsonw_start_array(json_wtr);
461 	for (i = 0; i < len; i++)
462 		jsonw_printf(json_wtr, "\"0x%02hhx\"", data[i]);
463 	jsonw_end_array(json_wtr);
464 }
465 
466 /* extra params for nftw cb */
467 static struct hashmap *build_fn_table;
468 static enum bpf_obj_type build_fn_type;
469 
470 static int do_build_table_cb(const char *fpath, const struct stat *sb,
471 			     int typeflag, struct FTW *ftwbuf)
472 {
473 	struct bpf_prog_info pinned_info;
474 	__u32 len = sizeof(pinned_info);
475 	enum bpf_obj_type objtype;
476 	int fd, err = 0;
477 	char *path;
478 
479 	if (typeflag != FTW_F)
480 		goto out_ret;
481 
482 	fd = open_obj_pinned(fpath, true);
483 	if (fd < 0)
484 		goto out_ret;
485 
486 	objtype = get_fd_type(fd);
487 	if (objtype != build_fn_type)
488 		goto out_close;
489 
490 	memset(&pinned_info, 0, sizeof(pinned_info));
491 	if (bpf_prog_get_info_by_fd(fd, &pinned_info, &len))
492 		goto out_close;
493 
494 	path = strdup(fpath);
495 	if (!path) {
496 		err = -1;
497 		goto out_close;
498 	}
499 
500 	err = hashmap__append(build_fn_table, pinned_info.id, path);
501 	if (err) {
502 		p_err("failed to append entry to hashmap for ID %u, path '%s': %s",
503 		      pinned_info.id, path, strerror(errno));
504 		free(path);
505 		goto out_close;
506 	}
507 
508 out_close:
509 	close(fd);
510 out_ret:
511 	return err;
512 }
513 
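/* Walk all bpffs mount points listed in /proc/mounts and fill the hashmap
 * with "object ID -> pinned path" entries for every pinned object of the
 * requested type.
 */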
514 int build_pinned_obj_table(struct hashmap *tab,
515 			   enum bpf_obj_type type)
516 {
517 	struct mntent *mntent = NULL;
518 	FILE *mntfile = NULL;
519 	int flags = FTW_PHYS;
520 	int nopenfd = 16;
521 	int err = 0;
522 
523 	mntfile = setmntent("/proc/mounts", "r");
524 	if (!mntfile)
525 		return -1;
526 
527 	build_fn_table = tab;
528 	build_fn_type = type;
529 
530 	while ((mntent = getmntent(mntfile))) {
531 		char *path = mntent->mnt_dir;
532 
533 		if (strncmp(mntent->mnt_type, "bpf", 3) != 0)
534 			continue;
535 		err = nftw(path, do_build_table_cb, nopenfd, flags);
536 		if (err)
537 			break;
538 	}
539 	fclose(mntfile);
540 	return err;
541 }
542 
543 void delete_pinned_obj_table(struct hashmap *map)
544 {
545 	struct hashmap_entry *entry;
546 	size_t bkt;
547 
548 	if (!map)
549 		return;
550 
551 	hashmap__for_each_entry(map, entry, bkt)
552 		free(entry->pvalue);
553 
554 	hashmap__free(map);
555 }
556 
557 unsigned int get_page_size(void)
558 {
559 	static int result;
560 
561 	if (!result)
562 		result = getpagesize();
563 	return result;
564 }
565 
566 unsigned int get_possible_cpus(void)
567 {
568 	int cpus = libbpf_num_possible_cpus();
569 
570 	if (cpus < 0) {
571 		p_err("Can't get # of possible cpus: %s", strerror(-cpus));
572 		exit(-1);
573 	}
574 	return cpus;
575 }
576 
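/* Resolve an ifindex to an interface name, but only if the (ns_dev,
 * ns_ino) pair matches the network namespace bpftool currently runs in.
 */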
577 static char *
578 ifindex_to_name_ns(__u32 ifindex, __u32 ns_dev, __u32 ns_ino, char *buf)
579 {
580 	struct stat st;
581 	int err;
582 
583 	err = stat("/proc/self/ns/net", &st);
584 	if (err) {
585 		p_err("Can't stat /proc/self: %s", strerror(errno));
586 		return NULL;
587 	}
588 
589 	if (st.st_dev != ns_dev || st.st_ino != ns_ino)
590 		return NULL;
591 
592 	return if_indextoname(ifindex, buf);
593 }
594 
595 static int read_sysfs_hex_int(char *path)
596 {
597 	char vendor_id_buf[8];
598 	int len;
599 	int fd;
600 
601 	fd = open(path, O_RDONLY);
602 	if (fd < 0) {
603 		p_err("Can't open %s: %s", path, strerror(errno));
604 		return -1;
605 	}
606 
607 	len = read(fd, vendor_id_buf, sizeof(vendor_id_buf));
608 	close(fd);
609 	if (len < 0) {
610 		p_err("Can't read %s: %s", path, strerror(errno));
611 		return -1;
612 	}
613 	if (len >= (int)sizeof(vendor_id_buf)) {
614 		p_err("Value in %s too long", path);
615 		return -1;
616 	}
617 
618 	vendor_id_buf[len] = 0;
619 
620 	return strtol(vendor_id_buf, NULL, 0);
621 }
622 
623 static int read_sysfs_netdev_hex_int(char *devname, const char *entry_name)
624 {
625 	char full_path[64];
626 
627 	snprintf(full_path, sizeof(full_path), "/sys/class/net/%s/device/%s",
628 		 devname, entry_name);
629 
630 	return read_sysfs_hex_int(full_path);
631 }
632 
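/* For an offload device, translate the PCI vendor ID found in sysfs into
 * the architecture string (and disassembler options) used for JIT
 * disassembly. Only Netronome NFP devices (vendor 0x19ee) are handled,
 * and only when libbfd support is compiled in.
 */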
633 const char *
634 ifindex_to_arch(__u32 ifindex, __u64 ns_dev, __u64 ns_ino, const char **opt)
635 {
636 	__maybe_unused int device_id;
637 	char devname[IF_NAMESIZE];
638 	int vendor_id;
639 
640 	if (!ifindex_to_name_ns(ifindex, ns_dev, ns_ino, devname)) {
641 		p_err("Can't get net device name for ifindex %d: %s", ifindex,
642 		      strerror(errno));
643 		return NULL;
644 	}
645 
646 	vendor_id = read_sysfs_netdev_hex_int(devname, "vendor");
647 	if (vendor_id < 0) {
648 		p_err("Can't get device vendor id for %s", devname);
649 		return NULL;
650 	}
651 
652 	switch (vendor_id) {
653 #ifdef HAVE_LIBBFD_SUPPORT
654 	case 0x19ee:
655 		device_id = read_sysfs_netdev_hex_int(devname, "device");
656 		if (device_id != 0x4000 &&
657 		    device_id != 0x6000 &&
658 		    device_id != 0x6003)
659 			p_info("Unknown NFP device ID, assuming it is NFP-6xxx arch");
660 		*opt = "ctx4";
661 		return "NFP-6xxx";
662 #endif /* HAVE_LIBBFD_SUPPORT */
663 	/* No NFP support in LLVM, we have no valid triple to return. */
664 	default:
665 		p_err("Can't get arch name for device vendor id 0x%04x",
666 		      vendor_id);
667 		return NULL;
668 	}
669 }
670 
671 void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
672 {
673 	char name[IF_NAMESIZE];
674 
675 	if (!ifindex)
676 		return;
677 
678 	printf("  offloaded_to ");
679 	if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
680 		printf("%s", name);
681 	else
682 		printf("ifindex %u ns_dev %llu ns_ino %llu",
683 		       ifindex, ns_dev, ns_inode);
684 }
685 
686 void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
687 {
688 	char name[IF_NAMESIZE];
689 
690 	if (!ifindex)
691 		return;
692 
693 	jsonw_name(json_wtr, "dev");
694 	jsonw_start_object(json_wtr);
695 	jsonw_uint_field(json_wtr, "ifindex", ifindex);
696 	jsonw_uint_field(json_wtr, "ns_dev", ns_dev);
697 	jsonw_uint_field(json_wtr, "ns_inode", ns_inode);
698 	if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
699 		jsonw_string_field(json_wtr, "ifname", name);
700 	jsonw_end_object(json_wtr);
701 }
702 
703 int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what)
704 {
705 	char *endptr;
706 
707 	NEXT_ARGP();
708 
709 	if (*val) {
710 		p_err("%s already specified", what);
711 		return -1;
712 	}
713 
714 	*val = strtoul(**argv, &endptr, 0);
715 	if (*endptr) {
716 		p_err("can't parse %s as %s", **argv, what);
717 		return -1;
718 	}
719 	NEXT_ARGP();
720 
721 	return 0;
722 }
723 
724 int __printf(2, 0)
725 print_all_levels(__maybe_unused enum libbpf_print_level level,
726 		 const char *format, va_list args)
727 {
728 	return vfprintf(stderr, format, args);
729 }
730 
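/* Collect fds for all loaded programs whose name (or tag, if "tag" is
 * true) matches nametag. *fds must point to a malloc()ed array with room
 * for one entry; it is realloc()ed as more matches are found. Return the
 * number of matches, or -1 on error.
 */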
731 static int prog_fd_by_nametag(void *nametag, int **fds, bool tag)
732 {
733 	char prog_name[MAX_PROG_FULL_NAME];
734 	unsigned int id = 0;
735 	int fd, nb_fds = 0;
736 	void *tmp;
737 	int err;
738 
739 	while (true) {
740 		struct bpf_prog_info info = {};
741 		__u32 len = sizeof(info);
742 
743 		err = bpf_prog_get_next_id(id, &id);
744 		if (err) {
745 			if (errno != ENOENT) {
746 				p_err("%s", strerror(errno));
747 				goto err_close_fds;
748 			}
749 			return nb_fds;
750 		}
751 
752 		fd = bpf_prog_get_fd_by_id(id);
753 		if (fd < 0) {
754 			p_err("can't get prog by id (%u): %s",
755 			      id, strerror(errno));
756 			goto err_close_fds;
757 		}
758 
759 		err = bpf_prog_get_info_by_fd(fd, &info, &len);
760 		if (err) {
761 			p_err("can't get prog info (%u): %s",
762 			      id, strerror(errno));
763 			goto err_close_fd;
764 		}
765 
766 		if (tag && memcmp(nametag, info.tag, BPF_TAG_SIZE)) {
767 			close(fd);
768 			continue;
769 		}
770 
771 		if (!tag) {
772 			get_prog_full_name(&info, fd, prog_name,
773 					   sizeof(prog_name));
774 			if (strncmp(nametag, prog_name, sizeof(prog_name))) {
775 				close(fd);
776 				continue;
777 			}
778 		}
779 
780 		if (nb_fds > 0) {
781 			tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
782 			if (!tmp) {
783 				p_err("failed to realloc");
784 				goto err_close_fd;
785 			}
786 			*fds = tmp;
787 		}
788 		(*fds)[nb_fds++] = fd;
789 	}
790 
791 err_close_fd:
792 	close(fd);
793 err_close_fds:
794 	while (--nb_fds >= 0)
795 		close((*fds)[nb_fds]);
796 	return -1;
797 }
798 
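/* Parse a program reference from the command line, one of:
 *   id ID | tag TAG | name NAME | pinned PATH
 * and store the corresponding fd(s) in *fds. "tag" and "name" may match
 * several programs; return the number of fds, or -1 on error.
 */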
799 int prog_parse_fds(int *argc, char ***argv, int **fds)
800 {
801 	if (is_prefix(**argv, "id")) {
802 		unsigned int id;
803 		char *endptr;
804 
805 		NEXT_ARGP();
806 
807 		id = strtoul(**argv, &endptr, 0);
808 		if (*endptr) {
809 			p_err("can't parse %s as ID", **argv);
810 			return -1;
811 		}
812 		NEXT_ARGP();
813 
814 		(*fds)[0] = bpf_prog_get_fd_by_id(id);
815 		if ((*fds)[0] < 0) {
816 			p_err("get by id (%u): %s", id, strerror(errno));
817 			return -1;
818 		}
819 		return 1;
820 	} else if (is_prefix(**argv, "tag")) {
821 		unsigned char tag[BPF_TAG_SIZE];
822 
823 		NEXT_ARGP();
824 
825 		if (sscanf(**argv, BPF_TAG_FMT, tag, tag + 1, tag + 2,
826 			   tag + 3, tag + 4, tag + 5, tag + 6, tag + 7)
827 		    != BPF_TAG_SIZE) {
828 			p_err("can't parse tag");
829 			return -1;
830 		}
831 		NEXT_ARGP();
832 
833 		return prog_fd_by_nametag(tag, fds, true);
834 	} else if (is_prefix(**argv, "name")) {
835 		char *name;
836 
837 		NEXT_ARGP();
838 
839 		name = **argv;
840 		if (strlen(name) > MAX_PROG_FULL_NAME - 1) {
841 			p_err("can't parse name");
842 			return -1;
843 		}
844 		NEXT_ARGP();
845 
846 		return prog_fd_by_nametag(name, fds, false);
847 	} else if (is_prefix(**argv, "pinned")) {
848 		char *path;
849 
850 		NEXT_ARGP();
851 
852 		path = **argv;
853 		NEXT_ARGP();
854 
855 		(*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_PROG);
856 		if ((*fds)[0] < 0)
857 			return -1;
858 		return 1;
859 	}
860 
861 	p_err("expected 'id', 'tag', 'name' or 'pinned', got: '%s'?", **argv);
862 	return -1;
863 }
864 
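/* Same as prog_parse_fds(), but require the reference to match exactly one
 * program. Return its fd, or -1 on error or if several programs match.
 */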
865 int prog_parse_fd(int *argc, char ***argv)
866 {
867 	int *fds = NULL;
868 	int nb_fds, fd;
869 
870 	fds = malloc(sizeof(int));
871 	if (!fds) {
872 		p_err("mem alloc failed");
873 		return -1;
874 	}
875 	nb_fds = prog_parse_fds(argc, argv, &fds);
876 	if (nb_fds != 1) {
877 		if (nb_fds > 1) {
878 			p_err("several programs match this handle");
879 			while (nb_fds--)
880 				close(fds[nb_fds]);
881 		}
882 		fd = -1;
883 		goto exit_free;
884 	}
885 
886 	fd = fds[0];
887 exit_free:
888 	free(fds);
889 	return fd;
890 }
891 
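/* Map counterpart of prog_fd_by_nametag(): collect fds for all maps whose
 * name matches, growing *fds as needed. Return the number of matches, or
 * -1 on error.
 */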
892 static int map_fd_by_name(char *name, int **fds)
893 {
894 	unsigned int id = 0;
895 	int fd, nb_fds = 0;
896 	void *tmp;
897 	int err;
898 
899 	while (true) {
900 		struct bpf_map_info info = {};
901 		__u32 len = sizeof(info);
902 
903 		err = bpf_map_get_next_id(id, &id);
904 		if (err) {
905 			if (errno != ENOENT) {
906 				p_err("%s", strerror(errno));
907 				goto err_close_fds;
908 			}
909 			return nb_fds;
910 		}
911 
912 		fd = bpf_map_get_fd_by_id(id);
913 		if (fd < 0) {
914 			p_err("can't get map by id (%u): %s",
915 			      id, strerror(errno));
916 			goto err_close_fds;
917 		}
918 
919 		err = bpf_map_get_info_by_fd(fd, &info, &len);
920 		if (err) {
921 			p_err("can't get map info (%u): %s",
922 			      id, strerror(errno));
923 			goto err_close_fd;
924 		}
925 
926 		if (strncmp(name, info.name, BPF_OBJ_NAME_LEN)) {
927 			close(fd);
928 			continue;
929 		}
930 
931 		if (nb_fds > 0) {
932 			tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
933 			if (!tmp) {
934 				p_err("failed to realloc");
935 				goto err_close_fd;
936 			}
937 			*fds = tmp;
938 		}
939 		(*fds)[nb_fds++] = fd;
940 	}
941 
942 err_close_fd:
943 	close(fd);
944 err_close_fds:
945 	while (--nb_fds >= 0)
946 		close((*fds)[nb_fds]);
947 	return -1;
948 }
949 
950 int map_parse_fds(int *argc, char ***argv, int **fds)
951 {
952 	if (is_prefix(**argv, "id")) {
953 		unsigned int id;
954 		char *endptr;
955 
956 		NEXT_ARGP();
957 
958 		id = strtoul(**argv, &endptr, 0);
959 		if (*endptr) {
960 			p_err("can't parse %s as ID", **argv);
961 			return -1;
962 		}
963 		NEXT_ARGP();
964 
965 		(*fds)[0] = bpf_map_get_fd_by_id(id);
966 		if ((*fds)[0] < 0) {
967 			p_err("get map by id (%u): %s", id, strerror(errno));
968 			return -1;
969 		}
970 		return 1;
971 	} else if (is_prefix(**argv, "name")) {
972 		char *name;
973 
974 		NEXT_ARGP();
975 
976 		name = **argv;
977 		if (strlen(name) > BPF_OBJ_NAME_LEN - 1) {
978 			p_err("can't parse name");
979 			return -1;
980 		}
981 		NEXT_ARGP();
982 
983 		return map_fd_by_name(name, fds);
984 	} else if (is_prefix(**argv, "pinned")) {
985 		char *path;
986 
987 		NEXT_ARGP();
988 
989 		path = **argv;
990 		NEXT_ARGP();
991 
992 		(*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_MAP);
993 		if ((*fds)[0] < 0)
994 			return -1;
995 		return 1;
996 	}
997 
998 	p_err("expected 'id', 'name' or 'pinned', got: '%s'?", **argv);
999 	return -1;
1000 }
1001 
1002 int map_parse_fd(int *argc, char ***argv)
1003 {
1004 	int *fds = NULL;
1005 	int nb_fds, fd;
1006 
1007 	fds = malloc(sizeof(int));
1008 	if (!fds) {
1009 		p_err("mem alloc failed");
1010 		return -1;
1011 	}
1012 	nb_fds = map_parse_fds(argc, argv, &fds);
1013 	if (nb_fds != 1) {
1014 		if (nb_fds > 1) {
1015 			p_err("several maps match this handle");
1016 			while (nb_fds--)
1017 				close(fds[nb_fds]);
1018 		}
1019 		fd = -1;
1020 		goto exit_free;
1021 	}
1022 
1023 	fd = fds[0];
1024 exit_free:
1025 	free(fds);
1026 	return fd;
1027 }
1028 
1029 int map_parse_fd_and_info(int *argc, char ***argv, struct bpf_map_info *info,
1030 			  __u32 *info_len)
1031 {
1032 	int err;
1033 	int fd;
1034 
1035 	fd = map_parse_fd(argc, argv);
1036 	if (fd < 0)
1037 		return -1;
1038 
1039 	err = bpf_map_get_info_by_fd(fd, info, info_len);
1040 	if (err) {
1041 		p_err("can't get map info: %s", strerror(errno));
1042 		close(fd);
1043 		return err;
1044 	}
1045 
1046 	return fd;
1047 }
1048 
1049 size_t hash_fn_for_key_as_id(long key, void *ctx)
1050 {
1051 	return key;
1052 }
1053 
1054 bool equal_fn_for_key_as_id(long k1, long k2, void *ctx)
1055 {
1056 	return k1 == k2;
1057 }
1058 
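/* Return the string bpftool expects on the command line for a given attach
 * type (e.g. for cgroup commands). Types without a bpftool-specific alias
 * fall back to libbpf's canonical name.
 */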
1059 const char *bpf_attach_type_input_str(enum bpf_attach_type t)
1060 {
1061 	switch (t) {
1062 	case BPF_CGROUP_INET_INGRESS:		return "ingress";
1063 	case BPF_CGROUP_INET_EGRESS:		return "egress";
1064 	case BPF_CGROUP_INET_SOCK_CREATE:	return "sock_create";
1065 	case BPF_CGROUP_INET_SOCK_RELEASE:	return "sock_release";
1066 	case BPF_CGROUP_SOCK_OPS:		return "sock_ops";
1067 	case BPF_CGROUP_DEVICE:			return "device";
1068 	case BPF_CGROUP_INET4_BIND:		return "bind4";
1069 	case BPF_CGROUP_INET6_BIND:		return "bind6";
1070 	case BPF_CGROUP_INET4_CONNECT:		return "connect4";
1071 	case BPF_CGROUP_INET6_CONNECT:		return "connect6";
1072 	case BPF_CGROUP_INET4_POST_BIND:	return "post_bind4";
1073 	case BPF_CGROUP_INET6_POST_BIND:	return "post_bind6";
1074 	case BPF_CGROUP_INET4_GETPEERNAME:	return "getpeername4";
1075 	case BPF_CGROUP_INET6_GETPEERNAME:	return "getpeername6";
1076 	case BPF_CGROUP_INET4_GETSOCKNAME:	return "getsockname4";
1077 	case BPF_CGROUP_INET6_GETSOCKNAME:	return "getsockname6";
1078 	case BPF_CGROUP_UDP4_SENDMSG:		return "sendmsg4";
1079 	case BPF_CGROUP_UDP6_SENDMSG:		return "sendmsg6";
1080 	case BPF_CGROUP_SYSCTL:			return "sysctl";
1081 	case BPF_CGROUP_UDP4_RECVMSG:		return "recvmsg4";
1082 	case BPF_CGROUP_UDP6_RECVMSG:		return "recvmsg6";
1083 	case BPF_CGROUP_GETSOCKOPT:		return "getsockopt";
1084 	case BPF_CGROUP_SETSOCKOPT:		return "setsockopt";
1085 	case BPF_TRACE_RAW_TP:			return "raw_tp";
1086 	case BPF_TRACE_FENTRY:			return "fentry";
1087 	case BPF_TRACE_FEXIT:			return "fexit";
1088 	case BPF_MODIFY_RETURN:			return "mod_ret";
1089 	case BPF_SK_REUSEPORT_SELECT:		return "sk_skb_reuseport_select";
1090 	case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE:	return "sk_skb_reuseport_select_or_migrate";
1091 	default:	return libbpf_bpf_attach_type_str(t);
1092 	}
1093 }
1094 
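/* Concatenate path and name into buf as "path/name". Return 0 on success,
 * -EINVAL on output error, or -ENAMETOOLONG if the result would not fit in
 * buf_sz bytes.
 */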
1095 int pathname_concat(char *buf, int buf_sz, const char *path,
1096 		    const char *name)
1097 {
1098 	int len;
1099 
1100 	len = snprintf(buf, buf_sz, "%s/%s", path, name);
1101 	if (len < 0)
1102 		return -EINVAL;
1103 	if (len >= buf_sz)
1104 		return -ENAMETOOLONG;
1105 
1106 	return 0;
1107 }
1108