xref: /linux/tools/bpf/bpftool/link.c (revision d9104cec3e8fe4b458b74709853231385779001f)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2020 Facebook */
3 
4 #include <errno.h>
5 #include <linux/err.h>
6 #include <linux/netfilter.h>
7 #include <linux/netfilter_arp.h>
8 #include <linux/perf_event.h>
9 #include <net/if.h>
10 #include <stdio.h>
11 #include <unistd.h>
12 
13 #include <bpf/bpf.h>
14 #include <bpf/hashmap.h>
15 
16 #include "json_writer.h"
17 #include "main.h"
18 #include "xlated_dumper.h"
19 
20 #define PERF_HW_CACHE_LEN 128
21 
22 static struct hashmap *link_table;
23 static struct dump_data dd;
24 
25 static const char *perf_type_name[PERF_TYPE_MAX] = {
26 	[PERF_TYPE_HARDWARE]			= "hardware",
27 	[PERF_TYPE_SOFTWARE]			= "software",
28 	[PERF_TYPE_TRACEPOINT]			= "tracepoint",
29 	[PERF_TYPE_HW_CACHE]			= "hw-cache",
30 	[PERF_TYPE_RAW]				= "raw",
31 	[PERF_TYPE_BREAKPOINT]			= "breakpoint",
32 };
33 
34 const char *event_symbols_hw[PERF_COUNT_HW_MAX] = {
35 	[PERF_COUNT_HW_CPU_CYCLES]		= "cpu-cycles",
36 	[PERF_COUNT_HW_INSTRUCTIONS]		= "instructions",
37 	[PERF_COUNT_HW_CACHE_REFERENCES]	= "cache-references",
38 	[PERF_COUNT_HW_CACHE_MISSES]		= "cache-misses",
39 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= "branch-instructions",
40 	[PERF_COUNT_HW_BRANCH_MISSES]		= "branch-misses",
41 	[PERF_COUNT_HW_BUS_CYCLES]		= "bus-cycles",
42 	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= "stalled-cycles-frontend",
43 	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= "stalled-cycles-backend",
44 	[PERF_COUNT_HW_REF_CPU_CYCLES]		= "ref-cycles",
45 };
46 
47 const char *event_symbols_sw[PERF_COUNT_SW_MAX] = {
48 	[PERF_COUNT_SW_CPU_CLOCK]		= "cpu-clock",
49 	[PERF_COUNT_SW_TASK_CLOCK]		= "task-clock",
50 	[PERF_COUNT_SW_PAGE_FAULTS]		= "page-faults",
51 	[PERF_COUNT_SW_CONTEXT_SWITCHES]	= "context-switches",
52 	[PERF_COUNT_SW_CPU_MIGRATIONS]		= "cpu-migrations",
53 	[PERF_COUNT_SW_PAGE_FAULTS_MIN]		= "minor-faults",
54 	[PERF_COUNT_SW_PAGE_FAULTS_MAJ]		= "major-faults",
55 	[PERF_COUNT_SW_ALIGNMENT_FAULTS]	= "alignment-faults",
56 	[PERF_COUNT_SW_EMULATION_FAULTS]	= "emulation-faults",
57 	[PERF_COUNT_SW_DUMMY]			= "dummy",
58 	[PERF_COUNT_SW_BPF_OUTPUT]		= "bpf-output",
59 	[PERF_COUNT_SW_CGROUP_SWITCHES]		= "cgroup-switches",
60 };
61 
62 const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX] = {
63 	[PERF_COUNT_HW_CACHE_L1D]		= "L1-dcache",
64 	[PERF_COUNT_HW_CACHE_L1I]		= "L1-icache",
65 	[PERF_COUNT_HW_CACHE_LL]		= "LLC",
66 	[PERF_COUNT_HW_CACHE_DTLB]		= "dTLB",
67 	[PERF_COUNT_HW_CACHE_ITLB]		= "iTLB",
68 	[PERF_COUNT_HW_CACHE_BPU]		= "branch",
69 	[PERF_COUNT_HW_CACHE_NODE]		= "node",
70 };
71 
72 const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX] = {
73 	[PERF_COUNT_HW_CACHE_OP_READ]		= "load",
74 	[PERF_COUNT_HW_CACHE_OP_WRITE]		= "store",
75 	[PERF_COUNT_HW_CACHE_OP_PREFETCH]	= "prefetch",
76 };
77 
78 const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
79 	[PERF_COUNT_HW_CACHE_RESULT_ACCESS]	= "refs",
80 	[PERF_COUNT_HW_CACHE_RESULT_MISS]	= "misses",
81 };
82 
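/*
 * Look up the symbolic name for @id in @array, or evaluate to NULL when
 * @id is out of range. A minimal usage sketch (names as defined above):
 *
 *	const char *name = perf_event_name(event_symbols_hw,
 *					   PERF_COUNT_HW_CPU_CYCLES);
 *	(name is "cpu-cycles" here, NULL for an out-of-range id)
 */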
83 #define perf_event_name(array, id) ({			\
84 	const char *event_str = NULL;			\
85 							\
86 	if ((id) < ARRAY_SIZE(array))			\
87 		event_str = array[id];			\
88 	event_str;					\
89 })
90 
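/*
 * Parse a link specifier from the command line and return a link FD that
 * the caller must close. A sketch of the two accepted forms (the ID and
 * pin path are illustrative only):
 *
 *	# bpftool link show id 42
 *	# bpftool link show pinned /sys/fs/bpf/mylink
 */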
91 static int link_parse_fd(int *argc, char ***argv)
92 {
93 	int fd;
94 
95 	if (is_prefix(**argv, "id")) {
96 		unsigned int id;
97 		char *endptr;
98 
99 		NEXT_ARGP();
100 
101 		id = strtoul(**argv, &endptr, 0);
102 		if (*endptr) {
103 			p_err("can't parse %s as ID", **argv);
104 			return -1;
105 		}
106 		NEXT_ARGP();
107 
108 		fd = bpf_link_get_fd_by_id(id);
109 		if (fd < 0)
110 			p_err("failed to get link with ID %u: %s", id, strerror(errno));
111 		return fd;
112 	} else if (is_prefix(**argv, "pinned")) {
113 		char *path;
114 
115 		NEXT_ARGP();
116 
117 		path = **argv;
118 		NEXT_ARGP();
119 
120 		return open_obj_pinned_any(path, BPF_OBJ_LINK, NULL);
121 	}
122 
123 	p_err("expected 'id' or 'pinned', got: '%s'?", **argv);
124 	return -1;
125 }
126 
127 static void
128 show_link_header_json(struct bpf_link_info *info, json_writer_t *wtr)
129 {
130 	const char *link_type_str;
131 
132 	jsonw_uint_field(wtr, "id", info->id);
133 	link_type_str = libbpf_bpf_link_type_str(info->type);
134 	if (link_type_str)
135 		jsonw_string_field(wtr, "type", link_type_str);
136 	else
137 		jsonw_uint_field(wtr, "type", info->type);
138 
139 	jsonw_uint_field(json_wtr, "prog_id", info->prog_id);
140 }
141 
142 static void show_link_attach_type_json(__u32 attach_type, json_writer_t *wtr)
143 {
144 	const char *attach_type_str;
145 
146 	attach_type_str = libbpf_bpf_attach_type_str(attach_type);
147 	if (attach_type_str)
148 		jsonw_string_field(wtr, "attach_type", attach_type_str);
149 	else
150 		jsonw_uint_field(wtr, "attach_type", attach_type);
151 }
152 
153 static void show_link_ifindex_json(__u32 ifindex, json_writer_t *wtr)
154 {
155 	char devname[IF_NAMESIZE] = "(unknown)";
156 
157 	if (ifindex)
158 		if_indextoname(ifindex, devname);
159 	else
160 		snprintf(devname, sizeof(devname), "(detached)");
161 	jsonw_string_field(wtr, "devname", devname);
162 	jsonw_uint_field(wtr, "ifindex", ifindex);
163 }
164 
165 static bool is_iter_map_target(const char *target_name)
166 {
167 	return strcmp(target_name, "bpf_map_elem") == 0 ||
168 	       strcmp(target_name, "bpf_sk_storage_map") == 0;
169 }
170 
171 static bool is_iter_cgroup_target(const char *target_name)
172 {
173 	return strcmp(target_name, "cgroup") == 0;
174 }
175 
176 static const char *cgroup_order_string(__u32 order)
177 {
178 	switch (order) {
179 	case BPF_CGROUP_ITER_ORDER_UNSPEC:
180 		return "order_unspec";
181 	case BPF_CGROUP_ITER_SELF_ONLY:
182 		return "self_only";
183 	case BPF_CGROUP_ITER_DESCENDANTS_PRE:
184 		return "descendants_pre";
185 	case BPF_CGROUP_ITER_DESCENDANTS_POST:
186 		return "descendants_post";
187 	case BPF_CGROUP_ITER_ANCESTORS_UP:
188 		return "ancestors_up";
189 	default: /* won't happen */
190 		return "unknown";
191 	}
192 }
193 
194 static bool is_iter_task_target(const char *target_name)
195 {
196 	return strcmp(target_name, "task") == 0 ||
197 		strcmp(target_name, "task_file") == 0 ||
198 		strcmp(target_name, "task_vma") == 0;
199 }
200 
201 static void show_iter_json(struct bpf_link_info *info, json_writer_t *wtr)
202 {
203 	const char *target_name = u64_to_ptr(info->iter.target_name);
204 
205 	jsonw_string_field(wtr, "target_name", target_name);
206 
207 	if (is_iter_map_target(target_name))
208 		jsonw_uint_field(wtr, "map_id", info->iter.map.map_id);
209 	else if (is_iter_task_target(target_name)) {
210 		if (info->iter.task.tid)
211 			jsonw_uint_field(wtr, "tid", info->iter.task.tid);
212 		else if (info->iter.task.pid)
213 			jsonw_uint_field(wtr, "pid", info->iter.task.pid);
214 	}
215 
216 	if (is_iter_cgroup_target(target_name)) {
217 		jsonw_lluint_field(wtr, "cgroup_id", info->iter.cgroup.cgroup_id);
218 		jsonw_string_field(wtr, "order",
219 				   cgroup_order_string(info->iter.cgroup.order));
220 	}
221 }
222 
223 void netfilter_dump_json(const struct bpf_link_info *info, json_writer_t *wtr)
224 {
225 	jsonw_uint_field(json_wtr, "pf",
226 			 info->netfilter.pf);
227 	jsonw_uint_field(json_wtr, "hook",
228 			 info->netfilter.hooknum);
229 	jsonw_int_field(json_wtr, "prio",
230 			 info->netfilter.priority);
231 	jsonw_uint_field(json_wtr, "flags",
232 			 info->netfilter.flags);
233 }
234 
235 static int get_prog_info(int prog_id, struct bpf_prog_info *info)
236 {
237 	__u32 len = sizeof(*info);
238 	int err, prog_fd;
239 
240 	prog_fd = bpf_prog_get_fd_by_id(prog_id);
241 	if (prog_fd < 0)
242 		return prog_fd;
243 
244 	memset(info, 0, sizeof(*info));
245 	err = bpf_prog_get_info_by_fd(prog_fd, info, &len);
246 	if (err)
247 		p_err("can't get prog info: %s", strerror(errno));
248 	close(prog_fd);
249 	return err;
250 }
251 
252 struct addr_cookie {
253 	__u64 addr;
254 	__u64 cookie;
255 };
256 
257 static int cmp_addr_cookie(const void *A, const void *B)
258 {
259 	const struct addr_cookie *a = A, *b = B;
260 
261 	if (a->addr == b->addr)
262 		return 0;
263 	return a->addr < b->addr ? -1 : 1;
264 }
265 
266 static struct addr_cookie *
267 get_addr_cookie_array(__u64 *addrs, __u64 *cookies, __u32 count)
268 {
269 	struct addr_cookie *data;
270 	__u32 i;
271 
272 	data = calloc(count, sizeof(data[0]));
273 	if (!data) {
274 		p_err("mem alloc failed");
275 		return NULL;
276 	}
277 	for (i = 0; i < count; i++) {
278 		data[i].addr = addrs[i];
279 		data[i].cookie = cookies[i];
280 	}
281 	qsort(data, count, sizeof(data[0]), cmp_addr_cookie);
282 	return data;
283 }
284 
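/*
 * The kprobe_multi dumpers below pair each address with its cookie and
 * sort the pairs by address (see get_addr_cookie_array() above). They
 * then scan the symbol table loaded by kernel_syms_load() and emit an
 * entry whenever a symbol address matches the next pending pair.
 */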
285 static void
286 show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
287 {
288 	struct addr_cookie *data;
289 	__u32 i, j = 0;
290 
291 	jsonw_bool_field(json_wtr, "retprobe",
292 			 info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN);
293 	jsonw_uint_field(json_wtr, "func_cnt", info->kprobe_multi.count);
294 	jsonw_uint_field(json_wtr, "missed", info->kprobe_multi.missed);
295 	jsonw_name(json_wtr, "funcs");
296 	jsonw_start_array(json_wtr);
297 	data = get_addr_cookie_array(u64_to_ptr(info->kprobe_multi.addrs),
298 				     u64_to_ptr(info->kprobe_multi.cookies),
299 				     info->kprobe_multi.count);
300 	if (!data)
301 		return;
302 
303 	/* Load it once for all. */
304 	if (!dd.sym_count)
305 		kernel_syms_load(&dd);
306 	if (!dd.sym_count)
307 		goto error;
308 
309 	for (i = 0; i < dd.sym_count; i++) {
310 		if (dd.sym_mapping[i].address != data[j].addr)
311 			continue;
312 		jsonw_start_object(json_wtr);
313 		jsonw_uint_field(json_wtr, "addr", dd.sym_mapping[i].address);
314 		jsonw_string_field(json_wtr, "func", dd.sym_mapping[i].name);
315 		/* Print null if it is vmlinux */
316 		if (dd.sym_mapping[i].module[0] == '\0') {
317 			jsonw_name(json_wtr, "module");
318 			jsonw_null(json_wtr);
319 		} else {
320 			jsonw_string_field(json_wtr, "module", dd.sym_mapping[i].module);
321 		}
322 		jsonw_uint_field(json_wtr, "cookie", data[j].cookie);
323 		jsonw_end_object(json_wtr);
324 		if (j++ == info->kprobe_multi.count)
325 			break;
326 	}
327 	jsonw_end_array(json_wtr);
328 error:
329 	free(data);
330 }
331 
332 static __u64 *u64_to_arr(__u64 val)
333 {
334 	return (__u64 *) u64_to_ptr(val);
335 }
336 
337 static void
338 show_uprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
339 {
340 	__u32 i;
341 
342 	jsonw_bool_field(json_wtr, "retprobe",
343 			 info->uprobe_multi.flags & BPF_F_UPROBE_MULTI_RETURN);
344 	jsonw_string_field(json_wtr, "path", (char *) u64_to_ptr(info->uprobe_multi.path));
345 	jsonw_uint_field(json_wtr, "func_cnt", info->uprobe_multi.count);
346 	jsonw_int_field(json_wtr, "pid", (int) info->uprobe_multi.pid);
347 	jsonw_name(json_wtr, "funcs");
348 	jsonw_start_array(json_wtr);
349 
350 	for (i = 0; i < info->uprobe_multi.count; i++) {
351 		jsonw_start_object(json_wtr);
352 		jsonw_uint_field(json_wtr, "offset",
353 				 u64_to_arr(info->uprobe_multi.offsets)[i]);
354 		jsonw_uint_field(json_wtr, "ref_ctr_offset",
355 				 u64_to_arr(info->uprobe_multi.ref_ctr_offsets)[i]);
356 		jsonw_uint_field(json_wtr, "cookie",
357 				 u64_to_arr(info->uprobe_multi.cookies)[i]);
358 		jsonw_end_object(json_wtr);
359 	}
360 	jsonw_end_array(json_wtr);
361 }
362 
363 static void
364 show_perf_event_kprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
365 {
366 	jsonw_bool_field(wtr, "retprobe", info->perf_event.type == BPF_PERF_EVENT_KRETPROBE);
367 	jsonw_uint_field(wtr, "addr", info->perf_event.kprobe.addr);
368 	jsonw_string_field(wtr, "func",
369 			   u64_to_ptr(info->perf_event.kprobe.func_name));
370 	jsonw_uint_field(wtr, "offset", info->perf_event.kprobe.offset);
371 	jsonw_uint_field(wtr, "missed", info->perf_event.kprobe.missed);
372 	jsonw_uint_field(wtr, "cookie", info->perf_event.kprobe.cookie);
373 }
374 
375 static void
376 show_perf_event_uprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
377 {
378 	jsonw_bool_field(wtr, "retprobe", info->perf_event.type == BPF_PERF_EVENT_URETPROBE);
379 	jsonw_string_field(wtr, "file",
380 			   u64_to_ptr(info->perf_event.uprobe.file_name));
381 	jsonw_uint_field(wtr, "offset", info->perf_event.uprobe.offset);
382 	jsonw_uint_field(wtr, "cookie", info->perf_event.uprobe.cookie);
383 	jsonw_uint_field(wtr, "ref_ctr_offset", info->perf_event.uprobe.ref_ctr_offset);
384 }
385 
386 static void
387 show_perf_event_tracepoint_json(struct bpf_link_info *info, json_writer_t *wtr)
388 {
389 	jsonw_string_field(wtr, "tracepoint",
390 			   u64_to_ptr(info->perf_event.tracepoint.tp_name));
391 	jsonw_uint_field(wtr, "cookie", info->perf_event.tracepoint.cookie);
392 }
393 
394 static char *perf_config_hw_cache_str(__u64 config)
395 {
396 	const char *hw_cache, *result, *op;
397 	char *str = malloc(PERF_HW_CACHE_LEN);
398 
399 	if (!str) {
400 		p_err("mem alloc failed");
401 		return NULL;
402 	}
403 
404 	hw_cache = perf_event_name(evsel__hw_cache, config & 0xff);
405 	if (hw_cache)
406 		snprintf(str, PERF_HW_CACHE_LEN, "%s-", hw_cache);
407 	else
408 		snprintf(str, PERF_HW_CACHE_LEN, "%llu-", config & 0xff);
409 
410 	op = perf_event_name(evsel__hw_cache_op, (config >> 8) & 0xff);
411 	if (op)
412 		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
413 			 "%s-", op);
414 	else
415 		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
416 			 "%llu-", (config >> 8) & 0xff);
417 
418 	result = perf_event_name(evsel__hw_cache_result, config >> 16);
419 	if (result)
420 		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
421 			 "%s", result);
422 	else
423 		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
424 			 "%llu", config >> 16);
425 	return str;
426 }
427 
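/*
 * Resolve a perf event config to a human-readable name. Note the
 * ownership difference: hardware and software names come from the static
 * tables above, while PERF_TYPE_HW_CACHE strings are heap-allocated by
 * perf_config_hw_cache_str() and must be freed by the caller, as the
 * show_perf_event_event_*() functions below do.
 */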
428 static const char *perf_config_str(__u32 type, __u64 config)
429 {
430 	const char *perf_config;
431 
432 	switch (type) {
433 	case PERF_TYPE_HARDWARE:
434 		perf_config = perf_event_name(event_symbols_hw, config);
435 		break;
436 	case PERF_TYPE_SOFTWARE:
437 		perf_config = perf_event_name(event_symbols_sw, config);
438 		break;
439 	case PERF_TYPE_HW_CACHE:
440 		perf_config = perf_config_hw_cache_str(config);
441 		break;
442 	default:
443 		perf_config = NULL;
444 		break;
445 	}
446 	return perf_config;
447 }
448 
449 static void
450 show_perf_event_event_json(struct bpf_link_info *info, json_writer_t *wtr)
451 {
452 	__u64 config = info->perf_event.event.config;
453 	__u32 type = info->perf_event.event.type;
454 	const char *perf_type, *perf_config;
455 
456 	perf_type = perf_event_name(perf_type_name, type);
457 	if (perf_type)
458 		jsonw_string_field(wtr, "event_type", perf_type);
459 	else
460 		jsonw_uint_field(wtr, "event_type", type);
461 
462 	perf_config = perf_config_str(type, config);
463 	if (perf_config)
464 		jsonw_string_field(wtr, "event_config", perf_config);
465 	else
466 		jsonw_uint_field(wtr, "event_config", config);
467 
468 	jsonw_uint_field(wtr, "cookie", info->perf_event.event.cookie);
469 
470 	if (type == PERF_TYPE_HW_CACHE && perf_config)
471 		free((void *)perf_config);
472 }
473 
474 static int show_link_close_json(int fd, struct bpf_link_info *info)
475 {
476 	struct bpf_prog_info prog_info;
477 	const char *prog_type_str;
478 	int err;
479 
480 	jsonw_start_object(json_wtr);
481 
482 	show_link_header_json(info, json_wtr);
483 
484 	switch (info->type) {
485 	case BPF_LINK_TYPE_RAW_TRACEPOINT:
486 		jsonw_string_field(json_wtr, "tp_name",
487 				   u64_to_ptr(info->raw_tracepoint.tp_name));
488 		jsonw_uint_field(json_wtr, "cookie", info->raw_tracepoint.cookie);
489 		break;
490 	case BPF_LINK_TYPE_TRACING:
491 		err = get_prog_info(info->prog_id, &prog_info);
492 		if (err)
493 			return err;
494 
495 		prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
496 		/* libbpf will return NULL for variants unknown to it. */
497 		if (prog_type_str)
498 			jsonw_string_field(json_wtr, "prog_type", prog_type_str);
499 		else
500 			jsonw_uint_field(json_wtr, "prog_type", prog_info.type);
501 
502 		show_link_attach_type_json(info->tracing.attach_type,
503 					   json_wtr);
504 		jsonw_uint_field(json_wtr, "target_obj_id", info->tracing.target_obj_id);
505 		jsonw_uint_field(json_wtr, "target_btf_id", info->tracing.target_btf_id);
506 		jsonw_uint_field(json_wtr, "cookie", info->tracing.cookie);
507 		break;
508 	case BPF_LINK_TYPE_CGROUP:
509 		jsonw_lluint_field(json_wtr, "cgroup_id",
510 				   info->cgroup.cgroup_id);
511 		show_link_attach_type_json(info->cgroup.attach_type, json_wtr);
512 		break;
513 	case BPF_LINK_TYPE_ITER:
514 		show_iter_json(info, json_wtr);
515 		break;
516 	case BPF_LINK_TYPE_NETNS:
517 		jsonw_uint_field(json_wtr, "netns_ino",
518 				 info->netns.netns_ino);
519 		show_link_attach_type_json(info->netns.attach_type, json_wtr);
520 		break;
521 	case BPF_LINK_TYPE_NETFILTER:
522 		netfilter_dump_json(info, json_wtr);
523 		break;
524 	case BPF_LINK_TYPE_TCX:
525 		show_link_ifindex_json(info->tcx.ifindex, json_wtr);
526 		show_link_attach_type_json(info->tcx.attach_type, json_wtr);
527 		break;
528 	case BPF_LINK_TYPE_NETKIT:
529 		show_link_ifindex_json(info->netkit.ifindex, json_wtr);
530 		show_link_attach_type_json(info->netkit.attach_type, json_wtr);
531 		break;
532 	case BPF_LINK_TYPE_SOCKMAP:
533 		jsonw_uint_field(json_wtr, "map_id", info->sockmap.map_id);
534 		show_link_attach_type_json(info->sockmap.attach_type, json_wtr);
535 		break;
536 	case BPF_LINK_TYPE_XDP:
537 		show_link_ifindex_json(info->xdp.ifindex, json_wtr);
538 		break;
539 	case BPF_LINK_TYPE_STRUCT_OPS:
540 		jsonw_uint_field(json_wtr, "map_id",
541 				 info->struct_ops.map_id);
542 		break;
543 	case BPF_LINK_TYPE_KPROBE_MULTI:
544 		show_kprobe_multi_json(info, json_wtr);
545 		break;
546 	case BPF_LINK_TYPE_UPROBE_MULTI:
547 		show_uprobe_multi_json(info, json_wtr);
548 		break;
549 	case BPF_LINK_TYPE_PERF_EVENT:
550 		switch (info->perf_event.type) {
551 		case BPF_PERF_EVENT_EVENT:
552 			show_perf_event_event_json(info, json_wtr);
553 			break;
554 		case BPF_PERF_EVENT_TRACEPOINT:
555 			show_perf_event_tracepoint_json(info, json_wtr);
556 			break;
557 		case BPF_PERF_EVENT_KPROBE:
558 		case BPF_PERF_EVENT_KRETPROBE:
559 			show_perf_event_kprobe_json(info, json_wtr);
560 			break;
561 		case BPF_PERF_EVENT_UPROBE:
562 		case BPF_PERF_EVENT_URETPROBE:
563 			show_perf_event_uprobe_json(info, json_wtr);
564 			break;
565 		default:
566 			break;
567 		}
568 		break;
569 	default:
570 		break;
571 	}
572 
573 	if (!hashmap__empty(link_table)) {
574 		struct hashmap_entry *entry;
575 
576 		jsonw_name(json_wtr, "pinned");
577 		jsonw_start_array(json_wtr);
578 		hashmap__for_each_key_entry(link_table, entry, info->id)
579 			jsonw_string(json_wtr, entry->pvalue);
580 		jsonw_end_array(json_wtr);
581 	}
582 
583 	emit_obj_refs_json(refs_table, info->id, json_wtr);
584 
585 	jsonw_end_object(json_wtr);
586 
587 	return 0;
588 }
589 
590 static void show_link_header_plain(struct bpf_link_info *info)
591 {
592 	const char *link_type_str;
593 
594 	printf("%u: ", info->id);
595 	link_type_str = libbpf_bpf_link_type_str(info->type);
596 	if (link_type_str)
597 		printf("%s  ", link_type_str);
598 	else
599 		printf("type %u  ", info->type);
600 
601 	if (info->type == BPF_LINK_TYPE_STRUCT_OPS)
602 		printf("map %u  ", info->struct_ops.map_id);
603 	else
604 		printf("prog %u  ", info->prog_id);
605 }
606 
607 static void show_link_attach_type_plain(__u32 attach_type)
608 {
609 	const char *attach_type_str;
610 
611 	attach_type_str = libbpf_bpf_attach_type_str(attach_type);
612 	if (attach_type_str)
613 		printf("attach_type %s  ", attach_type_str);
614 	else
615 		printf("attach_type %u  ", attach_type);
616 }
617 
618 static void show_link_ifindex_plain(__u32 ifindex)
619 {
620 	char devname[IF_NAMESIZE * 2] = "(unknown)";
621 	char tmpname[IF_NAMESIZE];
622 	char *ret = NULL;
623 
624 	if (ifindex)
625 		ret = if_indextoname(ifindex, tmpname);
626 	else
627 		snprintf(devname, sizeof(devname), "(detached)");
628 	if (ret)
629 		snprintf(devname, sizeof(devname), "%s(%u)",
630 			 tmpname, ifindex);
631 	printf("ifindex %s  ", devname);
632 }
633 
634 static void show_iter_plain(struct bpf_link_info *info)
635 {
636 	const char *target_name = u64_to_ptr(info->iter.target_name);
637 
638 	printf("target_name %s  ", target_name);
639 
640 	if (is_iter_map_target(target_name))
641 		printf("map_id %u  ", info->iter.map.map_id);
642 	else if (is_iter_task_target(target_name)) {
643 		if (info->iter.task.tid)
644 			printf("tid %u ", info->iter.task.tid);
645 		else if (info->iter.task.pid)
646 			printf("pid %u ", info->iter.task.pid);
647 	}
648 
649 	if (is_iter_cgroup_target(target_name)) {
650 		printf("cgroup_id %llu  ", info->iter.cgroup.cgroup_id);
651 		printf("order %s  ",
652 		       cgroup_order_string(info->iter.cgroup.order));
653 	}
654 }
655 
656 static const char * const pf2name[] = {
657 	[NFPROTO_INET] = "inet",
658 	[NFPROTO_IPV4] = "ip",
659 	[NFPROTO_ARP] = "arp",
660 	[NFPROTO_NETDEV] = "netdev",
661 	[NFPROTO_BRIDGE] = "bridge",
662 	[NFPROTO_IPV6] = "ip6",
663 };
664 
665 static const char * const inethook2name[] = {
666 	[NF_INET_PRE_ROUTING] = "prerouting",
667 	[NF_INET_LOCAL_IN] = "input",
668 	[NF_INET_FORWARD] = "forward",
669 	[NF_INET_LOCAL_OUT] = "output",
670 	[NF_INET_POST_ROUTING] = "postrouting",
671 };
672 
673 static const char * const arphook2name[] = {
674 	[NF_ARP_IN] = "input",
675 	[NF_ARP_OUT] = "output",
676 };
677 
678 void netfilter_dump_plain(const struct bpf_link_info *info)
679 {
680 	const char *hookname = NULL, *pfname = NULL;
681 	unsigned int hook = info->netfilter.hooknum;
682 	unsigned int pf = info->netfilter.pf;
683 
684 	if (pf < ARRAY_SIZE(pf2name))
685 		pfname = pf2name[pf];
686 
687 	switch (pf) {
688 	case NFPROTO_BRIDGE: /* bridge shares numbers with enum nf_inet_hooks */
689 	case NFPROTO_IPV4:
690 	case NFPROTO_IPV6:
691 	case NFPROTO_INET:
692 		if (hook < ARRAY_SIZE(inethook2name))
693 			hookname = inethook2name[hook];
694 		break;
695 	case NFPROTO_ARP:
696 		if (hook < ARRAY_SIZE(arphook2name))
697 			hookname = arphook2name[hook];
698 	default:
699 		break;
700 	}
701 
702 	if (pfname)
703 		printf("\n\t%s", pfname);
704 	else
705 		printf("\n\tpf: %u", pf);
706 
707 	if (hookname)
708 		printf(" %s", hookname);
709 	else
710 		printf(", hook %u,", hook);
711 
712 	printf(" prio %d", info->netfilter.priority);
713 
714 	if (info->netfilter.flags)
715 		printf(" flags 0x%x", info->netfilter.flags);
716 }
717 
718 static void show_kprobe_multi_plain(struct bpf_link_info *info)
719 {
720 	struct addr_cookie *data;
721 	__u32 i, j = 0;
722 
723 	if (!info->kprobe_multi.count)
724 		return;
725 
726 	if (info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN)
727 		printf("\n\tkretprobe.multi  ");
728 	else
729 		printf("\n\tkprobe.multi  ");
730 	printf("func_cnt %u  ", info->kprobe_multi.count);
731 	if (info->kprobe_multi.missed)
732 		printf("missed %llu  ", info->kprobe_multi.missed);
733 	data = get_addr_cookie_array(u64_to_ptr(info->kprobe_multi.addrs),
734 				     u64_to_ptr(info->kprobe_multi.cookies),
735 				     info->kprobe_multi.count);
736 	if (!data)
737 		return;
738 
739 	/* Load it once for all. */
740 	if (!dd.sym_count)
741 		kernel_syms_load(&dd);
742 	if (!dd.sym_count)
743 		goto error;
744 
745 	printf("\n\t%-16s %-16s %s", "addr", "cookie", "func [module]");
746 	for (i = 0; i < dd.sym_count; i++) {
747 		if (dd.sym_mapping[i].address != data[j].addr)
748 			continue;
749 		printf("\n\t%016lx %-16llx %s",
750 		       dd.sym_mapping[i].address, data[j].cookie, dd.sym_mapping[i].name);
751 		if (dd.sym_mapping[i].module[0] != '\0')
752 			printf(" [%s]  ", dd.sym_mapping[i].module);
753 		else
754 			printf("  ");
755 
756 		if (j++ == info->kprobe_multi.count)
757 			break;
758 	}
759 error:
760 	free(data);
761 }
762 
763 static void show_uprobe_multi_plain(struct bpf_link_info *info)
764 {
765 	__u32 i;
766 
767 	if (!info->uprobe_multi.count)
768 		return;
769 
770 	if (info->uprobe_multi.flags & BPF_F_UPROBE_MULTI_RETURN)
771 		printf("\n\turetprobe.multi  ");
772 	else
773 		printf("\n\tuprobe.multi  ");
774 
775 	printf("path %s  ", (char *) u64_to_ptr(info->uprobe_multi.path));
776 	printf("func_cnt %u  ", info->uprobe_multi.count);
777 
778 	if (info->uprobe_multi.pid)
779 		printf("pid %u  ", info->uprobe_multi.pid);
780 
781 	printf("\n\t%-16s   %-16s   %-16s", "offset", "ref_ctr_offset", "cookies");
782 	for (i = 0; i < info->uprobe_multi.count; i++) {
783 		printf("\n\t0x%-16llx 0x%-16llx 0x%-16llx",
784 			u64_to_arr(info->uprobe_multi.offsets)[i],
785 			u64_to_arr(info->uprobe_multi.ref_ctr_offsets)[i],
786 			u64_to_arr(info->uprobe_multi.cookies)[i]);
787 	}
788 }
789 
790 static void show_perf_event_kprobe_plain(struct bpf_link_info *info)
791 {
792 	const char *buf;
793 
794 	buf = u64_to_ptr(info->perf_event.kprobe.func_name);
795 	if (buf[0] == '\0' && !info->perf_event.kprobe.addr)
796 		return;
797 
798 	if (info->perf_event.type == BPF_PERF_EVENT_KRETPROBE)
799 		printf("\n\tkretprobe ");
800 	else
801 		printf("\n\tkprobe ");
802 	if (info->perf_event.kprobe.addr)
803 		printf("%llx ", info->perf_event.kprobe.addr);
804 	printf("%s", buf);
805 	if (info->perf_event.kprobe.offset)
806 		printf("+%#x", info->perf_event.kprobe.offset);
807 	if (info->perf_event.kprobe.missed)
808 		printf("  missed %llu", info->perf_event.kprobe.missed);
809 	if (info->perf_event.kprobe.cookie)
810 		printf("  cookie %llu", info->perf_event.kprobe.cookie);
811 	printf("  ");
812 }
813 
814 static void show_perf_event_uprobe_plain(struct bpf_link_info *info)
815 {
816 	const char *buf;
817 
818 	buf = u64_to_ptr(info->perf_event.uprobe.file_name);
819 	if (buf[0] == '\0')
820 		return;
821 
822 	if (info->perf_event.type == BPF_PERF_EVENT_URETPROBE)
823 		printf("\n\turetprobe ");
824 	else
825 		printf("\n\tuprobe ");
826 	printf("%s+%#x  ", buf, info->perf_event.uprobe.offset);
827 	if (info->perf_event.uprobe.cookie)
828 		printf("cookie %llu  ", info->perf_event.uprobe.cookie);
829 	if (info->perf_event.uprobe.ref_ctr_offset)
830 		printf("ref_ctr_offset 0x%llx  ", info->perf_event.uprobe.ref_ctr_offset);
831 }
832 
833 static void show_perf_event_tracepoint_plain(struct bpf_link_info *info)
834 {
835 	const char *buf;
836 
837 	buf = u64_to_ptr(info->perf_event.tracepoint.tp_name);
838 	if (buf[0] == '\0')
839 		return;
840 
841 	printf("\n\ttracepoint %s  ", buf);
842 	if (info->perf_event.tracepoint.cookie)
843 		printf("cookie %llu  ", info->perf_event.tracepoint.cookie);
844 }
845 
846 static void show_perf_event_event_plain(struct bpf_link_info *info)
847 {
848 	__u64 config = info->perf_event.event.config;
849 	__u32 type = info->perf_event.event.type;
850 	const char *perf_type, *perf_config;
851 
852 	printf("\n\tevent ");
853 	perf_type = perf_event_name(perf_type_name, type);
854 	if (perf_type)
855 		printf("%s:", perf_type);
856 	else
857 		printf("%u :", type);
858 
859 	perf_config = perf_config_str(type, config);
860 	if (perf_config)
861 		printf("%s  ", perf_config);
862 	else
863 		printf("%llu  ", config);
864 
865 	if (info->perf_event.event.cookie)
866 		printf("cookie %llu  ", info->perf_event.event.cookie);
867 
868 	if (type == PERF_TYPE_HW_CACHE && perf_config)
869 		free((void *)perf_config);
870 }
871 
872 static int show_link_close_plain(int fd, struct bpf_link_info *info)
873 {
874 	struct bpf_prog_info prog_info;
875 	const char *prog_type_str;
876 	int err;
877 
878 	show_link_header_plain(info);
879 
880 	switch (info->type) {
881 	case BPF_LINK_TYPE_RAW_TRACEPOINT:
882 		printf("\n\ttp '%s'  ",
883 		       (const char *)u64_to_ptr(info->raw_tracepoint.tp_name));
884 		if (info->raw_tracepoint.cookie)
885 			printf("cookie %llu  ", info->raw_tracepoint.cookie);
886 		break;
887 	case BPF_LINK_TYPE_TRACING:
888 		err = get_prog_info(info->prog_id, &prog_info);
889 		if (err)
890 			return err;
891 
892 		prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
893 		/* libbpf will return NULL for variants unknown to it. */
894 		if (prog_type_str)
895 			printf("\n\tprog_type %s  ", prog_type_str);
896 		else
897 			printf("\n\tprog_type %u  ", prog_info.type);
898 
899 		show_link_attach_type_plain(info->tracing.attach_type);
900 		if (info->tracing.target_obj_id || info->tracing.target_btf_id)
901 			printf("\n\ttarget_obj_id %u  target_btf_id %u  ",
902 			       info->tracing.target_obj_id,
903 			       info->tracing.target_btf_id);
904 		if (info->tracing.cookie)
905 			printf("\n\tcookie %llu  ", info->tracing.cookie);
906 		break;
907 	case BPF_LINK_TYPE_CGROUP:
908 		printf("\n\tcgroup_id %zu  ", (size_t)info->cgroup.cgroup_id);
909 		show_link_attach_type_plain(info->cgroup.attach_type);
910 		break;
911 	case BPF_LINK_TYPE_ITER:
912 		show_iter_plain(info);
913 		break;
914 	case BPF_LINK_TYPE_NETNS:
915 		printf("\n\tnetns_ino %u  ", info->netns.netns_ino);
916 		show_link_attach_type_plain(info->netns.attach_type);
917 		break;
918 	case BPF_LINK_TYPE_NETFILTER:
919 		netfilter_dump_plain(info);
920 		break;
921 	case BPF_LINK_TYPE_TCX:
922 		printf("\n\t");
923 		show_link_ifindex_plain(info->tcx.ifindex);
924 		show_link_attach_type_plain(info->tcx.attach_type);
925 		break;
926 	case BPF_LINK_TYPE_NETKIT:
927 		printf("\n\t");
928 		show_link_ifindex_plain(info->netkit.ifindex);
929 		show_link_attach_type_plain(info->netkit.attach_type);
930 		break;
931 	case BPF_LINK_TYPE_SOCKMAP:
932 		printf("\n\t");
933 		printf("map_id %u  ", info->sockmap.map_id);
934 		show_link_attach_type_plain(info->sockmap.attach_type);
935 		break;
936 	case BPF_LINK_TYPE_XDP:
937 		printf("\n\t");
938 		show_link_ifindex_plain(info->xdp.ifindex);
939 		break;
940 	case BPF_LINK_TYPE_KPROBE_MULTI:
941 		show_kprobe_multi_plain(info);
942 		break;
943 	case BPF_LINK_TYPE_UPROBE_MULTI:
944 		show_uprobe_multi_plain(info);
945 		break;
946 	case BPF_LINK_TYPE_PERF_EVENT:
947 		switch (info->perf_event.type) {
948 		case BPF_PERF_EVENT_EVENT:
949 			show_perf_event_event_plain(info);
950 			break;
951 		case BPF_PERF_EVENT_TRACEPOINT:
952 			show_perf_event_tracepoint_plain(info);
953 			break;
954 		case BPF_PERF_EVENT_KPROBE:
955 		case BPF_PERF_EVENT_KRETPROBE:
956 			show_perf_event_kprobe_plain(info);
957 			break;
958 		case BPF_PERF_EVENT_UPROBE:
959 		case BPF_PERF_EVENT_URETPROBE:
960 			show_perf_event_uprobe_plain(info);
961 			break;
962 		default:
963 			break;
964 		}
965 		break;
966 	default:
967 		break;
968 	}
969 
970 	if (!hashmap__empty(link_table)) {
971 		struct hashmap_entry *entry;
972 
973 		hashmap__for_each_key_entry(link_table, entry, info->id)
974 			printf("\n\tpinned %s", (char *)entry->pvalue);
975 	}
976 	emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
977 
978 	printf("\n");
979 
980 	return 0;
981 }
982 
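/*
 * Dump a single link. bpf_link_get_info_by_fd() is called a first time to
 * learn the link type and the sizes/counts of its variable-length fields,
 * then called again (the "again:" label) with the info structure pointing
 * at locally allocated buffers large enough to receive them.
 */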
983 static int do_show_link(int fd)
984 {
985 	__u64 *ref_ctr_offsets = NULL, *offsets = NULL, *cookies = NULL;
986 	struct bpf_link_info info;
987 	__u32 len = sizeof(info);
988 	char path_buf[PATH_MAX];
989 	__u64 *addrs = NULL;
990 	char buf[PATH_MAX];
991 	int count;
992 	int err;
993 
994 	memset(&info, 0, sizeof(info));
995 	buf[0] = '\0';
996 again:
997 	err = bpf_link_get_info_by_fd(fd, &info, &len);
998 	if (err) {
999 		p_err("can't get link info: %s",
1000 		      strerror(errno));
1001 		close(fd);
1002 		return err;
1003 	}
1004 	if (info.type == BPF_LINK_TYPE_RAW_TRACEPOINT &&
1005 	    !info.raw_tracepoint.tp_name) {
1006 		info.raw_tracepoint.tp_name = ptr_to_u64(&buf);
1007 		info.raw_tracepoint.tp_name_len = sizeof(buf);
1008 		goto again;
1009 	}
1010 	if (info.type == BPF_LINK_TYPE_ITER &&
1011 	    !info.iter.target_name) {
1012 		info.iter.target_name = ptr_to_u64(&buf);
1013 		info.iter.target_name_len = sizeof(buf);
1014 		goto again;
1015 	}
1016 	if (info.type == BPF_LINK_TYPE_KPROBE_MULTI &&
1017 	    !info.kprobe_multi.addrs) {
1018 		count = info.kprobe_multi.count;
1019 		if (count) {
1020 			addrs = calloc(count, sizeof(__u64));
1021 			if (!addrs) {
1022 				p_err("mem alloc failed");
1023 				close(fd);
1024 				return -ENOMEM;
1025 			}
1026 			info.kprobe_multi.addrs = ptr_to_u64(addrs);
1027 			cookies = calloc(count, sizeof(__u64));
1028 			if (!cookies) {
1029 				p_err("mem alloc failed");
1030 				free(addrs);
1031 				close(fd);
1032 				return -ENOMEM;
1033 			}
1034 			info.kprobe_multi.cookies = ptr_to_u64(cookies);
1035 			goto again;
1036 		}
1037 	}
1038 	if (info.type == BPF_LINK_TYPE_UPROBE_MULTI &&
1039 	    !info.uprobe_multi.offsets) {
1040 		count = info.uprobe_multi.count;
1041 		if (count) {
1042 			offsets = calloc(count, sizeof(__u64));
1043 			if (!offsets) {
1044 				p_err("mem alloc failed");
1045 				close(fd);
1046 				return -ENOMEM;
1047 			}
1048 			info.uprobe_multi.offsets = ptr_to_u64(offsets);
1049 			ref_ctr_offsets = calloc(count, sizeof(__u64));
1050 			if (!ref_ctr_offsets) {
1051 				p_err("mem alloc failed");
1052 				free(offsets);
1053 				close(fd);
1054 				return -ENOMEM;
1055 			}
1056 			info.uprobe_multi.ref_ctr_offsets = ptr_to_u64(ref_ctr_offsets);
1057 			cookies = calloc(count, sizeof(__u64));
1058 			if (!cookies) {
1059 				p_err("mem alloc failed");
1060 				free(ref_ctr_offsets);
1061 				free(offsets);
1062 				close(fd);
1063 				return -ENOMEM;
1064 			}
1065 			info.uprobe_multi.cookies = ptr_to_u64(cookies);
1066 			info.uprobe_multi.path = ptr_to_u64(path_buf);
1067 			info.uprobe_multi.path_size = sizeof(path_buf);
1068 			goto again;
1069 		}
1070 	}
1071 	if (info.type == BPF_LINK_TYPE_PERF_EVENT) {
1072 		switch (info.perf_event.type) {
1073 		case BPF_PERF_EVENT_TRACEPOINT:
1074 			if (!info.perf_event.tracepoint.tp_name) {
1075 				info.perf_event.tracepoint.tp_name = ptr_to_u64(&buf);
1076 				info.perf_event.tracepoint.name_len = sizeof(buf);
1077 				goto again;
1078 			}
1079 			break;
1080 		case BPF_PERF_EVENT_KPROBE:
1081 		case BPF_PERF_EVENT_KRETPROBE:
1082 			if (!info.perf_event.kprobe.func_name) {
1083 				info.perf_event.kprobe.func_name = ptr_to_u64(&buf);
1084 				info.perf_event.kprobe.name_len = sizeof(buf);
1085 				goto again;
1086 			}
1087 			break;
1088 		case BPF_PERF_EVENT_UPROBE:
1089 		case BPF_PERF_EVENT_URETPROBE:
1090 			if (!info.perf_event.uprobe.file_name) {
1091 				info.perf_event.uprobe.file_name = ptr_to_u64(&buf);
1092 				info.perf_event.uprobe.name_len = sizeof(buf);
1093 				goto again;
1094 			}
1095 			break;
1096 		default:
1097 			break;
1098 		}
1099 	}
1100 
1101 	if (json_output)
1102 		show_link_close_json(fd, &info);
1103 	else
1104 		show_link_close_plain(fd, &info);
1105 
1106 	free(ref_ctr_offsets);
1107 	free(cookies);
1108 	free(offsets);
1109 	free(addrs);
1110 	close(fd);
1111 	return 0;
1112 }
1113 
1114 static int do_show(int argc, char **argv)
1115 {
1116 	__u32 id = 0;
1117 	int err, fd;
1118 
1119 	if (show_pinned) {
1120 		link_table = hashmap__new(hash_fn_for_key_as_id,
1121 					  equal_fn_for_key_as_id, NULL);
1122 		if (IS_ERR(link_table)) {
1123 			p_err("failed to create hashmap for pinned paths");
1124 			return -1;
1125 		}
1126 		build_pinned_obj_table(link_table, BPF_OBJ_LINK);
1127 	}
1128 	build_obj_refs_table(&refs_table, BPF_OBJ_LINK);
1129 
1130 	if (argc == 2) {
1131 		fd = link_parse_fd(&argc, &argv);
1132 		if (fd < 0)
1133 			return fd;
1134 		do_show_link(fd);
1135 		goto out;
1136 	}
1137 
1138 	if (argc)
1139 		return BAD_ARG();
1140 
1141 	if (json_output)
1142 		jsonw_start_array(json_wtr);
1143 	while (true) {
1144 		err = bpf_link_get_next_id(id, &id);
1145 		if (err) {
1146 			if (errno == ENOENT)
1147 				break;
1148 			p_err("can't get next link: %s%s", strerror(errno),
1149 			      errno == EINVAL ? " -- kernel too old?" : "");
1150 			break;
1151 		}
1152 
1153 		fd = bpf_link_get_fd_by_id(id);
1154 		if (fd < 0) {
1155 			if (errno == ENOENT)
1156 				continue;
1157 			p_err("can't get link by id (%u): %s",
1158 			      id, strerror(errno));
1159 			break;
1160 		}
1161 
1162 		err = do_show_link(fd);
1163 		if (err)
1164 			break;
1165 	}
1166 	if (json_output)
1167 		jsonw_end_array(json_wtr);
1168 
1169 	delete_obj_refs_table(refs_table);
1170 
1171 	if (show_pinned)
1172 		delete_pinned_obj_table(link_table);
1173 
1174 out:
1175 	if (dd.sym_count)
1176 		kernel_syms_destroy(&dd);
1177 	return errno == ENOENT ? 0 : -1;
1178 }
1179 
1180 static int do_pin(int argc, char **argv)
1181 {
1182 	int err;
1183 
1184 	err = do_pin_any(argc, argv, link_parse_fd);
1185 	if (!err && json_output)
1186 		jsonw_null(json_wtr);
1187 	return err;
1188 }
1189 
1190 static int do_detach(int argc, char **argv)
1191 {
1192 	int err, fd;
1193 
1194 	if (argc != 2) {
1195 		p_err("link specifier is invalid or missing\n");
1196 		return 1;
1197 	}
1198 
1199 	fd = link_parse_fd(&argc, &argv);
1200 	if (fd < 0)
1201 		return 1;
1202 
1203 	err = bpf_link_detach(fd);
1204 	if (err)
1205 		err = -errno;
1206 	close(fd);
1207 	if (err) {
1208 		p_err("failed link detach: %s", strerror(-err));
1209 		return 1;
1210 	}
1211 
1212 	if (json_output)
1213 		jsonw_null(json_wtr);
1214 
1215 	return 0;
1216 }
1217 
1218 static int do_help(int argc, char **argv)
1219 {
1220 	if (json_output) {
1221 		jsonw_null(json_wtr);
1222 		return 0;
1223 	}
1224 
1225 	fprintf(stderr,
1226 		"Usage: %1$s %2$s { show | list }   [LINK]\n"
1227 		"       %1$s %2$s pin        LINK  FILE\n"
1228 		"       %1$s %2$s detach     LINK\n"
1229 		"       %1$s %2$s help\n"
1230 		"\n"
1231 		"       " HELP_SPEC_LINK "\n"
1232 		"       " HELP_SPEC_OPTIONS " |\n"
1233 		"                    {-f|--bpffs} | {-n|--nomount} }\n"
1234 		"",
1235 		bin_name, argv[-2]);
1236 
1237 	return 0;
1238 }
1239 
1240 static const struct cmd cmds[] = {
1241 	{ "show",	do_show },
1242 	{ "list",	do_show },
1243 	{ "help",	do_help },
1244 	{ "pin",	do_pin },
1245 	{ "detach",	do_detach },
1246 	{ 0 }
1247 };
1248 
1249 int do_link(int argc, char **argv)
1250 {
1251 	return cmd_select(cmds, argc, argv, do_help);
1252 }
1253