xref: /linux/tools/bpf/bpftool/link.c (revision 25489a4f556414445d342951615178368ee45cde)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2020 Facebook */
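/*
 * bpftool "link" subcommand: list/show BPF links (plain or JSON output),
 * pin them to a bpffs path, and detach them from their hooks.
 */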
3 
4 #include <errno.h>
5 #include <linux/err.h>
6 #include <linux/netfilter.h>
7 #include <linux/netfilter_arp.h>
8 #include <linux/perf_event.h>
9 #include <net/if.h>
10 #include <stdio.h>
11 #include <unistd.h>
12 
13 #include <bpf/bpf.h>
14 #include <bpf/hashmap.h>
15 
16 #include "json_writer.h"
17 #include "main.h"
18 #include "xlated_dumper.h"
19 
20 #define PERF_HW_CACHE_LEN 128
21 
22 static struct hashmap *link_table;
23 static struct dump_data dd;
24 
25 static const char *perf_type_name[PERF_TYPE_MAX] = {
26 	[PERF_TYPE_HARDWARE]			= "hardware",
27 	[PERF_TYPE_SOFTWARE]			= "software",
28 	[PERF_TYPE_TRACEPOINT]			= "tracepoint",
29 	[PERF_TYPE_HW_CACHE]			= "hw-cache",
30 	[PERF_TYPE_RAW]				= "raw",
31 	[PERF_TYPE_BREAKPOINT]			= "breakpoint",
32 };
33 
34 const char *event_symbols_hw[PERF_COUNT_HW_MAX] = {
35 	[PERF_COUNT_HW_CPU_CYCLES]		= "cpu-cycles",
36 	[PERF_COUNT_HW_INSTRUCTIONS]		= "instructions",
37 	[PERF_COUNT_HW_CACHE_REFERENCES]	= "cache-references",
38 	[PERF_COUNT_HW_CACHE_MISSES]		= "cache-misses",
39 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= "branch-instructions",
40 	[PERF_COUNT_HW_BRANCH_MISSES]		= "branch-misses",
41 	[PERF_COUNT_HW_BUS_CYCLES]		= "bus-cycles",
42 	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= "stalled-cycles-frontend",
43 	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= "stalled-cycles-backend",
44 	[PERF_COUNT_HW_REF_CPU_CYCLES]		= "ref-cycles",
45 };
46 
47 const char *event_symbols_sw[PERF_COUNT_SW_MAX] = {
48 	[PERF_COUNT_SW_CPU_CLOCK]		= "cpu-clock",
49 	[PERF_COUNT_SW_TASK_CLOCK]		= "task-clock",
50 	[PERF_COUNT_SW_PAGE_FAULTS]		= "page-faults",
51 	[PERF_COUNT_SW_CONTEXT_SWITCHES]	= "context-switches",
52 	[PERF_COUNT_SW_CPU_MIGRATIONS]		= "cpu-migrations",
53 	[PERF_COUNT_SW_PAGE_FAULTS_MIN]		= "minor-faults",
54 	[PERF_COUNT_SW_PAGE_FAULTS_MAJ]		= "major-faults",
55 	[PERF_COUNT_SW_ALIGNMENT_FAULTS]	= "alignment-faults",
56 	[PERF_COUNT_SW_EMULATION_FAULTS]	= "emulation-faults",
57 	[PERF_COUNT_SW_DUMMY]			= "dummy",
58 	[PERF_COUNT_SW_BPF_OUTPUT]		= "bpf-output",
59 	[PERF_COUNT_SW_CGROUP_SWITCHES]		= "cgroup-switches",
60 };
61 
62 const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX] = {
63 	[PERF_COUNT_HW_CACHE_L1D]		= "L1-dcache",
64 	[PERF_COUNT_HW_CACHE_L1I]		= "L1-icache",
65 	[PERF_COUNT_HW_CACHE_LL]		= "LLC",
66 	[PERF_COUNT_HW_CACHE_DTLB]		= "dTLB",
67 	[PERF_COUNT_HW_CACHE_ITLB]		= "iTLB",
68 	[PERF_COUNT_HW_CACHE_BPU]		= "branch",
69 	[PERF_COUNT_HW_CACHE_NODE]		= "node",
70 };
71 
72 const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX] = {
73 	[PERF_COUNT_HW_CACHE_OP_READ]		= "load",
74 	[PERF_COUNT_HW_CACHE_OP_WRITE]		= "store",
75 	[PERF_COUNT_HW_CACHE_OP_PREFETCH]	= "prefetch",
76 };
77 
78 const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
79 	[PERF_COUNT_HW_CACHE_RESULT_ACCESS]	= "refs",
80 	[PERF_COUNT_HW_CACHE_RESULT_MISS]	= "misses",
81 };
82 
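/* Safely index one of the event-name tables above; NULL if id is out of range. */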
83 #define perf_event_name(array, id) ({			\
84 	const char *event_str = NULL;			\
85 							\
86 	if ((id) < ARRAY_SIZE(array))			\
87 		event_str = array[id];			\
88 	event_str;					\
89 })
90 
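/*
 * Parse a link specifier, either "id ID" or "pinned PATH", and return a
 * file descriptor for that link (negative value on error).
 */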
91 static int link_parse_fd(int *argc, char ***argv)
92 {
93 	int fd;
94 
95 	if (is_prefix(**argv, "id")) {
96 		unsigned int id;
97 		char *endptr;
98 
99 		NEXT_ARGP();
100 
101 		id = strtoul(**argv, &endptr, 0);
102 		if (*endptr) {
103 			p_err("can't parse %s as ID", **argv);
104 			return -1;
105 		}
106 		NEXT_ARGP();
107 
108 		fd = bpf_link_get_fd_by_id(id);
109 		if (fd < 0)
110 			p_err("failed to get link with ID %u: %s", id, strerror(errno));
111 		return fd;
112 	} else if (is_prefix(**argv, "pinned")) {
113 		char *path;
114 
115 		NEXT_ARGP();
116 
117 		path = **argv;
118 		NEXT_ARGP();
119 
120 		return open_obj_pinned_any(path, BPF_OBJ_LINK);
121 	}
122 
123 	p_err("expected 'id' or 'pinned', got: '%s'?", **argv);
124 	return -1;
125 }
126 
127 static void
128 show_link_header_json(struct bpf_link_info *info, json_writer_t *wtr)
129 {
130 	const char *link_type_str;
131 
132 	jsonw_uint_field(wtr, "id", info->id);
133 	link_type_str = libbpf_bpf_link_type_str(info->type);
134 	if (link_type_str)
135 		jsonw_string_field(wtr, "type", link_type_str);
136 	else
137 		jsonw_uint_field(wtr, "type", info->type);
138 
139 	jsonw_uint_field(wtr, "prog_id", info->prog_id);
140 }
141 
142 static void show_link_attach_type_json(__u32 attach_type, json_writer_t *wtr)
143 {
144 	const char *attach_type_str;
145 
146 	attach_type_str = libbpf_bpf_attach_type_str(attach_type);
147 	if (attach_type_str)
148 		jsonw_string_field(wtr, "attach_type", attach_type_str);
149 	else
150 		jsonw_uint_field(wtr, "attach_type", attach_type);
151 }
152 
153 static void show_link_ifindex_json(__u32 ifindex, json_writer_t *wtr)
154 {
155 	char devname[IF_NAMESIZE] = "(unknown)";
156 
157 	if (ifindex)
158 		if_indextoname(ifindex, devname);
159 	else
160 		snprintf(devname, sizeof(devname), "(detached)");
161 	jsonw_string_field(wtr, "devname", devname);
162 	jsonw_uint_field(wtr, "ifindex", ifindex);
163 }
164 
165 static bool is_iter_map_target(const char *target_name)
166 {
167 	return strcmp(target_name, "bpf_map_elem") == 0 ||
168 	       strcmp(target_name, "bpf_sk_storage_map") == 0;
169 }
170 
171 static bool is_iter_cgroup_target(const char *target_name)
172 {
173 	return strcmp(target_name, "cgroup") == 0;
174 }
175 
176 static const char *cgroup_order_string(__u32 order)
177 {
178 	switch (order) {
179 	case BPF_CGROUP_ITER_ORDER_UNSPEC:
180 		return "order_unspec";
181 	case BPF_CGROUP_ITER_SELF_ONLY:
182 		return "self_only";
183 	case BPF_CGROUP_ITER_DESCENDANTS_PRE:
184 		return "descendants_pre";
185 	case BPF_CGROUP_ITER_DESCENDANTS_POST:
186 		return "descendants_post";
187 	case BPF_CGROUP_ITER_ANCESTORS_UP:
188 		return "ancestors_up";
189 	default: /* won't happen */
190 		return "unknown";
191 	}
192 }
193 
194 static bool is_iter_task_target(const char *target_name)
195 {
196 	return strcmp(target_name, "task") == 0 ||
197 		strcmp(target_name, "task_file") == 0 ||
198 		strcmp(target_name, "task_vma") == 0;
199 }
200 
201 static void show_iter_json(struct bpf_link_info *info, json_writer_t *wtr)
202 {
203 	const char *target_name = u64_to_ptr(info->iter.target_name);
204 
205 	jsonw_string_field(wtr, "target_name", target_name);
206 
207 	if (is_iter_map_target(target_name))
208 		jsonw_uint_field(wtr, "map_id", info->iter.map.map_id);
209 	else if (is_iter_task_target(target_name)) {
210 		if (info->iter.task.tid)
211 			jsonw_uint_field(wtr, "tid", info->iter.task.tid);
212 		else if (info->iter.task.pid)
213 			jsonw_uint_field(wtr, "pid", info->iter.task.pid);
214 	}
215 
216 	if (is_iter_cgroup_target(target_name)) {
217 		jsonw_lluint_field(wtr, "cgroup_id", info->iter.cgroup.cgroup_id);
218 		jsonw_string_field(wtr, "order",
219 				   cgroup_order_string(info->iter.cgroup.order));
220 	}
221 }
222 
223 void netfilter_dump_json(const struct bpf_link_info *info, json_writer_t *wtr)
224 {
225 	jsonw_uint_field(json_wtr, "pf",
226 			 info->netfilter.pf);
227 	jsonw_uint_field(json_wtr, "hook",
228 			 info->netfilter.hooknum);
229 	jsonw_int_field(json_wtr, "prio",
230 			 info->netfilter.priority);
231 	jsonw_uint_field(json_wtr, "flags",
232 			 info->netfilter.flags);
233 }
234 
235 static int get_prog_info(int prog_id, struct bpf_prog_info *info)
236 {
237 	__u32 len = sizeof(*info);
238 	int err, prog_fd;
239 
240 	prog_fd = bpf_prog_get_fd_by_id(prog_id);
241 	if (prog_fd < 0)
242 		return prog_fd;
243 
244 	memset(info, 0, sizeof(*info));
245 	err = bpf_prog_get_info_by_fd(prog_fd, info, &len);
246 	if (err)
247 		p_err("can't get prog info: %s", strerror(errno));
248 	close(prog_fd);
249 	return err;
250 }
251 
252 struct addr_cookie {
253 	__u64 addr;
254 	__u64 cookie;
255 };
256 
257 static int cmp_addr_cookie(const void *A, const void *B)
258 {
259 	const struct addr_cookie *a = A, *b = B;
260 
261 	if (a->addr == b->addr)
262 		return 0;
263 	return a->addr < b->addr ? -1 : 1;
264 }
265 
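/*
 * Pair each kprobe address with its cookie and sort the pairs by address,
 * so they can be walked in lockstep with the sorted kernel symbol table.
 * The caller frees the returned array.
 */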
266 static struct addr_cookie *
267 get_addr_cookie_array(__u64 *addrs, __u64 *cookies, __u32 count)
268 {
269 	struct addr_cookie *data;
270 	__u32 i;
271 
272 	data = calloc(count, sizeof(data[0]));
273 	if (!data) {
274 		p_err("mem alloc failed");
275 		return NULL;
276 	}
277 	for (i = 0; i < count; i++) {
278 		data[i].addr = addrs[i];
279 		data[i].cookie = cookies[i];
280 	}
281 	qsort(data, count, sizeof(data[0]), cmp_addr_cookie);
282 	return data;
283 }
284 
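/* Emit one object per attached function: address, name, module (or null), cookie. */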
285 static void
286 show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
287 {
288 	struct addr_cookie *data;
289 	__u32 i, j = 0;
290 
291 	jsonw_bool_field(json_wtr, "retprobe",
292 			 info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN);
293 	jsonw_uint_field(json_wtr, "func_cnt", info->kprobe_multi.count);
294 	jsonw_uint_field(json_wtr, "missed", info->kprobe_multi.missed);
295 	jsonw_name(json_wtr, "funcs");
296 	jsonw_start_array(json_wtr);
297 	data = get_addr_cookie_array(u64_to_ptr(info->kprobe_multi.addrs),
298 				     u64_to_ptr(info->kprobe_multi.cookies),
299 				     info->kprobe_multi.count);
300 	if (!data)
301 		return;
302 
303 	/* Load the kernel symbol table only once and reuse it. */
304 	if (!dd.sym_count)
305 		kernel_syms_load(&dd);
306 	if (!dd.sym_count)
307 		goto error;
308 
309 	for (i = 0; i < dd.sym_count; i++) {
310 		if (dd.sym_mapping[i].address != data[j].addr)
311 			continue;
312 		jsonw_start_object(json_wtr);
313 		jsonw_uint_field(json_wtr, "addr", dd.sym_mapping[i].address);
314 		jsonw_string_field(json_wtr, "func", dd.sym_mapping[i].name);
315 		/* Print null if it is vmlinux */
316 		if (dd.sym_mapping[i].module[0] == '\0') {
317 			jsonw_name(json_wtr, "module");
318 			jsonw_null(json_wtr);
319 		} else {
320 			jsonw_string_field(json_wtr, "module", dd.sym_mapping[i].module);
321 		}
322 		jsonw_uint_field(json_wtr, "cookie", data[j].cookie);
323 		jsonw_end_object(json_wtr);
324 		if (++j == info->kprobe_multi.count)
325 			break;
326 	}
327 	jsonw_end_array(json_wtr);
328 error:
329 	free(data);
330 }
331 
332 static __u64 *u64_to_arr(__u64 val)
333 {
334 	return (__u64 *) u64_to_ptr(val);
335 }
336 
337 static void
338 show_uprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
339 {
340 	__u32 i;
341 
342 	jsonw_bool_field(json_wtr, "retprobe",
343 			 info->uprobe_multi.flags & BPF_F_UPROBE_MULTI_RETURN);
344 	jsonw_string_field(json_wtr, "path", (char *) u64_to_ptr(info->uprobe_multi.path));
345 	jsonw_uint_field(json_wtr, "func_cnt", info->uprobe_multi.count);
346 	jsonw_int_field(json_wtr, "pid", (int) info->uprobe_multi.pid);
347 	jsonw_name(json_wtr, "funcs");
348 	jsonw_start_array(json_wtr);
349 
350 	for (i = 0; i < info->uprobe_multi.count; i++) {
351 		jsonw_start_object(json_wtr);
352 		jsonw_uint_field(json_wtr, "offset",
353 				 u64_to_arr(info->uprobe_multi.offsets)[i]);
354 		jsonw_uint_field(json_wtr, "ref_ctr_offset",
355 				 u64_to_arr(info->uprobe_multi.ref_ctr_offsets)[i]);
356 		jsonw_uint_field(json_wtr, "cookie",
357 				 u64_to_arr(info->uprobe_multi.cookies)[i]);
358 		jsonw_end_object(json_wtr);
359 	}
360 	jsonw_end_array(json_wtr);
361 }
362 
363 static void
364 show_perf_event_kprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
365 {
366 	jsonw_bool_field(wtr, "retprobe", info->perf_event.type == BPF_PERF_EVENT_KRETPROBE);
367 	jsonw_uint_field(wtr, "addr", info->perf_event.kprobe.addr);
368 	jsonw_string_field(wtr, "func",
369 			   u64_to_ptr(info->perf_event.kprobe.func_name));
370 	jsonw_uint_field(wtr, "offset", info->perf_event.kprobe.offset);
371 	jsonw_uint_field(wtr, "missed", info->perf_event.kprobe.missed);
372 	jsonw_uint_field(wtr, "cookie", info->perf_event.kprobe.cookie);
373 }
374 
375 static void
376 show_perf_event_uprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
377 {
378 	jsonw_bool_field(wtr, "retprobe", info->perf_event.type == BPF_PERF_EVENT_URETPROBE);
379 	jsonw_string_field(wtr, "file",
380 			   u64_to_ptr(info->perf_event.uprobe.file_name));
381 	jsonw_uint_field(wtr, "offset", info->perf_event.uprobe.offset);
382 	jsonw_uint_field(wtr, "cookie", info->perf_event.uprobe.cookie);
383 	jsonw_uint_field(wtr, "ref_ctr_offset", info->perf_event.uprobe.ref_ctr_offset);
384 }
385 
386 static void
387 show_perf_event_tracepoint_json(struct bpf_link_info *info, json_writer_t *wtr)
388 {
389 	jsonw_string_field(wtr, "tracepoint",
390 			   u64_to_ptr(info->perf_event.tracepoint.tp_name));
391 	jsonw_uint_field(wtr, "cookie", info->perf_event.tracepoint.cookie);
392 }
393 
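/*
 * Decode a PERF_TYPE_HW_CACHE config: cache id in bits 0-7, operation in
 * bits 8-15, result in bits 16-23, e.g. "L1-dcache-load-misses".
 * Returns a malloc'd string that the caller must free.
 */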
394 static char *perf_config_hw_cache_str(__u64 config)
395 {
396 	const char *hw_cache, *result, *op;
397 	char *str = malloc(PERF_HW_CACHE_LEN);
398 
399 	if (!str) {
400 		p_err("mem alloc failed");
401 		return NULL;
402 	}
403 
404 	hw_cache = perf_event_name(evsel__hw_cache, config & 0xff);
405 	if (hw_cache)
406 		snprintf(str, PERF_HW_CACHE_LEN, "%s-", hw_cache);
407 	else
408 		snprintf(str, PERF_HW_CACHE_LEN, "%llu-", config & 0xff);
409 
410 	op = perf_event_name(evsel__hw_cache_op, (config >> 8) & 0xff);
411 	if (op)
412 		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
413 			 "%s-", op);
414 	else
415 		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
416 			 "%llu-", (config >> 8) & 0xff);
417 
418 	result = perf_event_name(evsel__hw_cache_result, config >> 16);
419 	if (result)
420 		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
421 			 "%s", result);
422 	else
423 		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
424 			 "%llu", config >> 16);
425 	return str;
426 }
427 
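/*
 * Translate a perf (type, config) pair into a symbolic event name. For
 * PERF_TYPE_HW_CACHE the string is heap-allocated and must be freed.
 */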
428 static const char *perf_config_str(__u32 type, __u64 config)
429 {
430 	const char *perf_config;
431 
432 	switch (type) {
433 	case PERF_TYPE_HARDWARE:
434 		perf_config = perf_event_name(event_symbols_hw, config);
435 		break;
436 	case PERF_TYPE_SOFTWARE:
437 		perf_config = perf_event_name(event_symbols_sw, config);
438 		break;
439 	case PERF_TYPE_HW_CACHE:
440 		perf_config = perf_config_hw_cache_str(config);
441 		break;
442 	default:
443 		perf_config = NULL;
444 		break;
445 	}
446 	return perf_config;
447 }
448 
449 static void
450 show_perf_event_event_json(struct bpf_link_info *info, json_writer_t *wtr)
451 {
452 	__u64 config = info->perf_event.event.config;
453 	__u32 type = info->perf_event.event.type;
454 	const char *perf_type, *perf_config;
455 
456 	perf_type = perf_event_name(perf_type_name, type);
457 	if (perf_type)
458 		jsonw_string_field(wtr, "event_type", perf_type);
459 	else
460 		jsonw_uint_field(wtr, "event_type", type);
461 
462 	perf_config = perf_config_str(type, config);
463 	if (perf_config)
464 		jsonw_string_field(wtr, "event_config", perf_config);
465 	else
466 		jsonw_uint_field(wtr, "event_config", config);
467 
468 	jsonw_uint_field(wtr, "cookie", info->perf_event.event.cookie);
469 
470 	if (type == PERF_TYPE_HW_CACHE && perf_config)
471 		free((void *)perf_config);
472 }
473 
474 static int show_link_close_json(int fd, struct bpf_link_info *info)
475 {
476 	struct bpf_prog_info prog_info;
477 	const char *prog_type_str;
478 	int err;
479 
480 	jsonw_start_object(json_wtr);
481 
482 	show_link_header_json(info, json_wtr);
483 
484 	switch (info->type) {
485 	case BPF_LINK_TYPE_RAW_TRACEPOINT:
486 		jsonw_string_field(json_wtr, "tp_name",
487 				   u64_to_ptr(info->raw_tracepoint.tp_name));
488 		break;
489 	case BPF_LINK_TYPE_TRACING:
490 		err = get_prog_info(info->prog_id, &prog_info);
491 		if (err)
492 			return err;
493 
494 		prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
495 		/* libbpf will return NULL for variants unknown to it. */
496 		if (prog_type_str)
497 			jsonw_string_field(json_wtr, "prog_type", prog_type_str);
498 		else
499 			jsonw_uint_field(json_wtr, "prog_type", prog_info.type);
500 
501 		show_link_attach_type_json(info->tracing.attach_type,
502 					   json_wtr);
503 		jsonw_uint_field(json_wtr, "target_obj_id", info->tracing.target_obj_id);
504 		jsonw_uint_field(json_wtr, "target_btf_id", info->tracing.target_btf_id);
505 		break;
506 	case BPF_LINK_TYPE_CGROUP:
507 		jsonw_lluint_field(json_wtr, "cgroup_id",
508 				   info->cgroup.cgroup_id);
509 		show_link_attach_type_json(info->cgroup.attach_type, json_wtr);
510 		break;
511 	case BPF_LINK_TYPE_ITER:
512 		show_iter_json(info, json_wtr);
513 		break;
514 	case BPF_LINK_TYPE_NETNS:
515 		jsonw_uint_field(json_wtr, "netns_ino",
516 				 info->netns.netns_ino);
517 		show_link_attach_type_json(info->netns.attach_type, json_wtr);
518 		break;
519 	case BPF_LINK_TYPE_NETFILTER:
520 		netfilter_dump_json(info, json_wtr);
521 		break;
522 	case BPF_LINK_TYPE_TCX:
523 		show_link_ifindex_json(info->tcx.ifindex, json_wtr);
524 		show_link_attach_type_json(info->tcx.attach_type, json_wtr);
525 		break;
526 	case BPF_LINK_TYPE_NETKIT:
527 		show_link_ifindex_json(info->netkit.ifindex, json_wtr);
528 		show_link_attach_type_json(info->netkit.attach_type, json_wtr);
529 		break;
530 	case BPF_LINK_TYPE_SOCKMAP:
531 		jsonw_uint_field(json_wtr, "map_id", info->sockmap.map_id);
532 		show_link_attach_type_json(info->sockmap.attach_type, json_wtr);
533 		break;
534 	case BPF_LINK_TYPE_XDP:
535 		show_link_ifindex_json(info->xdp.ifindex, json_wtr);
536 		break;
537 	case BPF_LINK_TYPE_STRUCT_OPS:
538 		jsonw_uint_field(json_wtr, "map_id",
539 				 info->struct_ops.map_id);
540 		break;
541 	case BPF_LINK_TYPE_KPROBE_MULTI:
542 		show_kprobe_multi_json(info, json_wtr);
543 		break;
544 	case BPF_LINK_TYPE_UPROBE_MULTI:
545 		show_uprobe_multi_json(info, json_wtr);
546 		break;
547 	case BPF_LINK_TYPE_PERF_EVENT:
548 		switch (info->perf_event.type) {
549 		case BPF_PERF_EVENT_EVENT:
550 			show_perf_event_event_json(info, json_wtr);
551 			break;
552 		case BPF_PERF_EVENT_TRACEPOINT:
553 			show_perf_event_tracepoint_json(info, json_wtr);
554 			break;
555 		case BPF_PERF_EVENT_KPROBE:
556 		case BPF_PERF_EVENT_KRETPROBE:
557 			show_perf_event_kprobe_json(info, json_wtr);
558 			break;
559 		case BPF_PERF_EVENT_UPROBE:
560 		case BPF_PERF_EVENT_URETPROBE:
561 			show_perf_event_uprobe_json(info, json_wtr);
562 			break;
563 		default:
564 			break;
565 		}
566 		break;
567 	default:
568 		break;
569 	}
570 
571 	if (!hashmap__empty(link_table)) {
572 		struct hashmap_entry *entry;
573 
574 		jsonw_name(json_wtr, "pinned");
575 		jsonw_start_array(json_wtr);
576 		hashmap__for_each_key_entry(link_table, entry, info->id)
577 			jsonw_string(json_wtr, entry->pvalue);
578 		jsonw_end_array(json_wtr);
579 	}
580 
581 	emit_obj_refs_json(refs_table, info->id, json_wtr);
582 
583 	jsonw_end_object(json_wtr);
584 
585 	return 0;
586 }
587 
588 static void show_link_header_plain(struct bpf_link_info *info)
589 {
590 	const char *link_type_str;
591 
592 	printf("%u: ", info->id);
593 	link_type_str = libbpf_bpf_link_type_str(info->type);
594 	if (link_type_str)
595 		printf("%s  ", link_type_str);
596 	else
597 		printf("type %u  ", info->type);
598 
599 	if (info->type == BPF_LINK_TYPE_STRUCT_OPS)
600 		printf("map %u  ", info->struct_ops.map_id);
601 	else
602 		printf("prog %u  ", info->prog_id);
603 }
604 
605 static void show_link_attach_type_plain(__u32 attach_type)
606 {
607 	const char *attach_type_str;
608 
609 	attach_type_str = libbpf_bpf_attach_type_str(attach_type);
610 	if (attach_type_str)
611 		printf("attach_type %s  ", attach_type_str);
612 	else
613 		printf("attach_type %u  ", attach_type);
614 }
615 
616 static void show_link_ifindex_plain(__u32 ifindex)
617 {
618 	char devname[IF_NAMESIZE * 2] = "(unknown)";
619 	char tmpname[IF_NAMESIZE];
620 	char *ret = NULL;
621 
622 	if (ifindex)
623 		ret = if_indextoname(ifindex, tmpname);
624 	else
625 		snprintf(devname, sizeof(devname), "(detached)");
626 	if (ret)
627 		snprintf(devname, sizeof(devname), "%s(%u)",
628 			 tmpname, ifindex);
629 	printf("ifindex %s  ", devname);
630 }
631 
632 static void show_iter_plain(struct bpf_link_info *info)
633 {
634 	const char *target_name = u64_to_ptr(info->iter.target_name);
635 
636 	printf("target_name %s  ", target_name);
637 
638 	if (is_iter_map_target(target_name))
639 		printf("map_id %u  ", info->iter.map.map_id);
640 	else if (is_iter_task_target(target_name)) {
641 		if (info->iter.task.tid)
642 			printf("tid %u ", info->iter.task.tid);
643 		else if (info->iter.task.pid)
644 			printf("pid %u ", info->iter.task.pid);
645 	}
646 
647 	if (is_iter_cgroup_target(target_name)) {
648 		printf("cgroup_id %llu  ", info->iter.cgroup.cgroup_id);
649 		printf("order %s  ",
650 		       cgroup_order_string(info->iter.cgroup.order));
651 	}
652 }
653 
654 static const char * const pf2name[] = {
655 	[NFPROTO_INET] = "inet",
656 	[NFPROTO_IPV4] = "ip",
657 	[NFPROTO_ARP] = "arp",
658 	[NFPROTO_NETDEV] = "netdev",
659 	[NFPROTO_BRIDGE] = "bridge",
660 	[NFPROTO_IPV6] = "ip6",
661 };
662 
663 static const char * const inethook2name[] = {
664 	[NF_INET_PRE_ROUTING] = "prerouting",
665 	[NF_INET_LOCAL_IN] = "input",
666 	[NF_INET_FORWARD] = "forward",
667 	[NF_INET_LOCAL_OUT] = "output",
668 	[NF_INET_POST_ROUTING] = "postrouting",
669 };
670 
671 static const char * const arphook2name[] = {
672 	[NF_ARP_IN] = "input",
673 	[NF_ARP_OUT] = "output",
674 };
675 
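/* Print netfilter pf/hook numbers with their familiar names, e.g. "ip input". */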
676 void netfilter_dump_plain(const struct bpf_link_info *info)
677 {
678 	const char *hookname = NULL, *pfname = NULL;
679 	unsigned int hook = info->netfilter.hooknum;
680 	unsigned int pf = info->netfilter.pf;
681 
682 	if (pf < ARRAY_SIZE(pf2name))
683 		pfname = pf2name[pf];
684 
685 	switch (pf) {
686 	case NFPROTO_BRIDGE: /* bridge shares numbers with enum nf_inet_hooks */
687 	case NFPROTO_IPV4:
688 	case NFPROTO_IPV6:
689 	case NFPROTO_INET:
690 		if (hook < ARRAY_SIZE(inethook2name))
691 			hookname = inethook2name[hook];
692 		break;
693 	case NFPROTO_ARP:
694 		if (hook < ARRAY_SIZE(arphook2name))
695 			hookname = arphook2name[hook];
696 	default:
697 		break;
698 	}
699 
700 	if (pfname)
701 		printf("\n\t%s", pfname);
702 	else
703 		printf("\n\tpf: %u", pf);
704 
705 	if (hookname)
706 		printf(" %s", hookname);
707 	else
708 		printf(", hook %u,", hook);
709 
710 	printf(" prio %d", info->netfilter.priority);
711 
712 	if (info->netfilter.flags)
713 		printf(" flags 0x%x", info->netfilter.flags);
714 }
715 
716 static void show_kprobe_multi_plain(struct bpf_link_info *info)
717 {
718 	struct addr_cookie *data;
719 	__u32 i, j = 0;
720 
721 	if (!info->kprobe_multi.count)
722 		return;
723 
724 	if (info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN)
725 		printf("\n\tkretprobe.multi  ");
726 	else
727 		printf("\n\tkprobe.multi  ");
728 	printf("func_cnt %u  ", info->kprobe_multi.count);
729 	if (info->kprobe_multi.missed)
730 		printf("missed %llu  ", info->kprobe_multi.missed);
731 	data = get_addr_cookie_array(u64_to_ptr(info->kprobe_multi.addrs),
732 				     u64_to_ptr(info->kprobe_multi.cookies),
733 				     info->kprobe_multi.count);
734 	if (!data)
735 		return;
736 
737 	/* Load the kernel symbol table only once and reuse it. */
738 	if (!dd.sym_count)
739 		kernel_syms_load(&dd);
740 	if (!dd.sym_count)
741 		goto error;
742 
743 	printf("\n\t%-16s %-16s %s", "addr", "cookie", "func [module]");
744 	for (i = 0; i < dd.sym_count; i++) {
745 		if (dd.sym_mapping[i].address != data[j].addr)
746 			continue;
747 		printf("\n\t%016lx %-16llx %s",
748 		       dd.sym_mapping[i].address, data[j].cookie, dd.sym_mapping[i].name);
749 		if (dd.sym_mapping[i].module[0] != '\0')
750 			printf(" [%s]  ", dd.sym_mapping[i].module);
751 		else
752 			printf("  ");
753 
754 		if (++j == info->kprobe_multi.count)
755 			break;
756 	}
757 error:
758 	free(data);
759 }
760 
761 static void show_uprobe_multi_plain(struct bpf_link_info *info)
762 {
763 	__u32 i;
764 
765 	if (!info->uprobe_multi.count)
766 		return;
767 
768 	if (info->uprobe_multi.flags & BPF_F_UPROBE_MULTI_RETURN)
769 		printf("\n\turetprobe.multi  ");
770 	else
771 		printf("\n\tuprobe.multi  ");
772 
773 	printf("path %s  ", (char *) u64_to_ptr(info->uprobe_multi.path));
774 	printf("func_cnt %u  ", info->uprobe_multi.count);
775 
776 	if (info->uprobe_multi.pid)
777 		printf("pid %u  ", info->uprobe_multi.pid);
778 
779 	printf("\n\t%-16s   %-16s   %-16s", "offset", "ref_ctr_offset", "cookies");
780 	for (i = 0; i < info->uprobe_multi.count; i++) {
781 		printf("\n\t0x%-16llx 0x%-16llx 0x%-16llx",
782 			u64_to_arr(info->uprobe_multi.offsets)[i],
783 			u64_to_arr(info->uprobe_multi.ref_ctr_offsets)[i],
784 			u64_to_arr(info->uprobe_multi.cookies)[i]);
785 	}
786 }
787 
788 static void show_perf_event_kprobe_plain(struct bpf_link_info *info)
789 {
790 	const char *buf;
791 
792 	buf = u64_to_ptr(info->perf_event.kprobe.func_name);
793 	if (buf[0] == '\0' && !info->perf_event.kprobe.addr)
794 		return;
795 
796 	if (info->perf_event.type == BPF_PERF_EVENT_KRETPROBE)
797 		printf("\n\tkretprobe ");
798 	else
799 		printf("\n\tkprobe ");
800 	if (info->perf_event.kprobe.addr)
801 		printf("%llx ", info->perf_event.kprobe.addr);
802 	printf("%s", buf);
803 	if (info->perf_event.kprobe.offset)
804 		printf("+%#x", info->perf_event.kprobe.offset);
805 	if (info->perf_event.kprobe.missed)
806 		printf("  missed %llu", info->perf_event.kprobe.missed);
807 	if (info->perf_event.kprobe.cookie)
808 		printf("  cookie %llu", info->perf_event.kprobe.cookie);
809 	printf("  ");
810 }
811 
812 static void show_perf_event_uprobe_plain(struct bpf_link_info *info)
813 {
814 	const char *buf;
815 
816 	buf = u64_to_ptr(info->perf_event.uprobe.file_name);
817 	if (buf[0] == '\0')
818 		return;
819 
820 	if (info->perf_event.type == BPF_PERF_EVENT_URETPROBE)
821 		printf("\n\turetprobe ");
822 	else
823 		printf("\n\tuprobe ");
824 	printf("%s+%#x  ", buf, info->perf_event.uprobe.offset);
825 	if (info->perf_event.uprobe.cookie)
826 		printf("cookie %llu  ", info->perf_event.uprobe.cookie);
827 	if (info->perf_event.uprobe.ref_ctr_offset)
828 		printf("ref_ctr_offset 0x%llx  ", info->perf_event.uprobe.ref_ctr_offset);
829 }
830 
831 static void show_perf_event_tracepoint_plain(struct bpf_link_info *info)
832 {
833 	const char *buf;
834 
835 	buf = u64_to_ptr(info->perf_event.tracepoint.tp_name);
836 	if (buf[0] == '\0')
837 		return;
838 
839 	printf("\n\ttracepoint %s  ", buf);
840 	if (info->perf_event.tracepoint.cookie)
841 		printf("cookie %llu  ", info->perf_event.tracepoint.cookie);
842 }
843 
844 static void show_perf_event_event_plain(struct bpf_link_info *info)
845 {
846 	__u64 config = info->perf_event.event.config;
847 	__u32 type = info->perf_event.event.type;
848 	const char *perf_type, *perf_config;
849 
850 	printf("\n\tevent ");
851 	perf_type = perf_event_name(perf_type_name, type);
852 	if (perf_type)
853 		printf("%s:", perf_type);
854 	else
855 		printf("%u :", type);
856 
857 	perf_config = perf_config_str(type, config);
858 	if (perf_config)
859 		printf("%s  ", perf_config);
860 	else
861 		printf("%llu  ", config);
862 
863 	if (info->perf_event.event.cookie)
864 		printf("cookie %llu  ", info->perf_event.event.cookie);
865 
866 	if (type == PERF_TYPE_HW_CACHE && perf_config)
867 		free((void *)perf_config);
868 }
869 
870 static int show_link_close_plain(int fd, struct bpf_link_info *info)
871 {
872 	struct bpf_prog_info prog_info;
873 	const char *prog_type_str;
874 	int err;
875 
876 	show_link_header_plain(info);
877 
878 	switch (info->type) {
879 	case BPF_LINK_TYPE_RAW_TRACEPOINT:
880 		printf("\n\ttp '%s'  ",
881 		       (const char *)u64_to_ptr(info->raw_tracepoint.tp_name));
882 		break;
883 	case BPF_LINK_TYPE_TRACING:
884 		err = get_prog_info(info->prog_id, &prog_info);
885 		if (err)
886 			return err;
887 
888 		prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
889 		/* libbpf will return NULL for variants unknown to it. */
890 		if (prog_type_str)
891 			printf("\n\tprog_type %s  ", prog_type_str);
892 		else
893 			printf("\n\tprog_type %u  ", prog_info.type);
894 
895 		show_link_attach_type_plain(info->tracing.attach_type);
896 		if (info->tracing.target_obj_id || info->tracing.target_btf_id)
897 			printf("\n\ttarget_obj_id %u  target_btf_id %u  ",
898 			       info->tracing.target_obj_id,
899 			       info->tracing.target_btf_id);
900 		break;
901 	case BPF_LINK_TYPE_CGROUP:
902 		printf("\n\tcgroup_id %zu  ", (size_t)info->cgroup.cgroup_id);
903 		show_link_attach_type_plain(info->cgroup.attach_type);
904 		break;
905 	case BPF_LINK_TYPE_ITER:
906 		show_iter_plain(info);
907 		break;
908 	case BPF_LINK_TYPE_NETNS:
909 		printf("\n\tnetns_ino %u  ", info->netns.netns_ino);
910 		show_link_attach_type_plain(info->netns.attach_type);
911 		break;
912 	case BPF_LINK_TYPE_NETFILTER:
913 		netfilter_dump_plain(info);
914 		break;
915 	case BPF_LINK_TYPE_TCX:
916 		printf("\n\t");
917 		show_link_ifindex_plain(info->tcx.ifindex);
918 		show_link_attach_type_plain(info->tcx.attach_type);
919 		break;
920 	case BPF_LINK_TYPE_NETKIT:
921 		printf("\n\t");
922 		show_link_ifindex_plain(info->netkit.ifindex);
923 		show_link_attach_type_plain(info->netkit.attach_type);
924 		break;
925 	case BPF_LINK_TYPE_SOCKMAP:
926 		printf("\n\t");
927 		printf("map_id %u  ", info->sockmap.map_id);
928 		show_link_attach_type_plain(info->sockmap.attach_type);
929 		break;
930 	case BPF_LINK_TYPE_XDP:
931 		printf("\n\t");
932 		show_link_ifindex_plain(info->xdp.ifindex);
933 		break;
934 	case BPF_LINK_TYPE_KPROBE_MULTI:
935 		show_kprobe_multi_plain(info);
936 		break;
937 	case BPF_LINK_TYPE_UPROBE_MULTI:
938 		show_uprobe_multi_plain(info);
939 		break;
940 	case BPF_LINK_TYPE_PERF_EVENT:
941 		switch (info->perf_event.type) {
942 		case BPF_PERF_EVENT_EVENT:
943 			show_perf_event_event_plain(info);
944 			break;
945 		case BPF_PERF_EVENT_TRACEPOINT:
946 			show_perf_event_tracepoint_plain(info);
947 			break;
948 		case BPF_PERF_EVENT_KPROBE:
949 		case BPF_PERF_EVENT_KRETPROBE:
950 			show_perf_event_kprobe_plain(info);
951 			break;
952 		case BPF_PERF_EVENT_UPROBE:
953 		case BPF_PERF_EVENT_URETPROBE:
954 			show_perf_event_uprobe_plain(info);
955 			break;
956 		default:
957 			break;
958 		}
959 		break;
960 	default:
961 		break;
962 	}
963 
964 	if (!hashmap__empty(link_table)) {
965 		struct hashmap_entry *entry;
966 
967 		hashmap__for_each_key_entry(link_table, entry, info->id)
968 			printf("\n\tpinned %s", (char *)entry->pvalue);
969 	}
970 	emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
971 
972 	printf("\n");
973 
974 	return 0;
975 }
976 
977 static int do_show_link(int fd)
978 {
979 	__u64 *ref_ctr_offsets = NULL, *offsets = NULL, *cookies = NULL;
980 	struct bpf_link_info info;
981 	__u32 len = sizeof(info);
982 	char path_buf[PATH_MAX];
983 	__u64 *addrs = NULL;
984 	char buf[PATH_MAX];
985 	int count;
986 	int err;
987 
988 	memset(&info, 0, sizeof(info));
989 	buf[0] = '\0';
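	/*
	 * The first bpf_link_get_info_by_fd() call reveals which variable-size
	 * fields this link has; buffers are then allocated and the call is
	 * retried so the second pass can fill in the actual strings/arrays.
	 */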
990 again:
991 	err = bpf_link_get_info_by_fd(fd, &info, &len);
992 	if (err) {
993 		p_err("can't get link info: %s",
994 		      strerror(errno));
995 		close(fd);
996 		return err;
997 	}
998 	if (info.type == BPF_LINK_TYPE_RAW_TRACEPOINT &&
999 	    !info.raw_tracepoint.tp_name) {
1000 		info.raw_tracepoint.tp_name = ptr_to_u64(&buf);
1001 		info.raw_tracepoint.tp_name_len = sizeof(buf);
1002 		goto again;
1003 	}
1004 	if (info.type == BPF_LINK_TYPE_ITER &&
1005 	    !info.iter.target_name) {
1006 		info.iter.target_name = ptr_to_u64(&buf);
1007 		info.iter.target_name_len = sizeof(buf);
1008 		goto again;
1009 	}
1010 	if (info.type == BPF_LINK_TYPE_KPROBE_MULTI &&
1011 	    !info.kprobe_multi.addrs) {
1012 		count = info.kprobe_multi.count;
1013 		if (count) {
1014 			addrs = calloc(count, sizeof(__u64));
1015 			if (!addrs) {
1016 				p_err("mem alloc failed");
1017 				close(fd);
1018 				return -ENOMEM;
1019 			}
1020 			info.kprobe_multi.addrs = ptr_to_u64(addrs);
1021 			cookies = calloc(count, sizeof(__u64));
1022 			if (!cookies) {
1023 				p_err("mem alloc failed");
1024 				free(addrs);
1025 				close(fd);
1026 				return -ENOMEM;
1027 			}
1028 			info.kprobe_multi.cookies = ptr_to_u64(cookies);
1029 			goto again;
1030 		}
1031 	}
1032 	if (info.type == BPF_LINK_TYPE_UPROBE_MULTI &&
1033 	    !info.uprobe_multi.offsets) {
1034 		count = info.uprobe_multi.count;
1035 		if (count) {
1036 			offsets = calloc(count, sizeof(__u64));
1037 			if (!offsets) {
1038 				p_err("mem alloc failed");
1039 				close(fd);
1040 				return -ENOMEM;
1041 			}
1042 			info.uprobe_multi.offsets = ptr_to_u64(offsets);
1043 			ref_ctr_offsets = calloc(count, sizeof(__u64));
1044 			if (!ref_ctr_offsets) {
1045 				p_err("mem alloc failed");
1046 				free(offsets);
1047 				close(fd);
1048 				return -ENOMEM;
1049 			}
1050 			info.uprobe_multi.ref_ctr_offsets = ptr_to_u64(ref_ctr_offsets);
1051 			cookies = calloc(count, sizeof(__u64));
1052 			if (!cookies) {
1053 				p_err("mem alloc failed");
1054 				free(ref_ctr_offsets);
1055 				free(offsets);
1056 				close(fd);
1057 				return -ENOMEM;
1058 			}
1059 			info.uprobe_multi.cookies = ptr_to_u64(cookies);
1060 			info.uprobe_multi.path = ptr_to_u64(path_buf);
1061 			info.uprobe_multi.path_size = sizeof(path_buf);
1062 			goto again;
1063 		}
1064 	}
1065 	if (info.type == BPF_LINK_TYPE_PERF_EVENT) {
1066 		switch (info.perf_event.type) {
1067 		case BPF_PERF_EVENT_TRACEPOINT:
1068 			if (!info.perf_event.tracepoint.tp_name) {
1069 				info.perf_event.tracepoint.tp_name = ptr_to_u64(&buf);
1070 				info.perf_event.tracepoint.name_len = sizeof(buf);
1071 				goto again;
1072 			}
1073 			break;
1074 		case BPF_PERF_EVENT_KPROBE:
1075 		case BPF_PERF_EVENT_KRETPROBE:
1076 			if (!info.perf_event.kprobe.func_name) {
1077 				info.perf_event.kprobe.func_name = ptr_to_u64(&buf);
1078 				info.perf_event.kprobe.name_len = sizeof(buf);
1079 				goto again;
1080 			}
1081 			break;
1082 		case BPF_PERF_EVENT_UPROBE:
1083 		case BPF_PERF_EVENT_URETPROBE:
1084 			if (!info.perf_event.uprobe.file_name) {
1085 				info.perf_event.uprobe.file_name = ptr_to_u64(&buf);
1086 				info.perf_event.uprobe.name_len = sizeof(buf);
1087 				goto again;
1088 			}
1089 			break;
1090 		default:
1091 			break;
1092 		}
1093 	}
1094 
1095 	if (json_output)
1096 		show_link_close_json(fd, &info);
1097 	else
1098 		show_link_close_plain(fd, &info);
1099 
1100 	free(ref_ctr_offsets);
1101 	free(cookies);
1102 	free(offsets);
1103 	free(addrs);
1104 	close(fd);
1105 	return 0;
1106 }
1107 
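/*
 * "bpftool link show"/"list": show a single link when a specifier is given
 * (e.g. "bpftool link show id 3"), otherwise walk all link IDs with
 * bpf_link_get_next_id().
 */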
1108 static int do_show(int argc, char **argv)
1109 {
1110 	__u32 id = 0;
1111 	int err, fd;
1112 
1113 	if (show_pinned) {
1114 		link_table = hashmap__new(hash_fn_for_key_as_id,
1115 					  equal_fn_for_key_as_id, NULL);
1116 		if (IS_ERR(link_table)) {
1117 			p_err("failed to create hashmap for pinned paths");
1118 			return -1;
1119 		}
1120 		build_pinned_obj_table(link_table, BPF_OBJ_LINK);
1121 	}
1122 	build_obj_refs_table(&refs_table, BPF_OBJ_LINK);
1123 
1124 	if (argc == 2) {
1125 		fd = link_parse_fd(&argc, &argv);
1126 		if (fd < 0)
1127 			return fd;
1128 		do_show_link(fd);
1129 		goto out;
1130 	}
1131 
1132 	if (argc)
1133 		return BAD_ARG();
1134 
1135 	if (json_output)
1136 		jsonw_start_array(json_wtr);
1137 	while (true) {
1138 		err = bpf_link_get_next_id(id, &id);
1139 		if (err) {
1140 			if (errno == ENOENT)
1141 				break;
1142 			p_err("can't get next link: %s%s", strerror(errno),
1143 			      errno == EINVAL ? " -- kernel too old?" : "");
1144 			break;
1145 		}
1146 
1147 		fd = bpf_link_get_fd_by_id(id);
1148 		if (fd < 0) {
1149 			if (errno == ENOENT)
1150 				continue;
1151 			p_err("can't get link by id (%u): %s",
1152 			      id, strerror(errno));
1153 			break;
1154 		}
1155 
1156 		err = do_show_link(fd);
1157 		if (err)
1158 			break;
1159 	}
1160 	if (json_output)
1161 		jsonw_end_array(json_wtr);
1162 
1163 	delete_obj_refs_table(refs_table);
1164 
1165 	if (show_pinned)
1166 		delete_pinned_obj_table(link_table);
1167 
1168 out:
1169 	if (dd.sym_count)
1170 		kernel_syms_destroy(&dd);
1171 	return errno == ENOENT ? 0 : -1;
1172 }
1173 
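/* "bpftool link pin LINK FILE", e.g. "bpftool link pin id 3 /sys/fs/bpf/mylink". */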
1174 static int do_pin(int argc, char **argv)
1175 {
1176 	int err;
1177 
1178 	err = do_pin_any(argc, argv, link_parse_fd);
1179 	if (!err && json_output)
1180 		jsonw_null(json_wtr);
1181 	return err;
1182 }
1183 
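/* "bpftool link detach LINK": force-detach a link from its hook. */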
1184 static int do_detach(int argc, char **argv)
1185 {
1186 	int err, fd;
1187 
1188 	if (argc != 2) {
1189 		p_err("link specifier is invalid or missing");
1190 		return 1;
1191 	}
1192 
1193 	fd = link_parse_fd(&argc, &argv);
1194 	if (fd < 0)
1195 		return 1;
1196 
1197 	err = bpf_link_detach(fd);
1198 	if (err)
1199 		err = -errno;
1200 	close(fd);
1201 	if (err) {
1202 		p_err("failed link detach: %s", strerror(-err));
1203 		return 1;
1204 	}
1205 
1206 	if (json_output)
1207 		jsonw_null(json_wtr);
1208 
1209 	return 0;
1210 }
1211 
1212 static int do_help(int argc, char **argv)
1213 {
1214 	if (json_output) {
1215 		jsonw_null(json_wtr);
1216 		return 0;
1217 	}
1218 
1219 	fprintf(stderr,
1220 		"Usage: %1$s %2$s { show | list }   [LINK]\n"
1221 		"       %1$s %2$s pin        LINK  FILE\n"
1222 		"       %1$s %2$s detach     LINK\n"
1223 		"       %1$s %2$s help\n"
1224 		"\n"
1225 		"       " HELP_SPEC_LINK "\n"
1226 		"       " HELP_SPEC_OPTIONS " |\n"
1227 		"                    {-f|--bpffs} | {-n|--nomount} }\n"
1228 		"",
1229 		bin_name, argv[-2]);
1230 
1231 	return 0;
1232 }
1233 
1234 static const struct cmd cmds[] = {
1235 	{ "show",	do_show },
1236 	{ "list",	do_show },
1237 	{ "help",	do_help },
1238 	{ "pin",	do_pin },
1239 	{ "detach",	do_detach },
1240 	{ 0 }
1241 };
1242 
1243 int do_link(int argc, char **argv)
1244 {
1245 	return cmd_select(cmds, argc, argv, do_help);
1246 }
1247