xref: /linux/tools/bpf/bpftool/gen.c (revision 4e887471e8e3a513607495d18333c44f59a82c5a)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2019 Facebook */
3 
4 #ifndef _GNU_SOURCE
5 #define _GNU_SOURCE
6 #endif
7 #include <ctype.h>
8 #include <errno.h>
9 #include <fcntl.h>
10 #include <libgen.h>
11 #include <linux/err.h>
12 #include <stdbool.h>
13 #include <stdio.h>
14 #include <string.h>
15 #include <unistd.h>
16 #include <bpf/bpf.h>
17 #include <bpf/libbpf.h>
18 #include <bpf/libbpf_internal.h>
19 #include <sys/types.h>
20 #include <sys/stat.h>
21 #include <sys/mman.h>
22 #include <bpf/btf.h>
23 
24 #include "json_writer.h"
25 #include "main.h"
26 
27 #define MAX_OBJ_NAME_LEN 64
28 
/* Turn an arbitrary string into a valid C identifier by replacing every
 * character that is not alphanumeric or '_' with '_'.
 *
 * Note: <ctype.h> classification functions require an argument that is
 * representable as unsigned char (or EOF); passing a plain, possibly
 * signed, char with a negative value is undefined behavior, hence the
 * explicit cast.
 */
static void sanitize_identifier(char *name)
{
	int i;

	for (i = 0; name[i]; i++)
		if (!isalnum((unsigned char)name[i]) && name[i] != '_')
			name[i] = '_';
}
37 
/* Return true iff @str begins with @prefix (empty prefix always matches). */
static bool str_has_prefix(const char *str, const char *prefix)
{
	while (*prefix) {
		if (*str++ != *prefix++)
			return false;
	}
	return true;
}
42 
/* Return true iff @str ends with @suffix (empty suffix always matches). */
static bool str_has_suffix(const char *str, const char *suffix)
{
	size_t str_len = strlen(str);
	size_t sfx_len = strlen(suffix);

	if (str_len < sfx_len)
		return false;

	/* compare the tail of @str against the whole suffix */
	return strcmp(str + (str_len - sfx_len), suffix) == 0;
}
57 
58 static const struct btf_type *
59 resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
60 {
61 	const struct btf_type *t;
62 
63 	t = skip_mods_and_typedefs(btf, id, NULL);
64 	if (!btf_is_ptr(t))
65 		return NULL;
66 
67 	t = skip_mods_and_typedefs(btf, t->type, res_id);
68 
69 	return btf_is_func_proto(t) ? t : NULL;
70 }
71 
72 static void get_obj_name(char *name, const char *file)
73 {
74 	char file_copy[PATH_MAX];
75 
76 	/* Using basename() POSIX version to be more portable. */
77 	strncpy(file_copy, file, PATH_MAX - 1)[PATH_MAX - 1] = '\0';
78 	strncpy(name, basename(file_copy), MAX_OBJ_NAME_LEN - 1)[MAX_OBJ_NAME_LEN - 1] = '\0';
79 	if (str_has_suffix(name, ".o"))
80 		name[strlen(name) - 2] = '\0';
81 	sanitize_identifier(name);
82 }
83 
/* Build an upper-case include-guard macro name "__<OBJ>_<SUFFIX>__" into
 * @guard.  @guard must be sized by the caller for the formatted result
 * (callers size it from MAX_OBJ_NAME_LEN plus the suffix).
 *
 * toupper() requires an argument representable as unsigned char; the
 * explicit cast avoids undefined behavior for negative char values.
 */
static void get_header_guard(char *guard, const char *obj_name, const char *suffix)
{
	int i;

	sprintf(guard, "__%s_%s__", obj_name, suffix);
	for (i = 0; guard[i]; i++)
		guard[i] = toupper((unsigned char)guard[i]);
}
92 
93 static bool get_map_ident(const struct bpf_map *map, char *buf, size_t buf_sz)
94 {
95 	static const char *sfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
96 	const char *name = bpf_map__name(map);
97 	int i, n;
98 
99 	if (!bpf_map__is_internal(map)) {
100 		snprintf(buf, buf_sz, "%s", name);
101 		return true;
102 	}
103 
104 	for  (i = 0, n = ARRAY_SIZE(sfxs); i < n; i++) {
105 		const char *sfx = sfxs[i], *p;
106 
107 		p = strstr(name, sfx);
108 		if (p) {
109 			snprintf(buf, buf_sz, "%s", p + 1);
110 			sanitize_identifier(buf);
111 			return true;
112 		}
113 	}
114 
115 	return false;
116 }
117 
118 static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz)
119 {
120 	static const char *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
121 	int i, n;
122 
123 	for  (i = 0, n = ARRAY_SIZE(pfxs); i < n; i++) {
124 		const char *pfx = pfxs[i];
125 
126 		if (str_has_prefix(sec_name, pfx)) {
127 			snprintf(buf, buf_sz, "%s", sec_name + 1);
128 			sanitize_identifier(buf);
129 			return true;
130 		}
131 	}
132 
133 	return false;
134 }
135 
/* btf_dump callback: forward dumped type declarations straight to stdout. */
static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args)
{
	vprintf(fmt, args);
}
140 
/* Emit a C struct definition mirroring the memory layout of one DATASEC
 * (.data/.rodata/.bss/.kconfig) so the skeleton can expose the section's
 * variables to user space.  Padding fields are inserted to reproduce
 * recorded BTF offsets.  Returns 0 on success (including sections with
 * unrecognized names, which are skipped), negative error otherwise.
 */
static int codegen_datasec_def(struct bpf_object *obj,
			       struct btf *btf,
			       struct btf_dump *d,
			       const struct btf_type *sec,
			       const char *obj_name)
{
	const char *sec_name = btf__name_by_offset(btf, sec->name_off);
	const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec);
	int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec);
	char var_ident[256], sec_ident[256];
	bool strip_mods = false;

	if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
		return 0;

	/* .kconfig keeps type modifiers (e.g. const volatile) in emitted
	 * declarations; all other sections have them stripped
	 */
	if (strcmp(sec_name, ".kconfig") != 0)
		strip_mods = true;

	printf("	struct %s__%s {\n", obj_name, sec_ident);
	for (i = 0; i < vlen; i++, sec_var++) {
		const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
		const char *var_name = btf__name_by_offset(btf, var->name_off);
		DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
			.field_name = var_ident,
			.indent_level = 2,
			.strip_mods = strip_mods,
		);
		int need_off = sec_var->offset, align_off, align;
		__u32 var_type_id = var->type;

		/* static variables are not exposed through BPF skeleton */
		if (btf_var(var)->linkage == BTF_VAR_STATIC)
			continue;

		/* variables are visited in offset order, so current offset
		 * exceeding the needed one indicates corrupt BTF layout info
		 */
		if (off > need_off) {
			p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
			      sec_name, i, need_off, off);
			return -EINVAL;
		}

		align = btf__align_of(btf, var->type);
		if (align <= 0) {
			p_err("Failed to determine alignment of variable '%s': %d",
			      var_name, align);
			return -EINVAL;
		}
		/* Assume 32-bit architectures when generating data section
		 * struct memory layout. Given bpftool can't know which target
		 * host architecture it's emitting skeleton for, we need to be
		 * conservative and assume 32-bit one to ensure enough padding
		 * bytes are generated for pointer and long types. This will
		 * still work correctly for 64-bit architectures, because in
		 * the worst case we'll generate unnecessary padding field,
		 * which on 64-bit architectures is not strictly necessary and
		 * would be handled by natural 8-byte alignment. But it still
		 * will be a correct memory layout, based on recorded offsets
		 * in BTF.
		 */
		if (align > 4)
			align = 4;

		align_off = (off + align - 1) / align * align;
		if (align_off != need_off) {
			printf("\t\tchar __pad%d[%d];\n",
			       pad_cnt, need_off - off);
			pad_cnt++;
		}

		/* sanitize variable name, e.g., for static vars inside
		 * a function, it's name is '<function name>.<variable name>',
		 * which we'll turn into a '<function name>_<variable name>'
		 */
		var_ident[0] = '\0';
		strncat(var_ident, var_name, sizeof(var_ident) - 1);
		sanitize_identifier(var_ident);

		printf("\t\t");
		err = btf_dump__emit_type_decl(d, var_type_id, &opts);
		if (err)
			return err;
		printf(";\n");

		off = sec_var->offset + sec_var->size;
	}
	printf("	} *%s;\n", sec_ident);
	return 0;
}
228 
229 static const struct btf_type *find_type_for_map(struct btf *btf, const char *map_ident)
230 {
231 	int n = btf__type_cnt(btf), i;
232 	char sec_ident[256];
233 
234 	for (i = 1; i < n; i++) {
235 		const struct btf_type *t = btf__type_by_id(btf, i);
236 		const char *name;
237 
238 		if (!btf_is_datasec(t))
239 			continue;
240 
241 		name = btf__str_by_offset(btf, t->name_off);
242 		if (!get_datasec_ident(name, sec_ident, sizeof(sec_ident)))
243 			continue;
244 
245 		if (strcmp(sec_ident, map_ident) == 0)
246 			return t;
247 	}
248 	return NULL;
249 }
250 
251 static bool is_internal_mmapable_map(const struct bpf_map *map, char *buf, size_t sz)
252 {
253 	if (!bpf_map__is_internal(map) || !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
254 		return false;
255 
256 	if (!get_map_ident(map, buf, sz))
257 		return false;
258 
259 	return true;
260 }
261 
/* Emit skeleton struct definitions for every memory-mapped internal map
 * of @obj, one per DATASEC.  Returns 0 on success, negative error
 * otherwise.
 */
static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct btf_dump *d;
	struct bpf_map *map;
	const struct btf_type *sec;
	char map_ident[256];
	int err = 0;

	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
	if (!d)
		return -errno;

	bpf_object__for_each_map(map, obj) {
		/* only generate definitions for memory-mapped internal maps */
		if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);

		/* In some cases (e.g., sections like .rodata.cst16 containing
		 * compiler allocated string constants only) there will be
		 * special internal maps with no corresponding DATASEC BTF
		 * type. In such case, generate empty structs for each such
		 * map. It will still be memory-mapped and its contents
		 * accessible from user-space through BPF skeleton.
		 */
		if (!sec) {
			printf("	struct %s__%s {\n", obj_name, map_ident);
			printf("	} *%s;\n", map_ident);
		} else {
			err = codegen_datasec_def(obj, btf, d, sec, obj_name);
			if (err)
				goto out;
		}
	}


out:
	btf_dump__free(d);
	return err;
}
304 
305 static bool btf_is_ptr_to_func_proto(const struct btf *btf,
306 				     const struct btf_type *v)
307 {
308 	return btf_is_ptr(v) && btf_is_func_proto(btf__type_by_id(btf, v->type));
309 }
310 
/* Emit subskeleton struct definitions for memory-mapped internal maps:
 * unlike the full skeleton, every variable is exposed as a pointer into
 * the mapped section rather than as an inline field.  Returns 0 on
 * success, negative error otherwise.
 */
static int codegen_subskel_datasecs(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct btf_dump *d;
	struct bpf_map *map;
	const struct btf_type *sec, *var;
	const struct btf_var_secinfo *sec_var;
	int i, err = 0, vlen;
	char map_ident[256], sec_ident[256];
	bool strip_mods = false, needs_typeof = false;
	const char *sec_name, *var_name;
	__u32 var_type_id;

	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
	if (!d)
		return -errno;

	bpf_object__for_each_map(map, obj) {
		/* only generate definitions for memory-mapped internal maps */
		if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);
		if (!sec)
			continue;

		sec_name = btf__name_by_offset(btf, sec->name_off);
		if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
			continue;

		/* .kconfig keeps type modifiers in emitted declarations */
		strip_mods = strcmp(sec_name, ".kconfig") != 0;
		printf("	struct %s__%s {\n", obj_name, sec_ident);

		sec_var = btf_var_secinfos(sec);
		vlen = btf_vlen(sec);
		for (i = 0; i < vlen; i++, sec_var++) {
			DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
				.indent_level = 2,
				.strip_mods = strip_mods,
				/* we'll print the name separately */
				.field_name = "",
			);

			var = btf__type_by_id(btf, sec_var->type);
			var_name = btf__name_by_offset(btf, var->name_off);
			var_type_id = var->type;

			/* static variables are not exposed through BPF skeleton */
			if (btf_var(var)->linkage == BTF_VAR_STATIC)
				continue;

			/* The datasec member has KIND_VAR but we want the
			 * underlying type of the variable (e.g. KIND_INT).
			 */
			var = skip_mods_and_typedefs(btf, var->type, NULL);

			printf("\t\t");
			/* Func and array members require special handling.
			 * Instead of producing `typename *var`, they produce
			 * `typeof(typename) *var`. This allows us to keep a
			 * similar syntax where the identifier is just prefixed
			 * by *, allowing us to ignore C declaration minutiae.
			 */
			needs_typeof = btf_is_array(var) || btf_is_ptr_to_func_proto(btf, var);
			if (needs_typeof)
				printf("typeof(");

			err = btf_dump__emit_type_decl(d, var_type_id, &opts);
			if (err)
				goto out;

			if (needs_typeof)
				printf(")");

			printf(" *%s;\n", var_name);
		}
		printf("	} %s;\n", sec_ident);
	}

out:
	btf_dump__free(d);
	return err;
}
394 
/* printf-like helper for emitting skeleton code from indented string
 * templates.  The run of tabs on the template's first line defines a
 * "baseline" indentation that is stripped from every subsequent line;
 * trailing whitespace is trimmed as well.  The adjusted template is then
 * vprintf()ed with the remaining arguments.  Malformed templates or
 * allocation failure terminate the process (codegen output would be
 * unusable anyway).
 */
static void codegen(const char *template, ...)
{
	const char *src, *end;
	int skip_tabs = 0, n;
	char *s, *dst;
	va_list args;
	char c;

	/* adjusted copy is never longer than the template itself */
	n = strlen(template);
	s = malloc(n + 1);
	if (!s)
		exit(-1);
	src = template;
	dst = s;

	/* find out "baseline" indentation to skip */
	while ((c = *src++)) {
		if (c == '\t') {
			skip_tabs++;
		} else if (c == '\n') {
			break;
		} else {
			p_err("unrecognized character at pos %td in template '%s': '%c'",
			      src - template - 1, template, c);
			free(s);
			exit(-1);
		}
	}

	while (*src) {
		/* skip baseline indentation tabs */
		for (n = skip_tabs; n > 0; n--, src++) {
			if (*src != '\t') {
				p_err("not enough tabs at pos %td in template '%s'",
				      src - template - 1, template);
				free(s);
				exit(-1);
			}
		}
		/* trim trailing whitespace */
		end = strchrnul(src, '\n');
		for (n = end - src; n > 0 && isspace(src[n - 1]); n--)
			;
		memcpy(dst, src, n);
		dst += n;
		if (*end)
			*dst++ = '\n';
		src = *end ? end + 1 : end;
	}
	*dst++ = '\0';

	/* print out using adjusted template */
	va_start(args, template);
	n = vprintf(s, args);
	va_end(args);

	free(s);
}
453 
/* Emit @data as the body of a C string literal: zero bytes become "\0",
 * everything else "\xNN".  A backslash-newline continuation is inserted
 * whenever a line would exceed 78 columns.
 */
static void print_hex(const char *data, int data_sz)
{
	int idx, line_len = 0;

	for (idx = 0; idx < data_sz; idx++) {
		/* "\0" occupies 2 columns, "\xNN" occupies 4 */
		int width = data[idx] ? 4 : 2;

		line_len += width;
		if (line_len > 78) {
			printf("\\\n");
			line_len = width;
		}
		if (data[idx])
			printf("\\x%02x", (unsigned char)data[idx]);
		else
			printf("\\0");
	}
}
472 
/* Size of the memory-mapped region backing @map: value size rounded up
 * to 8 bytes, times max_entries, rounded up to a whole number of pages.
 */
static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
	long page_sz = sysconf(_SC_PAGE_SIZE);
	size_t map_sz;

	map_sz = (size_t)roundup(bpf_map__value_size(map), 8) * bpf_map__max_entries(map);
	map_sz = roundup(map_sz, page_sz);
	return map_sz;
}
482 
/* Emit type size asserts for all top-level fields in memory-mapped internal maps.
 *
 * Generates a <obj>__assert() helper containing one _Static_assert per
 * non-static variable, so a mismatch between the skeleton and the BPF
 * object's actual data layout fails at compile time.  No-op when the
 * object carries no BTF.
 */
static void codegen_asserts(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct bpf_map *map;
	struct btf_var_secinfo *sec_var;
	int i, vlen;
	const struct btf_type *sec;
	char map_ident[256], var_ident[256];

	if (!btf)
		return;

	codegen("\
		\n\
		__attribute__((unused)) static void			    \n\
		%1$s__assert(struct %1$s *s __attribute__((unused)))	    \n\
		{							    \n\
		#ifdef __cplusplus					    \n\
		#define _Static_assert static_assert			    \n\
		#endif							    \n\
		", obj_name);

	bpf_object__for_each_map(map, obj) {
		if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);
		if (!sec) {
			/* best effort, couldn't find the type for this map */
			continue;
		}

		sec_var = btf_var_secinfos(sec);
		vlen =  btf_vlen(sec);

		for (i = 0; i < vlen; i++, sec_var++) {
			const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
			const char *var_name = btf__name_by_offset(btf, var->name_off);
			long var_size;

			/* static variables are not exposed through BPF skeleton */
			if (btf_var(var)->linkage == BTF_VAR_STATIC)
				continue;

			/* best effort: skip variables whose size can't be resolved */
			var_size = btf__resolve_size(btf, var->type);
			if (var_size < 0)
				continue;

			var_ident[0] = '\0';
			strncat(var_ident, var_name, sizeof(var_ident) - 1);
			sanitize_identifier(var_ident);

			printf("\t_Static_assert(sizeof(s->%s->%s) == %ld, \"unexpected size of '%s'\");\n",
			       map_ident, var_ident, var_size, var_ident);
		}
	}
	codegen("\
		\n\
		#ifdef __cplusplus					    \n\
		#undef _Static_assert					    \n\
		#endif							    \n\
		}							    \n\
		");
}
548 
/* Emit per-program __attach() helpers plus the aggregate
 * <obj>__attach()/<obj>__detach() pair for the light skeleton.
 * Auto-attach code is generated only for raw tracepoint, tracing and
 * LSM programs; every other program type gets a stub returning 0.
 */
static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
{
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		const char *tp_name;

		codegen("\
			\n\
			\n\
			static inline int					    \n\
			%1$s__%2$s__attach(struct %1$s *skel)			    \n\
			{							    \n\
				int prog_fd = skel->progs.%2$s.prog_fd;		    \n\
			", obj_name, bpf_program__name(prog));

		switch (bpf_program__type(prog)) {
		case BPF_PROG_TYPE_RAW_TRACEPOINT:
			/* NOTE(review): assumes the section name always contains
			 * a '/' (e.g. "raw_tp/<name>"); a missing '/' would make
			 * the +1 dereference NULL+1 - confirm section names are
			 * validated before this point.
			 */
			tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
			printf("\tint fd = skel_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
			break;
		case BPF_PROG_TYPE_TRACING:
		case BPF_PROG_TYPE_LSM:
			if (bpf_program__expected_attach_type(prog) == BPF_TRACE_ITER)
				printf("\tint fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);\n");
			else
				printf("\tint fd = skel_raw_tracepoint_open(NULL, prog_fd);\n");
			break;
		default:
			printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");
			break;
		}
		codegen("\
			\n\
										    \n\
				if (fd > 0)					    \n\
					skel->links.%1$s_fd = fd;		    \n\
				return fd;					    \n\
			}							    \n\
			", bpf_program__name(prog));
	}

	codegen("\
		\n\
									    \n\
		static inline int					    \n\
		%1$s__attach(struct %1$s *skel)				    \n\
		{							    \n\
			int ret = 0;					    \n\
									    \n\
		", obj_name);

	bpf_object__for_each_program(prog, obj) {
		/* keep the first failure's error code, but attempt all programs */
		codegen("\
			\n\
				ret = ret < 0 ? ret : %1$s__%2$s__attach(skel);   \n\
			", obj_name, bpf_program__name(prog));
	}

	codegen("\
		\n\
			return ret < 0 ? ret : 0;			    \n\
		}							    \n\
									    \n\
		static inline void					    \n\
		%1$s__detach(struct %1$s *skel)				    \n\
		{							    \n\
		", obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				skel_closenz(skel->links.%1$s_fd);	    \n\
			", bpf_program__name(prog));
	}

	codegen("\
		\n\
		}							    \n\
		");
}
630 
/* Emit the <obj>__destroy() helper for the light skeleton: detach all
 * programs, close program and map FDs, free mapped data, then free the
 * skeleton itself.  NULL skeletons are tolerated.
 */
static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
{
	struct bpf_program *prog;
	struct bpf_map *map;
	char ident[256];

	codegen("\
		\n\
		static void						    \n\
		%1$s__destroy(struct %1$s *skel)			    \n\
		{							    \n\
			if (!skel)					    \n\
				return;					    \n\
			%1$s__detach(skel);				    \n\
		",
		obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				skel_closenz(skel->progs.%1$s.prog_fd);	    \n\
			", bpf_program__name(prog));
	}

	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;
		/* memory-mapped internal maps also need their data freed */
		if (bpf_map__is_internal(map) &&
		    (bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			printf("\tskel_free_map_data(skel->%1$s, skel->maps.%1$s.initial_value, %2$zd);\n",
			       ident, bpf_map_mmap_sz(map));
		codegen("\
			\n\
				skel_closenz(skel->maps.%1$s.map_fd);	    \n\
			", ident);
	}
	codegen("\
		\n\
			skel_free(skel);				    \n\
		}							    \n\
		",
		obj_name);
}
674 
/* Generate the "light" skeleton body: run the gen_loader pass over @obj
 * to capture loader program data/instructions, then emit the
 * __open/__load/__open_and_load/__attach/__detach/__destroy helpers plus
 * embedded hex dumps of map data and the loader program.  Returns 0 on
 * success, negative error otherwise.
 */
static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
{
	DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
	struct bpf_map *map;
	char ident[256];
	int err = 0;

	err = bpf_object__gen_loader(obj, &opts);
	if (err)
		return err;

	err = bpf_object__load(obj);
	if (err) {
		p_err("failed to load object file");
		goto out;
	}
	/* If there was no error during load then gen_loader_opts
	 * are populated with the loader program.
	 */

	/* finish generating 'struct skel' */
	codegen("\
		\n\
		};							    \n\
		", obj_name);


	codegen_attach_detach(obj, obj_name);

	codegen_destroy(obj, obj_name);

	codegen("\
		\n\
		static inline struct %1$s *				    \n\
		%1$s__open(void)					    \n\
		{							    \n\
			struct %1$s *skel;				    \n\
									    \n\
			skel = skel_alloc(sizeof(*skel));		    \n\
			if (!skel)					    \n\
				goto cleanup;				    \n\
			skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\
		",
		obj_name, opts.data_sz);
	/* embed initial data of each mmapable internal map as a string
	 * literal and prepare the writable mapping for it
	 */
	bpf_object__for_each_map(map, obj) {
		const void *mmap_data = NULL;
		size_t mmap_size = 0;

		if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
			continue;

		codegen("\
		\n\
			{						    \n\
				static const char data[] __attribute__((__aligned__(8))) = \"\\\n\
		");
		mmap_data = bpf_map__initial_value(map, &mmap_size);
		print_hex(mmap_data, mmap_size);
		codegen("\
		\n\
		\";							    \n\
									    \n\
				skel->%1$s = skel_prep_map_data((void *)data, %2$zd,\n\
								sizeof(data) - 1);\n\
				if (!skel->%1$s)			    \n\
					goto cleanup;			    \n\
				skel->maps.%1$s.initial_value = (__u64) (long) skel->%1$s;\n\
			}						    \n\
			", ident, bpf_map_mmap_sz(map));
	}
	codegen("\
		\n\
			return skel;					    \n\
		cleanup:						    \n\
			%1$s__destroy(skel);				    \n\
			return NULL;					    \n\
		}							    \n\
									    \n\
		static inline int					    \n\
		%1$s__load(struct %1$s *skel)				    \n\
		{							    \n\
			struct bpf_load_and_run_opts opts = {};		    \n\
			int err;					    \n\
			static const char opts_data[] __attribute__((__aligned__(8))) = \"\\\n\
		",
		obj_name);
	print_hex(opts.data, opts.data_sz);
	codegen("\
		\n\
		\";							    \n\
			static const char opts_insn[] __attribute__((__aligned__(8))) = \"\\\n\
		");
	print_hex(opts.insns, opts.insns_sz);
	codegen("\
		\n\
		\";							    \n\
									    \n\
			opts.ctx = (struct bpf_loader_ctx *)skel;	    \n\
			opts.data_sz = sizeof(opts_data) - 1;		    \n\
			opts.data = (void *)opts_data;			    \n\
			opts.insns_sz = sizeof(opts_insn) - 1;		    \n\
			opts.insns = (void *)opts_insn;			    \n\
									    \n\
			err = bpf_load_and_run(&opts);			    \n\
			if (err < 0)					    \n\
				return err;				    \n\
		");
	/* after load, finalize each mmapable map's mapping with the
	 * protection flags matching its read-only status
	 */
	bpf_object__for_each_map(map, obj) {
		const char *mmap_flags;

		if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
			continue;

		if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
			mmap_flags = "PROT_READ";
		else
			mmap_flags = "PROT_READ | PROT_WRITE";

		codegen("\
		\n\
			skel->%1$s = skel_finalize_map_data(&skel->maps.%1$s.initial_value,  \n\
							%2$zd, %3$s, skel->maps.%1$s.map_fd);\n\
			if (!skel->%1$s)				    \n\
				return -ENOMEM;				    \n\
			",
		       ident, bpf_map_mmap_sz(map), mmap_flags);
	}
	codegen("\
		\n\
			return 0;					    \n\
		}							    \n\
									    \n\
		static inline struct %1$s *				    \n\
		%1$s__open_and_load(void)				    \n\
		{							    \n\
			struct %1$s *skel;				    \n\
									    \n\
			skel = %1$s__open();				    \n\
			if (!skel)					    \n\
				return NULL;				    \n\
			if (%1$s__load(skel)) {				    \n\
				%1$s__destroy(skel);			    \n\
				return NULL;				    \n\
			}						    \n\
			return skel;					    \n\
		}							    \n\
									    \n\
		", obj_name);

	codegen_asserts(obj, obj_name);

	codegen("\
		\n\
									    \n\
		#endif /* %s */						    \n\
		",
		header_guard);
	err = 0;
out:
	return err;
}
836 
/* Emit the portion of <obj>__create_skeleton() that allocates and fills
 * the bpf_map_skeleton array: one entry per map with a derivable
 * identifier, wiring mmaped pointers when @mmaped is set.
 */
static void
codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped)
{
	struct bpf_map *map;
	char ident[256];
	size_t i;

	if (!map_cnt)
		return;

	codegen("\
		\n\
								\n\
			/* maps */				    \n\
			s->map_cnt = %zu;			    \n\
			s->map_skel_sz = sizeof(*s->maps);	    \n\
			s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
			if (!s->maps) {				    \n\
				err = -ENOMEM;			    \n\
				goto err;			    \n\
			}					    \n\
		",
		map_cnt
	);
	i = 0;
	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		codegen("\
			\n\
								\n\
				s->maps[%zu].name = \"%s\";	    \n\
				s->maps[%zu].map = &obj->maps.%s;   \n\
			",
			i, bpf_map__name(map), i, ident);
		/* memory-mapped internal maps */
		if (mmaped && is_internal_mmapable_map(map, ident, sizeof(ident))) {
			printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
				i, ident);
		}
		i++;
	}
}
881 
882 static void
883 codegen_progs_skeleton(struct bpf_object *obj, size_t prog_cnt, bool populate_links)
884 {
885 	struct bpf_program *prog;
886 	int i;
887 
888 	if (!prog_cnt)
889 		return;
890 
891 	codegen("\
892 		\n\
893 									\n\
894 			/* programs */				    \n\
895 			s->prog_cnt = %zu;			    \n\
896 			s->prog_skel_sz = sizeof(*s->progs);	    \n\
897 			s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
898 			if (!s->progs) {			    \n\
899 				err = -ENOMEM;			    \n\
900 				goto err;			    \n\
901 			}					    \n\
902 		",
903 		prog_cnt
904 	);
905 	i = 0;
906 	bpf_object__for_each_program(prog, obj) {
907 		codegen("\
908 			\n\
909 									\n\
910 				s->progs[%1$zu].name = \"%2$s\";    \n\
911 				s->progs[%1$zu].prog = &obj->progs.%2$s;\n\
912 			",
913 			i, bpf_program__name(prog));
914 
915 		if (populate_links) {
916 			codegen("\
917 				\n\
918 					s->progs[%1$zu].link = &obj->links.%2$s;\n\
919 				",
920 				i, bpf_program__name(prog));
921 		}
922 		i++;
923 	}
924 }
925 
/* Emit the member declarations of a struct_ops shadow type.  Scalar
 * members keep their original type, function pointers become
 * struct bpf_program pointers, and everything else is replaced by an
 * opaque char array of matching size.  Padding is inserted so shadow
 * and kernel layouts agree byte-for-byte.  Returns 0 on success,
 * negative error otherwise.
 */
static int walk_st_ops_shadow_vars(struct btf *btf, const char *ident,
				   const struct btf_type *map_type, __u32 map_type_id)
{
	LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts, .indent_level = 3);
	const struct btf_type *member_type;
	__u32 offset, next_offset = 0;
	const struct btf_member *m;
	struct btf_dump *d = NULL;
	const char *member_name;
	__u32 member_type_id;
	int i, err = 0, n;
	int size;

	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
	if (!d)
		return -errno;

	n = btf_vlen(map_type);
	for (i = 0, m = btf_members(map_type); i < n; i++, m++) {
		member_type = skip_mods_and_typedefs(btf, m->type, &member_type_id);
		member_name = btf__name_by_offset(btf, m->name_off);

		/* BTF member offsets are in bits; pad up to this member's
		 * byte offset if the previous member ended short of it
		 */
		offset = m->offset / 8;
		if (next_offset < offset)
			printf("\t\t\tchar __padding_%d[%d];\n", i, offset - next_offset);

		switch (btf_kind(member_type)) {
		case BTF_KIND_INT:
		case BTF_KIND_FLOAT:
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
			/* scalar type */
			printf("\t\t\t");
			opts.field_name = member_name;
			err = btf_dump__emit_type_decl(d, member_type_id, &opts);
			if (err) {
				p_err("Failed to emit type declaration for %s: %d", member_name, err);
				goto out;
			}
			printf(";\n");

			size = btf__resolve_size(btf, member_type_id);
			if (size < 0) {
				p_err("Failed to resolve size of %s: %d\n", member_name, size);
				err = size;
				goto out;
			}

			next_offset = offset + size;
			break;

		case BTF_KIND_PTR:
			if (resolve_func_ptr(btf, m->type, NULL)) {
				/* Function pointer */
				printf("\t\t\tstruct bpf_program *%s;\n", member_name);

				next_offset = offset + sizeof(void *);
				break;
			}
			/* All pointer types are unsupported except for
			 * function pointers.
			 */
			fallthrough;

		default:
			/* Unsupported types
			 *
			 * Types other than scalar types and function
			 * pointers are currently not supported in order to
			 * prevent conflicts in the generated code caused
			 * by multiple definitions. For instance, if the
			 * struct type FOO is used in a struct_ops map,
			 * bpftool has to generate definitions for FOO,
			 * which may result in conflicts if FOO is defined
			 * in different skeleton files.
			 */
			size = btf__resolve_size(btf, member_type_id);
			if (size < 0) {
				p_err("Failed to resolve size of %s: %d\n", member_name, size);
				err = size;
				goto out;
			}
			printf("\t\t\tchar __unsupported_%d[%d];\n", i, size);

			next_offset = offset + size;
			break;
		}
	}

	/* Cannot fail since it must be a struct type */
	size = btf__resolve_size(btf, map_type_id);
	if (next_offset < (__u32)size)
		printf("\t\t\tchar __padding_end[%d];\n", size - next_offset);

out:
	btf_dump__free(d);

	return err;
}
1025 
/* Generate the pointer of the shadow type for a struct_ops map.
 *
 * This function adds a pointer of the shadow type for a struct_ops map.
 * The members of a struct_ops map can be exported through a pointer to a
 * shadow type. The user can access these members through the pointer.
 *
 * A shadow type does not include all members; only members of certain
 * types are included. They are scalar types and function pointers. The
 * function pointers are translated to pointers to struct bpf_program.
 * The scalar types are translated to the original type without any
 * modifiers.
 *
 * Unsupported types will be translated to a char array to occupy the same
 * space as the original field, being renamed as __unsupported_*.  The user
 * should treat these fields as opaque data.
 */
static int gen_st_ops_shadow_type(const char *obj_name, struct btf *btf, const char *ident,
				  const struct bpf_map *map)
{
	const struct btf_type *map_type;
	const char *type_name;
	__u32 map_type_id;
	int err;

	/* value type id 0 means the map carries no BTF value type info */
	map_type_id = bpf_map__btf_value_type_id(map);
	if (map_type_id == 0)
		return -EINVAL;
	map_type = btf__type_by_id(btf, map_type_id);
	if (!map_type)
		return -EINVAL;

	type_name = btf__name_by_offset(btf, map_type->name_off);

	printf("\t\tstruct %s__%s__%s {\n", obj_name, ident, type_name);

	err = walk_st_ops_shadow_vars(btf, ident, map_type, map_type_id);
	if (err)
		return err;

	printf("\t\t} *%s;\n", ident);

	return 0;
}
1068 
1069 static int gen_st_ops_shadow(const char *obj_name, struct btf *btf, struct bpf_object *obj)
1070 {
1071 	int err, st_ops_cnt = 0;
1072 	struct bpf_map *map;
1073 	char ident[256];
1074 
1075 	if (!btf)
1076 		return 0;
1077 
1078 	/* Generate the pointers to shadow types of
1079 	 * struct_ops maps.
1080 	 */
1081 	bpf_object__for_each_map(map, obj) {
1082 		if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
1083 			continue;
1084 		if (!get_map_ident(map, ident, sizeof(ident)))
1085 			continue;
1086 
1087 		if (st_ops_cnt == 0) /* first struct_ops map */
1088 			printf("\tstruct {\n");
1089 		st_ops_cnt++;
1090 
1091 		err = gen_st_ops_shadow_type(obj_name, btf, ident, map);
1092 		if (err)
1093 			return err;
1094 	}
1095 
1096 	if (st_ops_cnt)
1097 		printf("\t} struct_ops;\n");
1098 
1099 	return 0;
1100 }
1101 
/* Generate the code to initialize the pointers of shadow types. */
static void gen_st_ops_shadow_init(struct btf *btf, struct bpf_object *obj)
{
	struct bpf_map *map;
	char ident[256];

	/* No BTF means no shadow types were emitted, so nothing to init. */
	if (!btf)
		return;

	/* Initialize the pointers to shadow types of
	 * struct_ops maps.
	 */
	bpf_object__for_each_map(map, obj) {
		if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
			continue;
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;
		/* each shadow pointer aliases the map's initial value area */
		codegen("\
			\n\
				obj->struct_ops.%1$s = bpf_map__initial_value(obj->maps.%1$s, NULL);\n\
			\n\
			", ident);
	}
}
1126 
/* Implement "bpftool gen skeleton FILE [name OBJECT_NAME]": generate a C
 * header that embeds the BPF object file verbatim and exposes typed
 * handles for its maps, programs, links and global data, plus
 * open/load/attach/detach/destroy helpers.  Returns 0 on success,
 * negative on error.
 */
static int do_skeleton(int argc, char **argv)
{
	char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
	size_t map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
	struct bpf_object *obj = NULL;
	const char *file;
	char ident[256];
	struct bpf_program *prog;
	int fd, err = -1;
	struct bpf_map *map;
	struct btf *btf;
	struct stat st;

	if (!REQ_ARGS(1)) {
		usage();
		return -1;
	}
	file = GET_ARG();

	/* Parse optional "name OBJECT_NAME" keyword pair(s). */
	while (argc) {
		if (!REQ_ARGS(2))
			return -1;

		if (is_prefix(*argv, "name")) {
			NEXT_ARG();

			if (obj_name[0] != '\0') {
				p_err("object name already specified");
				return -1;
			}

			strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
			obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
		} else {
			p_err("unknown arg %s", *argv);
			return -1;
		}

		NEXT_ARG();
	}

	if (argc) {
		p_err("extra unknown arguments");
		return -1;
	}

	/* mmap() the whole object file read-only; its bytes are embedded
	 * into the generated header by print_hex() further down.
	 */
	if (stat(file, &st)) {
		p_err("failed to stat() %s: %s", file, strerror(errno));
		return -1;
	}
	file_sz = st.st_size;
	mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
	fd = open(file, O_RDONLY);
	if (fd < 0) {
		p_err("failed to open() %s: %s", file, strerror(errno));
		return -1;
	}
	obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
	if (obj_data == MAP_FAILED) {
		obj_data = NULL;
		p_err("failed to mmap() %s: %s", file, strerror(errno));
		goto out;
	}
	/* default skeleton name: sanitized basename of the input file */
	if (obj_name[0] == '\0')
		get_obj_name(obj_name, file);
	opts.object_name = obj_name;
	if (verifier_logs)
		/* log_level1 + log_level2 + stats, but not stable UAPI */
		opts.kernel_log_level = 1 + 2 + 4;
	obj = bpf_object__open_mem(obj_data, file_sz, &opts);
	if (!obj) {
		char err_buf[256];

		err = -errno;
		libbpf_strerror(err, err_buf, sizeof(err_buf));
		p_err("failed to open BPF object file: %s", err_buf);
		goto out;
	}

	/* Count maps/programs up front; the counts decide whether the
	 * maps/progs/links sub-structs get emitted at all.
	 */
	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident))) {
			p_err("ignoring unrecognized internal map '%s'...",
			      bpf_map__name(map));
			continue;
		}
		map_cnt++;
	}
	bpf_object__for_each_program(prog, obj) {
		prog_cnt++;
	}

	/* Emit the header preamble; the light-skeleton (-L) variant uses
	 * skel_internal.h instead of the full libbpf API.
	 */
	get_header_guard(header_guard, obj_name, "SKEL_H");
	if (use_loader) {
		codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */   \n\
		/* THIS FILE IS AUTOGENERATED BY BPFTOOL! */		    \n\
		#ifndef %2$s						    \n\
		#define %2$s						    \n\
									    \n\
		#include <bpf/skel_internal.h>				    \n\
									    \n\
		struct %1$s {						    \n\
			struct bpf_loader_ctx ctx;			    \n\
		",
		obj_name, header_guard
		);
	} else {
		codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */   \n\
									    \n\
		/* THIS FILE IS AUTOGENERATED BY BPFTOOL! */		    \n\
		#ifndef %2$s						    \n\
		#define %2$s						    \n\
									    \n\
		#include <errno.h>					    \n\
		#include <stdlib.h>					    \n\
		#include <bpf/libbpf.h>					    \n\
									    \n\
		struct %1$s {						    \n\
			struct bpf_object_skeleton *skeleton;		    \n\
			struct bpf_object *obj;				    \n\
		",
		obj_name, header_guard
		);
	}

	/* one handle per named map */
	if (map_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;
			if (use_loader)
				printf("\t\tstruct bpf_map_desc %s;\n", ident);
			else
				printf("\t\tstruct bpf_map *%s;\n", ident);
		}
		printf("\t} maps;\n");
	}

	btf = bpf_object__btf(obj);
	err = gen_st_ops_shadow(obj_name, btf, obj);
	if (err)
		goto out;

	/* one prog handle and one link/fd handle per program */
	if (prog_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			if (use_loader)
				printf("\t\tstruct bpf_prog_desc %s;\n",
				       bpf_program__name(prog));
			else
				printf("\t\tstruct bpf_program *%s;\n",
				       bpf_program__name(prog));
		}
		printf("\t} progs;\n");
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			if (use_loader)
				printf("\t\tint %s_fd;\n",
				       bpf_program__name(prog));
			else
				printf("\t\tstruct bpf_link *%s;\n",
				       bpf_program__name(prog));
		}
		printf("\t} links;\n");
	}

	/* typed views over .data/.bss/.rodata etc. */
	if (btf) {
		err = codegen_datasecs(obj, obj_name);
		if (err)
			goto out;
	}
	/* light skeleton takes a completely different path from here on */
	if (use_loader) {
		err = gen_trace(obj, obj_name, header_guard);
		goto out;
	}

	codegen("\
		\n\
									    \n\
		#ifdef __cplusplus					    \n\
			static inline struct %1$s *open(const struct bpf_object_open_opts *opts = nullptr);\n\
			static inline struct %1$s *open_and_load();	    \n\
			static inline int load(struct %1$s *skel);	    \n\
			static inline int attach(struct %1$s *skel);	    \n\
			static inline void detach(struct %1$s *skel);	    \n\
			static inline void destroy(struct %1$s *skel);	    \n\
			static inline const void *elf_bytes(size_t *sz);    \n\
		#endif /* __cplusplus */				    \n\
		};							    \n\
									    \n\
		static void						    \n\
		%1$s__destroy(struct %1$s *obj)				    \n\
		{							    \n\
			if (!obj)					    \n\
				return;					    \n\
			if (obj->skeleton)				    \n\
				bpf_object__destroy_skeleton(obj->skeleton);\n\
			free(obj);					    \n\
		}							    \n\
									    \n\
		static inline int					    \n\
		%1$s__create_skeleton(struct %1$s *obj);		    \n\
									    \n\
		static inline struct %1$s *				    \n\
		%1$s__open_opts(const struct bpf_object_open_opts *opts)    \n\
		{							    \n\
			struct %1$s *obj;				    \n\
			int err;					    \n\
									    \n\
			obj = (struct %1$s *)calloc(1, sizeof(*obj));	    \n\
			if (!obj) {					    \n\
				errno = ENOMEM;				    \n\
				return NULL;				    \n\
			}						    \n\
									    \n\
			err = %1$s__create_skeleton(obj);		    \n\
			if (err)					    \n\
				goto err_out;				    \n\
									    \n\
			err = bpf_object__open_skeleton(obj->skeleton, opts);\n\
			if (err)					    \n\
				goto err_out;				    \n\
									    \n\
		", obj_name);

	/* struct_ops shadow pointers are set between open and return */
	gen_st_ops_shadow_init(btf, obj);

	codegen("\
		\n\
			return obj;					    \n\
		err_out:						    \n\
			%1$s__destroy(obj);				    \n\
			errno = -err;					    \n\
			return NULL;					    \n\
		}							    \n\
									    \n\
		static inline struct %1$s *				    \n\
		%1$s__open(void)					    \n\
		{							    \n\
			return %1$s__open_opts(NULL);			    \n\
		}							    \n\
									    \n\
		static inline int					    \n\
		%1$s__load(struct %1$s *obj)				    \n\
		{							    \n\
			return bpf_object__load_skeleton(obj->skeleton);    \n\
		}							    \n\
									    \n\
		static inline struct %1$s *				    \n\
		%1$s__open_and_load(void)				    \n\
		{							    \n\
			struct %1$s *obj;				    \n\
			int err;					    \n\
									    \n\
			obj = %1$s__open();				    \n\
			if (!obj)					    \n\
				return NULL;				    \n\
			err = %1$s__load(obj);				    \n\
			if (err) {					    \n\
				%1$s__destroy(obj);			    \n\
				errno = -err;				    \n\
				return NULL;				    \n\
			}						    \n\
			return obj;					    \n\
		}							    \n\
									    \n\
		static inline int					    \n\
		%1$s__attach(struct %1$s *obj)				    \n\
		{							    \n\
			return bpf_object__attach_skeleton(obj->skeleton);  \n\
		}							    \n\
									    \n\
		static inline void					    \n\
		%1$s__detach(struct %1$s *obj)				    \n\
		{							    \n\
			bpf_object__detach_skeleton(obj->skeleton);	    \n\
		}							    \n\
		",
		obj_name
	);

	codegen("\
		\n\
									    \n\
		static inline const void *%1$s__elf_bytes(size_t *sz);	    \n\
									    \n\
		static inline int					    \n\
		%1$s__create_skeleton(struct %1$s *obj)			    \n\
		{							    \n\
			struct bpf_object_skeleton *s;			    \n\
			int err;					    \n\
									    \n\
			s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
			if (!s)	{					    \n\
				err = -ENOMEM;				    \n\
				goto err;				    \n\
			}						    \n\
									    \n\
			s->sz = sizeof(*s);				    \n\
			s->name = \"%1$s\";				    \n\
			s->obj = &obj->obj;				    \n\
		",
		obj_name
	);

	codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/);
	codegen_progs_skeleton(obj, prog_cnt, true /*populate_links*/);

	codegen("\
		\n\
									    \n\
			s->data = %1$s__elf_bytes(&s->data_sz);		    \n\
									    \n\
			obj->skeleton = s;				    \n\
			return 0;					    \n\
		err:							    \n\
			bpf_object__destroy_skeleton(s);		    \n\
			return err;					    \n\
		}							    \n\
									    \n\
		static inline const void *%1$s__elf_bytes(size_t *sz)	    \n\
		{							    \n\
			static const char data[] __attribute__((__aligned__(8))) = \"\\\n\
		",
		obj_name
	);

	/* embed contents of BPF object file */
	print_hex(obj_data, file_sz);

	codegen("\
		\n\
		\";							    \n\
									    \n\
			*sz = sizeof(data) - 1;				    \n\
			return (const void *)data;			    \n\
		}							    \n\
									    \n\
		#ifdef __cplusplus					    \n\
		struct %1$s *%1$s::open(const struct bpf_object_open_opts *opts) { return %1$s__open_opts(opts); }\n\
		struct %1$s *%1$s::open_and_load() { return %1$s__open_and_load(); }	\n\
		int %1$s::load(struct %1$s *skel) { return %1$s__load(skel); }		\n\
		int %1$s::attach(struct %1$s *skel) { return %1$s__attach(skel); }	\n\
		void %1$s::detach(struct %1$s *skel) { %1$s__detach(skel); }		\n\
		void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); }		\n\
		const void *%1$s::elf_bytes(size_t *sz) { return %1$s__elf_bytes(sz); } \n\
		#endif /* __cplusplus */				    \n\
									    \n\
		",
		obj_name);

	/* compile-time layout asserts for global data members */
	codegen_asserts(obj, obj_name);

	codegen("\
		\n\
									    \n\
		#endif /* %1$s */					    \n\
		",
		header_guard);
	err = 0;
out:
	bpf_object__close(obj);
	if (obj_data)
		munmap(obj_data, mmap_sz);
	close(fd);
	return err;
}
1499 
1500 /* Subskeletons are like skeletons, except they don't own the bpf_object,
1501  * associated maps, links, etc. Instead, they know about the existence of
1502  * variables, maps, programs and are able to find their locations
1503  * _at runtime_ from an already loaded bpf_object.
1504  *
1505  * This allows for library-like BPF objects to have userspace counterparts
1506  * with access to their own items without having to know anything about the
1507  * final BPF object that the library was linked into.
1508  */
static int do_subskeleton(int argc, char **argv)
{
	char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SUBSKEL_H__")];
	size_t i, len, file_sz, map_cnt = 0, prog_cnt = 0, mmap_sz, var_cnt = 0, var_idx = 0;
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
	struct bpf_object *obj = NULL;
	const char *file, *var_name;
	char ident[256];
	int fd, err = -1, map_type_id;
	const struct bpf_map *map;
	struct bpf_program *prog;
	struct btf *btf;
	const struct btf_type *map_type, *var_type;
	const struct btf_var_secinfo *var;
	struct stat st;

	if (!REQ_ARGS(1)) {
		usage();
		return -1;
	}
	file = GET_ARG();

	/* Parse optional "name OBJECT_NAME" keyword pair(s). */
	while (argc) {
		if (!REQ_ARGS(2))
			return -1;

		if (is_prefix(*argv, "name")) {
			NEXT_ARG();

			if (obj_name[0] != '\0') {
				p_err("object name already specified");
				return -1;
			}

			strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
			obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
		} else {
			p_err("unknown arg %s", *argv);
			return -1;
		}

		NEXT_ARG();
	}

	if (argc) {
		p_err("extra unknown arguments");
		return -1;
	}

	if (use_loader) {
		p_err("cannot use loader for subskeletons");
		return -1;
	}

	/* mmap() the object file; unlike skeletons its bytes are NOT
	 * embedded — only its layout is inspected.
	 */
	if (stat(file, &st)) {
		p_err("failed to stat() %s: %s", file, strerror(errno));
		return -1;
	}
	file_sz = st.st_size;
	mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
	fd = open(file, O_RDONLY);
	if (fd < 0) {
		p_err("failed to open() %s: %s", file, strerror(errno));
		return -1;
	}
	obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
	if (obj_data == MAP_FAILED) {
		obj_data = NULL;
		p_err("failed to mmap() %s: %s", file, strerror(errno));
		goto out;
	}
	if (obj_name[0] == '\0')
		get_obj_name(obj_name, file);

	/* The empty object name allows us to use bpf_map__name and produce
	 * ELF section names out of it. (".data" instead of "obj.data")
	 */
	opts.object_name = "";
	obj = bpf_object__open_mem(obj_data, file_sz, &opts);
	if (!obj) {
		char err_buf[256];

		/* NOTE(review): unlike do_skeleton(), err is not set from
		 * -errno here, so -1 is returned; verify this is intended.
		 */
		libbpf_strerror(errno, err_buf, sizeof(err_buf));
		p_err("failed to open BPF object file: %s", err_buf);
		obj = NULL;
		goto out;
	}

	/* variable lookup below is entirely BTF-driven */
	btf = bpf_object__btf(obj);
	if (!btf) {
		err = -1;
		p_err("need btf type information for %s", obj_name);
		goto out;
	}

	bpf_object__for_each_program(prog, obj) {
		prog_cnt++;
	}

	/* First, count how many variables we have to find.
	 * We need this in advance so the subskel can allocate the right
	 * amount of storage.
	 */
	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		/* Also count all maps that have a name */
		map_cnt++;

		if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
			continue;

		map_type_id = bpf_map__btf_value_type_id(map);
		if (map_type_id <= 0) {
			err = map_type_id;
			goto out;
		}
		map_type = btf__type_by_id(btf, map_type_id);

		/* count non-static variables in this datasec */
		var = btf_var_secinfos(map_type);
		len = btf_vlen(map_type);
		for (i = 0; i < len; i++, var++) {
			var_type = btf__type_by_id(btf, var->type);

			if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
				continue;

			var_cnt++;
		}
	}

	/* emit header preamble and the subskeleton struct */
	get_header_guard(header_guard, obj_name, "SUBSKEL_H");
	codegen("\
	\n\
	/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */	    \n\
									    \n\
	/* THIS FILE IS AUTOGENERATED! */				    \n\
	#ifndef %2$s							    \n\
	#define %2$s							    \n\
									    \n\
	#include <errno.h>						    \n\
	#include <stdlib.h>						    \n\
	#include <bpf/libbpf.h>						    \n\
									    \n\
	struct %1$s {							    \n\
		struct bpf_object *obj;					    \n\
		struct bpf_object_subskeleton *subskel;			    \n\
	", obj_name, header_guard);

	if (map_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;
			printf("\t\tstruct bpf_map *%s;\n", ident);
		}
		printf("\t} maps;\n");
	}

	err = gen_st_ops_shadow(obj_name, btf, obj);
	if (err)
		goto out;

	if (prog_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			printf("\t\tstruct bpf_program *%s;\n",
				bpf_program__name(prog));
		}
		printf("\t} progs;\n");
	}

	err = codegen_subskel_datasecs(obj, obj_name);
	if (err)
		goto out;

	/* emit code that will allocate enough storage for all symbols */
	codegen("\
		\n\
									    \n\
		#ifdef __cplusplus					    \n\
			static inline struct %1$s *open(const struct bpf_object *src);\n\
			static inline void destroy(struct %1$s *skel);	    \n\
		#endif /* __cplusplus */				    \n\
		};							    \n\
									    \n\
		static inline void					    \n\
		%1$s__destroy(struct %1$s *skel)			    \n\
		{							    \n\
			if (!skel)					    \n\
				return;					    \n\
			if (skel->subskel)				    \n\
				bpf_object__destroy_subskeleton(skel->subskel);\n\
			free(skel);					    \n\
		}							    \n\
									    \n\
		static inline struct %1$s *				    \n\
		%1$s__open(const struct bpf_object *src)		    \n\
		{							    \n\
			struct %1$s *obj;				    \n\
			struct bpf_object_subskeleton *s;		    \n\
			int err;					    \n\
									    \n\
			obj = (struct %1$s *)calloc(1, sizeof(*obj));	    \n\
			if (!obj) {					    \n\
				err = -ENOMEM;				    \n\
				goto err;				    \n\
			}						    \n\
			s = (struct bpf_object_subskeleton *)calloc(1, sizeof(*s));\n\
			if (!s) {					    \n\
				err = -ENOMEM;				    \n\
				goto err;				    \n\
			}						    \n\
			s->sz = sizeof(*s);				    \n\
			s->obj = src;					    \n\
			s->var_skel_sz = sizeof(*s->vars);		    \n\
			obj->subskel = s;				    \n\
									    \n\
			/* vars */					    \n\
			s->var_cnt = %2$d;				    \n\
			s->vars = (struct bpf_var_skeleton *)calloc(%2$d, sizeof(*s->vars));\n\
			if (!s->vars) {					    \n\
				err = -ENOMEM;				    \n\
				goto err;				    \n\
			}						    \n\
		",
		obj_name, var_cnt
	);

	/* walk through each symbol and emit the runtime representation */
	bpf_object__for_each_map(map, obj) {
		if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
			continue;

		map_type_id = bpf_map__btf_value_type_id(map);
		if (map_type_id <= 0)
			/* skip over internal maps with no type*/
			continue;

		map_type = btf__type_by_id(btf, map_type_id);
		var = btf_var_secinfos(map_type);
		len = btf_vlen(map_type);
		for (i = 0; i < len; i++, var++) {
			var_type = btf__type_by_id(btf, var->type);
			var_name = btf__name_by_offset(btf, var_type->name_off);

			if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
				continue;

			/* Note that we use the dot prefix in .data as the
			 * field access operator i.e. maps%s becomes maps.data
			 */
			codegen("\
			\n\
									    \n\
				s->vars[%3$d].name = \"%1$s\";		    \n\
				s->vars[%3$d].map = &obj->maps.%2$s;	    \n\
				s->vars[%3$d].addr = (void **) &obj->%2$s.%1$s;\n\
			", var_name, ident, var_idx);

			var_idx++;
		}
	}

	codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/);
	codegen_progs_skeleton(obj, prog_cnt, false /*links*/);

	codegen("\
		\n\
									    \n\
			err = bpf_object__open_subskeleton(s);		    \n\
			if (err)					    \n\
				goto err;				    \n\
									    \n\
		");

	gen_st_ops_shadow_init(btf, obj);

	codegen("\
		\n\
			return obj;					    \n\
		err:							    \n\
			%1$s__destroy(obj);				    \n\
			errno = -err;					    \n\
			return NULL;					    \n\
		}							    \n\
									    \n\
		#ifdef __cplusplus					    \n\
		struct %1$s *%1$s::open(const struct bpf_object *src) { return %1$s__open(src); }\n\
		void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); }\n\
		#endif /* __cplusplus */				    \n\
									    \n\
		#endif /* %2$s */					    \n\
		",
		obj_name, header_guard);
	err = 0;
out:
	bpf_object__close(obj);
	if (obj_data)
		munmap(obj_data, mmap_sz);
	close(fd);
	return err;
}
1814 
/* Implement "bpftool gen object OUTPUT_FILE INPUT_FILE...": statically
 * link one or more BPF ELF object files into a single output object
 * using the libbpf linker.  Returns 0 on success, non-zero on error.
 */
static int do_object(int argc, char **argv)
{
	const char *out_fname, *in_fname;
	struct bpf_linker *linker;
	int err = 0;

	if (!REQ_ARGS(2)) {
		usage();
		return -1;
	}
	out_fname = GET_ARG();

	linker = bpf_linker__new(out_fname, NULL);
	if (!linker) {
		p_err("failed to create BPF linker instance");
		return -1;
	}

	/* feed every remaining argument into the linker, in order */
	while (argc) {
		in_fname = GET_ARG();
		err = bpf_linker__add_file(linker, in_fname, NULL);
		if (err) {
			p_err("failed to link '%s': %s (%d)", in_fname, strerror(errno), errno);
			goto out;
		}
	}

	/* finalize writes the combined ELF out to disk */
	err = bpf_linker__finalize(linker);
	if (err)
		p_err("failed to finalize ELF file: %s (%d)", strerror(errno), errno);
out:
	bpf_linker__free(linker);
	return err;
}
1855 
/* Print usage for the "gen" command family.  Always succeeds. */
static int do_help(int argc, char **argv)
{
	/* JSON mode emits a null value instead of human-readable text */
	if (json_output) {
		jsonw_null(json_wtr);
		return 0;
	}

	fprintf(stderr,
		"Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n"
		"       %1$s %2$s skeleton FILE [name OBJECT_NAME]\n"
		"       %1$s %2$s subskeleton FILE [name OBJECT_NAME]\n"
		"       %1$s %2$s min_core_btf INPUT OUTPUT OBJECT [OBJECT...]\n"
		"       %1$s %2$s help\n"
		"\n"
		"       " HELP_SPEC_OPTIONS " |\n"
		"                    {-L|--use-loader} }\n"
		"",
		bin_name, "gen");

	return 0;
}
1877 
1878 static int btf_save_raw(const struct btf *btf, const char *path)
1879 {
1880 	const void *data;
1881 	FILE *f = NULL;
1882 	__u32 data_sz;
1883 	int err = 0;
1884 
1885 	data = btf__raw_data(btf, &data_sz);
1886 	if (!data)
1887 		return -ENOMEM;
1888 
1889 	f = fopen(path, "wb");
1890 	if (!f)
1891 		return -errno;
1892 
1893 	if (fwrite(data, 1, data_sz, f) != data_sz)
1894 		err = -errno;
1895 
1896 	fclose(f);
1897 	return err;
1898 }
1899 
/* State for min_core_btf generation: two parses of the same target BTF,
 * one kept pristine and one whose name_off fields serve as "used" marks.
 */
struct btfgen_info {
	struct btf *src_btf;	/* original target BTF, read but never modified */
	struct btf *marked_btf; /* btf structure used to mark used types */
};
1904 
/* Identity hash for hashmap keys; type IDs are already well distributed. */
static size_t btfgen_hash_fn(long key, void *ctx)
{
	return (size_t)key;
}
1909 
/* Key equality for the hashmap: plain integer comparison. */
static bool btfgen_equal_fn(long k1, long k2, void *ctx)
{
	return (k1 == k2);
}
1914 
1915 static void btfgen_free_info(struct btfgen_info *info)
1916 {
1917 	if (!info)
1918 		return;
1919 
1920 	btf__free(info->src_btf);
1921 	btf__free(info->marked_btf);
1922 
1923 	free(info);
1924 }
1925 
1926 static struct btfgen_info *
1927 btfgen_new_info(const char *targ_btf_path)
1928 {
1929 	struct btfgen_info *info;
1930 	int err;
1931 
1932 	info = calloc(1, sizeof(*info));
1933 	if (!info)
1934 		return NULL;
1935 
1936 	info->src_btf = btf__parse(targ_btf_path, NULL);
1937 	if (!info->src_btf) {
1938 		err = -errno;
1939 		p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
1940 		goto err_out;
1941 	}
1942 
1943 	info->marked_btf = btf__parse(targ_btf_path, NULL);
1944 	if (!info->marked_btf) {
1945 		err = -errno;
1946 		p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
1947 		goto err_out;
1948 	}
1949 
1950 	return info;
1951 
1952 err_out:
1953 	btfgen_free_info(info);
1954 	errno = -err;
1955 	return NULL;
1956 }
1957 
1958 #define MARKED UINT32_MAX
1959 
1960 static void btfgen_mark_member(struct btfgen_info *info, int type_id, int idx)
1961 {
1962 	const struct btf_type *t = btf__type_by_id(info->marked_btf, type_id);
1963 	struct btf_member *m = btf_members(t) + idx;
1964 
1965 	m->name_off = MARKED;
1966 }
1967 
/* Mark @type_id (and, recursively, the types it references) as used on
 * the marked BTF copy.  @follow_pointers controls whether pointed-to
 * types are pulled in as well: field relocations don't need them, while
 * type-based relocations do.
 *
 * Returns 0 on success, -EINVAL on an unsupported BTF kind.
 */
static int
btfgen_mark_type(struct btfgen_info *info, unsigned int type_id, bool follow_pointers)
{
	const struct btf_type *btf_type = btf__type_by_id(info->src_btf, type_id);
	struct btf_type *cloned_type;
	struct btf_param *param;
	struct btf_array *array;
	int err, i;

	/* type id 0 is "void"; nothing to mark */
	if (type_id == 0)
		return 0;

	/* mark type on cloned BTF as used */
	cloned_type = (struct btf_type *) btf__type_by_id(info->marked_btf, type_id);
	cloned_type->name_off = MARKED;

	/* recursively mark other types needed by it */
	switch (btf_kind(btf_type)) {
	/* self-contained kinds: marking the type itself is enough */
	case BTF_KIND_UNKN:
	case BTF_KIND_INT:
	case BTF_KIND_FLOAT:
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		break;
	case BTF_KIND_PTR:
		if (follow_pointers) {
			err = btfgen_mark_type(info, btf_type->type, follow_pointers);
			if (err)
				return err;
		}
		break;
	/* modifiers and typedefs always need their underlying type */
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_TYPEDEF:
		err = btfgen_mark_type(info, btf_type->type, follow_pointers);
		if (err)
			return err;
		break;
	case BTF_KIND_ARRAY:
		array = btf_array(btf_type);

		/* mark array type */
		err = btfgen_mark_type(info, array->type, follow_pointers);
		/* mark array's index type */
		err = err ? : btfgen_mark_type(info, array->index_type, follow_pointers);
		if (err)
			return err;
		break;
	case BTF_KIND_FUNC_PROTO:
		/* mark ret type */
		err = btfgen_mark_type(info, btf_type->type, follow_pointers);
		if (err)
			return err;

		/* mark parameters types */
		param = btf_params(btf_type);
		for (i = 0; i < btf_vlen(btf_type); i++) {
			err = btfgen_mark_type(info, param->type, follow_pointers);
			if (err)
				return err;
			param++;
		}
		break;
	/* tells if some other type needs to be handled */
	default:
		p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
		return -EINVAL;
	}

	return 0;
}
2042 
/* Record the types a field-based CO-RE relocation needs: the root type
 * plus, for every step of the target access spec, the struct/union
 * member being traversed and its type (or the element type when the
 * step goes through an array).
 *
 * Returns 0 on success, -EINVAL on an unsupported BTF kind.
 */
static int btfgen_record_field_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	struct btf *btf = info->src_btf;
	const struct btf_type *btf_type;
	struct btf_member *btf_member;
	struct btf_array *array;
	unsigned int type_id = targ_spec->root_type_id;
	int idx, err;

	/* mark root type */
	btf_type = btf__type_by_id(btf, type_id);
	err = btfgen_mark_type(info, type_id, false);
	if (err)
		return err;

	/* mark types for complex types (arrays, unions, structures) */
	for (int i = 1; i < targ_spec->raw_len; i++) {
		/* skip typedefs and mods */
		while (btf_is_mod(btf_type) || btf_is_typedef(btf_type)) {
			type_id = btf_type->type;
			btf_type = btf__type_by_id(btf, type_id);
		}

		switch (btf_kind(btf_type)) {
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			/* spec step i is a member index within this type */
			idx = targ_spec->raw_spec[i];
			btf_member = btf_members(btf_type) + idx;

			/* mark member */
			btfgen_mark_member(info, type_id, idx);

			/* mark member's type */
			type_id = btf_member->type;
			btf_type = btf__type_by_id(btf, type_id);
			err = btfgen_mark_type(info, type_id, false);
			if (err)
				return err;
			break;
		case BTF_KIND_ARRAY:
			/* spec step is an array index; descend to element type */
			array = btf_array(btf_type);
			type_id = array->type;
			btf_type = btf__type_by_id(btf, type_id);
			break;
		default:
			p_err("unsupported kind: %s (%d)",
			      btf_kind_str(btf_type), btf_type->type);
			return -EINVAL;
		}
	}

	return 0;
}
2096 
2097 /* Mark types, members, and member types. Compared to btfgen_record_field_relo,
2098  * this function does not rely on the target spec for inferring members, but
2099  * uses the associated BTF.
2100  *
2101  * The `behind_ptr` argument is used to stop marking of composite types reached
2102  * through a pointer. This way, we can keep BTF size in check while providing
2103  * reasonable match semantics.
2104  */
static int btfgen_mark_type_match(struct btfgen_info *info, __u32 type_id, bool behind_ptr)
{
	const struct btf_type *btf_type;
	struct btf *btf = info->src_btf;
	struct btf_type *cloned_type;
	int i, err;

	/* type id 0 is "void"; nothing to mark */
	if (type_id == 0)
		return 0;

	btf_type = btf__type_by_id(btf, type_id);
	/* mark type on cloned BTF as used */
	cloned_type = (struct btf_type *)btf__type_by_id(info->marked_btf, type_id);
	cloned_type->name_off = MARKED;

	switch (btf_kind(btf_type)) {
	/* self-contained kinds: marking the type itself is enough */
	case BTF_KIND_UNKN:
	case BTF_KIND_INT:
	case BTF_KIND_FLOAT:
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		break;
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		struct btf_member *m = btf_members(btf_type);
		__u16 vlen = btf_vlen(btf_type);

		/* behind a pointer the type's existence suffices;
		 * don't expand its members (keeps BTF size in check)
		 */
		if (behind_ptr)
			break;

		for (i = 0; i < vlen; i++, m++) {
			/* mark member */
			btfgen_mark_member(info, type_id, i);

			/* mark member's type */
			err = btfgen_mark_type_match(info, m->type, false);
			if (err)
				return err;
		}
		break;
	}
	/* modifiers/typedefs pass behind_ptr through unchanged */
	case BTF_KIND_CONST:
	case BTF_KIND_FWD:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
		return btfgen_mark_type_match(info, btf_type->type, behind_ptr);
	case BTF_KIND_PTR:
		return btfgen_mark_type_match(info, btf_type->type, true);
	case BTF_KIND_ARRAY: {
		struct btf_array *array;

		array = btf_array(btf_type);
		/* mark array type */
		err = btfgen_mark_type_match(info, array->type, false);
		/* mark array's index type */
		err = err ? : btfgen_mark_type_match(info, array->index_type, false);
		if (err)
			return err;
		break;
	}
	case BTF_KIND_FUNC_PROTO: {
		__u16 vlen = btf_vlen(btf_type);
		struct btf_param *param;

		/* mark ret type */
		err = btfgen_mark_type_match(info, btf_type->type, false);
		if (err)
			return err;

		/* mark parameters types */
		param = btf_params(btf_type);
		for (i = 0; i < vlen; i++) {
			err = btfgen_mark_type_match(info, param->type, false);
			if (err)
				return err;
			param++;
		}
		break;
	}
	/* tells if some other type needs to be handled */
	default:
		p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
		return -EINVAL;
	}

	return 0;
}
2193 
2194 /* Mark types, members, and member types. Compared to btfgen_record_field_relo,
2195  * this function does not rely on the target spec for inferring members, but
2196  * uses the associated BTF.
2197  */
2198 static int btfgen_record_type_match_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
2199 {
2200 	return btfgen_mark_type_match(info, targ_spec->root_type_id, false);
2201 }
2202 
2203 static int btfgen_record_type_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
2204 {
2205 	return btfgen_mark_type(info, targ_spec->root_type_id, true);
2206 }
2207 
2208 static int btfgen_record_enumval_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
2209 {
2210 	return btfgen_mark_type(info, targ_spec->root_type_id, false);
2211 }
2212 
2213 static int btfgen_record_reloc(struct btfgen_info *info, struct bpf_core_spec *res)
2214 {
2215 	switch (res->relo_kind) {
2216 	case BPF_CORE_FIELD_BYTE_OFFSET:
2217 	case BPF_CORE_FIELD_BYTE_SIZE:
2218 	case BPF_CORE_FIELD_EXISTS:
2219 	case BPF_CORE_FIELD_SIGNED:
2220 	case BPF_CORE_FIELD_LSHIFT_U64:
2221 	case BPF_CORE_FIELD_RSHIFT_U64:
2222 		return btfgen_record_field_relo(info, res);
2223 	case BPF_CORE_TYPE_ID_LOCAL: /* BPF_CORE_TYPE_ID_LOCAL doesn't require kernel BTF */
2224 		return 0;
2225 	case BPF_CORE_TYPE_ID_TARGET:
2226 	case BPF_CORE_TYPE_EXISTS:
2227 	case BPF_CORE_TYPE_SIZE:
2228 		return btfgen_record_type_relo(info, res);
2229 	case BPF_CORE_TYPE_MATCHES:
2230 		return btfgen_record_type_match_relo(info, res);
2231 	case BPF_CORE_ENUMVAL_EXISTS:
2232 	case BPF_CORE_ENUMVAL_VALUE:
2233 		return btfgen_record_enumval_relo(info, res);
2234 	default:
2235 		return -EINVAL;
2236 	}
2237 }
2238 
/* Find candidate types in @targ_btf that may match the local type
 * @local_id from @local_btf, comparing by the type's "essential" name
 * length (see bpf_core_essential_name_len() in libbpf).
 *
 * Returns a newly allocated candidate list owned by the caller on
 * success; returns NULL with errno set on failure.
 */
static struct bpf_core_cand_list *
btfgen_find_cands(const struct btf *local_btf, const struct btf *targ_btf, __u32 local_id)
{
	const struct btf_type *local_type;
	struct bpf_core_cand_list *cands = NULL;
	struct bpf_core_cand local_cand = {};
	size_t local_essent_len;
	const char *local_name;
	int err;

	local_cand.btf = local_btf;
	local_cand.id = local_id;

	local_type = btf__type_by_id(local_btf, local_id);
	if (!local_type) {
		err = -EINVAL;
		goto err_out;
	}

	local_name = btf__name_by_offset(local_btf, local_type->name_off);
	if (!local_name) {
		err = -EINVAL;
		goto err_out;
	}
	local_essent_len = bpf_core_essential_name_len(local_name);

	cands = calloc(1, sizeof(*cands));
	if (!cands)
		return NULL; /* errno is already ENOMEM from calloc() */

	/* collect matching candidates from the target ("vmlinux") BTF */
	err = bpf_core_add_cands(&local_cand, local_essent_len, targ_btf, "vmlinux", 1, cands);
	if (err)
		goto err_out;

	return cands;

err_out:
	/* bpf_core_free_cands() handles a NULL list */
	bpf_core_free_cands(cands);
	errno = -err;
	return NULL;
}
2280 
/* Record relocation information for a single BPF object.
 *
 * Parses the object's .BTF and .BTF.ext sections, computes each CO-RE
 * relocation against the source (target) BTF, and records the types and
 * members it touches in @info. Returns 0 on success, a negative error
 * code on failure.
 */
static int btfgen_record_obj(struct btfgen_info *info, const char *obj_path)
{
	const struct btf_ext_info_sec *sec;
	const struct bpf_core_relo *relo;
	const struct btf_ext_info *seg;
	struct hashmap_entry *entry;
	struct hashmap *cand_cache = NULL;
	struct btf_ext *btf_ext = NULL;
	unsigned int relo_idx;
	struct btf *btf = NULL;
	size_t i;
	int err;

	btf = btf__parse(obj_path, &btf_ext);
	if (!btf) {
		err = -errno;
		p_err("failed to parse BPF object '%s': %s", obj_path, strerror(errno));
		return err;
	}

	/* CO-RE relocations live in the .BTF.ext section; without it there
	 * is nothing to record.
	 */
	if (!btf_ext) {
		p_err("failed to parse BPF object '%s': section %s not found",
		      obj_path, BTF_EXT_ELF_SEC);
		err = -EINVAL;
		goto out;
	}

	/* object has a .BTF.ext section but no CO-RE relocations: done */
	if (btf_ext->core_relo_info.len == 0) {
		err = 0;
		goto out;
	}

	/* cache candidate lists keyed by relocation root type ID, so each
	 * type's candidates are computed only once per object
	 */
	cand_cache = hashmap__new(btfgen_hash_fn, btfgen_equal_fn, NULL);
	if (IS_ERR(cand_cache)) {
		err = PTR_ERR(cand_cache);
		goto out;
	}

	/* iterate over every CO-RE relocation record in every ELF section */
	seg = &btf_ext->core_relo_info;
	for_each_btf_ext_sec(seg, sec) {
		for_each_btf_ext_rec(seg, sec, relo_idx, relo) {
			struct bpf_core_spec specs_scratch[3] = {};
			struct bpf_core_relo_res targ_res = {};
			struct bpf_core_cand_list *cands = NULL;
			const char *sec_name = btf__name_by_offset(btf, sec->sec_name_off);

			/* TYPE_ID_LOCAL needs no target candidates; for all
			 * other kinds, look them up (or compute and cache)
			 */
			if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
			    !hashmap__find(cand_cache, relo->type_id, &cands)) {
				cands = btfgen_find_cands(btf, info->src_btf, relo->type_id);
				if (!cands) {
					err = -errno;
					goto out;
				}

				err = hashmap__set(cand_cache, relo->type_id, cands,
						   NULL, NULL);
				if (err)
					goto out;
			}

			/* resolve the relocation against the target BTF */
			err = bpf_core_calc_relo_insn(sec_name, relo, relo_idx, btf, cands,
						      specs_scratch, &targ_res);
			if (err)
				goto out;

			/* specs_scratch[2] is the target spec */
			err = btfgen_record_reloc(info, &specs_scratch[2]);
			if (err)
				goto out;
		}
	}

out:
	btf__free(btf);
	btf_ext__free(btf_ext);

	/* candidate lists are heap-allocated by btfgen_find_cands(); free
	 * each one before freeing the cache itself
	 */
	if (!IS_ERR_OR_NULL(cand_cache)) {
		hashmap__for_each_entry(cand_cache, entry, i) {
			bpf_core_free_cands(entry->pvalue);
		}
		hashmap__free(cand_cache);
	}

	return err;
}
2367 
2368 static int btfgen_remap_id(__u32 *type_id, void *ctx)
2369 {
2370 	unsigned int *ids = ctx;
2371 
2372 	*type_id = ids[*type_id];
2373 
2374 	return 0;
2375 }
2376 
/* Generate BTF from relocation information previously recorded.
 *
 * Two passes over the marked BTF: (1) copy every type whose name_off
 * carries the MARKED sentinel into a fresh BTF object, remembering the
 * old-ID -> new-ID mapping; for structs/unions only the members marked
 * as used are copied. (2) rewrite all type ID references in the new
 * object through that mapping.
 *
 * Returns the new BTF object on success; returns NULL with errno set on
 * failure.
 */
static struct btf *btfgen_get_btf(struct btfgen_info *info)
{
	struct btf *btf_new = NULL;
	unsigned int *ids = NULL;
	unsigned int i, n = btf__type_cnt(info->marked_btf);
	int err = 0;

	btf_new = btf__new_empty();
	if (!btf_new) {
		err = -errno;
		goto err_out;
	}

	/* old-ID -> new-ID map; calloc zero-fills, so unmarked types (and
	 * ID 0, void) map to 0
	 */
	ids = calloc(n, sizeof(*ids));
	if (!ids) {
		err = -errno;
		goto err_out;
	}

	/* first pass: add all marked types to btf_new and add their new ids to the ids map */
	for (i = 1; i < n; i++) {
		const struct btf_type *cloned_type, *type;
		const char *name;
		int new_id;

		cloned_type = btf__type_by_id(info->marked_btf, i);

		/* only types whose name_off was overwritten with the MARKED
		 * sentinel were recorded as used
		 */
		if (cloned_type->name_off != MARKED)
			continue;

		type = btf__type_by_id(info->src_btf, i);

		/* add members for struct and union */
		if (btf_is_composite(type)) {
			struct btf_member *cloned_m, *m;
			unsigned short vlen;
			int idx_src;

			name = btf__str_by_offset(info->src_btf, type->name_off);

			if (btf_is_struct(type))
				err = btf__add_struct(btf_new, name, type->size);
			else
				err = btf__add_union(btf_new, name, type->size);

			if (err < 0)
				goto err_out;
			/* btf__add_struct/union return the new type's ID */
			new_id = err;

			/* walk source and marked members in lockstep, copying
			 * only the members marked as used
			 */
			cloned_m = btf_members(cloned_type);
			m = btf_members(type);
			vlen = btf_vlen(cloned_type);
			for (idx_src = 0; idx_src < vlen; idx_src++, cloned_m++, m++) {
				/* add only members that are marked as used */
				if (cloned_m->name_off != MARKED)
					continue;

				name = btf__str_by_offset(info->src_btf, m->name_off);
				/* m->type is still a source-BTF ID here; it is
				 * remapped in the second pass
				 */
				err = btf__add_field(btf_new, name, m->type,
						     btf_member_bit_offset(cloned_type, idx_src),
						     btf_member_bitfield_size(cloned_type, idx_src));
				if (err < 0)
					goto err_out;
			}
		} else {
			/* non-composite types are copied wholesale */
			err = btf__add_type(btf_new, info->src_btf, type);
			if (err < 0)
				goto err_out;
			new_id = err;
		}

		/* add ID mapping */
		ids[i] = new_id;
	}

	/* second pass: fix up type ids */
	for (i = 1; i < btf__type_cnt(btf_new); i++) {
		struct btf_type *btf_type = (struct btf_type *) btf__type_by_id(btf_new, i);

		err = btf_type_visit_type_ids(btf_type, btfgen_remap_id, ids);
		if (err)
			goto err_out;
	}

	free(ids);
	return btf_new;

err_out:
	btf__free(btf_new);
	free(ids);
	errno = -err;
	return NULL;
}
2471 
2472 /* Create minimized BTF file for a set of BPF objects.
2473  *
2474  * The BTFGen algorithm is divided in two main parts: (1) collect the
2475  * BTF types that are involved in relocations and (2) generate the BTF
2476  * object using the collected types.
2477  *
 * In order to collect the types involved in the relocations, we parse
 * the BTF and BTF.ext sections of the BPF objects and use
 * bpf_core_calc_relo_insn() to get the target specification, which
 * indicates how the types and fields are used in a relocation.
2482  *
2483  * Types are recorded in different ways according to the kind of the
2484  * relocation. For field-based relocations only the members that are
2485  * actually used are saved in order to reduce the size of the generated
 * BTF file. For type-based relocations empty structs / unions are
2487  * generated and for enum-based relocations the whole type is saved.
2488  *
2489  * The second part of the algorithm generates the BTF object. It creates
2490  * an empty BTF object and fills it with the types recorded in the
2491  * previous step. This function takes care of only adding the structure
2492  * and union members that were marked as used and it also fixes up the
2493  * type IDs on the generated BTF object.
2494  */
static int minimize_btf(const char *src_btf, const char *dst_btf, const char *objspaths[])
{
	struct btf *min_btf = NULL;
	struct btfgen_info *info;
	int err, i;

	info = btfgen_new_info(src_btf);
	if (!info) {
		err = -errno;
		p_err("failed to allocate info structure: %s", strerror(errno));
		goto out;
	}

	/* step 1: record the CO-RE relocations of every given BPF object */
	for (i = 0; objspaths[i] != NULL; i++) {
		err = btfgen_record_obj(info, objspaths[i]);
		if (err) {
			p_err("error recording relocations for %s: %s", objspaths[i],
			      strerror(errno));
			goto out;
		}
	}

	/* step 2: build a BTF object holding only the recorded types */
	min_btf = btfgen_get_btf(info);
	if (!min_btf) {
		err = -errno;
		p_err("error generating BTF: %s", strerror(errno));
		goto out;
	}

	/* step 3: write the minimized BTF out as a raw file */
	err = btf_save_raw(min_btf, dst_btf);
	if (err) {
		p_err("error saving btf file: %s", strerror(errno));
		goto out;
	}

out:
	btf__free(min_btf);
	btfgen_free_info(info);

	return err;
}
2536 
/* "bpftool gen min_core_btf INPUT OUTPUT OBJECT [OBJECT...]" handler. */
static int do_min_core_btf(int argc, char **argv)
{
	const char *input, *output, **objs;
	int err, i;

	/* need at least the source BTF, the destination, and one object */
	if (!REQ_ARGS(3)) {
		usage();
		return -1;
	}

	input = GET_ARG();
	output = GET_ARG();

	/* NULL-terminated array holding the remaining object paths */
	objs = (const char **) calloc(argc + 1, sizeof(*objs));
	if (!objs) {
		p_err("failed to allocate array for object names");
		return -ENOMEM;
	}

	for (i = 0; argc; i++)
		objs[i] = GET_ARG();

	err = minimize_btf(input, output, objs);
	free(objs);
	return err;
}
2564 
/* Subcommand table for "bpftool gen"; terminated by a zeroed entry. */
static const struct cmd cmds[] = {
	{ "object",		do_object },
	{ "skeleton",		do_skeleton },
	{ "subskeleton",	do_subskeleton },
	{ "min_core_btf",	do_min_core_btf},
	{ "help",		do_help },
	{ 0 }
};
2573 
/* Entry point for "bpftool gen": dispatch to the matching subcommand,
 * falling back to do_help for unknown or missing subcommands.
 */
int do_gen(int argc, char **argv)
{
	return cmd_select(cmds, argc, argv, do_help);
}
2578