xref: /linux/tools/lib/bpf/btf.c (revision 7a5f1cd22d47f8ca4b760b6334378ae42c1bd24b)
1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 /* Copyright (c) 2018 Facebook */
3 
4 #include <byteswap.h>
5 #include <endian.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <fcntl.h>
10 #include <unistd.h>
11 #include <errno.h>
12 #include <sys/utsname.h>
13 #include <sys/param.h>
14 #include <sys/stat.h>
15 #include <sys/mman.h>
16 #include <linux/kernel.h>
17 #include <linux/err.h>
18 #include <linux/btf.h>
19 #include <gelf.h>
20 #include "btf.h"
21 #include "bpf.h"
22 #include "libbpf.h"
23 #include "libbpf_internal.h"
24 #include "hashmap.h"
25 #include "strset.h"
26 
/* upper bounds chosen so that (off + len) sums still fit in __s32/__u32 math */
#define BTF_MAX_NR_TYPES 0x7fffffffU
#define BTF_MAX_STR_OFFSET 0x7fffffffU

/* shared singleton returned by btf_type_by_id() for type ID 0 (VOID) */
static struct btf_type btf_void;
31 
/*
 * Describe how kinds are laid out; some have a singular element following the "struct btf_type",
 * some have BTF_INFO_VLEN(t->info) elements.  Specify sizes for both.  Flags are currently unused.
 * Kind layout can be optionally added to the BTF representation in a dedicated section to
 * facilitate parsing.  New kinds must be added here.
 */
static struct btf_layout layouts[NR_BTF_KINDS] = {
/*				singular element size		vlen element(s) size		flags */
[BTF_KIND_UNKN] =	{	0,				0,				0 },
[BTF_KIND_INT] =	{	sizeof(__u32),			0,				0 },
[BTF_KIND_PTR] =	{	0,				0,				0 },
[BTF_KIND_ARRAY] =	{	sizeof(struct btf_array),	0,				0 },
[BTF_KIND_STRUCT] =	{	0,				sizeof(struct btf_member),	0 },
[BTF_KIND_UNION] =	{	0,				sizeof(struct btf_member),	0 },
[BTF_KIND_ENUM] =	{	0,				sizeof(struct btf_enum),	0 },
[BTF_KIND_FWD] =	{	0,				0,				0 },
[BTF_KIND_TYPEDEF] =	{	0,				0,				0 },
[BTF_KIND_VOLATILE] =	{	0,				0,				0 },
[BTF_KIND_CONST] =	{	0,				0,				0 },
[BTF_KIND_RESTRICT] =	{	0,				0,				0 },
[BTF_KIND_FUNC] =	{	0,				0,				0 },
[BTF_KIND_FUNC_PROTO] =	{	0,				sizeof(struct btf_param),	0 },
[BTF_KIND_VAR] =	{	sizeof(struct btf_var),		0,				0 },
[BTF_KIND_DATASEC] =	{	0,				sizeof(struct btf_var_secinfo),	0 },
[BTF_KIND_FLOAT] =	{	0,				0,				0 },
[BTF_KIND_DECL_TAG] =	{	sizeof(struct btf_decl_tag),	0,				0 },
[BTF_KIND_TYPE_TAG] =	{	0,				0,				0 },
[BTF_KIND_ENUM64] =	{	0,				sizeof(struct btf_enum64),	0 },
};
61 
/* In-memory representation of one BTF object (base or split). */
struct btf {
	/* raw BTF data in native endianness */
	void *raw_data;
	/* raw BTF data in non-native endianness */
	void *raw_data_swapped;
	__u32 raw_size;
	/* whether target endianness differs from the native one */
	bool swapped_endian;

	/*
	 * When BTF is loaded from an ELF or raw memory it is stored
	 * in a contiguous memory block. The type_data, layout and strs_data
	 * point inside that memory region to their respective parts of BTF
	 * representation:
	 *
	 * +----------------------------------------+---------------+
	 * |  Header  |  Types  |  Optional layout  |  Strings      |
	 * +--------------------------------------------------------+
	 * ^          ^         ^                   ^
	 * |          |         |                   |
	 * raw_data   |         |                   |
	 * types_data-+         |                   |
	 * layout---------------+                   |
	 * strs_data--------------------------------+
	 *
	 * A separate struct btf_header is embedded as btf->hdr,
	 * and header information is copied into it.  This allows us
	 * to handle header data for various header formats; the original,
	 * the extended header with layout info, etc.
	 *
	 * If BTF data is later modified, e.g., due to types added or
	 * removed, BTF deduplication performed, etc, this contiguous
	 * representation is broken up into four independent memory
	 * regions.
	 *
	 * raw_data is nulled out at that point, but can be later allocated
	 * and cached again if user calls btf__raw_data(), at which point
	 * raw_data will contain a contiguous copy of header, types, optional
	 * layout and strings.  layout optionally points to a
	 * btf_layout array - this allows us to encode information about
	 * the kinds known at encoding time.  If layout is NULL no
	 * layout information is encoded.
	 *
	 * +----------+  +---------+  +-----------+   +-----------+
	 * |  Header  |  |  Types  |  |  Layout   |   |  Strings  |
	 * +----------+  +---------+  +-----------+   +-----------+
	 * ^             ^            ^               ^
	 * |             |            |               |
	 * hdr           |            |               |
	 * types_data----+            |               |
	 * layout---------------------+               |
	 * strset__data(strs_set)---------------------+
	 *
	 *               +----------+---------+-------------------+-----------+
	 *               |  Header  |  Types  |  Optional Layout  |  Strings  |
	 * raw_data----->+----------+---------+-------------------+-----------+
	 */
	struct btf_header hdr;

	void *types_data;
	size_t types_data_cap; /* used size stored in hdr->type_len */

	/* type ID to `struct btf_type *` lookup index
	 * type_offs[0] corresponds to the first non-VOID type:
	 *   - for base BTF it's type [1];
	 *   - for split BTF it's the first non-base BTF type.
	 */
	__u32 *type_offs;
	size_t type_offs_cap;
	/* number of types in this BTF instance:
	 *   - doesn't include special [0] void type;
	 *   - for split BTF counts number of types added on top of base BTF.
	 */
	__u32 nr_types;
	/* the start IDs of named types in sorted BTF */
	int named_start_id;
	/* if not NULL, points to the base BTF on top of which the current
	 * split BTF is based
	 */
	struct btf *base_btf;
	/* BTF type ID of the first type in this BTF instance:
	 *   - for base BTF it's equal to 1;
	 *   - for split BTF it's equal to biggest type ID of base BTF plus 1.
	 */
	int start_id;
	/* logical string offset of this BTF instance:
	 *   - for base BTF it's equal to 0;
	 *   - for split BTF it's equal to total size of base BTF's string section size.
	 */
	int start_str_off;

	/* only one of strs_data or strs_set can be non-NULL, depending on
	 * whether BTF is in a modifiable state (strs_set is used) or not
	 * (strs_data points inside raw_data)
	 */
	void *strs_data;
	/* a set of unique strings */
	struct strset *strs_set;
	/* whether strings are already deduplicated */
	bool strs_deduped;

	/* whether base_btf should be freed in btf_free for this instance */
	bool owns_base;

	/* whether raw_data is a (read-only) mmap */
	bool raw_data_is_mmap;

	/* is BTF modifiable? i.e. is it split into separate sections as described above? */
	bool modifiable;
	/* does BTF have header information we do not support?  If so, disallow
	 * modification.
	 */
	bool has_hdr_extra;
	/* Points either at raw kind layout data in parsed BTF (if present), or
	 * at an allocated kind layout array when BTF is modifiable.
	 */
	void *layout;

	/* BTF object FD, if loaded into kernel */
	int fd;

	/* Pointer size (in bytes) for a target architecture of this BTF */
	int ptr_sz;
};
186 
/* Convert a host pointer to the __u64 form used in bpf(2) syscall attributes;
 * the intermediate (unsigned long) cast avoids sign-extension on 32-bit hosts.
 */
static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}
191 
/* Ensure given dynamically allocated memory region pointed to by *data* with
 * capacity of *cap_cnt* elements each taking *elem_sz* bytes has enough
 * memory to accommodate *add_cnt* new elements, assuming *cur_cnt* elements
 * are already used. At most *max_cnt* elements can be ever allocated.
 * If necessary, memory is reallocated and all existing data is copied over,
 * new pointer to the memory region is stored at *data, new memory region
 * capacity (in number of elements) is stored in *cap.
 * On success, memory pointer to the beginning of unused memory is returned.
 * On error, NULL is returned.
 */
void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
		     size_t cur_cnt, size_t max_cnt, size_t add_cnt)
{
	size_t new_cnt;
	void *new_data;

	/* Check the limit first, in subtraction form: this both rejects
	 * over-limit requests and guards against size_t overflow of
	 * cur_cnt + add_cnt, which could otherwise wrap around and
	 * incorrectly satisfy the fast-path capacity check below.
	 */
	if (cur_cnt > max_cnt || add_cnt > max_cnt - cur_cnt)
		return NULL;

	/* fast path: existing capacity is already sufficient */
	if (cur_cnt + add_cnt <= *cap_cnt)
		return *data + cur_cnt * elem_sz;

	new_cnt = *cap_cnt;
	new_cnt += new_cnt / 4;		  /* expand by 25% */
	if (new_cnt < 16)		  /* but at least 16 elements */
		new_cnt = 16;
	if (new_cnt > max_cnt)		  /* but not exceeding a set limit */
		new_cnt = max_cnt;
	if (new_cnt < cur_cnt + add_cnt)  /* also ensure we have enough memory */
		new_cnt = cur_cnt + add_cnt;

	new_data = libbpf_reallocarray(*data, new_cnt, elem_sz);
	if (!new_data)
		return NULL;

	/* zero out newly allocated portion of memory */
	memset(new_data + (*cap_cnt) * elem_sz, 0, (new_cnt - *cap_cnt) * elem_sz);

	*data = new_data;
	*cap_cnt = new_cnt;
	return new_data + cur_cnt * elem_sz;
}
235 
236 /* Ensure given dynamically allocated memory region has enough allocated space
237  * to accommodate *need_cnt* elements of size *elem_sz* bytes each
238  */
239 int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
240 {
241 	void *p;
242 
243 	if (need_cnt <= *cap_cnt)
244 		return 0;
245 
246 	p = libbpf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt);
247 	if (!p)
248 		return -ENOMEM;
249 
250 	return 0;
251 }
252 
253 static void *btf_add_type_offs_mem(struct btf *btf, size_t add_cnt)
254 {
255 	return libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
256 			      btf->nr_types, BTF_MAX_NR_TYPES, add_cnt);
257 }
258 
259 static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
260 {
261 	__u32 *p;
262 
263 	p = btf_add_type_offs_mem(btf, 1);
264 	if (!p)
265 		return -ENOMEM;
266 
267 	*p = type_off;
268 	return 0;
269 }
270 
/* Byte-swap all header fields in place.  hdr_len tells how much header data
 * is actually present; layout_off/layout_len only exist in the extended
 * header format, so swap them only if the header is long enough.
 */
static void btf_bswap_hdr(struct btf_header *h, __u32 hdr_len)
{
	h->magic = bswap_16(h->magic);
	h->hdr_len = bswap_32(h->hdr_len);
	h->type_off = bswap_32(h->type_off);
	h->type_len = bswap_32(h->type_len);
	h->str_off = bswap_32(h->str_off);
	h->str_len = bswap_32(h->str_len);
	/* May be operating on raw data with hdr_len that does not include below fields */
	if (hdr_len >= sizeof(struct btf_header)) {
		h->layout_off = bswap_32(h->layout_off);
		h->layout_len = bswap_32(h->layout_len);
	}
}
285 
/* Parse and validate the BTF header at btf->raw_data: detect endianness via
 * the magic, byte-swap the header if needed, copy the known portion into
 * btf->hdr, and sanity-check section offsets/lengths (types, optional kind
 * layout, strings).  Returns 0 on success, negative error otherwise.
 */
static int btf_parse_hdr(struct btf *btf)
{
	struct btf_header *hdr = btf->raw_data;
	__u32 hdr_len, meta_left;

	if (btf->raw_size < offsetofend(struct btf_header, str_len)) {
		pr_debug("BTF header not found\n");
		return -EINVAL;
	}

	hdr_len = hdr->hdr_len;

	/* byte-swapped magic means the data is in non-native endianness */
	if (hdr->magic == bswap_16(BTF_MAGIC)) {
		btf->swapped_endian = true;
		hdr_len = bswap_32(hdr->hdr_len);
		if (hdr_len < offsetofend(struct btf_header, str_len)) {
			pr_warn("Can't load BTF with non-native endianness due to unsupported header length %u\n",
				hdr_len);
			return -ENOTSUP;
		}
	} else if (hdr->magic != BTF_MAGIC) {
		pr_debug("Invalid BTF magic: %x\n", hdr->magic);
		return -EINVAL;
	}

	if (btf->raw_size < hdr_len) {
		pr_debug("BTF header len %u larger than data size %u\n",
			 hdr_len, btf->raw_size);
		return -EINVAL;
	}

	if (btf->swapped_endian)
		btf_bswap_hdr(hdr, hdr_len);

	/* copy only as much header as is both present and known to us */
	memcpy(&btf->hdr, hdr, min((size_t)hdr_len, sizeof(struct btf_header)));

	/* If unknown header data is found, modification is prohibited in
	 * btf_ensure_modifiable().
	 */
	if (hdr_len > sizeof(struct btf_header)) {
		__u8 *h = (__u8 *)hdr;
		__u32 i;

		/* only non-zero unknown bytes disallow modification */
		for (i = sizeof(struct btf_header); i < hdr_len; i++) {
			if (!h[i])
				continue;
			btf->has_hdr_extra = true;
			pr_debug("Unknown BTF header data at offset %u; modification is disallowed\n",
				 i);
			break;
		}
	}

	meta_left = btf->raw_size - hdr_len;
	/* (long long) arithmetic avoids __u32 overflow in off + len sums */
	if (meta_left < (long long)btf->hdr.str_off + btf->hdr.str_len) {
		pr_debug("Invalid BTF total size: %u\n", btf->raw_size);
		return -EINVAL;
	}

	if ((long long)btf->hdr.type_off + btf->hdr.type_len > btf->hdr.str_off) {
		pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n",
			 btf->hdr.type_off, btf->hdr.type_len, btf->hdr.str_off,
			 btf->hdr.str_len);
		return -EINVAL;
	}

	if (btf->hdr.type_off % 4) {
		pr_debug("BTF type section is not aligned to 4 bytes\n");
		return -EINVAL;
	}

	/* layout section is optional; absence is fine */
	if (btf->hdr.layout_len == 0)
		return 0;

	/* optional layout section sits between types and strings */
	if (btf->hdr.layout_off % 4) {
		pr_debug("BTF layout section is not aligned to 4 bytes\n");
		return -EINVAL;
	}
	if (btf->hdr.layout_off < (long long)btf->hdr.type_off + btf->hdr.type_len) {
		pr_debug("Invalid BTF data sections layout: type data at %u + %u,  layout data at %u + %u\n",
			 btf->hdr.type_off, btf->hdr.type_len,
			 btf->hdr.layout_off, btf->hdr.layout_len);
		return -EINVAL;
	}
	if ((long long)btf->hdr.layout_off + btf->hdr.layout_len > btf->hdr.str_off ||
	    btf->hdr.layout_off > btf->hdr.str_off) {
		pr_debug("Invalid BTF data sections layout: layout data at %u + %u, strings data at %u\n",
			 btf->hdr.layout_off, btf->hdr.layout_len, btf->hdr.str_off);
		return -EINVAL;
	}
	return 0;
}
379 
/* Validate the string section: non-empty (split BTF may have an empty one),
 * within the max offset limit, NUL-terminated, and for base BTF starting
 * with the mandatory empty string at offset 0.
 */
static int btf_parse_str_sec(struct btf *btf)
{
	const char *start = btf->strs_data;
	const char *end = start + btf->hdr.str_len;

	/* split BTF reuses base BTF's strings; an empty section is valid */
	if (btf->base_btf && btf->hdr.str_len == 0)
		return 0;
	if (!btf->hdr.str_len || btf->hdr.str_len - 1 > BTF_MAX_STR_OFFSET || end[-1]) {
		pr_debug("Invalid BTF string section\n");
		return -EINVAL;
	}
	/* base BTF's first string must be the empty string */
	if (!btf->base_btf && start[0]) {
		pr_debug("Malformed BTF string section, did you forget to provide base BTF?\n");
		return -EINVAL;
	}
	return 0;
}
397 
/* Locate and validate the optional kind layout section inside raw_data and
 * point btf->layout at it, byte-swapping per-entry flags if BTF data is in
 * non-native endianness.
 */
static int btf_parse_layout_sec(struct btf *btf)
{
	if (!btf->hdr.layout_len)
		return 0;

	/* section must hold a whole number of btf_layout entries */
	if (btf->hdr.layout_len % sizeof(struct btf_layout) != 0) {
		pr_debug("Invalid BTF kind layout section\n");
		return -EINVAL;
	}
	btf->layout = btf->raw_data + btf->hdr.hdr_len + btf->hdr.layout_off;

	/* only the 16-bit flags field needs swapping; sizes are single bytes */
	if (btf->swapped_endian) {
		struct btf_layout *l, *end = btf->layout + btf->hdr.layout_len;

		for (l = btf->layout; l < end; l++)
			l->flags = bswap_16(l->flags);
	}

	return 0;
}
418 
419 /* for unknown kinds, consult kind layout. */
420 static int btf_type_size_unknown(const struct btf *btf, const struct btf_type *t)
421 {
422 	__u32 l_cnt = btf->hdr.layout_len / sizeof(struct btf_layout);
423 	struct btf_layout *l = btf->layout;
424 	__u16 vlen = btf_vlen(t);
425 	__u32 kind = btf_kind(t);
426 
427 	/* Fall back to base BTF if needed as they share layout information */
428 	if (!l) {
429 		struct btf *base_btf = btf->base_btf;
430 
431 		if (base_btf) {
432 			l = base_btf->layout;
433 			l_cnt = base_btf->hdr.layout_len / sizeof(struct btf_layout);
434 		}
435 	}
436 	if (!l || kind >= l_cnt) {
437 		pr_debug("Unsupported BTF_KIND: %u\n", btf_kind(t));
438 		return -EINVAL;
439 	}
440 	if (l[kind].info_sz % 4) {
441 		pr_debug("Unsupported info_sz %u for kind %u\n",
442 			  l[kind].info_sz, kind);
443 		return -EINVAL;
444 	}
445 	if (l[kind].elem_sz % 4) {
446 		pr_debug("Unsupported elem_sz %u for kind %u\n",
447 			 l[kind].elem_sz, kind);
448 		return -EINVAL;
449 	}
450 
451 	return sizeof(struct btf_type) + l[kind].info_sz + vlen * l[kind].elem_sz;
452 }
453 
/* Total size in bytes of type descriptor t, including kind-specific trailing
 * data.  Unknown kinds defer to the optional kind layout section.  Returns
 * negative error for unsupported kinds.
 */
static int btf_type_size(const struct btf *btf, const struct btf_type *t)
{
	const int base_size = sizeof(struct btf_type);
	__u16 vlen = btf_vlen(t);

	switch (btf_kind(t)) {
	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_FLOAT:
	case BTF_KIND_TYPE_TAG:
		return base_size;
	case BTF_KIND_INT:
		return base_size + sizeof(__u32);
	case BTF_KIND_ENUM:
		return base_size + vlen * sizeof(struct btf_enum);
	case BTF_KIND_ENUM64:
		return base_size + vlen * sizeof(struct btf_enum64);
	case BTF_KIND_ARRAY:
		return base_size + sizeof(struct btf_array);
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		return base_size + vlen * sizeof(struct btf_member);
	case BTF_KIND_FUNC_PROTO:
		return base_size + vlen * sizeof(struct btf_param);
	case BTF_KIND_VAR:
		return base_size + sizeof(struct btf_var);
	case BTF_KIND_DATASEC:
		return base_size + vlen * sizeof(struct btf_var_secinfo);
	case BTF_KIND_DECL_TAG:
		return base_size + sizeof(struct btf_decl_tag);
	default:
		/* kind may still be described by the kind layout section */
		return btf_type_size_unknown(btf, t);
	}
}
493 
494 static void btf_bswap_type_base(struct btf_type *t)
495 {
496 	t->name_off = bswap_32(t->name_off);
497 	t->info = bswap_32(t->info);
498 	t->type = bswap_32(t->type);
499 }
500 
/* Byte-swap the kind-specific data trailing the btf_type struct; the common
 * fields must have been swapped already (so btf_kind()/btf_vlen() are valid).
 * Returns 0 on success, -EINVAL for unsupported kinds.
 */
static int btf_bswap_type_rest(struct btf_type *t)
{
	struct btf_var_secinfo *v;
	struct btf_enum64 *e64;
	struct btf_member *m;
	struct btf_array *a;
	struct btf_param *p;
	struct btf_enum *e;
	__u16 vlen = btf_vlen(t);
	int i;

	switch (btf_kind(t)) {
	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_FLOAT:
	case BTF_KIND_TYPE_TAG:
		/* no trailing data */
		return 0;
	case BTF_KIND_INT:
		/* single __u32 of encoding/offset/bits data */
		*(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1));
		return 0;
	case BTF_KIND_ENUM:
		for (i = 0, e = btf_enum(t); i < vlen; i++, e++) {
			e->name_off = bswap_32(e->name_off);
			e->val = bswap_32(e->val);
		}
		return 0;
	case BTF_KIND_ENUM64:
		for (i = 0, e64 = btf_enum64(t); i < vlen; i++, e64++) {
			e64->name_off = bswap_32(e64->name_off);
			e64->val_lo32 = bswap_32(e64->val_lo32);
			e64->val_hi32 = bswap_32(e64->val_hi32);
		}
		return 0;
	case BTF_KIND_ARRAY:
		a = btf_array(t);
		a->type = bswap_32(a->type);
		a->index_type = bswap_32(a->index_type);
		a->nelems = bswap_32(a->nelems);
		return 0;
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		for (i = 0, m = btf_members(t); i < vlen; i++, m++) {
			m->name_off = bswap_32(m->name_off);
			m->type = bswap_32(m->type);
			m->offset = bswap_32(m->offset);
		}
		return 0;
	case BTF_KIND_FUNC_PROTO:
		for (i = 0, p = btf_params(t); i < vlen; i++, p++) {
			p->name_off = bswap_32(p->name_off);
			p->type = bswap_32(p->type);
		}
		return 0;
	case BTF_KIND_VAR:
		btf_var(t)->linkage = bswap_32(btf_var(t)->linkage);
		return 0;
	case BTF_KIND_DATASEC:
		for (i = 0, v = btf_var_secinfos(t); i < vlen; i++, v++) {
			v->type = bswap_32(v->type);
			v->offset = bswap_32(v->offset);
			v->size = bswap_32(v->size);
		}
		return 0;
	case BTF_KIND_DECL_TAG:
		btf_decl_tag(t)->component_idx = bswap_32(btf_decl_tag(t)->component_idx);
		return 0;
	default:
		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
		return -EINVAL;
	}
}
577 
/* Walk the type section, byte-swapping types in place when needed, building
 * the type_offs index and counting types.  Rejects truncated/malformed type
 * data.  Returns 0 on success, negative error otherwise.
 */
static int btf_parse_type_sec(struct btf *btf)
{
	void *next_type = btf->types_data;
	void *end_type = next_type + btf->hdr.type_len;
	int err, type_size;

	while (next_type + sizeof(struct btf_type) <= end_type) {
		/* common fields must be swapped first so that kind/vlen,
		 * and therefore btf_type_size(), are meaningful
		 */
		if (btf->swapped_endian)
			btf_bswap_type_base(next_type);

		type_size = btf_type_size(btf, next_type);
		if (type_size < 0)
			return type_size;
		if (next_type + type_size > end_type) {
			pr_warn("BTF type [%d] is malformed\n", btf->start_id + btf->nr_types);
			return -EINVAL;
		}

		if (btf->swapped_endian && btf_bswap_type_rest(next_type))
			return -EINVAL;

		err = btf_add_type_idx_entry(btf, next_type - btf->types_data);
		if (err)
			return err;

		next_type += type_size;
		btf->nr_types++;
	}

	/* leftover bytes mean a partial trailing type descriptor */
	if (next_type != end_type) {
		pr_warn("BTF types data is malformed\n");
		return -EINVAL;
	}

	return 0;
}
614 
615 static int btf_validate_str(const struct btf *btf, __u32 str_off, const char *what, __u32 type_id)
616 {
617 	const char *s;
618 
619 	s = btf__str_by_offset(btf, str_off);
620 	if (!s) {
621 		pr_warn("btf: type [%u]: invalid %s (string offset %u)\n", type_id, what, str_off);
622 		return -EINVAL;
623 	}
624 
625 	return 0;
626 }
627 
628 static int btf_validate_id(const struct btf *btf, __u32 id, __u32 ctx_id)
629 {
630 	const struct btf_type *t;
631 
632 	t = btf__type_by_id(btf, id);
633 	if (!t) {
634 		pr_warn("btf: type [%u]: invalid referenced type ID %u\n", ctx_id, id);
635 		return -EINVAL;
636 	}
637 
638 	return 0;
639 }
640 
/* Validate a single type: its name offset, and all string offsets and type
 * IDs referenced from its kind-specific data.  Unknown kinds are accepted if
 * the kind layout section describes them.  Returns 0 or -EINVAL.
 */
static int btf_validate_type(const struct btf *btf, const struct btf_type *t, __u32 id)
{
	__u32 kind = btf_kind(t);
	int err, i, n;

	err = btf_validate_str(btf, t->name_off, "type name", id);
	if (err)
		return err;

	switch (kind) {
	case BTF_KIND_UNKN:
	case BTF_KIND_INT:
	case BTF_KIND_FWD:
	case BTF_KIND_FLOAT:
		/* no type references to validate */
		break;
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_VAR:
	case BTF_KIND_DECL_TAG:
	case BTF_KIND_TYPE_TAG:
		err = btf_validate_id(btf, t->type, id);
		if (err)
			return err;
		break;
	case BTF_KIND_ARRAY: {
		const struct btf_array *a = btf_array(t);

		err = btf_validate_id(btf, a->type, id);
		err = err ?: btf_validate_id(btf, a->index_type, id);
		if (err)
			return err;
		break;
	}
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *m = btf_members(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "field name", id);
			err = err ?: btf_validate_id(btf, m->type, id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_ENUM: {
		const struct btf_enum *m = btf_enum(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "enum name", id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_ENUM64: {
		const struct btf_enum64 *m = btf_enum64(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "enum name", id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_FUNC: {
		const struct btf_type *ft;

		err = btf_validate_id(btf, t->type, id);
		if (err)
			return err;
		/* FUNC must point at a FUNC_PROTO */
		ft = btf__type_by_id(btf, t->type);
		if (btf_kind(ft) != BTF_KIND_FUNC_PROTO) {
			pr_warn("btf: type [%u]: referenced type [%u] is not FUNC_PROTO\n", id, t->type);
			return -EINVAL;
		}
		break;
	}
	case BTF_KIND_FUNC_PROTO: {
		const struct btf_param *m = btf_params(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "param name", id);
			err = err ?: btf_validate_id(btf, m->type, id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_DATASEC: {
		const struct btf_var_secinfo *m = btf_var_secinfos(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_id(btf, m->type, id);
			if (err)
				return err;
		}
		break;
	}
	default:
		/* Kind may be represented in kind layout information. */
		if (btf_type_size_unknown(btf, t) < 0) {
			pr_warn("btf: type [%u]: unrecognized kind %u\n", id, kind);
			return -EINVAL;
		}
		break;
	}
	return 0;
}
758 
759 /* Validate basic sanity of BTF. It's intentionally less thorough than
760  * kernel's validation and validates only properties of BTF that libbpf relies
761  * on to be correct (e.g., valid type IDs, valid string offsets, etc)
762  */
763 static int btf_sanity_check(const struct btf *btf)
764 {
765 	const struct btf_type *t;
766 	__u32 i, n = btf__type_cnt(btf);
767 	int err;
768 
769 	for (i = btf->start_id; i < n; i++) {
770 		t = btf_type_by_id(btf, i);
771 		err = btf_validate_type(btf, t, i);
772 		if (err)
773 			return err;
774 	}
775 	return 0;
776 }
777 
778 __u32 btf__type_cnt(const struct btf *btf)
779 {
780 	return btf->start_id + btf->nr_types;
781 }
782 
/* Return the base BTF this split BTF sits on top of, or NULL for base BTF. */
const struct btf *btf__base_btf(const struct btf *btf)
{
	return btf->base_btf;
}
787 
/* internal helper returning non-const pointer to a type */
struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id)
{
	/* ID 0 is the implicit VOID type, shared by all BTF objects */
	if (type_id == 0)
		return &btf_void;
	/* IDs below start_id belong to the base BTF chain */
	if (type_id < btf->start_id)
		return btf_type_by_id(btf->base_btf, type_id);
	return btf->types_data + btf->type_offs[type_id - btf->start_id];
}
797 
798 const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
799 {
800 	if (type_id >= btf->start_id + btf->nr_types)
801 		return errno = EINVAL, NULL;
802 	return btf_type_by_id((struct btf *)btf, type_id);
803 }
804 
/* Heuristically determine target pointer size by locating an integer type
 * named like C's 'long' (any word ordering) and using its size (4 or 8).
 * Split BTF inherits the answer from its base BTF.  Returns -1 when no
 * suitable type is found.
 */
static int determine_ptr_size(const struct btf *btf)
{
	/* all spellings of 'long'/'unsigned long' C allows */
	static const char * const long_aliases[] = {
		"long",
		"long int",
		"int long",
		"unsigned long",
		"long unsigned",
		"unsigned long int",
		"unsigned int long",
		"long unsigned int",
		"long int unsigned",
		"int unsigned long",
		"int long unsigned",
	};
	const struct btf_type *t;
	const char *name;
	int i, j, n;

	if (btf->base_btf && btf->base_btf->ptr_sz > 0)
		return btf->base_btf->ptr_sz;

	n = btf__type_cnt(btf);
	for (i = 1; i < n; i++) {
		t = btf__type_by_id(btf, i);
		if (!btf_is_int(t))
			continue;

		/* only plausible 'long' sizes are of interest */
		if (t->size != 4 && t->size != 8)
			continue;

		name = btf__name_by_offset(btf, t->name_off);
		if (!name)
			continue;

		for (j = 0; j < ARRAY_SIZE(long_aliases); j++) {
			if (strcmp(name, long_aliases[j]) == 0)
				return t->size;
		}
	}

	return -1;
}
848 
849 static size_t btf_ptr_sz(const struct btf *btf)
850 {
851 	if (!btf->ptr_sz)
852 		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
853 	return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
854 }
855 
856 /* Return pointer size this BTF instance assumes. The size is heuristically
857  * determined by looking for 'long' or 'unsigned long' integer type and
858  * recording its size in bytes. If BTF type information doesn't have any such
859  * type, this function returns 0. In the latter case, native architecture's
860  * pointer size is assumed, so will be either 4 or 8, depending on
861  * architecture that libbpf was compiled for. It's possible to override
862  * guessed value by using btf__set_pointer_size() API.
863  */
864 size_t btf__pointer_size(const struct btf *btf)
865 {
866 	if (!btf->ptr_sz)
867 		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
868 
869 	if (btf->ptr_sz < 0)
870 		/* not enough BTF type info to guess */
871 		return 0;
872 
873 	return btf->ptr_sz;
874 }
875 
876 /* Override or set pointer size in bytes. Only values of 4 and 8 are
877  * supported.
878  */
879 int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
880 {
881 	if (ptr_sz != 4 && ptr_sz != 8)
882 		return libbpf_err(-EINVAL);
883 	btf->ptr_sz = ptr_sz;
884 	return 0;
885 }
886 
/* Compile-time detection of the host's byte order. */
static bool is_host_big_endian(void)
{
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return true;
#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return false;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
}
897 
898 enum btf_endianness btf__endianness(const struct btf *btf)
899 {
900 	if (is_host_big_endian())
901 		return btf->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
902 	else
903 		return btf->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
904 }
905 
906 int btf__set_endianness(struct btf *btf, enum btf_endianness endian)
907 {
908 	if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
909 		return libbpf_err(-EINVAL);
910 
911 	btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
912 	if (!btf->swapped_endian) {
913 		free(btf->raw_data_swapped);
914 		btf->raw_data_swapped = NULL;
915 	}
916 	return 0;
917 }
918 
919 static bool btf_type_is_void(const struct btf_type *t)
920 {
921 	return t == &btf_void || btf_is_fwd(t);
922 }
923 
/* True for NULL, the VOID singleton, and FWD types. */
static bool btf_type_is_void_or_null(const struct btf_type *t)
{
	if (!t)
		return true;

	return btf_type_is_void(t);
}
928 
/* bound on modifier/typedef/array chains to avoid loops in malformed BTF */
#define MAX_RESOLVE_DEPTH 32

/* Resolve the byte size of type_id, following typedefs, modifiers, vars and
 * arrays (multiplying out array dimensions).  Returns size in bytes, or a
 * negative error for void/unsupported types or __u32 overflow.
 */
__s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
{
	const struct btf_array *array;
	const struct btf_type *t;
	__u32 nelems = 1;
	__s64 size = -1;
	int i;

	t = btf__type_by_id(btf, type_id);
	for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) {
		switch (btf_kind(t)) {
		case BTF_KIND_INT:
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
		case BTF_KIND_DATASEC:
		case BTF_KIND_FLOAT:
			/* sized kinds carry their size directly */
			size = t->size;
			goto done;
		case BTF_KIND_PTR:
			size = btf_ptr_sz(btf);
			goto done;
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_VAR:
		case BTF_KIND_DECL_TAG:
		case BTF_KIND_TYPE_TAG:
			/* transparent wrappers: follow to the wrapped type */
			type_id = t->type;
			break;
		case BTF_KIND_ARRAY:
			array = btf_array(t);
			/* detect __u32 overflow of accumulated element count */
			if (nelems && array->nelems > UINT32_MAX / nelems)
				return libbpf_err(-E2BIG);
			nelems *= array->nelems;
			type_id = array->type;
			break;
		default:
			return libbpf_err(-EINVAL);
		}

		t = btf__type_by_id(btf, type_id);
	}

done:
	if (size < 0)
		return libbpf_err(-EINVAL);
	if (nelems && size > UINT32_MAX / nelems)
		return libbpf_err(-E2BIG);

	return nelems * size;
}
985 
/* Compute natural alignment (in bytes) of the given type, detecting packed
 * structs/unions (returns 1 when member offsets or total size don't match
 * natural alignment).  Returns 0 with errno set on unsupported kinds.
 */
int btf__align_of(const struct btf *btf, __u32 id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);
	__u16 kind = btf_kind(t);

	switch (kind) {
	case BTF_KIND_INT:
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
	case BTF_KIND_FLOAT:
		/* scalar alignment is capped at pointer size */
		return min(btf_ptr_sz(btf), (size_t)t->size);
	case BTF_KIND_PTR:
		return btf_ptr_sz(btf);
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPE_TAG:
		return btf__align_of(btf, t->type);
	case BTF_KIND_ARRAY:
		return btf__align_of(btf, btf_array(t)->type);
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *m = btf_members(t);
		__u16 vlen = btf_vlen(t);
		int i, max_align = 1, align;

		for (i = 0; i < vlen; i++, m++) {
			align = btf__align_of(btf, m->type);
			if (align <= 0)
				return libbpf_err(align);
			max_align = max(max_align, align);

			/* if field offset isn't aligned according to field
			 * type's alignment, then struct must be packed
			 */
			if (btf_member_bitfield_size(t, i) == 0 &&
			    (m->offset % (8 * align)) != 0)
				return 1;
		}

		/* if struct/union size isn't a multiple of its alignment,
		 * then struct must be packed
		 */
		if ((t->size % max_align) != 0)
			return 1;

		return max_align;
	}
	default:
		pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
		return errno = EINVAL, 0;
	}
}
1040 
1041 int btf__resolve_type(const struct btf *btf, __u32 type_id)
1042 {
1043 	const struct btf_type *t;
1044 	int depth = 0;
1045 
1046 	t = btf__type_by_id(btf, type_id);
1047 	while (depth < MAX_RESOLVE_DEPTH &&
1048 	       !btf_type_is_void_or_null(t) &&
1049 	       (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
1050 		type_id = t->type;
1051 		t = btf__type_by_id(btf, type_id);
1052 		depth++;
1053 	}
1054 
1055 	if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
1056 		return libbpf_err(-EINVAL);
1057 
1058 	return type_id;
1059 }
1060 
/* Detect whether this BTF's own types are sorted by name; if so, record
 * the ID of the first type with a non-empty name in btf->named_start_id
 * so that name lookups can binary-search instead of scanning linearly
 * (see btf_find_by_name_kind()). Bails out, leaving named_start_id
 * untouched, on the first out-of-order adjacent pair.
 */
static void btf_check_sorted(struct btf *btf)
{
	__u32 i, n, named_start_id = 0;

	n = btf__type_cnt(btf);
	for (i = btf->start_id + 1; i < n; i++) {
		struct btf_type *ta = btf_type_by_id(btf, i - 1);
		struct btf_type *tb = btf_type_by_id(btf, i);
		const char *na = btf__str_by_offset(btf, ta->name_off);
		const char *nb = btf__str_by_offset(btf, tb->name_off);

		/* not sorted; binary search won't be usable */
		if (strcmp(na, nb) > 0)
			return;

		/* remember the first type with a non-empty name */
		if (named_start_id == 0 && na[0] != '\0')
			named_start_id = i - 1;
		if (named_start_id == 0 && nb[0] != '\0')
			named_start_id = i;
	}

	if (named_start_id)
		btf->named_start_id = named_start_id;
}
1084 
/* Lower-bound binary search over types [start_id, btf__type_cnt()),
 * which are assumed sorted by name (see btf_check_sorted()). Returns the
 * smallest ID whose name compares >= *name*, or btf__type_cnt() if every
 * name compares less than *name*. Caller must verify the returned entry
 * actually matches *name*.
 */
static __s32 btf_find_type_by_name_bsearch(const struct btf *btf, const char *name,
					   __s32 start_id)
{
	const struct btf_type *t;
	const char *tname;
	__s32 l, r, m;

	l = start_id;
	r = btf__type_cnt(btf) - 1;
	while (l <= r) {
		m = l + (r - l) / 2;
		t = btf_type_by_id(btf, m);
		tname = btf__str_by_offset(btf, t->name_off);
		if (strcmp(tname, name) >= 0) {
			/* range collapsed onto the leftmost candidate */
			if (l == r)
				return r;
			r = m;
		} else {
			l = m + 1;
		}
	}

	return btf__type_cnt(btf);
}
1109 
/* Find a type by name and kind, searching base BTF (if any) before this
 * BTF's own types. A *kind* of -1 matches any kind; BTF_KIND_UNKN or the
 * name "void" resolve to type ID 0. Uses binary search over the sorted
 * named range when btf_check_sorted() detected one, otherwise a linear
 * scan. Returns type ID >= 0 on success, -ENOENT if not found.
 */
static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
				   const char *type_name, __s32 kind)
{
	__u32 nr_types = btf__type_cnt(btf);
	const struct btf_type *t;
	const char *tname;
	__s32 id;

	/* recurse into base BTF first so base type IDs take precedence */
	if (start_id < btf->start_id) {
		id = btf_find_by_name_kind(btf->base_btf, start_id,
					   type_name, kind);
		if (id >= 0)
			return id;
		start_id = btf->start_id;
	}

	if (kind == BTF_KIND_UNKN || strcmp(type_name, "void") == 0)
		return 0;

	if (btf->named_start_id > 0 && type_name[0]) {
		/* jump to the first name-match candidate, then walk all
		 * equally-named entries looking for a kind match
		 */
		start_id = max(start_id, btf->named_start_id);
		id = btf_find_type_by_name_bsearch(btf, type_name, start_id);
		for (; id < nr_types; id++) {
			t = btf__type_by_id(btf, id);
			tname = btf__str_by_offset(btf, t->name_off);
			if (strcmp(tname, type_name) != 0)
				return libbpf_err(-ENOENT);
			if (kind < 0 || btf_kind(t) == kind)
				return id;
		}
	} else {
		for (id = start_id; id < nr_types; id++) {
			t = btf_type_by_id(btf, id);
			if (kind > 0 && btf_kind(t) != kind)
				continue;
			tname = btf__str_by_offset(btf, t->name_off);
			if (strcmp(tname, type_name) == 0)
				return id;
		}
	}

	return libbpf_err(-ENOENT);
}
1153 
/* Find a type by name in base and split BTF alike; the kind value of -1
 * indicates that kind matching should be skipped.
 */
__s32 btf__find_by_name(const struct btf *btf, const char *type_name)
{
	return btf_find_by_name_kind(btf, 1, type_name, -1);
}
1159 
/* Like btf__find_by_name_kind(), but search only this BTF's own types,
 * skipping any base BTF (starts at btf->start_id).
 */
__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
				 __u32 kind)
{
	return btf_find_by_name_kind(btf, btf->start_id, type_name, kind);
}
1165 
/* Find a type by name and exact kind, searching base BTF (if any) too. */
__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
			     __u32 kind)
{
	return btf_find_by_name_kind(btf, 1, type_name, kind);
}
1171 
static bool btf_is_modifiable(const struct btf *btf)
{
	/* BTF is modifiable once it has been split into separately owned
	 * types/strings/layout memory regions (see btf_ensure_modifiable())
	 */
	return btf->modifiable;
}
1177 
1178 static void btf_free_raw_data(struct btf *btf)
1179 {
1180 	if (btf->raw_data_is_mmap) {
1181 		munmap(btf->raw_data, btf->raw_size);
1182 		btf->raw_data_is_mmap = false;
1183 	} else {
1184 		free(btf->raw_data);
1185 	}
1186 	btf->raw_data = NULL;
1187 }
1188 
/* Free a BTF object and everything it owns: its kernel FD (if loaded),
 * split modifiable sections, cached raw data, type index, and — only if
 * owns_base is set — its base BTF. NULL and ERR_PTR inputs are no-ops.
 */
void btf__free(struct btf *btf)
{
	if (IS_ERR_OR_NULL(btf))
		return;

	if (btf->fd >= 0)
		close(btf->fd);

	if (btf_is_modifiable(btf)) {
		/* if BTF was modified after loading, it will have a split
		 * in-memory representation for types, strings and layout
		 * sections, so we need to free all of them individually. It
		 * might still have a cached contiguous raw data present,
		 * which will be unconditionally freed below.
		 */
		free(btf->types_data);
		strset__free(btf->strs_set);
		free(btf->layout);
	}
	btf_free_raw_data(btf);
	free(btf->raw_data_swapped);
	free(btf->type_offs);
	if (btf->owns_base)
		btf__free(btf->base_btf);
	free(btf);
}
1215 
/* Create an empty BTF object, optionally split on top of opts->base_btf
 * and optionally carrying a kind-layout section (opts->add_layout).
 * The raw data contains just the header, an optional copy of the static
 * layouts table, and (for non-split BTF) the mandatory empty string at
 * offset 0. Returns the new object or ERR_PTR(-ENOMEM).
 */
static struct btf *btf_new_empty(struct btf_new_opts *opts)
{
	bool add_layout = OPTS_GET(opts, add_layout, false);
	struct btf *base_btf = OPTS_GET(opts, base_btf, NULL);
	struct btf_header *hdr;
	struct btf *btf;

	btf = calloc(1, sizeof(*btf));
	if (!btf)
		return ERR_PTR(-ENOMEM);

	btf->nr_types = 0;
	btf->start_id = 1;
	btf->start_str_off = 0;
	btf->fd = -1;
	btf->ptr_sz = sizeof(void *);
	btf->swapped_endian = false;
	btf->named_start_id = 0;

	if (base_btf) {
		/* split BTF continues base's type ID and string spaces */
		btf->base_btf = base_btf;
		btf->start_id = btf__type_cnt(base_btf);
		btf->start_str_off = base_btf->hdr.str_len + base_btf->start_str_off;
		btf->swapped_endian = base_btf->swapped_endian;
	}

	/* +1 for empty string at offset 0 */
	btf->raw_size = sizeof(struct btf_header) + (base_btf ? 0 : 1);
	if (add_layout)
		btf->raw_size += sizeof(layouts);
	btf->raw_data = calloc(1, btf->raw_size);
	if (!btf->raw_data) {
		free(btf);
		return ERR_PTR(-ENOMEM);
	}

	hdr = btf->raw_data;
	hdr->hdr_len = sizeof(struct btf_header);
	hdr->magic = BTF_MAGIC;
	hdr->version = BTF_VERSION;

	btf->types_data = btf->raw_data + hdr->hdr_len;
	btf->strs_data = btf->raw_data + hdr->hdr_len;
	hdr->str_len = base_btf ? 0 : 1; /* empty string at offset 0 */

	if (add_layout) {
		hdr->layout_len = sizeof(layouts);
		/* points at the static layouts table; btf_ensure_modifiable()
		 * replaces it with an owned copy before any modification
		 */
		btf->layout = layouts;
		/*
		 * No need to swap endianness here as btf_get_raw_data()
		 * will do this for us if btf->swapped_endian is true.
		 */
		memcpy(btf->raw_data + hdr->hdr_len, layouts, sizeof(layouts));
		btf->strs_data += sizeof(layouts);
		hdr->str_off += sizeof(layouts);
	}

	memcpy(&btf->hdr, hdr, sizeof(*hdr));

	return btf;
}
1277 
/* Create an empty non-split BTF object with default options. */
struct btf *btf__new_empty(void)
{
	return libbpf_ptr(btf_new_empty(NULL));
}
1282 
1283 struct btf *btf__new_empty_split(struct btf *base_btf)
1284 {
1285 	LIBBPF_OPTS(btf_new_opts, opts);
1286 
1287 	opts.base_btf = base_btf;
1288 
1289 	return libbpf_ptr(btf_new_empty(&opts));
1290 }
1291 
/* Create an empty BTF object with caller-provided options; validates the
 * opts struct size/zeroing contract before use.
 */
struct btf *btf__new_empty_opts(struct btf_new_opts *opts)
{
	if (!OPTS_VALID(opts, btf_new_opts))
		return libbpf_err_ptr(-EINVAL);

	return libbpf_ptr(btf_new_empty(opts));
}
1299 
/* Construct a BTF object from raw BTF bytes. If *is_mmap* is set, the
 * object references *data* directly (and takes responsibility for
 * munmap'ing it via btf_free_raw_data()); otherwise the bytes are copied.
 * Parses and validates header, strings, optional layout section and
 * types, then probes for name-sortedness. Returns ERR_PTR on failure.
 */
static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf, bool is_mmap)
{
	struct btf *btf;
	int err;

	btf = calloc(1, sizeof(struct btf));
	if (!btf)
		return ERR_PTR(-ENOMEM);

	btf->nr_types = 0;
	btf->start_id = 1;
	btf->start_str_off = 0;
	btf->fd = -1;
	btf->named_start_id = 0;

	if (base_btf) {
		/* split BTF continues base's type ID and string spaces */
		btf->base_btf = base_btf;
		btf->start_id = btf__type_cnt(base_btf);
		btf->start_str_off = base_btf->hdr.str_len + base_btf->start_str_off;
	}

	if (is_mmap) {
		btf->raw_data = (void *)data;
		btf->raw_data_is_mmap = true;
	} else {
		btf->raw_data = malloc(size);
		if (!btf->raw_data) {
			err = -ENOMEM;
			goto done;
		}
		memcpy(btf->raw_data, data, size);
	}

	btf->raw_size = size;

	err = btf_parse_hdr(btf);
	if (err)
		goto done;

	btf->strs_data = btf->raw_data + btf->hdr.hdr_len + btf->hdr.str_off;
	btf->types_data = btf->raw_data + btf->hdr.hdr_len + btf->hdr.type_off;

	err = btf_parse_str_sec(btf);
	err = err ?: btf_parse_layout_sec(btf);
	err = err ?: btf_parse_type_sec(btf);
	err = err ?: btf_sanity_check(btf);
	if (err)
		goto done;
	/* enables binary-search name lookups if types are sorted */
	btf_check_sorted(btf);

done:
	if (err) {
		btf__free(btf);
		return ERR_PTR(err);
	}

	return btf;
}
1358 
/* Create a BTF object from a copy of raw BTF bytes. */
struct btf *btf__new(const void *data, __u32 size)
{
	return libbpf_ptr(btf_new(data, size, NULL, false));
}
1363 
/* Create a split BTF object on top of *base_btf* from raw BTF bytes. */
struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf)
{
	return libbpf_ptr(btf_new(data, size, base_btf, false));
}
1368 
/* BTF-related ELF sections collected by btf_find_elf_sections() */
struct btf_elf_secs {
	Elf_Data *btf_data;	 /* BTF_ELF_SEC section contents */
	Elf_Data *btf_ext_data;	 /* BTF_EXT_ELF_SEC section contents, if any */
	Elf_Data *btf_base_data; /* BTF_BASE_ELF_SEC section contents, if any */
};
1374 
/* Scan the ELF file's sections for BTF-related ones (.BTF et al., per the
 * BTF_*_ELF_SEC names) and stash their data pointers in *secs. Sections
 * not present are simply left NULL. *path* is only used for log messages.
 * Returns 0 on success, -LIBBPF_ERRNO__FORMAT on any ELF parsing problem.
 */
static int btf_find_elf_sections(Elf *elf, const char *path, struct btf_elf_secs *secs)
{
	Elf_Scn *scn = NULL;
	Elf_Data *data;
	GElf_Ehdr ehdr;
	size_t shstrndx;
	int idx = 0;

	if (!gelf_getehdr(elf, &ehdr)) {
		pr_warn("failed to get EHDR from %s\n", path);
		goto err;
	}

	if (elf_getshdrstrndx(elf, &shstrndx)) {
		pr_warn("failed to get section names section index for %s\n",
			path);
		goto err;
	}

	/* make sure section-name strings are actually readable */
	if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
		pr_warn("failed to get e_shstrndx from %s\n", path);
		goto err;
	}

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		Elf_Data **field;
		GElf_Shdr sh;
		char *name;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warn("failed to get section(%d) header from %s\n",
				idx, path);
			goto err;
		}
		name = elf_strptr(elf, shstrndx, sh.sh_name);
		if (!name) {
			pr_warn("failed to get section(%d) name from %s\n",
				idx, path);
			goto err;
		}

		/* pick destination slot by section name; skip the rest */
		if (strcmp(name, BTF_ELF_SEC) == 0)
			field = &secs->btf_data;
		else if (strcmp(name, BTF_EXT_ELF_SEC) == 0)
			field = &secs->btf_ext_data;
		else if (strcmp(name, BTF_BASE_ELF_SEC) == 0)
			field = &secs->btf_base_data;
		else
			continue;

		if (sh.sh_type != SHT_PROGBITS) {
			pr_warn("unexpected section type (%d) of section(%d, %s) from %s\n",
				sh.sh_type, idx, name, path);
			goto err;
		}

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warn("failed to get section(%d, %s) data from %s\n",
				idx, name, path);
			goto err;
		}
		*field = data;
	}

	return 0;

err:
	return -LIBBPF_ERRNO__FORMAT;
}
1446 
/* Parse BTF out of an ELF file at *path*. Handles distilled base BTF
 * (BTF_BASE_ELF_SEC): if both a distilled base and a caller-provided
 * *base_btf* are present, the parsed BTF is relocated onto *base_btf*;
 * otherwise the distilled base is kept and owned by the returned object.
 * Pointer size is derived from the ELF class. If *btf_ext* is non-NULL,
 * the BTF.ext section (if any) is parsed into it as well.
 * Returns the BTF object or ERR_PTR on failure.
 */
static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
				 struct btf_ext **btf_ext)
{
	struct btf_elf_secs secs = {};
	struct btf *dist_base_btf = NULL;
	struct btf *btf = NULL;
	int err = 0, fd = -1;
	Elf *elf = NULL;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warn("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	fd = open(path, O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		err = -errno;
		pr_warn("failed to open %s: %s\n", path, errstr(err));
		return ERR_PTR(err);
	}

	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf) {
		err = -LIBBPF_ERRNO__FORMAT;
		pr_warn("failed to open %s as ELF file\n", path);
		goto done;
	}

	err = btf_find_elf_sections(elf, path, &secs);
	if (err)
		goto done;

	if (!secs.btf_data) {
		pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path);
		err = -ENODATA;
		goto done;
	}

	if (secs.btf_base_data) {
		dist_base_btf = btf_new(secs.btf_base_data->d_buf, secs.btf_base_data->d_size,
					NULL, false);
		if (IS_ERR(dist_base_btf)) {
			err = PTR_ERR(dist_base_btf);
			dist_base_btf = NULL;
			goto done;
		}
	}

	btf = btf_new(secs.btf_data->d_buf, secs.btf_data->d_size,
		      dist_base_btf ?: base_btf, false);
	if (IS_ERR(btf)) {
		err = PTR_ERR(btf);
		goto done;
	}
	if (dist_base_btf && base_btf) {
		/* re-anchor split BTF from distilled base onto real base */
		err = btf__relocate(btf, base_btf);
		if (err)
			goto done;
		btf__free(dist_base_btf);
		dist_base_btf = NULL;
	}

	if (dist_base_btf)
		btf->owns_base = true;

	switch (gelf_getclass(elf)) {
	case ELFCLASS32:
		btf__set_pointer_size(btf, 4);
		break;
	case ELFCLASS64:
		btf__set_pointer_size(btf, 8);
		break;
	default:
		pr_warn("failed to get ELF class (bitness) for %s\n", path);
		break;
	}

	if (btf_ext && secs.btf_ext_data) {
		*btf_ext = btf_ext__new(secs.btf_ext_data->d_buf, secs.btf_ext_data->d_size);
		if (IS_ERR(*btf_ext)) {
			err = PTR_ERR(*btf_ext);
			goto done;
		}
	} else if (btf_ext) {
		*btf_ext = NULL;
	}
done:
	if (elf)
		elf_end(elf);
	close(fd);

	if (!err)
		return btf;

	/* NOTE(review): on errors hit before *btf_ext is assigned above,
	 * this frees whatever value the caller passed in; callers appear
	 * to be expected to pass it initialized (btf_parse() NULLs it) --
	 * confirm for direct btf__parse_elf() callers.
	 */
	if (btf_ext)
		btf_ext__free(*btf_ext);
	btf__free(dist_base_btf);
	btf__free(btf);

	return ERR_PTR(err);
}
1548 
/* Parse standalone (non-split) BTF, and optionally BTF.ext, from an ELF file. */
struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
{
	return libbpf_ptr(btf_parse_elf(path, NULL, btf_ext));
}
1553 
/* Parse split BTF from an ELF file on top of *base_btf*. */
struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf)
{
	return libbpf_ptr(btf_parse_elf(path, base_btf, NULL));
}
1558 
/* Parse a raw (non-ELF) BTF file at *path*, optionally split on top of
 * *base_btf*. Probes the leading magic in either endianness first and
 * returns ERR_PTR(-EPROTO) if it doesn't match, which btf_parse() uses
 * as the signal to fall back to ELF parsing.
 */
static struct btf *btf_parse_raw(const char *path, struct btf *base_btf)
{
	struct btf *btf = NULL;
	void *data = NULL;
	FILE *f = NULL;
	__u16 magic;
	int err = 0;
	long sz;

	f = fopen(path, "rbe");
	if (!f) {
		err = -errno;
		goto err_out;
	}

	/* check BTF magic */
	if (fread(&magic, 1, sizeof(magic), f) < sizeof(magic)) {
		err = -EIO;
		goto err_out;
	}
	if (magic != BTF_MAGIC && magic != bswap_16(BTF_MAGIC)) {
		/* definitely not a raw BTF */
		err = -EPROTO;
		goto err_out;
	}

	/* get file size */
	if (fseek(f, 0, SEEK_END)) {
		err = -errno;
		goto err_out;
	}
	sz = ftell(f);
	if (sz < 0) {
		err = -errno;
		goto err_out;
	}
	/* rewind to the start */
	if (fseek(f, 0, SEEK_SET)) {
		err = -errno;
		goto err_out;
	}

	/* pre-alloc memory and read all of BTF data */
	data = malloc(sz);
	if (!data) {
		err = -ENOMEM;
		goto err_out;
	}
	if (fread(data, 1, sz, f) < sz) {
		err = -EIO;
		goto err_out;
	}

	/* finally parse BTF data */
	btf = btf_new(data, sz, base_btf, false);

err_out:
	/* btf_new() copied the bytes, so data is safe to free either way */
	free(data);
	if (f)
		fclose(f);
	return err ? ERR_PTR(err) : btf;
}
1621 
/* Parse standalone BTF from a raw BTF file. */
struct btf *btf__parse_raw(const char *path)
{
	return libbpf_ptr(btf_parse_raw(path, NULL));
}
1626 
/* Parse split BTF from a raw BTF file on top of *base_btf*. */
struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf)
{
	return libbpf_ptr(btf_parse_raw(path, base_btf));
}
1631 
/* Parse a raw BTF file by mmap'ing it read-only instead of copying it
 * into the heap; the resulting BTF object owns the mapping and unmaps it
 * in btf_free_raw_data().
 */
static struct btf *btf_parse_raw_mmap(const char *path, struct btf *base_btf)
{
	struct stat st;
	void *data;
	struct btf *btf;
	int fd, err;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return ERR_PTR(-errno);

	if (fstat(fd, &st) < 0) {
		err = -errno;
		close(fd);
		return ERR_PTR(err);
	}

	data = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	/* snapshot errno before close() can clobber it */
	err = -errno;
	close(fd);

	if (data == MAP_FAILED)
		return ERR_PTR(err);

	btf = btf_new(data, st.st_size, base_btf, true);
	/* NOTE(review): on btf_new() failure its internal btf__free() path
	 * appears to have already munmap'ed the region (raw_data_is_mmap is
	 * set before any failure point), making this munmap look redundant
	 * -- confirm against btf_new()/btf_free_raw_data().
	 */
	if (IS_ERR(btf))
		munmap(data, st.st_size);

	return btf;
}
1662 
1663 static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext)
1664 {
1665 	struct btf *btf;
1666 	int err;
1667 
1668 	if (btf_ext)
1669 		*btf_ext = NULL;
1670 
1671 	btf = btf_parse_raw(path, base_btf);
1672 	err = libbpf_get_error(btf);
1673 	if (!err)
1674 		return btf;
1675 	if (err != -EPROTO)
1676 		return ERR_PTR(err);
1677 	return btf_parse_elf(path, base_btf, btf_ext);
1678 }
1679 
/* Parse standalone BTF (raw or ELF) from *path*; see btf_parse(). */
struct btf *btf__parse(const char *path, struct btf_ext **btf_ext)
{
	return libbpf_ptr(btf_parse(path, NULL, btf_ext));
}
1684 
/* Parse split BTF (raw or ELF) from *path* on top of *base_btf*. */
struct btf *btf__parse_split(const char *path, struct btf *base_btf)
{
	return libbpf_ptr(btf_parse(path, base_btf, NULL));
}
1689 
1690 static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);
1691 
/* Load BTF into the kernel via BPF_BTF_LOAD, storing the resulting FD in
 * btf->fd. Starts quietly (no verifier log) and retries with log enabled
 * on failure, growing an internal log buffer as needed unless the caller
 * supplied *log_buf*. *token_fd*, if non-zero, is passed as a BPF token.
 * Returns 0 on success, negative error otherwise; -EEXIST if already
 * loaded, -EINVAL for a non-zero log_sz with NULL log_buf.
 */
int btf_load_into_kernel(struct btf *btf,
			 char *log_buf, size_t log_sz, __u32 log_level,
			 int token_fd)
{
	LIBBPF_OPTS(bpf_btf_load_opts, opts);
	__u32 buf_sz = 0, raw_size;
	char *buf = NULL, *tmp;
	void *raw_data;
	int err = 0;

	if (btf->fd >= 0)
		return libbpf_err(-EEXIST);
	if (log_sz && !log_buf)
		return libbpf_err(-EINVAL);

	/* cache native raw data representation */
	raw_data = btf_get_raw_data(btf, &raw_size, false);
	if (!raw_data) {
		err = -ENOMEM;
		goto done;
	}
	btf->raw_size = raw_size;
	btf->raw_data = raw_data;

retry_load:
	/* if log_level is 0, we won't provide log_buf/log_size to the kernel,
	 * initially. Only if BTF loading fails, we bump log_level to 1 and
	 * retry, using either auto-allocated or custom log_buf. This way
	 * non-NULL custom log_buf provides a buffer just in case, but hopes
	 * for successful load and no need for log_buf.
	 */
	if (log_level) {
		/* if caller didn't provide custom log_buf, we'll keep
		 * allocating our own progressively bigger buffers for BTF
		 * verification log
		 */
		if (!log_buf) {
			buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2);
			tmp = realloc(buf, buf_sz);
			if (!tmp) {
				err = -ENOMEM;
				goto done;
			}
			buf = tmp;
			buf[0] = '\0';
		}

		opts.log_buf = log_buf ? log_buf : buf;
		opts.log_size = log_buf ? log_sz : buf_sz;
		opts.log_level = log_level;
	}

	opts.token_fd = token_fd;
	if (token_fd)
		opts.btf_flags |= BPF_F_TOKEN_FD;

	btf->fd = bpf_btf_load(raw_data, raw_size, &opts);
	if (btf->fd < 0) {
		/* time to turn on verbose mode and try again */
		if (log_level == 0) {
			log_level = 1;
			goto retry_load;
		}
		/* only retry if caller didn't provide custom log_buf, but
		 * make sure we can never overflow buf_sz
		 */
		if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2)
			goto retry_load;

		err = -errno;
		pr_warn("BTF loading error: %s\n", errstr(err));
		/* don't print out contents of custom log_buf */
		if (!log_buf && buf[0])
			pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf);
	}

done:
	free(buf);
	return libbpf_err(err);
}
1772 
/* Load BTF into the kernel with no log buffer and no token. */
int btf__load_into_kernel(struct btf *btf)
{
	return btf_load_into_kernel(btf, NULL, 0, 0, 0);
}
1777 
/* Return the kernel FD of loaded BTF, or -1 if not loaded. */
int btf__fd(const struct btf *btf)
{
	return btf->fd;
}
1782 
/* Override the kernel FD associated with this BTF object; the object
 * will close it in btf__free() if >= 0.
 */
void btf__set_fd(struct btf *btf, int fd)
{
	btf->fd = fd;
}
1787 
/* Return pointer to string section data, regardless of whether BTF is in
 * contiguous (strs_data) or modifiable split (strs_set) representation.
 */
static const void *btf_strs_data(const struct btf *btf)
{
	return btf->strs_data ? btf->strs_data : strset__data(btf->strs_set);
}
1792 
/* Produce a contiguous raw data blob (header, types, optional layout,
 * strings) in either native or swapped byte order. Returns the cached
 * copy if one exists; otherwise allocates a fresh buffer the caller's
 * btf object is expected to take ownership of. Returns NULL on
 * allocation failure or if a type can't be byte-swapped.
 */
static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian)
{
	const struct btf_header *hdr = &btf->hdr;
	struct btf_type *t;
	void *data, *p;
	__u32 data_sz;
	int i;

	data = swap_endian ? btf->raw_data_swapped : btf->raw_data;
	if (data) {
		*size = btf->raw_size;
		return data;
	}

	data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len;
	if (btf->layout)
		data_sz += hdr->layout_len;

	data = calloc(1, data_sz);
	if (!data)
		return NULL;
	p = data;

	memcpy(p, hdr, min((__u32)sizeof(struct btf_header), hdr->hdr_len));
	if (swap_endian)
		btf_bswap_hdr(p, hdr->hdr_len);
	p += hdr->hdr_len;

	memcpy(p, btf->types_data, hdr->type_len);
	if (swap_endian) {
		for (i = 0; i < btf->nr_types; i++) {
			t = p + btf->type_offs[i];
			/* btf_bswap_type_rest() relies on native t->info, so
			 * we swap base type info after we swapped all the
			 * additional information
			 */
			if (btf_bswap_type_rest(t))
				goto err_out;
			btf_bswap_type_base(t);
		}
	}
	p += hdr->type_len;

	if (btf->layout) {
		memcpy(p, btf->layout, hdr->layout_len);
		if (swap_endian) {
			struct btf_layout *l, *end = p + hdr->layout_len;

			/* flags is the only multi-byte field in a layout entry */
			for (l = p; l < end ; l++)
				l->flags = bswap_16(l->flags);
		}
		p += hdr->layout_len;
	}

	memcpy(p, btf_strs_data(btf), hdr->str_len);

	*size = data_sz;
	return data;
err_out:
	free(data);
	return NULL;
}
1855 
/* Return a contiguous raw data blob in the BTF's own endianness, caching
 * it on the (logically const) object so repeated calls are cheap. Sets
 * errno to ENOMEM and returns NULL on allocation failure.
 */
const void *btf__raw_data(const struct btf *btf_ro, __u32 *size)
{
	/* cast away const to update the raw data cache in place */
	struct btf *btf = (struct btf *)btf_ro;
	__u32 data_sz;
	void *data;

	data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian);
	if (!data)
		return errno = ENOMEM, NULL;

	btf->raw_size = data_sz;
	if (btf->swapped_endian)
		btf->raw_data_swapped = data;
	else
		btf->raw_data = data;
	*size = data_sz;
	return data;
}
1874 
1875 __attribute__((alias("btf__raw_data")))
1876 const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
1877 
1878 const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
1879 {
1880 	if (offset < btf->start_str_off)
1881 		return btf__str_by_offset(btf->base_btf, offset);
1882 	else if (offset - btf->start_str_off < btf->hdr.str_len)
1883 		return btf_strs_data(btf) + (offset - btf->start_str_off);
1884 	else
1885 		return errno = EINVAL, NULL;
1886 }
1887 
/* Legacy alias for btf__str_by_offset(). */
const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
{
	return btf__str_by_offset(btf, offset);
}
1892 
/* Fetch raw BTF data from the kernel through *btf_fd* and construct a
 * BTF object from it, optionally split on top of *base_btf*. Starts with
 * a 4KiB buffer and retries once with the exact size the kernel reports.
 * Returns the BTF object or an ERR_PTR.
 */
struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
{
	struct bpf_btf_info btf_info;
	__u32 len = sizeof(btf_info);
	__u32 last_size;
	struct btf *btf;
	void *ptr;
	int err;

	/* we won't know btf_size until we call bpf_btf_get_info_by_fd(). so
	 * let's start with a sane default - 4KiB here - and resize it only if
	 * bpf_btf_get_info_by_fd() needs a bigger buffer.
	 */
	last_size = 4096;
	ptr = malloc(last_size);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	memset(&btf_info, 0, sizeof(btf_info));
	btf_info.btf = ptr_to_u64(ptr);
	btf_info.btf_size = last_size;
	err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);

	if (!err && btf_info.btf_size > last_size) {
		void *temp_ptr;

		last_size = btf_info.btf_size;
		temp_ptr = realloc(ptr, last_size);
		if (!temp_ptr) {
			btf = ERR_PTR(-ENOMEM);
			goto exit_free;
		}
		ptr = temp_ptr;

		len = sizeof(btf_info);
		memset(&btf_info, 0, sizeof(btf_info));
		btf_info.btf = ptr_to_u64(ptr);
		btf_info.btf_size = last_size;

		err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
	}

	/* BTF grew between the two queries -- give up rather than loop */
	if (err || btf_info.btf_size > last_size) {
		btf = err ? ERR_PTR(-errno) : ERR_PTR(-E2BIG);
		goto exit_free;
	}

	btf = btf_new(ptr, btf_info.btf_size, base_btf, false);

exit_free:
	/* btf_new() copied the bytes, so the scratch buffer can go */
	free(ptr);
	return btf;
}
1946 
/* Load BTF with the given kernel *id*, optionally split on top of
 * *base_btf*, using *token_fd* (if non-zero) as a BPF token for the
 * FD-by-ID lookup. Returns the BTF object or NULL with errno set.
 */
struct btf *btf_load_from_kernel(__u32 id, struct btf *base_btf, int token_fd)
{
	struct btf *btf;
	int btf_fd;
	LIBBPF_OPTS(bpf_get_fd_by_id_opts, opts);

	if (token_fd) {
		opts.open_flags |= BPF_F_TOKEN_FD;
		opts.token_fd = token_fd;
	}

	btf_fd = bpf_btf_get_fd_by_id_opts(id, &opts);
	if (btf_fd < 0)
		return libbpf_err_ptr(-errno);

	btf = btf_get_from_fd(btf_fd, base_btf);
	/* raw data has been copied out; the FD is no longer needed */
	close(btf_fd);

	return libbpf_ptr(btf);
}
1967 
/* Load split BTF by kernel ID on top of *base_btf*, without a token. */
struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
{
	return btf_load_from_kernel(id, base_btf, 0);
}
1972 
/* Load standalone BTF by kernel ID. */
struct btf *btf__load_from_kernel_by_id(__u32 id)
{
	return btf__load_from_kernel_by_id_split(id, NULL);
}
1977 
1978 static void btf_invalidate_raw_data(struct btf *btf)
1979 {
1980 	if (btf->raw_data)
1981 		btf_free_raw_data(btf);
1982 	if (btf->raw_data_swapped) {
1983 		free(btf->raw_data_swapped);
1984 		btf->raw_data_swapped = NULL;
1985 	}
1986 	btf->named_start_id = 0;
1987 }
1988 
/* Ensure BTF is ready to be modified by splitting it into three
 * separately-owned memory regions for types, strings and layout. Also
 * invalidates cached raw_data, if any. Idempotent; returns 0 on success
 * or a negative error (-EOPNOTSUPP if unknown extra header data makes
 * in-place modification unsafe).
 */
static int btf_ensure_modifiable(struct btf *btf)
{
	void *types, *layout = NULL;
	struct strset *set = NULL;
	int err = -ENOMEM;

	if (btf_is_modifiable(btf)) {
		/* any BTF modification invalidates raw_data */
		btf_invalidate_raw_data(btf);
		return 0;
	}

	if (btf->has_hdr_extra) {
		/* Additional BTF header data was found; not safe to modify. */
		return -EOPNOTSUPP;
	}

	/* split raw data into memory regions; btf->hdr is done already. */
	types = malloc(btf->hdr.type_len);
	if (!types)
		goto err_out;
	memcpy(types, btf->types_data, btf->hdr.type_len);

	if (btf->hdr.layout_len) {
		/* replace any pointer into raw_data (or at the static
		 * layouts table) with an owned copy
		 */
		layout = malloc(btf->hdr.layout_len);
		if (!layout)
			goto err_out;
		memcpy(layout, btf->raw_data + btf->hdr.hdr_len + btf->hdr.layout_off,
		       btf->hdr.layout_len);
	}

	/* build lookup index for all strings */
	set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr.str_len);
	if (IS_ERR(set)) {
		err = PTR_ERR(set);
		goto err_out;
	}

	/* only when everything was successful, update internal state */
	btf->types_data = types;
	btf->types_data_cap = btf->hdr.type_len;
	btf->strs_data = NULL;
	btf->strs_set = set;
	if (layout)
		btf->layout = layout;
	/* if BTF was created from scratch, all strings are guaranteed to be
	 * unique and deduplicated
	 */
	if (btf->hdr.str_len == 0)
		btf->strs_deduped = true;
	if (!btf->base_btf && btf->hdr.str_len == 1)
		btf->strs_deduped = true;

	/* invalidate raw_data representation */
	btf_invalidate_raw_data(btf);

	btf->modifiable = true;

	return 0;

err_out:
	strset__free(set);
	free(types);
	free(layout);
	return err;
}
2059 
/* Find an offset in BTF string section that corresponds to a given string *s*.
 * Base BTF's string section (if any) is consulted first, so offsets are
 * stable across base and split BTF.
 * Returns:
 *   - >0 offset into string section, if string is found;
 *   - -ENOENT, if string is not in the string section;
 *   - <0, on any other error.
 */
int btf__find_str(struct btf *btf, const char *s)
{
	int off;
	int err;

	if (btf->base_btf) {
		off = btf__find_str(btf->base_btf, s);
		if (off != -ENOENT)
			return off;
	}

	/* BTF needs to be in a modifiable state to build string lookup index */
	err = btf_ensure_modifiable(btf);
	if (err)
		return libbpf_err(err);

	off = strset__find_str(btf->strs_set, s);
	if (off < 0)
		return libbpf_err(off);

	/* translate local strset offset into global string offset space */
	return btf->start_str_off + off;
}
2088 
/* Add a string s to the BTF string section. If the string already exists
 * in the base BTF, its base offset is reused instead of duplicating it;
 * strset__add_str() likewise dedups within this BTF's own strings.
 * Returns:
 *   - > 0 offset into string section, on success;
 *   - < 0, on error.
 */
int btf__add_str(struct btf *btf, const char *s)
{
	int off;
	int err;

	if (btf->base_btf) {
		off = btf__find_str(btf->base_btf, s);
		if (off != -ENOENT)
			return off;
	}

	err = btf_ensure_modifiable(btf);
	if (err)
		return libbpf_err(err);

	off = strset__add_str(btf->strs_set, s);
	if (off < 0)
		return libbpf_err(off);

	btf->hdr.str_len = strset__data_size(btf->strs_set);

	/* translate local strset offset into global string offset space */
	return btf->start_str_off + off;
}
2117 
/* Grow the types region by add_sz bytes and return a pointer to the new
 * space at the end of existing type data, or NULL on allocation failure.
 */
static void *btf_add_type_mem(struct btf *btf, size_t add_sz)
{
	return libbpf_add_mem(&btf->types_data, &btf->types_data_cap, 1,
			      btf->hdr.type_len, UINT_MAX, add_sz);
}
2123 
/* Bump the vlen (member/param/etc. count) encoded in t->info by one,
 * preserving the kind and kflag bits.
 */
static void btf_type_inc_vlen(struct btf_type *t)
{
	t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t));
}
2128 
/* Set the new type section length and shift the offsets of everything
 * that follows it (layout section if present, then strings) accordingly.
 */
static void btf_hdr_update_type_len(struct btf *btf, int new_len)
{
	btf->hdr.type_len = new_len;
	if (btf->layout) {
		btf->hdr.layout_off = btf->hdr.type_off + new_len;
		btf->hdr.str_off = btf->hdr.layout_off + btf->hdr.layout_len;
	} else {
		btf->hdr.str_off = btf->hdr.type_off + new_len;
	}
}
2139 
/* Set the new string section length; strings are last, so no other
 * header offsets need adjusting.
 */
static void btf_hdr_update_str_len(struct btf *btf, int new_len)
{
	btf->hdr.str_len = new_len;
}
2144 
/* Finalize a type that was written into the types region: record its
 * offset in the type index, account for its size in the header, and
 * bump nr_types. Returns the new type's ID, or a negative error.
 */
static int btf_commit_type(struct btf *btf, int data_sz)
{
	int err;

	err = btf_add_type_idx_entry(btf, btf->hdr.type_len);
	if (err)
		return libbpf_err(err);

	btf_hdr_update_type_len(btf, btf->hdr.type_len + data_sz);
	btf->nr_types++;
	return btf->start_id + btf->nr_types - 1;
}
2157 
/* State for copying types from one BTF object into another */
struct btf_pipe {
	const struct btf *src;
	struct btf *dst;
	struct hashmap *str_off_map; /* map string offsets from src to dst */
};
2163 
/* Rewrite a source string offset in place to the corresponding offset in
 * the destination BTF, adding the string to dst's string section if
 * needed and caching the mapping in p->str_off_map (when present).
 * Returns 0 on success, negative error otherwise.
 */
static int btf_rewrite_str(struct btf_pipe *p, __u32 *str_off)
{
	long mapped_off;
	int off, err;

	if (!*str_off) /* nothing to do for empty strings */
		return 0;

	if (p->str_off_map &&
	    hashmap__find(p->str_off_map, *str_off, &mapped_off)) {
		*str_off = mapped_off;
		return 0;
	}

	off = btf__add_str(p->dst, btf__str_by_offset(p->src, *str_off));
	if (off < 0)
		return off;

	/* Remember string mapping from src to dst.  It avoids
	 * performing expensive string comparisons.
	 */
	if (p->str_off_map) {
		err = hashmap__append(p->str_off_map, *str_off, off);
		if (err)
			return err;
	}

	*str_off = off;
	return 0;
}
2194 
/* Copy a single type (btf_type header plus its kind-specific trailing data)
 * from p->src into p->dst, remapping all string offsets it references into
 * dst's strings section. Referenced type IDs are copied as-is. Returns the
 * new type's ID in dst, or a negative error.
 */
static int btf_add_type(struct btf_pipe *p, const struct btf_type *src_type)
{
	struct btf_field_iter it;
	struct btf_type *t;
	__u32 *str_off;
	int sz, err;

	/* full on-disk size of this type, incl. per-kind trailing data */
	sz = btf_type_size(p->src, src_type);
	if (sz < 0)
		return libbpf_err(sz);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	err = btf_ensure_modifiable(p->dst);
	if (err)
		return libbpf_err(err);

	t = btf_add_type_mem(p->dst, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	memcpy(t, src_type, sz);

	/* walk every string-offset field inside the copied type and rewrite
	 * it to point into dst's strings section
	 */
	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
	if (err)
		return libbpf_err(err);

	while ((str_off = btf_field_iter_next(&it))) {
		err = btf_rewrite_str(p, str_off);
		if (err)
			return libbpf_err(err);
	}

	return btf_commit_type(p->dst, sz);
}
2229 
2230 int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
2231 {
2232 	struct btf_pipe p = { .src = src_btf, .dst = btf };
2233 
2234 	return btf_add_type(&p, src_type);
2235 }
2236 
2237 static size_t btf_dedup_identity_hash_fn(long key, void *ctx);
2238 static bool btf_dedup_equal_fn(long k1, long k2, void *ctx);
2239 
/* Append all of src_btf's own types (not its base BTF's) to btf, remapping
 * referenced strings and type IDs as they are copied. Returns the ID (in
 * btf) of the first appended type, or a negative error. On failure, no new
 * types are committed and the strings section is rolled back to its
 * original size, leaving btf observably unmodified.
 */
int btf__add_btf(struct btf *btf, const struct btf *src_btf)
{
	struct btf_pipe p = { .src = src_btf, .dst = btf };
	int data_sz, sz, cnt, i, err, old_strs_len;
	__u32 src_start_id;
	__u32 *off;
	void *t;

	/*
	 * When appending split BTF, the destination must share the same base
	 * BTF so that base type ID references remain valid.
	 */
	if (src_btf->base_btf && src_btf->base_btf != btf->base_btf)
		return libbpf_err(-EOPNOTSUPP);

	/* first type ID owned by src_btf itself (IDs below belong to its base) */
	src_start_id = src_btf->base_btf ? btf__type_cnt(src_btf->base_btf) : 1;

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	err = btf_ensure_modifiable(btf);
	if (err)
		return libbpf_err(err);

	/* remember original strings section size if we have to roll back
	 * partial strings section changes
	 */
	old_strs_len = btf->hdr.str_len;

	data_sz = src_btf->hdr.type_len;
	cnt = src_btf->nr_types;

	/* pre-allocate enough memory for new types */
	t = btf_add_type_mem(btf, data_sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* pre-allocate enough memory for type offset index for new types */
	off = btf_add_type_offs_mem(btf, cnt);
	if (!off)
		return libbpf_err(-ENOMEM);

	/* Map the string offsets from src_btf to the offsets from btf to improve performance */
	p.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
	if (IS_ERR(p.str_off_map))
		return libbpf_err(-ENOMEM);

	/* bulk copy types data for all types from src_btf */
	memcpy(t, src_btf->types_data, data_sz);

	for (i = 0; i < cnt; i++) {
		struct btf_field_iter it;
		__u32 *type_id, *str_off;

		sz = btf_type_size(src_btf, t);
		if (sz < 0) {
			/* unlikely, has to be corrupted src_btf */
			err = sz;
			goto err_out;
		}

		/* fill out type ID to type offset mapping for lookups by type ID */
		*off = t - btf->types_data;

		/* add, dedup, and remap strings referenced by this BTF type */
		err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
		if (err)
			goto err_out;
		while ((str_off = btf_field_iter_next(&it))) {
			/* don't remap strings from shared base BTF */
			if (*str_off < src_btf->start_str_off)
				continue;
			err = btf_rewrite_str(&p, str_off);
			if (err)
				goto err_out;
		}

		/* remap all type IDs referenced from this BTF type */
		err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
		if (err)
			goto err_out;

		while ((type_id = btf_field_iter_next(&it))) {
			if (!*type_id) /* nothing to do for VOID references */
				continue;

			/* don't remap types from shared base BTF */
			if (*type_id < src_start_id)
				continue;

			/* shift src-local IDs by the difference between where
			 * types land in btf and where they were in src_btf
			 */
			*type_id += btf->start_id + btf->nr_types - src_start_id;
		}

		/* go to next type data and type offset index entry */
		t += sz;
		off++;
	}

	/* Up until now any of the copied type data was effectively invisible,
	 * so if we exited early before this point due to error, BTF would be
	 * effectively unmodified. There would be extra internal memory
	 * pre-allocated, but it would not be available for querying.  But now
	 * that we've copied and rewritten all the data successfully, we can
	 * update type count and various internal offsets and sizes to
	 * "commit" the changes and made them visible to the outside world.
	 */
	btf_hdr_update_type_len(btf, btf->hdr.type_len + data_sz);
	btf->nr_types += cnt;

	hashmap__free(p.str_off_map);

	/* return type ID of the first added BTF type */
	return btf->start_id + btf->nr_types - cnt;
err_out:
	/* zero out preallocated memory as if it was just allocated with
	 * libbpf_add_mem()
	 */
	memset(btf->types_data + btf->hdr.type_len, 0, data_sz);
	if (btf->strs_data)
		memset(btf->strs_data + old_strs_len, 0, btf->hdr.str_len - old_strs_len);

	/* and now restore original strings section size; types data size
	 * wasn't modified, so doesn't need restoring, see big comment above
	 */
	btf_hdr_update_str_len(btf, old_strs_len);

	hashmap__free(p.str_off_map);

	return libbpf_err(err);
}
2368 
2369 /*
2370  * Append new BTF_KIND_INT type with:
2371  *   - *name* - non-empty, non-NULL type name;
2372  *   - *sz* - power-of-2 (1, 2, 4, ..) size of the type, in bytes;
2373  *   - encoding is a combination of BTF_INT_SIGNED, BTF_INT_CHAR, BTF_INT_BOOL.
2374  * Returns:
2375  *   - >0, type ID of newly added BTF type;
2376  *   - <0, on error.
2377  */
2378 int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding)
2379 {
2380 	struct btf_type *t;
2381 	int sz, name_off;
2382 	int err;
2383 
2384 	/* non-empty name */
2385 	if (str_is_empty(name))
2386 		return libbpf_err(-EINVAL);
2387 	/* byte_sz must be power of 2 */
2388 	if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16)
2389 		return libbpf_err(-EINVAL);
2390 	if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL))
2391 		return libbpf_err(-EINVAL);
2392 
2393 	/* deconstruct BTF, if necessary, and invalidate raw_data */
2394 	err = btf_ensure_modifiable(btf);
2395 	if (err)
2396 		return libbpf_err(err);
2397 
2398 	sz = sizeof(struct btf_type) + sizeof(int);
2399 	t = btf_add_type_mem(btf, sz);
2400 	if (!t)
2401 		return libbpf_err(-ENOMEM);
2402 
2403 	/* if something goes wrong later, we might end up with an extra string,
2404 	 * but that shouldn't be a problem, because BTF can't be constructed
2405 	 * completely anyway and will most probably be just discarded
2406 	 */
2407 	name_off = btf__add_str(btf, name);
2408 	if (name_off < 0)
2409 		return name_off;
2410 
2411 	t->name_off = name_off;
2412 	t->info = btf_type_info(BTF_KIND_INT, 0, 0);
2413 	t->size = byte_sz;
2414 	/* set INT info, we don't allow setting legacy bit offset/size */
2415 	*(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8);
2416 
2417 	return btf_commit_type(btf, sz);
2418 }
2419 
2420 /*
2421  * Append new BTF_KIND_FLOAT type with:
2422  *   - *name* - non-empty, non-NULL type name;
2423  *   - *sz* - size of the type, in bytes;
2424  * Returns:
2425  *   - >0, type ID of newly added BTF type;
2426  *   - <0, on error.
2427  */
2428 int btf__add_float(struct btf *btf, const char *name, size_t byte_sz)
2429 {
2430 	struct btf_type *t;
2431 	int sz, name_off;
2432 	int err;
2433 
2434 	/* non-empty name */
2435 	if (str_is_empty(name))
2436 		return libbpf_err(-EINVAL);
2437 
2438 	/* byte_sz must be one of the explicitly allowed values */
2439 	if (byte_sz != 2 && byte_sz != 4 && byte_sz != 8 && byte_sz != 12 &&
2440 	    byte_sz != 16)
2441 		return libbpf_err(-EINVAL);
2442 
2443 	err = btf_ensure_modifiable(btf);
2444 	if (err)
2445 		return libbpf_err(err);
2446 
2447 	sz = sizeof(struct btf_type);
2448 	t = btf_add_type_mem(btf, sz);
2449 	if (!t)
2450 		return libbpf_err(-ENOMEM);
2451 
2452 	name_off = btf__add_str(btf, name);
2453 	if (name_off < 0)
2454 		return name_off;
2455 
2456 	t->name_off = name_off;
2457 	t->info = btf_type_info(BTF_KIND_FLOAT, 0, 0);
2458 	t->size = byte_sz;
2459 
2460 	return btf_commit_type(btf, sz);
2461 }
2462 
2463 /* it's completely legal to append BTF types with type IDs pointing forward to
2464  * types that haven't been appended yet, so we only make sure that id looks
2465  * sane, we can't guarantee that ID will always be valid
2466  */
2467 static int validate_type_id(int id)
2468 {
2469 	if (id < 0 || id > BTF_MAX_NR_TYPES)
2470 		return -EINVAL;
2471 	return 0;
2472 }
2473 
2474 /* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT */
2475 static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id, int kflag)
2476 {
2477 	struct btf_type *t;
2478 	int sz, name_off = 0;
2479 	int err;
2480 
2481 	if (validate_type_id(ref_type_id))
2482 		return libbpf_err(-EINVAL);
2483 
2484 	err = btf_ensure_modifiable(btf);
2485 	if (err)
2486 		return libbpf_err(err);
2487 
2488 	sz = sizeof(struct btf_type);
2489 	t = btf_add_type_mem(btf, sz);
2490 	if (!t)
2491 		return libbpf_err(-ENOMEM);
2492 
2493 	if (!str_is_empty(name)) {
2494 		name_off = btf__add_str(btf, name);
2495 		if (name_off < 0)
2496 			return name_off;
2497 	}
2498 
2499 	t->name_off = name_off;
2500 	t->info = btf_type_info(kind, 0, kflag);
2501 	t->type = ref_type_id;
2502 
2503 	return btf_commit_type(btf, sz);
2504 }
2505 
2506 /*
2507  * Append new BTF_KIND_PTR type with:
2508  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2509  * Returns:
2510  *   - >0, type ID of newly added BTF type;
2511  *   - <0, on error.
2512  */
int btf__add_ptr(struct btf *btf, int ref_type_id)
{
	/* PTR is anonymous and carries only the pointee type ID */
	return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id, 0);
}
2517 
2518 /*
2519  * Append new BTF_KIND_ARRAY type with:
2520  *   - *index_type_id* - type ID of the type describing array index;
2521  *   - *elem_type_id* - type ID of the type describing array element;
2522  *   - *nr_elems* - the size of the array;
2523  * Returns:
2524  *   - >0, type ID of newly added BTF type;
2525  *   - <0, on error.
2526  */
2527 int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems)
2528 {
2529 	struct btf_type *t;
2530 	struct btf_array *a;
2531 	int err;
2532 	int sz;
2533 
2534 	if (validate_type_id(index_type_id) || validate_type_id(elem_type_id))
2535 		return libbpf_err(-EINVAL);
2536 
2537 	err = btf_ensure_modifiable(btf);
2538 	if (err)
2539 		return libbpf_err(err);
2540 
2541 	sz = sizeof(struct btf_type) + sizeof(struct btf_array);
2542 	t = btf_add_type_mem(btf, sz);
2543 	if (!t)
2544 		return libbpf_err(-ENOMEM);
2545 
2546 	t->name_off = 0;
2547 	t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0);
2548 	t->size = 0;
2549 
2550 	a = btf_array(t);
2551 	a->type = elem_type_id;
2552 	a->index_type = index_type_id;
2553 	a->nelems = nr_elems;
2554 
2555 	return btf_commit_type(btf, sz);
2556 }
2557 
2558 /* generic STRUCT/UNION append function */
2559 static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz)
2560 {
2561 	struct btf_type *t;
2562 	int sz, name_off = 0;
2563 	int err;
2564 
2565 	err = btf_ensure_modifiable(btf);
2566 	if (err)
2567 		return libbpf_err(err);
2568 
2569 	sz = sizeof(struct btf_type);
2570 	t = btf_add_type_mem(btf, sz);
2571 	if (!t)
2572 		return libbpf_err(-ENOMEM);
2573 
2574 	if (!str_is_empty(name)) {
2575 		name_off = btf__add_str(btf, name);
2576 		if (name_off < 0)
2577 			return name_off;
2578 	}
2579 
2580 	/* start out with vlen=0 and no kflag; this will be adjusted when
2581 	 * adding each member
2582 	 */
2583 	t->name_off = name_off;
2584 	t->info = btf_type_info(kind, 0, 0);
2585 	t->size = bytes_sz;
2586 
2587 	return btf_commit_type(btf, sz);
2588 }
2589 
2590 /*
2591  * Append new BTF_KIND_STRUCT type with:
2592  *   - *name* - name of the struct, can be NULL or empty for anonymous structs;
2593  *   - *byte_sz* - size of the struct, in bytes;
2594  *
2595  * Struct initially has no fields in it. Fields can be added by
2596  * btf__add_field() right after btf__add_struct() succeeds.
2597  *
2598  * Returns:
2599  *   - >0, type ID of newly added BTF type;
2600  *   - <0, on error.
2601  */
int btf__add_struct(struct btf *btf, const char *name, __u32 byte_sz)
{
	/* thin wrapper; all the work happens in btf_add_composite() */
	return btf_add_composite(btf, BTF_KIND_STRUCT, name, byte_sz);
}
2606 
2607 /*
2608  * Append new BTF_KIND_UNION type with:
2609  *   - *name* - name of the union, can be NULL or empty for anonymous union;
2610  *   - *byte_sz* - size of the union, in bytes;
2611  *
2612  * Union initially has no fields in it. Fields can be added by
2613  * btf__add_field() right after btf__add_union() succeeds. All fields
2614  * should have *bit_offset* of 0.
2615  *
2616  * Returns:
2617  *   - >0, type ID of newly added BTF type;
2618  *   - <0, on error.
2619  */
int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz)
{
	/* thin wrapper; all the work happens in btf_add_composite() */
	return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz);
}
2624 
2625 static struct btf_type *btf_last_type(struct btf *btf)
2626 {
2627 	return btf_type_by_id(btf, btf__type_cnt(btf) - 1);
2628 }
2629 
2630 /*
2631  * Append new field for the current STRUCT/UNION type with:
2632  *   - *name* - name of the field, can be NULL or empty for anonymous field;
2633  *   - *type_id* - type ID for the type describing field type;
2634  *   - *bit_offset* - bit offset of the start of the field within struct/union;
2635  *   - *bit_size* - bit size of a bitfield, 0 for non-bitfield fields;
2636  * Returns:
2637  *   -  0, on success;
2638  *   - <0, on error.
2639  */
/* See API comment above. Ordering inside is significant: the member record
 * is reserved and filled first, and the parent type pointer is re-fetched
 * afterwards because btf_add_type_mem() may reallocate types data.
 */
int btf__add_field(struct btf *btf, const char *name, int type_id,
		   __u32 bit_offset, __u32 bit_size)
{
	struct btf_type *t;
	struct btf_member *m;
	bool is_bitfield;
	int sz, name_off = 0;
	int err;

	/* last type should be union/struct */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_composite(t))
		return libbpf_err(-EINVAL);

	if (validate_type_id(type_id))
		return libbpf_err(-EINVAL);
	/* best-effort bit field offset/size enforcement */
	is_bitfield = bit_size || (bit_offset % 8 != 0);
	if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff))
		return libbpf_err(-EINVAL);

	/* only offset 0 is allowed for unions */
	if (btf_is_union(t) && bit_offset)
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	err = btf_ensure_modifiable(btf);
	if (err)
		return libbpf_err(err);

	sz = sizeof(struct btf_member);
	m = btf_add_type_mem(btf, sz);
	if (!m)
		return libbpf_err(-ENOMEM);

	if (!str_is_empty(name)) {
		name_off = btf__add_str(btf, name);
		if (name_off < 0)
			return name_off;
	}

	/* bit size is packed into the top 8 bits of member offset */
	m->name_off = name_off;
	m->type = type_id;
	m->offset = bit_offset | (bit_size << 24);

	/* btf_add_type_mem can invalidate t pointer */
	t = btf_last_type(btf);
	/* update parent type's vlen and kflag */
	t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t));

	btf_hdr_update_type_len(btf, btf->hdr.type_len + sz);
	return 0;
}
2695 
2696 static int btf_add_enum_common(struct btf *btf, const char *name, __u32 byte_sz,
2697 			       bool is_signed, __u8 kind)
2698 {
2699 	struct btf_type *t;
2700 	int sz, name_off = 0;
2701 	int err;
2702 
2703 	/* byte_sz must be power of 2 */
2704 	if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8)
2705 		return libbpf_err(-EINVAL);
2706 
2707 	err = btf_ensure_modifiable(btf);
2708 	if (err)
2709 		return libbpf_err(err);
2710 
2711 	sz = sizeof(struct btf_type);
2712 	t = btf_add_type_mem(btf, sz);
2713 	if (!t)
2714 		return libbpf_err(-ENOMEM);
2715 
2716 	if (!str_is_empty(name)) {
2717 		name_off = btf__add_str(btf, name);
2718 		if (name_off < 0)
2719 			return name_off;
2720 	}
2721 
2722 	/* start out with vlen=0; it will be adjusted when adding enum values */
2723 	t->name_off = name_off;
2724 	t->info = btf_type_info(kind, 0, is_signed);
2725 	t->size = byte_sz;
2726 
2727 	return btf_commit_type(btf, sz);
2728 }
2729 
2730 /*
2731  * Append new BTF_KIND_ENUM type with:
2732  *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
2733  *   - *byte_sz* - size of the enum, in bytes.
2734  *
2735  * Enum initially has no enum values in it (and corresponds to enum forward
2736  * declaration). Enumerator values can be added by btf__add_enum_value()
2737  * immediately after btf__add_enum() succeeds.
2738  *
2739  * Returns:
2740  *   - >0, type ID of newly added BTF type;
2741  *   - <0, on error.
2742  */
int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
{
	/* legacy 32-bit enum kind; see btf__add_enum64() for 64-bit values */
	/*
	 * set the signedness to be unsigned, it will change to signed
	 * if any later enumerator is negative.
	 */
	return btf_add_enum_common(btf, name, byte_sz, false, BTF_KIND_ENUM);
}
2751 
2752 /*
2753  * Append new enum value for the current ENUM type with:
2754  *   - *name* - name of the enumerator value, can't be NULL or empty;
2755  *   - *value* - integer value corresponding to enum value *name*;
2756  * Returns:
2757  *   -  0, on success;
2758  *   - <0, on error.
2759  */
/* See API comment above. Appends a btf_enum record after the last type
 * (which must be an ENUM) and bumps that type's vlen; flips the parent's
 * signedness flag if a negative value is added.
 */
int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
{
	struct btf_type *t;
	struct btf_enum *v;
	int sz, name_off;
	int err;

	/* last type should be BTF_KIND_ENUM */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_enum(t))
		return libbpf_err(-EINVAL);

	/* non-empty name */
	if (str_is_empty(name))
		return libbpf_err(-EINVAL);
	/* value must fit into 32 bits, signed or unsigned */
	if (value < INT_MIN || value > UINT_MAX)
		return libbpf_err(-E2BIG);

	/* decompose and invalidate raw data */
	err = btf_ensure_modifiable(btf);
	if (err)
		return libbpf_err(err);

	sz = sizeof(struct btf_enum);
	v = btf_add_type_mem(btf, sz);
	if (!v)
		return libbpf_err(-ENOMEM);

	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	v->name_off = name_off;
	v->val = value;

	/* update parent type's vlen; re-fetch t, as btf_add_type_mem() above
	 * may have reallocated types data
	 */
	t = btf_last_type(btf);
	btf_type_inc_vlen(t);

	/* if negative value, set signedness to signed */
	if (value < 0)
		t->info = btf_type_info(btf_kind(t), btf_vlen(t), true);

	btf_hdr_update_type_len(btf, btf->hdr.type_len + sz);
	return 0;
}
2808 
2809 /*
2810  * Append new BTF_KIND_ENUM64 type with:
2811  *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
2812  *   - *byte_sz* - size of the enum, in bytes.
2813  *   - *is_signed* - whether the enum values are signed or not;
2814  *
2815  * Enum initially has no enum values in it (and corresponds to enum forward
2816  * declaration). Enumerator values can be added by btf__add_enum64_value()
2817  * immediately after btf__add_enum64() succeeds.
2818  *
2819  * Returns:
2820  *   - >0, type ID of newly added BTF type;
2821  *   - <0, on error.
2822  */
int btf__add_enum64(struct btf *btf, const char *name, __u32 byte_sz,
		    bool is_signed)
{
	/* unlike legacy ENUM, signedness is declared up front here and is
	 * passed through to the type's kflag by btf_add_enum_common()
	 */
	return btf_add_enum_common(btf, name, byte_sz, is_signed,
				   BTF_KIND_ENUM64);
}
2829 
2830 /*
2831  * Append new enum value for the current ENUM64 type with:
2832  *   - *name* - name of the enumerator value, can't be NULL or empty;
2833  *   - *value* - integer value corresponding to enum value *name*;
2834  * Returns:
2835  *   -  0, on success;
2836  *   - <0, on error.
2837  */
/* See API comment above. Appends a btf_enum64 record after the last type
 * (which must be an ENUM64), splitting the 64-bit value into lo32/hi32
 * halves, and bumps that type's vlen.
 */
int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value)
{
	struct btf_enum64 *v;
	struct btf_type *t;
	int sz, name_off;
	int err;

	/* last type should be BTF_KIND_ENUM64 */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_enum64(t))
		return libbpf_err(-EINVAL);

	/* non-empty name */
	if (str_is_empty(name))
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	err = btf_ensure_modifiable(btf);
	if (err)
		return libbpf_err(err);

	sz = sizeof(struct btf_enum64);
	v = btf_add_type_mem(btf, sz);
	if (!v)
		return libbpf_err(-ENOMEM);

	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	v->name_off = name_off;
	v->val_lo32 = (__u32)value;
	v->val_hi32 = value >> 32;

	/* update parent type's vlen; re-fetch t, as btf_add_type_mem() above
	 * may have reallocated types data
	 */
	t = btf_last_type(btf);
	btf_type_inc_vlen(t);

	btf_hdr_update_type_len(btf, btf->hdr.type_len + sz);
	return 0;
}
2881 
2882 /*
2883  * Append new BTF_KIND_FWD type with:
2884  *   - *name*, non-empty/non-NULL name;
2885  *   - *fwd_kind*, kind of forward declaration, one of BTF_FWD_STRUCT,
2886  *     BTF_FWD_UNION, or BTF_FWD_ENUM;
2887  * Returns:
2888  *   - >0, type ID of newly added BTF type;
2889  *   - <0, on error.
2890  */
2891 int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind)
2892 {
2893 	if (str_is_empty(name))
2894 		return libbpf_err(-EINVAL);
2895 
2896 	switch (fwd_kind) {
2897 	case BTF_FWD_STRUCT:
2898 	case BTF_FWD_UNION: {
2899 		struct btf_type *t;
2900 		int id;
2901 
2902 		id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0, 0);
2903 		if (id <= 0)
2904 			return id;
2905 		t = btf_type_by_id(btf, id);
2906 		t->info = btf_type_info(BTF_KIND_FWD, 0, fwd_kind == BTF_FWD_UNION);
2907 		return id;
2908 	}
2909 	case BTF_FWD_ENUM:
2910 		/* enum forward in BTF currently is just an enum with no enum
2911 		 * values; we also assume a standard 4-byte size for it
2912 		 */
2913 		return btf__add_enum(btf, name, sizeof(int));
2914 	default:
2915 		return libbpf_err(-EINVAL);
2916 	}
2917 }
2918 
2919 /*
 * Append new BTF_KIND_TYPEDEF type with:
2921  *   - *name*, non-empty/non-NULL name;
2922  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2923  * Returns:
2924  *   - >0, type ID of newly added BTF type;
2925  *   - <0, on error.
2926  */
int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id)
{
	/* unlike PTR/CONST/etc., a typedef must be named */
	if (str_is_empty(name))
		return libbpf_err(-EINVAL);

	return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id, 0);
}
2934 
2935 /*
2936  * Append new BTF_KIND_VOLATILE type with:
2937  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2938  * Returns:
2939  *   - >0, type ID of newly added BTF type;
2940  *   - <0, on error.
2941  */
int btf__add_volatile(struct btf *btf, int ref_type_id)
{
	/* anonymous modifier wrapping ref_type_id */
	return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id, 0);
}
2946 
2947 /*
2948  * Append new BTF_KIND_CONST type with:
2949  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2950  * Returns:
2951  *   - >0, type ID of newly added BTF type;
2952  *   - <0, on error.
2953  */
int btf__add_const(struct btf *btf, int ref_type_id)
{
	/* anonymous modifier wrapping ref_type_id */
	return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id, 0);
}
2958 
2959 /*
2960  * Append new BTF_KIND_RESTRICT type with:
2961  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2962  * Returns:
2963  *   - >0, type ID of newly added BTF type;
2964  *   - <0, on error.
2965  */
int btf__add_restrict(struct btf *btf, int ref_type_id)
{
	/* anonymous modifier wrapping ref_type_id */
	return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id, 0);
}
2970 
2971 /*
2972  * Append new BTF_KIND_TYPE_TAG type with:
2973  *   - *value*, non-empty/non-NULL tag value;
2974  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2975  * Returns:
2976  *   - >0, type ID of newly added BTF type;
2977  *   - <0, on error.
2978  */
int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
{
	/* tag value is mandatory */
	if (str_is_empty(value))
		return libbpf_err(-EINVAL);

	/* kflag=0: plain type tag (cf. btf__add_type_attr() with kflag=1) */
	return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id, 0);
}
2986 
2987 /*
2988  * Append new BTF_KIND_TYPE_TAG type with:
2989  *   - *value*, non-empty/non-NULL tag value;
2990  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2991  * Set info->kflag to 1, indicating this tag is an __attribute__
2992  * Returns:
2993  *   - >0, type ID of newly added BTF type;
2994  *   - <0, on error.
2995  */
int btf__add_type_attr(struct btf *btf, const char *value, int ref_type_id)
{
	/* tag value is mandatory */
	if (str_is_empty(value))
		return libbpf_err(-EINVAL);

	/* kflag=1 marks this TYPE_TAG as an attribute-style tag */
	return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id, 1);
}
3003 
3004 /*
3005  * Append new BTF_KIND_FUNC type with:
3006  *   - *name*, non-empty/non-NULL name;
3007  *   - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet;
3008  * Returns:
3009  *   - >0, type ID of newly added BTF type;
3010  *   - <0, on error.
3011  */
3012 int btf__add_func(struct btf *btf, const char *name,
3013 		  enum btf_func_linkage linkage, int proto_type_id)
3014 {
3015 	int id;
3016 
3017 	if (str_is_empty(name))
3018 		return libbpf_err(-EINVAL);
3019 	if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL &&
3020 	    linkage != BTF_FUNC_EXTERN)
3021 		return libbpf_err(-EINVAL);
3022 
3023 	id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id, 0);
3024 	if (id > 0) {
3025 		struct btf_type *t = btf_type_by_id(btf, id);
3026 
3027 		t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0);
3028 	}
3029 	return libbpf_err(id);
3030 }
3031 
3032 /*
3033  * Append new BTF_KIND_FUNC_PROTO with:
3034  *   - *ret_type_id* - type ID for return result of a function.
3035  *
3036  * Function prototype initially has no arguments, but they can be added by
3037  * btf__add_func_param() one by one, immediately after
3038  * btf__add_func_proto() succeeded.
3039  *
3040  * Returns:
3041  *   - >0, type ID of newly added BTF type;
3042  *   - <0, on error.
3043  */
3044 int btf__add_func_proto(struct btf *btf, int ret_type_id)
3045 {
3046 	struct btf_type *t;
3047 	int err;
3048 	int sz;
3049 
3050 	if (validate_type_id(ret_type_id))
3051 		return libbpf_err(-EINVAL);
3052 
3053 	err = btf_ensure_modifiable(btf);
3054 	if (err)
3055 		return libbpf_err(err);
3056 
3057 	sz = sizeof(struct btf_type);
3058 	t = btf_add_type_mem(btf, sz);
3059 	if (!t)
3060 		return libbpf_err(-ENOMEM);
3061 
3062 	/* start out with vlen=0; this will be adjusted when adding enum
3063 	 * values, if necessary
3064 	 */
3065 	t->name_off = 0;
3066 	t->info = btf_type_info(BTF_KIND_FUNC_PROTO, 0, 0);
3067 	t->type = ret_type_id;
3068 
3069 	return btf_commit_type(btf, sz);
3070 }
3071 
3072 /*
3073  * Append new function parameter for current FUNC_PROTO type with:
3074  *   - *name* - parameter name, can be NULL or empty;
3075  *   - *type_id* - type ID describing the type of the parameter.
3076  * Returns:
3077  *   -  0, on success;
3078  *   - <0, on error.
3079  */
/* See API comment above. Appends a btf_param record after the last type
 * (which must be a FUNC_PROTO) and bumps that type's vlen; t is re-fetched
 * after btf_add_type_mem(), which may reallocate types data.
 */
int btf__add_func_param(struct btf *btf, const char *name, int type_id)
{
	struct btf_type *t;
	struct btf_param *p;
	int sz, name_off = 0;
	int err;

	if (validate_type_id(type_id))
		return libbpf_err(-EINVAL);

	/* last type should be BTF_KIND_FUNC_PROTO */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_func_proto(t))
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	err = btf_ensure_modifiable(btf);
	if (err)
		return libbpf_err(err);

	sz = sizeof(struct btf_param);
	p = btf_add_type_mem(btf, sz);
	if (!p)
		return libbpf_err(-ENOMEM);

	/* parameter name is optional */
	if (!str_is_empty(name)) {
		name_off = btf__add_str(btf, name);
		if (name_off < 0)
			return name_off;
	}

	p->name_off = name_off;
	p->type = type_id;

	/* update parent type's vlen */
	t = btf_last_type(btf);
	btf_type_inc_vlen(t);

	btf_hdr_update_type_len(btf, btf->hdr.type_len + sz);
	return 0;
}
3123 
3124 /*
3125  * Append new BTF_KIND_VAR type with:
3126  *   - *name* - non-empty/non-NULL name;
3127  *   - *linkage* - variable linkage, one of BTF_VAR_STATIC,
3128  *     BTF_VAR_GLOBAL_ALLOCATED, or BTF_VAR_GLOBAL_EXTERN;
3129  *   - *type_id* - type ID of the type describing the type of the variable.
3130  * Returns:
3131  *   - >0, type ID of newly added BTF type;
3132  *   - <0, on error.
3133  */
3134 int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id)
3135 {
3136 	struct btf_type *t;
3137 	struct btf_var *v;
3138 	int sz, name_off;
3139 	int err;
3140 
3141 	/* non-empty name */
3142 	if (str_is_empty(name))
3143 		return libbpf_err(-EINVAL);
3144 	if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED &&
3145 	    linkage != BTF_VAR_GLOBAL_EXTERN)
3146 		return libbpf_err(-EINVAL);
3147 	if (validate_type_id(type_id))
3148 		return libbpf_err(-EINVAL);
3149 
3150 	/* deconstruct BTF, if necessary, and invalidate raw_data */
3151 	err = btf_ensure_modifiable(btf);
3152 	if (err)
3153 		return libbpf_err(err);
3154 
3155 	sz = sizeof(struct btf_type) + sizeof(struct btf_var);
3156 	t = btf_add_type_mem(btf, sz);
3157 	if (!t)
3158 		return libbpf_err(-ENOMEM);
3159 
3160 	name_off = btf__add_str(btf, name);
3161 	if (name_off < 0)
3162 		return name_off;
3163 
3164 	t->name_off = name_off;
3165 	t->info = btf_type_info(BTF_KIND_VAR, 0, 0);
3166 	t->type = type_id;
3167 
3168 	v = btf_var(t);
3169 	v->linkage = linkage;
3170 
3171 	return btf_commit_type(btf, sz);
3172 }
3173 
3174 /*
3175  * Append new BTF_KIND_DATASEC type with:
3176  *   - *name* - non-empty/non-NULL name;
3177  *   - *byte_sz* - data section size, in bytes.
3178  *
3179  * Data section is initially empty. Variables info can be added with
3180  * btf__add_datasec_var_info() calls, after btf__add_datasec() succeeds.
3181  *
3182  * Returns:
3183  *   - >0, type ID of newly added BTF type;
3184  *   - <0, on error.
3185  */
int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz)
{
	struct btf_type *t;
	int sz, name_off;
	int err;

	/* non-empty name */
	if (str_is_empty(name))
		return libbpf_err(-EINVAL);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	err = btf_ensure_modifiable(btf);
	if (err)
		return libbpf_err(err);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* NOTE(review): btf__add_str() must not move the types buffer that
	 * *t* points into — appears to use separate string storage; confirm
	 */
	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	/* start with vlen=0, which will be updated as var_secinfos are added */
	t->name_off = name_off;
	t->info = btf_type_info(BTF_KIND_DATASEC, 0, 0);
	t->size = byte_sz;

	return btf_commit_type(btf, sz);
}
3216 
3217 /*
3218  * Append new data section variable information entry for current DATASEC type:
3219  *   - *var_type_id* - type ID, describing type of the variable;
3220  *   - *offset* - variable offset within data section, in bytes;
3221  *   - *byte_sz* - variable size, in bytes.
3222  *
3223  * Returns:
3224  *   -  0, on success;
3225  *   - <0, on error.
3226  */
int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz)
{
	struct btf_type *t;
	struct btf_var_secinfo *v;
	int err;
	int sz;

	/* last type should be BTF_KIND_DATASEC */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_datasec(t))
		return libbpf_err(-EINVAL);

	if (validate_type_id(var_type_id))
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	err = btf_ensure_modifiable(btf);
	if (err)
		return libbpf_err(err);

	sz = sizeof(struct btf_var_secinfo);
	v = btf_add_type_mem(btf, sz);
	if (!v)
		return libbpf_err(-ENOMEM);

	v->type = var_type_id;
	v->offset = offset;
	v->size = byte_sz;

	/* update parent type's vlen; re-fetch the DATASEC pointer, as
	 * btf_add_type_mem() above may have reallocated type data,
	 * invalidating the earlier *t*
	 */
	t = btf_last_type(btf);
	btf_type_inc_vlen(t);

	/* raw data was appended to an existing type rather than a new type
	 * being created, so only type_len grows here (no btf_commit_type())
	 */
	btf_hdr_update_type_len(btf, btf->hdr.type_len + sz);
	return 0;
}
3265 
/* Common implementation for btf__add_decl_tag() and btf__add_decl_attr();
 * *kflag* distinguishes a plain decl tag (0) from an attribute-style tag (1).
 */
static int btf_add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
			    int component_idx, int kflag)
{
	struct btf_type *t;
	int sz, value_off;
	int err;

	/* -1 means the tag applies to the referenced type itself and is the
	 * smallest valid component index
	 */
	if (str_is_empty(value) || component_idx < -1)
		return libbpf_err(-EINVAL);

	if (validate_type_id(ref_type_id))
		return libbpf_err(-EINVAL);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	err = btf_ensure_modifiable(btf);
	if (err)
		return libbpf_err(err);

	sz = sizeof(struct btf_type) + sizeof(struct btf_decl_tag);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	value_off = btf__add_str(btf, value);
	if (value_off < 0)
		return value_off;

	t->name_off = value_off;
	t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, kflag);
	t->type = ref_type_id;
	btf_decl_tag(t)->component_idx = component_idx;

	return btf_commit_type(btf, sz);
}
3299 
3300 /*
3301  * Append new BTF_KIND_DECL_TAG type with:
3302  *   - *value* - non-empty/non-NULL string;
3303  *   - *ref_type_id* - referenced type ID, it might not exist yet;
3304  *   - *component_idx* - -1 for tagging reference type, otherwise struct/union
3305  *     member or function argument index;
3306  * Returns:
3307  *   - >0, type ID of newly added BTF type;
3308  *   - <0, on error.
3309  */
3310 int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
3311 		      int component_idx)
3312 {
3313 	return btf_add_decl_tag(btf, value, ref_type_id, component_idx, 0);
3314 }
3315 
3316 /*
3317  * Append new BTF_KIND_DECL_TAG type with:
3318  *   - *value* - non-empty/non-NULL string;
3319  *   - *ref_type_id* - referenced type ID, it might not exist yet;
3320  *   - *component_idx* - -1 for tagging reference type, otherwise struct/union
3321  *     member or function argument index;
3322  * Set info->kflag to 1, indicating this tag is an __attribute__
3323  * Returns:
3324  *   - >0, type ID of newly added BTF type;
3325  *   - <0, on error.
3326  */
3327 int btf__add_decl_attr(struct btf *btf, const char *value, int ref_type_id,
3328 		       int component_idx)
3329 {
3330 	return btf_add_decl_tag(btf, value, ref_type_id, component_idx, 1);
3331 }
3332 
/* Input/output bundle describing one .BTF.ext info subsection for
 * btf_ext_parse_sec_info() below.
 */
struct btf_ext_sec_info_param {
	__u32 off;			/* subsection offset past .BTF.ext header */
	__u32 len;			/* subsection length, in bytes */
	__u32 min_rec_size;		/* smallest acceptable record size */
	struct btf_ext_info *ext_info;	/* where parsed results are cached */
	const char *desc;		/* subsection name, for error messages */
};
3340 
3341 /*
3342  * Parse a single info subsection of the BTF.ext info data:
3343  *  - validate subsection structure and elements
3344  *  - save info subsection start and sizing details in struct btf_ext
3345  *  - endian-independent operation, for calling before byte-swapping
3346  */
static int btf_ext_parse_sec_info(struct btf_ext *btf_ext,
				  struct btf_ext_sec_info_param *ext_sec,
				  bool is_native)
{
	const struct btf_ext_info_sec *sinfo;
	struct btf_ext_info *ext_info;
	__u32 info_left, record_size;
	size_t sec_cnt = 0;
	void *info;

	/* an absent (zero-length) subsection is valid: nothing to record */
	if (ext_sec->len == 0)
		return 0;

	if (ext_sec->off & 0x03) {
		pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
		     ext_sec->desc);
		return -EINVAL;
	}

	/* The start of the info sec (including the __u32 record_size). */
	info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
	info_left = ext_sec->len;

	/* bounds-check the subsection against the raw .BTF.ext buffer */
	if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
		pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
			 ext_sec->desc, ext_sec->off, ext_sec->len);
		return -EINVAL;
	}

	/* At least a record size */
	if (info_left < sizeof(__u32)) {
		pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
		return -EINVAL;
	}

	/* The record size needs to meet either the minimum standard or, when
	 * handling non-native endianness data, the exact standard so as
	 * to allow safe byte-swapping.
	 */
	record_size = is_native ? *(__u32 *)info : bswap_32(*(__u32 *)info);
	if (record_size < ext_sec->min_rec_size ||
	    (!is_native && record_size != ext_sec->min_rec_size) ||
	    record_size & 0x03) {
		pr_debug("%s section in .BTF.ext has invalid record size %u\n",
			 ext_sec->desc, record_size);
		return -EINVAL;
	}

	sinfo = info + sizeof(__u32);
	info_left -= sizeof(__u32);

	/* If no records, return failure now so .BTF.ext won't be used. */
	if (!info_left) {
		pr_debug("%s section in .BTF.ext has no records\n", ext_sec->desc);
		return -EINVAL;
	}

	/* walk per-ELF-section headers, each followed by num_info records */
	while (info_left) {
		unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
		__u64 total_record_size;
		__u32 num_records;

		if (info_left < sec_hdrlen) {
			pr_debug("%s section header is not found in .BTF.ext\n",
			     ext_sec->desc);
			return -EINVAL;
		}

		num_records = is_native ? sinfo->num_info : bswap_32(sinfo->num_info);
		if (num_records == 0) {
			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
			     ext_sec->desc);
			return -EINVAL;
		}

		/* 64-bit math guards num_records * record_size overflow */
		total_record_size = sec_hdrlen + (__u64)num_records * record_size;
		if (info_left < total_record_size) {
			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
			     ext_sec->desc);
			return -EINVAL;
		}

		info_left -= total_record_size;
		sinfo = (void *)sinfo + total_record_size;
		sec_cnt++;
	}

	/* cache validated subsection location/sizing for later use */
	ext_info = ext_sec->ext_info;
	ext_info->len = ext_sec->len - sizeof(__u32);
	ext_info->rec_size = record_size;
	ext_info->info = info + sizeof(__u32);
	ext_info->sec_cnt = sec_cnt;

	return 0;
}
3442 
3443 /* Parse all info secs in the BTF.ext info data */
3444 static int btf_ext_parse_info(struct btf_ext *btf_ext, bool is_native)
3445 {
3446 	struct btf_ext_sec_info_param func_info = {
3447 		.off = btf_ext->hdr->func_info_off,
3448 		.len = btf_ext->hdr->func_info_len,
3449 		.min_rec_size = sizeof(struct bpf_func_info_min),
3450 		.ext_info = &btf_ext->func_info,
3451 		.desc = "func_info"
3452 	};
3453 	struct btf_ext_sec_info_param line_info = {
3454 		.off = btf_ext->hdr->line_info_off,
3455 		.len = btf_ext->hdr->line_info_len,
3456 		.min_rec_size = sizeof(struct bpf_line_info_min),
3457 		.ext_info = &btf_ext->line_info,
3458 		.desc = "line_info",
3459 	};
3460 	struct btf_ext_sec_info_param core_relo = {
3461 		.min_rec_size = sizeof(struct bpf_core_relo),
3462 		.ext_info = &btf_ext->core_relo_info,
3463 		.desc = "core_relo",
3464 	};
3465 	int err;
3466 
3467 	err = btf_ext_parse_sec_info(btf_ext, &func_info, is_native);
3468 	if (err)
3469 		return err;
3470 
3471 	err = btf_ext_parse_sec_info(btf_ext, &line_info, is_native);
3472 	if (err)
3473 		return err;
3474 
3475 	if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
3476 		return 0; /* skip core relos parsing */
3477 
3478 	core_relo.off = btf_ext->hdr->core_relo_off;
3479 	core_relo.len = btf_ext->hdr->core_relo_len;
3480 	err = btf_ext_parse_sec_info(btf_ext, &core_relo, is_native);
3481 	if (err)
3482 		return err;
3483 
3484 	return 0;
3485 }
3486 
3487 /* Swap byte-order of BTF.ext header with any endianness */
static void btf_ext_bswap_hdr(struct btf_ext_header *h)
{
	/* detect current byte-order from the magic before swapping anything */
	bool is_native = h->magic == BTF_MAGIC;
	__u32 hdr_len;

	/* read hdr_len in native order to learn which fields the header has */
	hdr_len = is_native ? h->hdr_len : bswap_32(h->hdr_len);

	h->magic = bswap_16(h->magic);
	h->hdr_len = bswap_32(h->hdr_len);
	h->func_info_off = bswap_32(h->func_info_off);
	h->func_info_len = bswap_32(h->func_info_len);
	h->line_info_off = bswap_32(h->line_info_off);
	h->line_info_len = bswap_32(h->line_info_len);

	/* older headers end before the optional core_relo fields */
	if (hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
		return;

	h->core_relo_off = bswap_32(h->core_relo_off);
	h->core_relo_len = bswap_32(h->core_relo_len);
}
3508 
3509 /* Swap byte-order of generic info subsection */
static void btf_ext_bswap_info_sec(void *info, __u32 len, bool is_native,
				   info_rec_bswap_fn bswap_fn)
{
	struct btf_ext_info_sec *sec;
	__u32 info_left, rec_size, *rs;

	if (len == 0)
		return;

	rs = info;				/* info record size */
	rec_size = is_native ? *rs : bswap_32(*rs);
	*rs = bswap_32(*rs);

	sec = info + sizeof(__u32);		/* info sec #1 */
	info_left = len - sizeof(__u32);
	while (info_left) {
		unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
		__u32 i, num_recs;
		void *p;

		/* read record count in native order before swapping header */
		num_recs = is_native ? sec->num_info : bswap_32(sec->num_info);
		sec->sec_name_off = bswap_32(sec->sec_name_off);
		sec->num_info = bswap_32(sec->num_info);
		p = sec->data;			/* info rec #1 */
		for (i = 0; i < num_recs; i++, p += rec_size)
			bswap_fn(p);
		/* p is now just past the last record, i.e. the next sec */
		sec = p;
		info_left -= sec_hdrlen + (__u64)rec_size * num_recs;
	}
}
3540 
3541 /*
3542  * Swap byte-order of all info data in a BTF.ext section
3543  *  - requires BTF.ext hdr in native endianness
3544  */
static void btf_ext_bswap_info(struct btf_ext *btf_ext, void *data)
{
	/* NOTE(review): at both existing call sites swapped_endian happens to
	 * match whether *data* is in native byte-order at the time of the
	 * call (false during initial parse of foreign data, true when
	 * producing a swapped copy of native data) — confirm if adding
	 * new callers
	 */
	const bool is_native = btf_ext->swapped_endian;
	const struct btf_ext_header *h = data;
	void *info;

	/* Swap func_info subsection byte-order */
	info = data + h->hdr_len + h->func_info_off;
	btf_ext_bswap_info_sec(info, h->func_info_len, is_native,
			       (info_rec_bswap_fn)bpf_func_info_bswap);

	/* Swap line_info subsection byte-order */
	info = data + h->hdr_len + h->line_info_off;
	btf_ext_bswap_info_sec(info, h->line_info_len, is_native,
			       (info_rec_bswap_fn)bpf_line_info_bswap);

	/* Swap core_relo subsection byte-order (if present) */
	if (h->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
		return;

	info = data + h->hdr_len + h->core_relo_off;
	btf_ext_bswap_info_sec(info, h->core_relo_len, is_native,
			       (info_rec_bswap_fn)bpf_core_relo_bswap);
}
3569 
3570 /* Parse hdr data and info sections: check and convert to native endianness */
static int btf_ext_parse(struct btf_ext *btf_ext)
{
	__u32 hdr_len, data_size = btf_ext->data_size;
	struct btf_ext_header *hdr = btf_ext->hdr;
	bool swapped_endian = false;
	int err;

	/* need enough bytes to read magic and hdr_len before anything else */
	if (data_size < offsetofend(struct btf_ext_header, hdr_len)) {
		pr_debug("BTF.ext header too short\n");
		return -EINVAL;
	}

	/* byte-swapped magic means data comes from an opposite-endian host */
	hdr_len = hdr->hdr_len;
	if (hdr->magic == bswap_16(BTF_MAGIC)) {
		swapped_endian = true;
		hdr_len = bswap_32(hdr_len);
	} else if (hdr->magic != BTF_MAGIC) {
		pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
		return -EINVAL;
	}

	/* Ensure known version of structs, current BTF_VERSION == 1.
	 * (version/flags are read unswapped — presumably byte-sized fields;
	 * confirm against struct btf_ext_header)
	 */
	if (hdr->version != 1) {
		pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
		return -ENOTSUP;
	}

	if (hdr->flags) {
		pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
		return -ENOTSUP;
	}

	if (data_size < hdr_len) {
		pr_debug("BTF.ext header not found\n");
		return -EINVAL;
	} else if (data_size == hdr_len) {
		pr_debug("BTF.ext has no data\n");
		return -EINVAL;
	}

	/* Verify mandatory hdr info details present */
	if (hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
		pr_warn("BTF.ext header missing func_info, line_info\n");
		return -EINVAL;
	}

	/* Keep hdr native byte-order in memory for introspection */
	if (swapped_endian)
		btf_ext_bswap_hdr(btf_ext->hdr);

	/* Validate info subsections and cache key metadata */
	err = btf_ext_parse_info(btf_ext, !swapped_endian);
	if (err)
		return err;

	/* Keep infos native byte-order in memory for introspection */
	if (swapped_endian)
		btf_ext_bswap_info(btf_ext, btf_ext->data);

	/*
	 * Set btf_ext->swapped_endian only after all header and info data has
	 * been swapped, helping bswap functions determine if their data are
	 * in native byte-order when called.
	 */
	btf_ext->swapped_endian = swapped_endian;
	return 0;
}
3638 
void btf_ext__free(struct btf_ext *btf_ext)
{
	/* accept NULL and ERR_PTR values so callers can free unconditionally */
	if (IS_ERR_OR_NULL(btf_ext))
		return;
	free(btf_ext->func_info.sec_idxs);
	free(btf_ext->line_info.sec_idxs);
	free(btf_ext->core_relo_info.sec_idxs);
	/* free both the native copy and any lazily-built swapped copy */
	free(btf_ext->data);
	free(btf_ext->data_swapped);
	free(btf_ext);
}
3650 
3651 struct btf_ext *btf_ext__new(const __u8 *data, __u32 size)
3652 {
3653 	struct btf_ext *btf_ext;
3654 	int err;
3655 
3656 	btf_ext = calloc(1, sizeof(struct btf_ext));
3657 	if (!btf_ext)
3658 		return libbpf_err_ptr(-ENOMEM);
3659 
3660 	btf_ext->data_size = size;
3661 	btf_ext->data = malloc(size);
3662 	if (!btf_ext->data) {
3663 		err = -ENOMEM;
3664 		goto done;
3665 	}
3666 	memcpy(btf_ext->data, data, size);
3667 
3668 	err = btf_ext_parse(btf_ext);
3669 
3670 done:
3671 	if (err) {
3672 		btf_ext__free(btf_ext);
3673 		return libbpf_err_ptr(err);
3674 	}
3675 
3676 	return btf_ext;
3677 }
3678 
static void *btf_ext_raw_data(const struct btf_ext *btf_ext_ro, bool swap_endian)
{
	/* cast away const: we only mutate the lazily-built swapped cache */
	struct btf_ext *btf_ext = (struct btf_ext *)btf_ext_ro;
	const __u32 data_sz = btf_ext->data_size;
	void *data;

	/* Return native data (always present) or swapped data if present */
	if (!swap_endian)
		return btf_ext->data;
	else if (btf_ext->data_swapped)
		return btf_ext->data_swapped;

	/* Recreate missing swapped data, then cache and return */
	data = calloc(1, data_sz);
	if (!data)
		return NULL;	/* caller maps NULL to -ENOMEM */
	memcpy(data, btf_ext->data, data_sz);

	/* swap info before the header: btf_ext_bswap_info() consults the
	 * header, which must still be in native order at that point
	 */
	btf_ext_bswap_info(btf_ext, data);
	btf_ext_bswap_hdr(data);
	btf_ext->data_swapped = data;
	return data;
}
3702 
3703 const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size)
3704 {
3705 	void *data;
3706 
3707 	data = btf_ext_raw_data(btf_ext, btf_ext->swapped_endian);
3708 	if (!data)
3709 		return errno = ENOMEM, NULL;
3710 
3711 	*size = btf_ext->data_size;
3712 	return data;
3713 }
3714 
3715 __attribute__((alias("btf_ext__raw_data")))
3716 const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size);
3717 
3718 enum btf_endianness btf_ext__endianness(const struct btf_ext *btf_ext)
3719 {
3720 	if (is_host_big_endian())
3721 		return btf_ext->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
3722 	else
3723 		return btf_ext->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
3724 }
3725 
3726 int btf_ext__set_endianness(struct btf_ext *btf_ext, enum btf_endianness endian)
3727 {
3728 	if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
3729 		return libbpf_err(-EINVAL);
3730 
3731 	btf_ext->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
3732 
3733 	if (!btf_ext->swapped_endian) {
3734 		free(btf_ext->data_swapped);
3735 		btf_ext->data_swapped = NULL;
3736 	}
3737 	return 0;
3738 }
3739 
3740 struct btf_dedup;
3741 
3742 static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
3743 static void btf_dedup_free(struct btf_dedup *d);
3744 static int btf_dedup_prep(struct btf_dedup *d);
3745 static int btf_dedup_strings(struct btf_dedup *d);
3746 static int btf_dedup_prim_types(struct btf_dedup *d);
3747 static int btf_dedup_struct_types(struct btf_dedup *d);
3748 static int btf_dedup_ref_types(struct btf_dedup *d);
3749 static int btf_dedup_resolve_fwds(struct btf_dedup *d);
3750 static int btf_dedup_compact_types(struct btf_dedup *d);
3751 static int btf_dedup_remap_types(struct btf_dedup *d);
3752 
3753 /*
3754  * Deduplicate BTF types and strings.
3755  *
3756  * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF
3757  * section with all BTF type descriptors and string data. It overwrites that
3758  * memory in-place with deduplicated types and strings without any loss of
3759  * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section
3760  * is provided, all the strings referenced from .BTF.ext section are honored
3761  * and updated to point to the right offsets after deduplication.
3762  *
3763  * If function returns with error, type/string data might be garbled and should
3764  * be discarded.
3765  *
3766  * More verbose and detailed description of both problem btf_dedup is solving,
3767  * as well as solution could be found at:
3768  * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
3769  *
3770  * Problem description and justification
3771  * =====================================
3772  *
3773  * BTF type information is typically emitted either as a result of conversion
3774  * from DWARF to BTF or directly by compiler. In both cases, each compilation
3775  * unit contains information about a subset of all the types that are used
3776  * in an application. These subsets are frequently overlapping and contain a lot
3777  * of duplicated information when later concatenated together into a single
3778  * binary. This algorithm ensures that each unique type is represented by single
3779  * BTF type descriptor, greatly reducing resulting size of BTF data.
3780  *
3781  * Compilation unit isolation and subsequent duplication of data is not the only
 * problem. The same type hierarchy (e.g., struct and all the types that struct
3783  * references) in different compilation units can be represented in BTF to
3784  * various degrees of completeness (or, rather, incompleteness) due to
3785  * struct/union forward declarations.
3786  *
3787  * Let's take a look at an example, that we'll use to better understand the
3788  * problem (and solution). Suppose we have two compilation units, each using
3789  * same `struct S`, but each of them having incomplete type information about
3790  * struct's fields:
3791  *
3792  * // CU #1:
3793  * struct S;
3794  * struct A {
3795  *	int a;
3796  *	struct A* self;
3797  *	struct S* parent;
3798  * };
3799  * struct B;
3800  * struct S {
3801  *	struct A* a_ptr;
3802  *	struct B* b_ptr;
3803  * };
3804  *
3805  * // CU #2:
3806  * struct S;
3807  * struct A;
3808  * struct B {
3809  *	int b;
3810  *	struct B* self;
3811  *	struct S* parent;
3812  * };
3813  * struct S {
3814  *	struct A* a_ptr;
3815  *	struct B* b_ptr;
3816  * };
3817  *
3818  * In case of CU #1, BTF data will know only that `struct B` exist (but no
3819  * more), but will know the complete type information about `struct A`. While
3820  * for CU #2, it will know full type information about `struct B`, but will
3821  * only know about forward declaration of `struct A` (in BTF terms, it will
3822  * have `BTF_KIND_FWD` type descriptor with name `B`).
3823  *
3824  * This compilation unit isolation means that it's possible that there is no
3825  * single CU with complete type information describing structs `S`, `A`, and
3826  * `B`. Also, we might get tons of duplicated and redundant type information.
3827  *
3828  * Additional complication we need to keep in mind comes from the fact that
3829  * types, in general, can form graphs containing cycles, not just DAGs.
3830  *
3831  * While algorithm does deduplication, it also merges and resolves type
 * information (unless disabled through `struct btf_dedup_opts`), whenever possible.
3833  * E.g., in the example above with two compilation units having partial type
3834  * information for structs `A` and `B`, the output of algorithm will emit
3835  * a single copy of each BTF type that describes structs `A`, `B`, and `S`
3836  * (as well as type information for `int` and pointers), as if they were defined
3837  * in a single compilation unit as:
3838  *
3839  * struct A {
3840  *	int a;
3841  *	struct A* self;
3842  *	struct S* parent;
3843  * };
3844  * struct B {
3845  *	int b;
3846  *	struct B* self;
3847  *	struct S* parent;
3848  * };
3849  * struct S {
3850  *	struct A* a_ptr;
3851  *	struct B* b_ptr;
3852  * };
3853  *
3854  * Algorithm summary
3855  * =================
3856  *
3857  * Algorithm completes its work in 7 separate passes:
3858  *
3859  * 1. Strings deduplication.
3860  * 2. Primitive types deduplication (int, enum, fwd).
3861  * 3. Struct/union types deduplication.
3862  * 4. Resolve unambiguous forward declarations.
3863  * 5. Reference types deduplication (pointers, typedefs, arrays, funcs, func
3864  *    protos, and const/volatile/restrict modifiers).
3865  * 6. Types compaction.
3866  * 7. Types remapping.
3867  *
3868  * Algorithm determines canonical type descriptor, which is a single
3869  * representative type for each truly unique type. This canonical type is the
3870  * one that will go into final deduplicated BTF type information. For
3871  * struct/unions, it is also the type that algorithm will merge additional type
3872  * information into (while resolving FWDs), as it discovers it from data in
3873  * other CUs. Each input BTF type eventually gets either mapped to itself, if
3874  * that type is canonical, or to some other type, if that type is equivalent
3875  * and was chosen as canonical representative. This mapping is stored in
3876  * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that
3877  * FWD type got resolved to.
3878  *
3879  * To facilitate fast discovery of canonical types, we also maintain canonical
3880  * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash
3881  * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types
3882  * that match that signature. With sufficiently good choice of type signature
3883  * hashing function, we can limit number of canonical types for each unique type
3884  * signature to a very small number, allowing to find canonical type for any
3885  * duplicated type very quickly.
3886  *
3887  * Struct/union deduplication is the most critical part and algorithm for
3888  * deduplicating structs/unions is described in greater details in comments for
3889  * `btf_dedup_is_equiv` function.
3890  */
int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
{
	struct btf_dedup *d;
	int err;

	if (!OPTS_VALID(opts, btf_dedup_opts))
		return libbpf_err(-EINVAL);

	d = btf_dedup_new(btf, opts);
	if (IS_ERR(d)) {
		pr_debug("btf_dedup_new failed: %ld\n", PTR_ERR(d));
		return libbpf_err(-EINVAL);
	}

	/* dedup rewrites type and string data in place, so raw read-only
	 * data must first be split out into modifiable buffers
	 */
	err = btf_ensure_modifiable(btf);
	if (err)
		goto done;

	/* passes run in a fixed order; each builds on results of previous
	 * ones (see the algorithm overview comment above)
	 */
	err = btf_dedup_prep(d);
	if (err) {
		pr_debug("btf_dedup_prep failed: %s\n", errstr(err));
		goto done;
	}
	err = btf_dedup_strings(d);
	if (err < 0) {
		pr_debug("btf_dedup_strings failed: %s\n", errstr(err));
		goto done;
	}
	err = btf_dedup_prim_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_prim_types failed: %s\n", errstr(err));
		goto done;
	}
	err = btf_dedup_struct_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_struct_types failed: %s\n", errstr(err));
		goto done;
	}
	err = btf_dedup_resolve_fwds(d);
	if (err < 0) {
		pr_debug("btf_dedup_resolve_fwds failed: %s\n", errstr(err));
		goto done;
	}
	err = btf_dedup_ref_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_ref_types failed: %s\n", errstr(err));
		goto done;
	}
	err = btf_dedup_compact_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_compact_types failed: %s\n", errstr(err));
		goto done;
	}
	err = btf_dedup_remap_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_remap_types failed: %s\n", errstr(err));
		goto done;
	}

done:
	btf_dedup_free(d);
	return libbpf_err(err);
}
3954 
3955 #define BTF_UNPROCESSED_ID ((__u32)-1)
3956 #define BTF_IN_PROGRESS_ID ((__u32)-2)
3957 
struct btf_dedup {
	/* .BTF section to be deduped in-place */
	struct btf *btf;
	/*
	 * Optional .BTF.ext section. When provided, any strings referenced
	 * from it will be taken into account when deduping strings
	 */
	struct btf_ext *btf_ext;
	/*
	 * This is a map from any type's signature hash to a list of possible
	 * canonical representative type candidates. Hash collisions are
	 * ignored, so even types of various kinds can share same list of
	 * candidates, which is fine because we rely on subsequent
	 * btf_xxx_equal() checks to authoritatively verify type equality.
	 */
	struct hashmap *dedup_table;
	/* Canonical types map */
	__u32 *map;
	/* Hypothetical mapping, used during type graph equivalence checks */
	__u32 *hypot_map;
	/* list of type IDs touched in hypot_map, enabling cheap reset */
	__u32 *hypot_list;
	/* number of used entries in hypot_list */
	size_t hypot_cnt;
	/* allocated capacity of hypot_list */
	size_t hypot_cap;
	/* Whether hypothetical mapping, if successful, would need to adjust
	 * already canonicalized types (due to a new forward declaration to
	 * concrete type resolution). In such case, during split BTF dedup
	 * candidate type would still be considered as different, because base
	 * BTF is considered to be immutable.
	 */
	bool hypot_adjust_canon;
	/* Various option modifying behavior of algorithm */
	struct btf_dedup_opts opts;
	/* temporary strings deduplication state */
	struct strset *strs_set;
};
3993 
/* Fold *value* into running hash state *h* (classic multiply-by-31 mix). */
static unsigned long hash_combine(unsigned long h, unsigned long value)
{
	unsigned long scaled = (h << 5) - h;	/* h * 31; unsigned wrap is defined */

	return scaled + value;
}
3998 
3999 #define for_each_dedup_cand(d, node, hash) \
4000 	hashmap__for_each_key_entry(d->dedup_table, node, hash)
4001 
/* Register *type_id* as a canonical-type candidate for signature *hash*. */
static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
{
	return hashmap__append(d->dedup_table, hash, type_id);
}
4006 
/* Record hypothetical mapping from_id -> to_id; from_id is also appended to
 * hypot_list so btf_dedup_clear_hypot_map() can cheaply undo all entries.
 */
static int btf_dedup_hypot_map_add(struct btf_dedup *d,
				   __u32 from_id, __u32 to_id)
{
	if (d->hypot_cnt == d->hypot_cap) {
		__u32 *new_list;

		/* grow capacity by 50%, but at least by 16 entries */
		d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
		new_list = libbpf_reallocarray(d->hypot_list, d->hypot_cap, sizeof(__u32));
		if (!new_list)
			return -ENOMEM;
		d->hypot_list = new_list;
	}
	d->hypot_list[d->hypot_cnt++] = from_id;
	d->hypot_map[from_id] = to_id;
	return 0;
}
4023 
4024 static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
4025 {
4026 	int i;
4027 
4028 	for (i = 0; i < d->hypot_cnt; i++)
4029 		d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
4030 	d->hypot_cnt = 0;
4031 	d->hypot_adjust_canon = false;
4032 }
4033 
/* Release all dedup state; does NOT free the underlying btf/btf_ext. */
static void btf_dedup_free(struct btf_dedup *d)
{
	hashmap__free(d->dedup_table);
	d->dedup_table = NULL;

	free(d->map);
	d->map = NULL;

	free(d->hypot_map);
	d->hypot_map = NULL;

	free(d->hypot_list);
	d->hypot_list = NULL;

	/* pointers above are NULLed defensively, though *d* is freed here */
	free(d);
}
4050 
/* Dedup table keys are already hash values, so hash the key to itself. */
static size_t btf_dedup_identity_hash_fn(long key, void *ctx)
{
	(void)ctx;	/* unused */
	return (size_t)key;
}
4055 
/* Constant hash: collapses every key into one bucket, forcing all lookups
 * through the equality callback (selected via the force_collisions option
 * in btf_dedup_new()).
 */
static size_t btf_dedup_collision_hash_fn(long key, void *ctx)
{
	(void)key;	/* deliberately ignored */
	(void)ctx;
	return 0;
}
4060 
/* Plain integer comparison; dedup table keys are compared directly. */
static bool btf_dedup_equal_fn(long k1, long k2, void *ctx)
{
	(void)ctx;	/* unused */
	return k1 == k2;
}
4065 
/* Allocate and initialize dedup state for *btf* (and optional .BTF.ext). */
static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts)
{
	struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
	hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
	int i, err = 0, type_cnt;

	if (!d)
		return ERR_PTR(-ENOMEM);

	/* force_collisions degrades the table to a single bucket, forcing
	 * every lookup through the full equality checks
	 */
	if (OPTS_GET(opts, force_collisions, false))
		hash_fn = btf_dedup_collision_hash_fn;

	d->btf = btf;
	d->btf_ext = OPTS_GET(opts, btf_ext, NULL);

	d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
	if (IS_ERR(d->dedup_table)) {
		err = PTR_ERR(d->dedup_table);
		/* don't let btf_dedup_free() operate on an ERR_PTR value */
		d->dedup_table = NULL;
		goto done;
	}

	type_cnt = btf__type_cnt(btf);
	d->map = malloc(sizeof(__u32) * type_cnt);
	if (!d->map) {
		err = -ENOMEM;
		goto done;
	}
	/* special BTF "void" type is made canonical immediately */
	d->map[0] = 0;
	for (i = 1; i < type_cnt; i++) {
		struct btf_type *t = btf_type_by_id(d->btf, i);

		/* VAR and DATASEC are never deduped and are self-canonical */
		if (btf_is_var(t) || btf_is_datasec(t))
			d->map[i] = i;
		else
			d->map[i] = BTF_UNPROCESSED_ID;
	}

	d->hypot_map = malloc(sizeof(__u32) * type_cnt);
	if (!d->hypot_map) {
		err = -ENOMEM;
		goto done;
	}
	for (i = 0; i < type_cnt; i++)
		d->hypot_map[i] = BTF_UNPROCESSED_ID;

done:
	if (err) {
		btf_dedup_free(d);
		return ERR_PTR(err);
	}

	return d;
}
4122 
4123 /*
4124  * Iterate over all possible places in .BTF and .BTF.ext that can reference
4125  * string and pass pointer to it to a provided callback `fn`.
4126  */
4127 static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *ctx)
4128 {
4129 	int i, r;
4130 
4131 	for (i = 0; i < d->btf->nr_types; i++) {
4132 		struct btf_field_iter it;
4133 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
4134 		__u32 *str_off;
4135 
4136 		r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
4137 		if (r)
4138 			return r;
4139 
4140 		while ((str_off = btf_field_iter_next(&it))) {
4141 			r = fn(str_off, ctx);
4142 			if (r)
4143 				return r;
4144 		}
4145 	}
4146 
4147 	if (!d->btf_ext)
4148 		return 0;
4149 
4150 	r = btf_ext_visit_str_offs(d->btf_ext, fn, ctx);
4151 	if (r)
4152 		return r;
4153 
4154 	return 0;
4155 }
4156 
/* Remap one string offset: reuse an identical base BTF string if one
 * exists, otherwise add the string to the dedup string set.
 */
static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx)
{
	struct btf_dedup *d = ctx;
	__u32 str_off = *str_off_ptr;
	const char *s;
	int off, err;

	/* don't touch empty string or string in main BTF */
	if (str_off == 0 || str_off < d->btf->start_str_off)
		return 0;

	s = btf__str_by_offset(d->btf, str_off);
	if (d->btf->base_btf) {
		/* for split BTF, prefer an identical string in base BTF */
		err = btf__find_str(d->btf->base_btf, s);
		if (err >= 0) {
			*str_off_ptr = err;
			return 0;
		}
		/* -ENOENT just means "not in base"; other errors are fatal */
		if (err != -ENOENT)
			return err;
	}

	off = strset__add_str(d->strs_set, s);
	if (off < 0)
		return off;

	/* strset offsets are relative to this BTF's string base offset */
	*str_off_ptr = d->btf->start_str_off + off;
	return 0;
}
4186 
/*
 * Dedup strings and filter out those that are not referenced from either .BTF
 * or .BTF.ext (if provided) sections.
 *
 * This is done by building index of all strings in BTF's string section,
 * then iterating over all entities that can reference strings (e.g., type
 * names, struct field names, .BTF.ext line info, etc) and marking corresponding
 * strings as used. After that all used strings are deduped and compacted into
 * sequential blob of memory and new offsets are calculated. Then all the string
 * references are iterated again and rewritten using new offsets.
 *
 * For split BTF, strings already present in base BTF are remapped to their
 * base offsets instead of being duplicated (see strs_dedup_remap_str_off).
 */
static int btf_dedup_strings(struct btf_dedup *d)
{
	int err;

	/* strings are deduped at most once per btf instance */
	if (d->btf->strs_deduped)
		return 0;

	d->strs_set = strset__new(BTF_MAX_STR_OFFSET, NULL, 0);
	if (IS_ERR(d->strs_set)) {
		err = PTR_ERR(d->strs_set);
		goto err_out;
	}

	if (!d->btf->base_btf) {
		/* insert empty string; we won't be looking it up during strings
		 * dedup, but it's good to have it for generic BTF string lookups
		 */
		err = strset__add_str(d->strs_set, "");
		if (err < 0)
			goto err_out;
	}

	/* remap string offsets */
	err = btf_for_each_str_off(d, strs_dedup_remap_str_off, d);
	if (err)
		goto err_out;

	/* replace BTF string data and hash with deduped ones; ownership of
	 * d->strs_set transfers to d->btf, so clear our reference
	 */
	strset__free(d->btf->strs_set);
	btf_hdr_update_str_len(d->btf, strset__data_size(d->strs_set));
	d->btf->strs_set = d->strs_set;
	d->strs_set = NULL;
	d->btf->strs_deduped = true;
	return 0;

err_out:
	strset__free(d->strs_set);
	d->strs_set = NULL;

	return err;
}
4239 
4240 /*
4241  * Calculate type signature hash of TYPEDEF, ignoring referenced type IDs,
4242  * as referenced type IDs equivalence is established separately during type
4243  * graph equivalence check algorithm.
4244  */
4245 static long btf_hash_typedef(struct btf_type *t)
4246 {
4247 	long h;
4248 
4249 	h = hash_combine(0, t->name_off);
4250 	h = hash_combine(h, t->info);
4251 	return h;
4252 }
4253 
4254 static long btf_hash_common(struct btf_type *t)
4255 {
4256 	long h;
4257 
4258 	h = hash_combine(0, t->name_off);
4259 	h = hash_combine(h, t->info);
4260 	h = hash_combine(h, t->size);
4261 	return h;
4262 }
4263 
/* Compare the fields common to all BTF kinds for exact equality. */
static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
{
	if (t1->name_off != t2->name_off)
		return false;
	if (t1->info != t2->info)
		return false;
	return t1->size == t2->size;
}
4270 
/* Check structural compatibility of two TYPEDEFs (referenced IDs ignored). */
static bool btf_equal_typedef(struct btf_type *t1, struct btf_type *t2)
{
	if (t1->name_off != t2->name_off)
		return false;
	return t1->info == t2->info;
}
4277 
4278 /* Calculate type signature hash of INT or TAG. */
4279 static long btf_hash_int_decl_tag(struct btf_type *t)
4280 {
4281 	__u32 info = *(__u32 *)(t + 1);
4282 	long h;
4283 
4284 	h = btf_hash_common(t);
4285 	h = hash_combine(h, info);
4286 	return h;
4287 }
4288 
4289 /* Check structural equality of two INTs or TAGs. */
4290 static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2)
4291 {
4292 	__u32 info1, info2;
4293 
4294 	if (!btf_equal_common(t1, t2))
4295 		return false;
4296 	info1 = *(__u32 *)(t1 + 1);
4297 	info2 = *(__u32 *)(t2 + 1);
4298 	return info1 == info2;
4299 }
4300 
4301 /* Calculate type signature hash of ENUM/ENUM64. */
4302 static long btf_hash_enum(struct btf_type *t)
4303 {
4304 	long h;
4305 
4306 	/* don't hash vlen, enum members and size to support enum fwd resolving */
4307 	h = hash_combine(0, t->name_off);
4308 	return h;
4309 }
4310 
4311 static bool btf_equal_enum_members(struct btf_type *t1, struct btf_type *t2)
4312 {
4313 	const struct btf_enum *m1, *m2;
4314 	__u16 vlen;
4315 	int i;
4316 
4317 	vlen = btf_vlen(t1);
4318 	m1 = btf_enum(t1);
4319 	m2 = btf_enum(t2);
4320 	for (i = 0; i < vlen; i++) {
4321 		if (m1->name_off != m2->name_off || m1->val != m2->val)
4322 			return false;
4323 		m1++;
4324 		m2++;
4325 	}
4326 	return true;
4327 }
4328 
4329 static bool btf_equal_enum64_members(struct btf_type *t1, struct btf_type *t2)
4330 {
4331 	const struct btf_enum64 *m1, *m2;
4332 	__u16 vlen;
4333 	int i;
4334 
4335 	vlen = btf_vlen(t1);
4336 	m1 = btf_enum64(t1);
4337 	m2 = btf_enum64(t2);
4338 	for (i = 0; i < vlen; i++) {
4339 		if (m1->name_off != m2->name_off || m1->val_lo32 != m2->val_lo32 ||
4340 		    m1->val_hi32 != m2->val_hi32)
4341 			return false;
4342 		m1++;
4343 		m2++;
4344 	}
4345 	return true;
4346 }
4347 
4348 /* Check structural equality of two ENUMs or ENUM64s. */
4349 static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
4350 {
4351 	if (!btf_equal_common(t1, t2))
4352 		return false;
4353 
4354 	/* t1 & t2 kinds are identical because of btf_equal_common */
4355 	if (btf_kind(t1) == BTF_KIND_ENUM)
4356 		return btf_equal_enum_members(t1, t2);
4357 	else
4358 		return btf_equal_enum64_members(t1, t2);
4359 }
4360 
4361 static inline bool btf_is_enum_fwd(struct btf_type *t)
4362 {
4363 	return btf_is_any_enum(t) && btf_vlen(t) == 0;
4364 }
4365 
4366 static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
4367 {
4368 	if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
4369 		return btf_equal_enum(t1, t2);
4370 	/* At this point either t1 or t2 or both are forward declarations, thus:
4371 	 * - skip comparing vlen because it is zero for forward declarations;
4372 	 * - skip comparing size to allow enum forward declarations
4373 	 *   to be compatible with enum64 full declarations;
4374 	 * - skip comparing kind for the same reason.
4375 	 */
4376 	return t1->name_off == t2->name_off &&
4377 	       btf_is_any_enum(t1) && btf_is_any_enum(t2);
4378 }
4379 
4380 /*
4381  * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
4382  * as referenced type IDs equivalence is established separately during type
4383  * graph equivalence check algorithm.
4384  */
4385 static long btf_hash_struct(struct btf_type *t)
4386 {
4387 	const struct btf_member *member = btf_members(t);
4388 	__u32 vlen = btf_vlen(t);
4389 	long h = btf_hash_common(t);
4390 	int i;
4391 
4392 	for (i = 0; i < vlen; i++) {
4393 		h = hash_combine(h, member->name_off);
4394 		h = hash_combine(h, member->offset);
4395 		/* no hashing of referenced type ID, it can be unresolved yet */
4396 		member++;
4397 	}
4398 	return h;
4399 }
4400 
4401 /*
4402  * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced
4403  * type IDs. This check is performed during type graph equivalence check and
4404  * referenced types equivalence is checked separately.
4405  */
4406 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
4407 {
4408 	const struct btf_member *m1, *m2;
4409 	__u16 vlen;
4410 	int i;
4411 
4412 	if (!btf_equal_common(t1, t2))
4413 		return false;
4414 
4415 	vlen = btf_vlen(t1);
4416 	m1 = btf_members(t1);
4417 	m2 = btf_members(t2);
4418 	for (i = 0; i < vlen; i++) {
4419 		if (m1->name_off != m2->name_off || m1->offset != m2->offset)
4420 			return false;
4421 		m1++;
4422 		m2++;
4423 	}
4424 	return true;
4425 }
4426 
4427 /*
4428  * Calculate type signature hash of ARRAY, including referenced type IDs,
4429  * under assumption that they were already resolved to canonical type IDs and
4430  * are not going to change.
4431  */
4432 static long btf_hash_array(struct btf_type *t)
4433 {
4434 	const struct btf_array *info = btf_array(t);
4435 	long h = btf_hash_common(t);
4436 
4437 	h = hash_combine(h, info->type);
4438 	h = hash_combine(h, info->index_type);
4439 	h = hash_combine(h, info->nelems);
4440 	return h;
4441 }
4442 
4443 /*
4444  * Check exact equality of two ARRAYs, taking into account referenced
4445  * type IDs, under assumption that they were already resolved to canonical
4446  * type IDs and are not going to change.
4447  * This function is called during reference types deduplication to compare
4448  * ARRAY to potential canonical representative.
4449  */
4450 static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
4451 {
4452 	const struct btf_array *info1, *info2;
4453 
4454 	if (!btf_equal_common(t1, t2))
4455 		return false;
4456 
4457 	info1 = btf_array(t1);
4458 	info2 = btf_array(t2);
4459 	return info1->type == info2->type &&
4460 	       info1->index_type == info2->index_type &&
4461 	       info1->nelems == info2->nelems;
4462 }
4463 
4464 /*
4465  * Check structural compatibility of two ARRAYs, ignoring referenced type
4466  * IDs. This check is performed during type graph equivalence check and
4467  * referenced types equivalence is checked separately.
4468  */
4469 static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
4470 {
4471 	if (!btf_equal_common(t1, t2))
4472 		return false;
4473 
4474 	return btf_array(t1)->nelems == btf_array(t2)->nelems;
4475 }
4476 
4477 /*
4478  * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
4479  * under assumption that they were already resolved to canonical type IDs and
4480  * are not going to change.
4481  */
4482 static long btf_hash_fnproto(struct btf_type *t)
4483 {
4484 	const struct btf_param *member = btf_params(t);
4485 	__u16 vlen = btf_vlen(t);
4486 	long h = btf_hash_common(t);
4487 	int i;
4488 
4489 	for (i = 0; i < vlen; i++) {
4490 		h = hash_combine(h, member->name_off);
4491 		h = hash_combine(h, member->type);
4492 		member++;
4493 	}
4494 	return h;
4495 }
4496 
4497 /*
4498  * Check exact equality of two FUNC_PROTOs, taking into account referenced
4499  * type IDs, under assumption that they were already resolved to canonical
4500  * type IDs and are not going to change.
4501  * This function is called during reference types deduplication to compare
4502  * FUNC_PROTO to potential canonical representative.
4503  */
4504 static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
4505 {
4506 	const struct btf_param *m1, *m2;
4507 	__u16 vlen;
4508 	int i;
4509 
4510 	if (!btf_equal_common(t1, t2))
4511 		return false;
4512 
4513 	vlen = btf_vlen(t1);
4514 	m1 = btf_params(t1);
4515 	m2 = btf_params(t2);
4516 	for (i = 0; i < vlen; i++) {
4517 		if (m1->name_off != m2->name_off || m1->type != m2->type)
4518 			return false;
4519 		m1++;
4520 		m2++;
4521 	}
4522 	return true;
4523 }
4524 
4525 /*
4526  * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
4527  * IDs. This check is performed during type graph equivalence check and
4528  * referenced types equivalence is checked separately.
4529  */
4530 static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
4531 {
4532 	const struct btf_param *m1, *m2;
4533 	__u16 vlen;
4534 	int i;
4535 
4536 	/* skip return type ID */
4537 	if (t1->name_off != t2->name_off || t1->info != t2->info)
4538 		return false;
4539 
4540 	vlen = btf_vlen(t1);
4541 	m1 = btf_params(t1);
4542 	m2 = btf_params(t2);
4543 	for (i = 0; i < vlen; i++) {
4544 		if (m1->name_off != m2->name_off)
4545 			return false;
4546 		m1++;
4547 		m2++;
4548 	}
4549 	return true;
4550 }
4551 
/* Prepare split BTF for deduplication by calculating hashes of base BTF's
 * types and initializing the rest of the state (canonical type mapping) for
 * the fixed base BTF part.
 *
 * Base BTF types are immutable, so each one is its own canonical
 * representative; they are hashed and registered as dedup candidates only
 * so that split BTF types can be deduped against them.
 */
static int btf_dedup_prep(struct btf_dedup *d)
{
	struct btf_type *t;
	int type_id;
	long h;

	/* nothing to prepare unless deduping split BTF on top of a base */
	if (!d->btf->base_btf)
		return 0;

	for (type_id = 1; type_id < d->btf->start_id; type_id++) {
		t = btf_type_by_id(d->btf, type_id);

		/* all base BTF types are self-canonical by definition */
		d->map[type_id] = type_id;

		switch (btf_kind(t)) {
		case BTF_KIND_VAR:
		case BTF_KIND_DATASEC:
			/* VAR and DATASEC are never hash/deduplicated */
			continue;
		case BTF_KIND_CONST:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_PTR:
		case BTF_KIND_FWD:
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_FUNC:
		case BTF_KIND_FLOAT:
		case BTF_KIND_TYPE_TAG:
			h = btf_hash_common(t);
			break;
		case BTF_KIND_INT:
		case BTF_KIND_DECL_TAG:
			h = btf_hash_int_decl_tag(t);
			break;
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
			h = btf_hash_enum(t);
			break;
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			h = btf_hash_struct(t);
			break;
		case BTF_KIND_ARRAY:
			h = btf_hash_array(t);
			break;
		case BTF_KIND_FUNC_PROTO:
			h = btf_hash_fnproto(t);
			break;
		default:
			pr_debug("unknown kind %d for type [%d]\n", btf_kind(t), type_id);
			return -EINVAL;
		}
		/* make base type discoverable as a dedup candidate */
		if (btf_dedup_table_add(d, h, type_id))
			return -ENOMEM;
	}

	return 0;
}
4615 
/*
 * Deduplicate primitive types, that can't reference other types, by calculating
 * their type signature hash and comparing them with any possible canonical
 * candidate. If no canonical candidate matches, type itself is marked as
 * canonical and is added into `btf_dedup->dedup_table` as another candidate.
 */
static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
{
	struct btf_type *t = btf_type_by_id(d->btf, type_id);
	struct hashmap_entry *hash_entry;
	struct btf_type *cand;
	/* if we don't find equivalent type, then we are canonical */
	__u32 new_id = type_id;
	__u32 cand_id;
	long h;

	switch (btf_kind(t)) {
	/* non-primitive kinds are handled by separate dedup passes */
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_ARRAY:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_FUNC:
	case BTF_KIND_FUNC_PROTO:
	case BTF_KIND_VAR:
	case BTF_KIND_DATASEC:
	case BTF_KIND_DECL_TAG:
	case BTF_KIND_TYPE_TAG:
		return 0;

	case BTF_KIND_INT:
		h = btf_hash_int_decl_tag(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_int_tag(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		h = btf_hash_enum(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_enum(t, cand)) {
				new_id = cand_id;
				break;
			}
			/* enum fwd <-> full enum compatibility handling */
			if (btf_compat_enum(t, cand)) {
				if (btf_is_enum_fwd(t)) {
					/* resolve fwd to full enum */
					new_id = cand_id;
					break;
				}
				/* resolve canonical enum fwd to full enum */
				d->map[cand_id] = type_id;
			}
		}
		break;

	case BTF_KIND_FWD:
	case BTF_KIND_FLOAT:
		h = btf_hash_common(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_common(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	default:
		return -EINVAL;
	}

	/* record mapping; self-canonical types become new dedup candidates */
	d->map[type_id] = new_id;
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return 0;
}
4706 
4707 static int btf_dedup_prim_types(struct btf_dedup *d)
4708 {
4709 	int i, err;
4710 
4711 	for (i = 0; i < d->btf->nr_types; i++) {
4712 		err = btf_dedup_prim_type(d, d->btf->start_id + i);
4713 		if (err)
4714 			return err;
4715 	}
4716 	return 0;
4717 }
4718 
4719 /*
4720  * Check whether type is already mapped into canonical one (could be to itself).
4721  */
4722 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
4723 {
4724 	return d->map[type_id] <= BTF_MAX_NR_TYPES;
4725 }
4726 
4727 /*
4728  * Resolve type ID into its canonical type ID, if any; otherwise return original
4729  * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
4730  * STRUCT/UNION link and resolve it into canonical type ID as well.
4731  */
4732 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
4733 {
4734 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
4735 		type_id = d->map[type_id];
4736 	return type_id;
4737 }
4738 
4739 /*
4740  * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
4741  * type ID.
4742  */
4743 static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
4744 {
4745 	__u32 orig_type_id = type_id;
4746 
4747 	if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
4748 		return type_id;
4749 
4750 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
4751 		type_id = d->map[type_id];
4752 
4753 	if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
4754 		return type_id;
4755 
4756 	return orig_type_id;
4757 }
4758 
4759 
4760 static inline __u16 btf_fwd_kind(struct btf_type *t)
4761 {
4762 	return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
4763 }
4764 
4765 static bool btf_dedup_identical_types(struct btf_dedup *d, __u32 id1, __u32 id2, int depth)
4766 {
4767 	struct btf_type *t1, *t2;
4768 	int k1, k2;
4769 recur:
4770 	t1 = btf_type_by_id(d->btf, id1);
4771 	t2 = btf_type_by_id(d->btf, id2);
4772 	if (depth <= 0) {
4773 		pr_debug("Reached depth limit for identical type comparison for '%s'/'%s'\n",
4774 			 btf__name_by_offset(d->btf, t1->name_off),
4775 			 btf__name_by_offset(d->btf, t2->name_off));
4776 		return false;
4777 	}
4778 
4779 	k1 = btf_kind(t1);
4780 	k2 = btf_kind(t2);
4781 	if (k1 != k2)
4782 		return false;
4783 
4784 	switch (k1) {
4785 	case BTF_KIND_UNKN: /* VOID */
4786 		return true;
4787 	case BTF_KIND_INT:
4788 		return btf_equal_int_tag(t1, t2);
4789 	case BTF_KIND_ENUM:
4790 	case BTF_KIND_ENUM64:
4791 		return btf_compat_enum(t1, t2);
4792 	case BTF_KIND_FWD:
4793 	case BTF_KIND_FLOAT:
4794 		return btf_equal_common(t1, t2);
4795 	case BTF_KIND_CONST:
4796 	case BTF_KIND_VOLATILE:
4797 	case BTF_KIND_RESTRICT:
4798 	case BTF_KIND_PTR:
4799 	case BTF_KIND_TYPEDEF:
4800 	case BTF_KIND_FUNC:
4801 	case BTF_KIND_TYPE_TAG:
4802 		if (t1->info != t2->info || t1->name_off != t2->name_off)
4803 			return false;
4804 		id1 = t1->type;
4805 		id2 = t2->type;
4806 		goto recur;
4807 	case BTF_KIND_ARRAY: {
4808 		struct btf_array *a1, *a2;
4809 
4810 		if (!btf_compat_array(t1, t2))
4811 			return false;
4812 
4813 		a1 = btf_array(t1);
4814 		a2 = btf_array(t1);
4815 
4816 		if (a1->index_type != a2->index_type &&
4817 		    !btf_dedup_identical_types(d, a1->index_type, a2->index_type, depth - 1))
4818 			return false;
4819 
4820 		if (a1->type != a2->type &&
4821 		    !btf_dedup_identical_types(d, a1->type, a2->type, depth - 1))
4822 			return false;
4823 
4824 		return true;
4825 	}
4826 	case BTF_KIND_STRUCT:
4827 	case BTF_KIND_UNION: {
4828 		const struct btf_member *m1, *m2;
4829 		int i, n;
4830 
4831 		if (!btf_shallow_equal_struct(t1, t2))
4832 			return false;
4833 
4834 		m1 = btf_members(t1);
4835 		m2 = btf_members(t2);
4836 		for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
4837 			if (m1->type == m2->type)
4838 				continue;
4839 			if (!btf_dedup_identical_types(d, m1->type, m2->type, depth - 1)) {
4840 				if (t1->name_off) {
4841 					pr_debug("%s '%s' size=%d vlen=%d id1[%u] id2[%u] shallow-equal but not identical for field#%d '%s'\n",
4842 						 k1 == BTF_KIND_STRUCT ? "STRUCT" : "UNION",
4843 						 btf__name_by_offset(d->btf, t1->name_off),
4844 						 t1->size, btf_vlen(t1), id1, id2, i,
4845 						 btf__name_by_offset(d->btf, m1->name_off));
4846 				}
4847 				return false;
4848 			}
4849 		}
4850 		return true;
4851 	}
4852 	case BTF_KIND_FUNC_PROTO: {
4853 		const struct btf_param *p1, *p2;
4854 		int i, n;
4855 
4856 		if (!btf_compat_fnproto(t1, t2))
4857 			return false;
4858 
4859 		if (t1->type != t2->type &&
4860 		    !btf_dedup_identical_types(d, t1->type, t2->type, depth - 1))
4861 			return false;
4862 
4863 		p1 = btf_params(t1);
4864 		p2 = btf_params(t2);
4865 		for (i = 0, n = btf_vlen(t1); i < n; i++, p1++, p2++) {
4866 			if (p1->type == p2->type)
4867 				continue;
4868 			if (!btf_dedup_identical_types(d, p1->type, p2->type, depth - 1))
4869 				return false;
4870 		}
4871 		return true;
4872 	}
4873 	default:
4874 		return false;
4875 	}
4876 }
4877 
4878 
4879 /*
4880  * Check equivalence of BTF type graph formed by candidate struct/union (we'll
4881  * call it "candidate graph" in this description for brevity) to a type graph
4882  * formed by (potential) canonical struct/union ("canonical graph" for brevity
4883  * here, though keep in mind that not all types in canonical graph are
4884  * necessarily canonical representatives themselves, some of them might be
4885  * duplicates or its uniqueness might not have been established yet).
4886  * Returns:
4887  *  - >0, if type graphs are equivalent;
4888  *  -  0, if not equivalent;
4889  *  - <0, on error.
4890  *
4891  * Algorithm performs side-by-side DFS traversal of both type graphs and checks
4892  * equivalence of BTF types at each step. If at any point BTF types in candidate
4893  * and canonical graphs are not compatible structurally, whole graphs are
4894  * incompatible. If types are structurally equivalent (i.e., all information
4895  * except referenced type IDs is exactly the same), a mapping from `canon_id` to
4896  * a `cand_id` is recoded in hypothetical mapping (`btf_dedup->hypot_map`).
4897  * If a type references other types, then those referenced types are checked
4898  * for equivalence recursively.
4899  *
4900  * During DFS traversal, if we find that for current `canon_id` type we
4901  * already have some mapping in hypothetical map, we check for two possible
4902  * situations:
4903  *   - `canon_id` is mapped to exactly the same type as `cand_id`. This will
4904  *     happen when type graphs have cycles. In this case we assume those two
4905  *     types are equivalent.
4906  *   - `canon_id` is mapped to different type. This is contradiction in our
4907  *     hypothetical mapping, because same graph in canonical graph corresponds
4908  *     to two different types in candidate graph, which for equivalent type
4909  *     graphs shouldn't happen. This condition terminates equivalence check
4910  *     with negative result.
4911  *
4912  * If type graphs traversal exhausts types to check and find no contradiction,
4913  * then type graphs are equivalent.
4914  *
4915  * When checking types for equivalence, there is one special case: FWD types.
4916  * If FWD type resolution is allowed and one of the types (either from canonical
4917  * or candidate graph) is FWD and other is STRUCT/UNION (depending on FWD's kind
4918  * flag) and their names match, hypothetical mapping is updated to point from
4919  * FWD to STRUCT/UNION. If graphs will be determined as equivalent successfully,
4920  * this mapping will be used to record FWD -> STRUCT/UNION mapping permanently.
4921  *
4922  * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
4923  * if there are two exactly named (or anonymous) structs/unions that are
4924  * compatible structurally, one of which has FWD field, while other is concrete
4925  * STRUCT/UNION, but according to C sources they are different structs/unions
4926  * that are referencing different types with the same name. This is extremely
4927  * unlikely to happen, but btf_dedup API allows to disable FWD resolution if
4928  * this logic is causing problems.
4929  *
4930  * Doing FWD resolution means that both candidate and/or canonical graphs can
4931  * consists of portions of the graph that come from multiple compilation units.
4932  * This is due to the fact that types within single compilation unit are always
4933  * deduplicated and FWDs are already resolved, if referenced struct/union
4934  * definition is available. So, if we had unresolved FWD and found corresponding
4935  * STRUCT/UNION, they will be from different compilation units. This
4936  * consequently means that when we "link" FWD to corresponding STRUCT/UNION,
4937  * type graph will likely have at least two different BTF types that describe
4938  * same type (e.g., most probably there will be two different BTF types for the
4939  * same 'int' primitive type) and could even have "overlapping" parts of type
4940  * graph that describe same subset of types.
4941  *
4942  * This in turn means that our assumption that each type in canonical graph
4943  * must correspond to exactly one type in candidate graph might not hold
4944  * anymore and will make it harder to detect contradictions using hypothetical
4945  * map. To handle this problem, we allow to follow FWD -> STRUCT/UNION
4946  * resolution only in canonical graph. FWDs in candidate graphs are never
4947  * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs
4948  * that can occur:
4949  *   - Both types in canonical and candidate graphs are FWDs. If they are
4950  *     structurally equivalent, then they can either be both resolved to the
4951  *     same STRUCT/UNION or not resolved at all. In both cases they are
4952  *     equivalent and there is no need to resolve FWD on candidate side.
4953  *   - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
4954  *     so nothing to resolve as well, algorithm will check equivalence anyway.
4955  *   - Type in canonical graph is FWD, while type in candidate is concrete
4956  *     STRUCT/UNION. In this case candidate graph comes from single compilation
4957  *     unit, so there is exactly one BTF type for each unique C type. After
4958  *     resolving FWD into STRUCT/UNION, there might be more than one BTF type
4959  *     in canonical graph mapping to single BTF type in candidate graph, but
4960  *     because hypothetical mapping maps from canonical to candidate types, it's
4961  *     alright, and we still maintain the property of having single `canon_id`
4962  *     mapping to single `cand_id` (there could be two different `canon_id`
4963  *     mapped to the same `cand_id`, but it's not contradictory).
4964  *   - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
4965  *     graph is FWD. In this case we are just going to check compatibility of
4966  *     STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
4967  *     assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
4968  *     a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
4969  *     turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
4970  *     canonical graph.
4971  */
static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
			      __u32 canon_id)
{
	struct btf_type *cand_type;
	struct btf_type *canon_type;
	__u32 hypot_type_id;
	__u16 cand_kind;
	__u16 canon_kind;
	int i, eq;

	/* if both resolve to the same canonical, they must be equivalent */
	if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
		return 1;

	/* follow FWD -> STRUCT/UNION resolution on the canonical side only */
	canon_id = resolve_fwd_id(d, canon_id);

	hypot_type_id = d->hypot_map[canon_id];
	if (hypot_type_id <= BTF_MAX_NR_TYPES) {
		/* canon_id already has a hypothesis; accept only a matching one */
		if (hypot_type_id == cand_id)
			return 1;
		/* In some cases compiler will generate different DWARF types
		 * for *identical* array type definitions and use them for
		 * different fields within the *same* struct. This breaks type
		 * equivalence check, which makes an assumption that candidate
		 * types sub-graph has a consistent and deduped-by-compiler
		 * types within a single CU. And similar situation can happen
		 * with struct/union sometimes, and event with pointers.
		 * So accommodate cases like this doing a structural
		 * comparison recursively, but avoiding being stuck in endless
		 * loops by limiting the depth up to which we check.
		 */
		if (btf_dedup_identical_types(d, hypot_type_id, cand_id, 16))
			return 1;
		return 0;
	}

	/* record hypothesis: canon_id's candidate counterpart is cand_id */
	if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
		return -ENOMEM;

	cand_type = btf_type_by_id(d->btf, cand_id);
	canon_type = btf_type_by_id(d->btf, canon_id);
	cand_kind = btf_kind(cand_type);
	canon_kind = btf_kind(canon_type);

	if (cand_type->name_off != canon_type->name_off)
		return 0;

	/* FWD <--> STRUCT/UNION equivalence check, if enabled */
	if ((cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
	    && cand_kind != canon_kind) {
		__u16 real_kind;
		__u16 fwd_kind;

		if (cand_kind == BTF_KIND_FWD) {
			real_kind = canon_kind;
			fwd_kind = btf_fwd_kind(cand_type);
		} else {
			real_kind = cand_kind;
			fwd_kind = btf_fwd_kind(canon_type);
			/* we'd need to resolve base FWD to STRUCT/UNION */
			if (fwd_kind == real_kind && canon_id < d->btf->start_id)
				d->hypot_adjust_canon = true;
		}
		return fwd_kind == real_kind;
	}

	if (cand_kind != canon_kind)
		return 0;

	switch (cand_kind) {
	case BTF_KIND_INT:
		return btf_equal_int_tag(cand_type, canon_type);

	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		return btf_compat_enum(cand_type, canon_type);

	case BTF_KIND_FWD:
	case BTF_KIND_FLOAT:
		return btf_equal_common(cand_type, canon_type);

	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_TYPE_TAG:
		if (cand_type->info != canon_type->info)
			return 0;
		/* equivalence reduces to the referenced types' equivalence */
		return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);

	case BTF_KIND_ARRAY: {
		const struct btf_array *cand_arr, *canon_arr;

		if (!btf_compat_array(cand_type, canon_type))
			return 0;
		cand_arr = btf_array(cand_type);
		canon_arr = btf_array(canon_type);
		eq = btf_dedup_is_equiv(d, cand_arr->index_type, canon_arr->index_type);
		if (eq <= 0)
			return eq;
		return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
	}

	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *cand_m, *canon_m;
		__u16 vlen;

		if (!btf_shallow_equal_struct(cand_type, canon_type))
			return 0;
		vlen = btf_vlen(cand_type);
		cand_m = btf_members(cand_type);
		canon_m = btf_members(canon_type);
		for (i = 0; i < vlen; i++) {
			eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
			if (eq <= 0) {
				if (cand_type->name_off) {
					pr_debug("%s '%s' size=%d vlen=%d cand_id[%u] canon_id[%u] shallow-equal but not equiv for field#%d '%s': %d\n",
						 cand_kind == BTF_KIND_STRUCT ? "STRUCT" : "UNION",
						 btf__name_by_offset(d->btf, cand_type->name_off),
						 cand_type->size, vlen, cand_id, canon_id, i,
						 btf__name_by_offset(d->btf, cand_m->name_off), eq);
				}
				return eq;
			}
			cand_m++;
			canon_m++;
		}

		return 1;
	}

	case BTF_KIND_FUNC_PROTO: {
		const struct btf_param *cand_p, *canon_p;
		__u16 vlen;

		if (!btf_compat_fnproto(cand_type, canon_type))
			return 0;
		/* return type first, then each parameter type */
		eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
		if (eq <= 0)
			return eq;
		vlen = btf_vlen(cand_type);
		cand_p = btf_params(cand_type);
		canon_p = btf_params(canon_type);
		for (i = 0; i < vlen; i++) {
			eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
			if (eq <= 0)
				return eq;
			cand_p++;
			canon_p++;
		}
		return 1;
	}

	default:
		return -EINVAL;
	}
	return 0;
}
5133 
5134 /*
5135  * Use hypothetical mapping, produced by successful type graph equivalence
5136  * check, to augment existing struct/union canonical mapping, where possible.
5137  *
5138  * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
5139  * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional:
5140  * it doesn't matter if FWD type was part of canonical graph or candidate one,
5141  * we are recording the mapping anyway. As opposed to carefulness required
5142  * for struct/union correspondence mapping (described below), for FWD resolution
5143  * it's not important, as by the time that FWD type (reference type) will be
5144  * deduplicated all structs/unions will be deduped already anyway.
5145  *
5146  * Recording STRUCT/UNION mapping is purely a performance optimization and is
5147  * not required for correctness. It needs to be done carefully to ensure that
5148  * struct/union from candidate's type graph is not mapped into corresponding
5149  * struct/union from canonical type graph that itself hasn't been resolved into
5150  * canonical representative. The only guarantee we have is that canonical
5151  * struct/union was determined as canonical and that won't change. But any
5152  * types referenced through that struct/union fields could have been not yet
5153  * resolved, so in case like that it's too early to establish any kind of
5154  * correspondence between structs/unions.
5155  *
 * No canonical correspondence is derived for primitive types (they are
 * already fully deduplicated anyway) or reference types (they rely on
5158  * stability of struct/union canonical relationship for equivalence checks).
5159  */
5160 static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
5161 {
5162 	__u32 canon_type_id, targ_type_id;
5163 	__u16 t_kind, c_kind;
5164 	__u32 t_id, c_id;
5165 	int i;
5166 
5167 	for (i = 0; i < d->hypot_cnt; i++) {
5168 		canon_type_id = d->hypot_list[i];
5169 		targ_type_id = d->hypot_map[canon_type_id];
5170 		t_id = resolve_type_id(d, targ_type_id);
5171 		c_id = resolve_type_id(d, canon_type_id);
5172 		t_kind = btf_kind(btf__type_by_id(d->btf, t_id));
5173 		c_kind = btf_kind(btf__type_by_id(d->btf, c_id));
5174 		/*
5175 		 * Resolve FWD into STRUCT/UNION.
5176 		 * It's ok to resolve FWD into STRUCT/UNION that's not yet
5177 		 * mapped to canonical representative (as opposed to
5178 		 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
5179 		 * eventually that struct is going to be mapped and all resolved
5180 		 * FWDs will automatically resolve to correct canonical
5181 		 * representative. This will happen before ref type deduping,
5182 		 * which critically depends on stability of these mapping. This
5183 		 * stability is not a requirement for STRUCT/UNION equivalence
5184 		 * checks, though.
5185 		 */
5186 
5187 		/* if it's the split BTF case, we still need to point base FWD
5188 		 * to STRUCT/UNION in a split BTF, because FWDs from split BTF
5189 		 * will be resolved against base FWD. If we don't point base
5190 		 * canonical FWD to the resolved STRUCT/UNION, then all the
5191 		 * FWDs in split BTF won't be correctly resolved to a proper
5192 		 * STRUCT/UNION.
5193 		 */
5194 		if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
5195 			d->map[c_id] = t_id;
5196 
5197 		/* if graph equivalence determined that we'd need to adjust
5198 		 * base canonical types, then we need to only point base FWDs
5199 		 * to STRUCTs/UNIONs and do no more modifications. For all
5200 		 * other purposes the type graphs were not equivalent.
5201 		 */
5202 		if (d->hypot_adjust_canon)
5203 			continue;
5204 
5205 		if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
5206 			d->map[t_id] = c_id;
5207 
5208 		if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
5209 		    c_kind != BTF_KIND_FWD &&
5210 		    is_type_mapped(d, c_id) &&
5211 		    !is_type_mapped(d, t_id)) {
5212 			/*
5213 			 * as a perf optimization, we can map struct/union
5214 			 * that's part of type graph we just verified for
5215 			 * equivalence. We can do that for struct/union that has
5216 			 * canonical representative only, though.
5217 			 */
5218 			d->map[t_id] = c_id;
5219 		}
5220 	}
5221 }
5222 
5223 static inline long btf_hash_by_kind(struct btf_type *t, __u16 kind)
5224 {
5225 	if (kind == BTF_KIND_TYPEDEF)
5226 		return btf_hash_typedef(t);
5227 	else
5228 		return btf_hash_struct(t);
5229 }
5230 
5231 static inline bool btf_equal_by_kind(struct btf_type *t1, struct btf_type *t2, __u16 kind)
5232 {
5233 	if (kind == BTF_KIND_TYPEDEF)
5234 		return btf_equal_typedef(t1, t2);
5235 	else
5236 		return btf_shallow_equal_struct(t1, t2);
5237 }
5238 
5239 /*
5240  * Deduplicate struct/union and typedef types.
5241  *
5242  * For each struct/union type its type signature hash is calculated, taking
5243  * into account type's name, size, number, order and names of fields, but
5244  * ignoring type ID's referenced from fields, because they might not be deduped
5245  * completely until after reference types deduplication phase. For each typedef
 * type, the hash is computed based on the type's name and size. This type hash
5247  * is used to iterate over all potential canonical types, sharing same hash.
5248  * For each canonical candidate we check whether type graphs that they form
5249  * (through referenced types in fields and so on) are equivalent using algorithm
5250  * implemented in `btf_dedup_is_equiv`. If such equivalence is found and
5251  * BTF_KIND_FWD resolution is allowed, then hypothetical mapping
5252  * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence
5253  * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to
5254  * potentially map other structs/unions to their canonical representatives,
5255  * if such relationship hasn't yet been established. This speeds up algorithm
5256  * by eliminating some of the duplicate work.
5257  *
5258  * If no matching canonical representative was found, struct/union is marked
5259  * as canonical for itself and is added into btf_dedup->dedup_table hash map
5260  * for further look ups.
5261  */
5262 static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
5263 {
5264 	struct btf_type *cand_type, *t;
5265 	struct hashmap_entry *hash_entry;
5266 	/* if we don't find equivalent type, then we are canonical */
5267 	__u32 new_id = type_id;
5268 	__u16 kind;
5269 	long h;
5270 
5271 	/* already deduped or is in process of deduping (loop detected) */
5272 	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
5273 		return 0;
5274 
5275 	t = btf_type_by_id(d->btf, type_id);
5276 	kind = btf_kind(t);
5277 
5278 	if (kind != BTF_KIND_STRUCT &&
5279 		kind != BTF_KIND_UNION &&
5280 		kind != BTF_KIND_TYPEDEF)
5281 		return 0;
5282 
5283 	h = btf_hash_by_kind(t, kind);
5284 	for_each_dedup_cand(d, hash_entry, h) {
5285 		__u32 cand_id = hash_entry->value;
5286 		int eq;
5287 
5288 		/*
5289 		 * Even though btf_dedup_is_equiv() checks for
5290 		 * btf_equal_by_kind() internally when checking two
5291 		 * structs (unions) or typedefs for equivalence, we need to guard here
5292 		 * from picking matching FWD type as a dedup candidate.
5293 		 * This can happen due to hash collision. In such case just
5294 		 * relying on btf_dedup_is_equiv() would lead to potentially
5295 		 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
5296 		 * FWD and compatible STRUCT/UNION are considered equivalent.
5297 		 */
5298 		cand_type = btf_type_by_id(d->btf, cand_id);
5299 		if (!btf_equal_by_kind(t, cand_type, kind))
5300 			continue;
5301 
5302 		btf_dedup_clear_hypot_map(d);
5303 		eq = btf_dedup_is_equiv(d, type_id, cand_id);
5304 		if (eq < 0)
5305 			return eq;
5306 		if (!eq)
5307 			continue;
5308 		btf_dedup_merge_hypot_map(d);
5309 		if (d->hypot_adjust_canon) /* not really equivalent */
5310 			continue;
5311 		new_id = cand_id;
5312 		break;
5313 	}
5314 
5315 	d->map[type_id] = new_id;
5316 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
5317 		return -ENOMEM;
5318 
5319 	return 0;
5320 }
5321 
5322 static int btf_dedup_struct_types(struct btf_dedup *d)
5323 {
5324 	int i, err;
5325 
5326 	for (i = 0; i < d->btf->nr_types; i++) {
5327 		err = btf_dedup_struct_type(d, d->btf->start_id + i);
5328 		if (err)
5329 			return err;
5330 	}
5331 	return 0;
5332 }
5333 
5334 /*
5335  * Deduplicate reference type.
5336  *
5337  * Once all primitive, struct/union and typedef types got deduplicated, we can easily
5338  * deduplicate all other (reference) BTF types. This is done in two steps:
5339  *
5340  * 1. Resolve all referenced type IDs into their canonical type IDs. This
5341  * resolution can be done either immediately for primitive, struct/union, and typedef
5342  * types (because they were deduped in previous two phases) or recursively for
5343  * reference types. Recursion will always terminate at either primitive or
5344  * struct/union and typedef types, at which point we can "unwind" chain of reference
5345  * types one by one. There is no danger of encountering cycles in C, as the only way to
5346  * form a type cycle is through struct or union types. Go can form such cycles through
5347  * typedef. Thus, any chain of reference types, even those taking part in a type cycle,
5348  * will inevitably reach a struct/union or typedef type at some point.
5349  *
5350  * 2. Once all referenced type IDs are resolved into canonical ones, BTF type
5351  * becomes "stable", in the sense that no further deduplication will cause
5352  * any changes to it. With that, it's now possible to calculate type's signature
5353  * hash (this time taking into account referenced type IDs) and loop over all
5354  * potential canonical representatives. If no match was found, current type
5355  * will become canonical representative of itself and will be added into
5356  * btf_dedup->dedup_table as another possible canonical representative.
5357  */
static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
{
	struct hashmap_entry *hash_entry;
	/* if we don't find equivalent type, then we are representative type */
	__u32 new_id = type_id, cand_id;
	struct btf_type *t, *cand;
	int ref_type_id;
	long h;

	/* this type is currently being processed up the call stack, so its
	 * reference chain forms a cycle that cannot be resolved
	 */
	if (d->map[type_id] == BTF_IN_PROGRESS_ID)
		return -ELOOP;
	/* already deduped; return its final (canonical) type ID */
	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
		return resolve_type_id(d, type_id);

	t = btf_type_by_id(d->btf, type_id);
	/* mark in-progress so recursive calls can detect cycles */
	d->map[type_id] = BTF_IN_PROGRESS_ID;

	switch (btf_kind(t)) {
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_FUNC:
	case BTF_KIND_TYPE_TAG:
		/* single referenced type: canonicalize it first (recursively),
		 * then look for an identical type in the dedup table
		 */
		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		h = btf_hash_common(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_common(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	case BTF_KIND_DECL_TAG:
		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		/* DECL_TAG uses dedicated hash/equality helpers (presumably
		 * covering its extra per-kind data — see btf_hash_int_decl_tag)
		 */
		h = btf_hash_int_decl_tag(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_int_tag(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	case BTF_KIND_ARRAY: {
		struct btf_array *info = btf_array(t);

		/* both element type and index type need canonicalization */
		ref_type_id = btf_dedup_ref_type(d, info->type);
		if (ref_type_id < 0)
			return ref_type_id;
		info->type = ref_type_id;

		ref_type_id = btf_dedup_ref_type(d, info->index_type);
		if (ref_type_id < 0)
			return ref_type_id;
		info->index_type = ref_type_id;

		h = btf_hash_array(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_array(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;
	}

	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *param;
		__u16 vlen;
		int i;

		/* canonicalize return type, then each parameter type in turn */
		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		vlen = btf_vlen(t);
		param = btf_params(t);
		for (i = 0; i < vlen; i++) {
			ref_type_id = btf_dedup_ref_type(d, param->type);
			if (ref_type_id < 0)
				return ref_type_id;
			param->type = ref_type_id;
			param++;
		}

		h = btf_hash_fnproto(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_fnproto(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;
	}

	default:
		return -EINVAL;
	}

	d->map[type_id] = new_id;
	/* no match found: register this type as canonical for future look-ups */
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return new_id;
}
5482 
5483 static int btf_dedup_ref_types(struct btf_dedup *d)
5484 {
5485 	int i, err;
5486 
5487 	for (i = 0; i < d->btf->nr_types; i++) {
5488 		err = btf_dedup_ref_type(d, d->btf->start_id + i);
5489 		if (err < 0)
5490 			return err;
5491 	}
5492 	/* we won't need d->dedup_table anymore */
5493 	hashmap__free(d->dedup_table);
5494 	d->dedup_table = NULL;
5495 	return 0;
5496 }
5497 
5498 /*
5499  * Collect a map from type names to type ids for all canonical structs
5500  * and unions. If the same name is shared by several canonical types
5501  * use a special value 0 to indicate this fact.
5502  */
5503 static int btf_dedup_fill_unique_names_map(struct btf_dedup *d, struct hashmap *names_map)
5504 {
5505 	__u32 nr_types = btf__type_cnt(d->btf);
5506 	struct btf_type *t;
5507 	__u32 type_id;
5508 	__u16 kind;
5509 	int err;
5510 
5511 	/*
5512 	 * Iterate over base and split module ids in order to get all
5513 	 * available structs in the map.
5514 	 */
5515 	for (type_id = 1; type_id < nr_types; ++type_id) {
5516 		t = btf_type_by_id(d->btf, type_id);
5517 		kind = btf_kind(t);
5518 
5519 		if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
5520 			continue;
5521 
5522 		/* Skip non-canonical types */
5523 		if (type_id != d->map[type_id])
5524 			continue;
5525 
5526 		err = hashmap__add(names_map, t->name_off, type_id);
5527 		if (err == -EEXIST)
5528 			err = hashmap__set(names_map, t->name_off, 0, NULL, NULL);
5529 
5530 		if (err)
5531 			return err;
5532 	}
5533 
5534 	return 0;
5535 }
5536 
5537 static int btf_dedup_resolve_fwd(struct btf_dedup *d, struct hashmap *names_map, __u32 type_id)
5538 {
5539 	struct btf_type *t = btf_type_by_id(d->btf, type_id);
5540 	enum btf_fwd_kind fwd_kind = btf_kflag(t);
5541 	__u16 cand_kind, kind = btf_kind(t);
5542 	struct btf_type *cand_t;
5543 	uintptr_t cand_id;
5544 
5545 	if (kind != BTF_KIND_FWD)
5546 		return 0;
5547 
5548 	/* Skip if this FWD already has a mapping */
5549 	if (type_id != d->map[type_id])
5550 		return 0;
5551 
5552 	if (!hashmap__find(names_map, t->name_off, &cand_id))
5553 		return 0;
5554 
5555 	/* Zero is a special value indicating that name is not unique */
5556 	if (!cand_id)
5557 		return 0;
5558 
5559 	cand_t = btf_type_by_id(d->btf, cand_id);
5560 	cand_kind = btf_kind(cand_t);
5561 	if ((cand_kind == BTF_KIND_STRUCT && fwd_kind != BTF_FWD_STRUCT) ||
5562 	    (cand_kind == BTF_KIND_UNION && fwd_kind != BTF_FWD_UNION))
5563 		return 0;
5564 
5565 	d->map[type_id] = cand_id;
5566 
5567 	return 0;
5568 }
5569 
5570 /*
5571  * Resolve unambiguous forward declarations.
5572  *
5573  * The lion's share of all FWD declarations is resolved during
5574  * `btf_dedup_struct_types` phase when different type graphs are
5575  * compared against each other. However, if in some compilation unit a
5576  * FWD declaration is not a part of a type graph compared against
5577  * another type graph that declaration's canonical type would not be
5578  * changed. Example:
5579  *
5580  * CU #1:
5581  *
5582  * struct foo;
5583  * struct foo *some_global;
5584  *
5585  * CU #2:
5586  *
5587  * struct foo { int u; };
5588  * struct foo *another_global;
5589  *
5590  * After `btf_dedup_struct_types` the BTF looks as follows:
5591  *
5592  * [1] STRUCT 'foo' size=4 vlen=1 ...
5593  * [2] INT 'int' size=4 ...
5594  * [3] PTR '(anon)' type_id=1
5595  * [4] FWD 'foo' fwd_kind=struct
5596  * [5] PTR '(anon)' type_id=4
5597  *
5598  * This pass assumes that such FWD declarations should be mapped to
5599  * structs or unions with identical name in case if the name is not
5600  * ambiguous.
5601  */
5602 static int btf_dedup_resolve_fwds(struct btf_dedup *d)
5603 {
5604 	int i, err;
5605 	struct hashmap *names_map;
5606 
5607 	names_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
5608 	if (IS_ERR(names_map))
5609 		return PTR_ERR(names_map);
5610 
5611 	err = btf_dedup_fill_unique_names_map(d, names_map);
5612 	if (err < 0)
5613 		goto exit;
5614 
5615 	for (i = 0; i < d->btf->nr_types; i++) {
5616 		err = btf_dedup_resolve_fwd(d, names_map, d->btf->start_id + i);
5617 		if (err < 0)
5618 			break;
5619 	}
5620 
5621 exit:
5622 	hashmap__free(names_map);
5623 	return err;
5624 }
5625 
5626 /*
5627  * Compact types.
5628  *
5629  * After we established for each type its corresponding canonical representative
5630  * type, we now can eliminate types that are not canonical and leave only
 * canonical ones laid out sequentially in memory by copying them over
5632  * duplicates. During compaction btf_dedup->hypot_map array is reused to store
5633  * a map from original type ID to a new compacted type ID, which will be used
5634  * during next phase to "fix up" type IDs, referenced from struct/union and
5635  * reference types.
5636  */
static int btf_dedup_compact_types(struct btf_dedup *d)
{
	__u32 *new_offs;
	__u32 next_type_id = d->btf->start_id;	/* next compacted type ID to hand out */
	const struct btf_type *t;
	void *p;
	int i, id, len;

	/* we are going to reuse hypot_map to store compaction remapping */
	d->hypot_map[0] = 0;
	/* base BTF types are not renumbered */
	for (id = 1; id < d->btf->start_id; id++)
		d->hypot_map[id] = id;
	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++)
		d->hypot_map[id] = BTF_UNPROCESSED_ID;

	p = d->btf->types_data;

	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) {
		/* only canonical (self-mapped) types survive compaction */
		if (d->map[id] != id)
			continue;

		t = btf__type_by_id(d->btf, id);
		len = btf_type_size(d->btf, t);
		if (len < 0)
			return len;

		/* memmove, not memcpy: the destination never passes the
		 * source, but the two ranges can overlap when few or no
		 * preceding types were dropped
		 */
		memmove(p, t, len);
		d->hypot_map[id] = next_type_id;
		d->btf->type_offs[next_type_id - d->btf->start_id] = p - d->btf->types_data;
		p += len;
		next_type_id++;
	}

	/* shrink struct btf's internal types index and update btf_header */
	d->btf->nr_types = next_type_id - d->btf->start_id;
	d->btf->type_offs_cap = d->btf->nr_types;
	d->btf->hdr.type_len = p - d->btf->types_data;
	new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap,
				       sizeof(*new_offs));
	if (d->btf->type_offs_cap && !new_offs)
		return -ENOMEM;
	d->btf->type_offs = new_offs;
	/* sections following type data (optional kind layout, then strings)
	 * shift down so they stay contiguous with the shrunken type data
	 */
	if (d->btf->layout)
		d->btf->hdr.layout_off = d->btf->hdr.type_off + d->btf->hdr.type_len;
	d->btf->hdr.str_off = d->btf->hdr.type_off + d->btf->hdr.type_len + d->btf->hdr.layout_len;
	d->btf->raw_size = d->btf->hdr.hdr_len + d->btf->hdr.type_off + d->btf->hdr.type_len +
			   d->btf->hdr.layout_len + d->btf->hdr.str_len;
	return 0;
}
5687 
5688 /*
5689  * Figure out final (deduplicated and compacted) type ID for provided original
5690  * `type_id` by first resolving it into corresponding canonical type ID and
5691  * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
5692  * which is populated during compaction phase.
5693  */
5694 static int btf_dedup_remap_type_id(__u32 *type_id, void *ctx)
5695 {
5696 	struct btf_dedup *d = ctx;
5697 	__u32 resolved_type_id, new_type_id;
5698 
5699 	resolved_type_id = resolve_type_id(d, *type_id);
5700 	new_type_id = d->hypot_map[resolved_type_id];
5701 	if (new_type_id > BTF_MAX_NR_TYPES)
5702 		return -EINVAL;
5703 
5704 	*type_id = new_type_id;
5705 	return 0;
5706 }
5707 
5708 /*
5709  * Remap referenced type IDs into deduped type IDs.
5710  *
5711  * After BTF types are deduplicated and compacted, their final type IDs may
5712  * differ from original ones. The map from original to a corresponding
5713  * deduped type ID is stored in btf_dedup->hypot_map and is populated during
5714  * compaction phase. During remapping phase we are rewriting all type IDs
5715  * referenced from any BTF type (e.g., struct fields, func proto args, etc) to
5716  * their final deduped type IDs.
5717  */
5718 static int btf_dedup_remap_types(struct btf_dedup *d)
5719 {
5720 	int i, r;
5721 
5722 	for (i = 0; i < d->btf->nr_types; i++) {
5723 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
5724 		struct btf_field_iter it;
5725 		__u32 *type_id;
5726 
5727 		r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
5728 		if (r)
5729 			return r;
5730 
5731 		while ((type_id = btf_field_iter_next(&it))) {
5732 			__u32 resolved_id, new_id;
5733 
5734 			resolved_id = resolve_type_id(d, *type_id);
5735 			new_id = d->hypot_map[resolved_id];
5736 			if (new_id > BTF_MAX_NR_TYPES)
5737 				return -EINVAL;
5738 
5739 			*type_id = new_id;
5740 		}
5741 	}
5742 
5743 	if (!d->btf_ext)
5744 		return 0;
5745 
5746 	r = btf_ext_visit_type_ids(d->btf_ext, btf_dedup_remap_type_id, d);
5747 	if (r)
5748 		return r;
5749 
5750 	return 0;
5751 }
5752 
5753 /*
5754  * Probe few well-known locations for vmlinux kernel image and try to load BTF
5755  * data out of it to use for target BTF.
5756  */
struct btf *btf__load_vmlinux_btf(void)
{
	const char *sysfs_btf_path = "/sys/kernel/btf/vmlinux";
	/* fall back locations, trying to find vmlinux on disk */
	const char *locations[] = {
		"/boot/vmlinux-%1$s",
		"/lib/modules/%1$s/vmlinux-%1$s",
		"/lib/modules/%1$s/build/vmlinux",
		"/usr/lib/modules/%1$s/kernel/vmlinux",
		"/usr/lib/debug/boot/vmlinux-%1$s",
		"/usr/lib/debug/boot/vmlinux-%1$s.debug",
		"/usr/lib/debug/lib/modules/%1$s/vmlinux",
	};
	char path[PATH_MAX + 1];
	struct utsname buf;
	struct btf *btf;
	int i, err;

	/* is canonical sysfs location accessible? */
	if (faccessat(AT_FDCWD, sysfs_btf_path, F_OK, AT_EACCESS) < 0) {
		pr_warn("kernel BTF is missing at '%s', was CONFIG_DEBUG_INFO_BTF enabled?\n",
			sysfs_btf_path);
	} else {
		/* try cheaper mmap-based parsing first, fall back to full parse */
		btf = btf_parse_raw_mmap(sysfs_btf_path, NULL);
		if (IS_ERR(btf))
			btf = btf__parse(sysfs_btf_path, NULL);

		/* NOTE(review): this relies on btf__parse() returning NULL and
		 * setting errno on failure — confirm against libbpf error
		 * conventions for this function.
		 */
		if (!btf) {
			err = -errno;
			pr_warn("failed to read kernel BTF from '%s': %s\n",
				sysfs_btf_path, errstr(err));
			return libbpf_err_ptr(err);
		}
		pr_debug("loaded kernel BTF from '%s'\n", sysfs_btf_path);
		return btf;
	}

	/* try fallback locations */
	uname(&buf);
	for (i = 0; i < ARRAY_SIZE(locations); i++) {
		/* %1$s in each template is the kernel release string */
		snprintf(path, PATH_MAX, locations[i], buf.release);

		if (faccessat(AT_FDCWD, path, R_OK, AT_EACCESS))
			continue;

		btf = btf__parse(path, NULL);
		err = libbpf_get_error(btf);
		pr_debug("loading kernel BTF '%s': %s\n", path, errstr(err));
		if (err)
			continue;

		return btf;
	}

	pr_warn("failed to find valid kernel BTF\n");
	return libbpf_err_ptr(-ESRCH);
}
5814 
/* alias of btf__load_vmlinux_btf() — presumably kept for API backwards
 * compatibility with existing libbpf users; confirm against libbpf.map
 */
struct btf *libbpf_find_kernel_btf(void) __attribute__((alias("btf__load_vmlinux_btf")));
5816 
/* Load a kernel module's BTF from /sys/kernel/btf/<module_name>, parsed as
 * split BTF on top of the provided vmlinux BTF.
 */
struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf)
{
	char path[80];

	snprintf(path, sizeof(path), "/sys/kernel/btf/%s", module_name);

	return btf__parse_split(path, vmlinux_btf);
}
5824 
/* Invoke visit() on every type ID referenced from .BTF.ext data: func_info
 * records and CO-RE relocation records (line_info records are not visited
 * here).  Stops and propagates the callback's error on first failure.
 */
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
{
	const struct btf_ext_info *seg;
	struct btf_ext_info_sec *sec;
	int i, err;

	seg = &btf_ext->func_info;
	for_each_btf_ext_sec(seg, sec) {
		struct bpf_func_info_min *rec;

		for_each_btf_ext_rec(seg, sec, i, rec) {
			err = visit(&rec->type_id, ctx);
			if (err < 0)
				return err;
		}
	}

	seg = &btf_ext->core_relo_info;
	for_each_btf_ext_sec(seg, sec) {
		struct bpf_core_relo *rec;

		for_each_btf_ext_rec(seg, sec, i, rec) {
			err = visit(&rec->type_id, ctx);
			if (err < 0)
				return err;
		}
	}

	return 0;
}
5855 
/* Invoke visit() on every string offset stored in .BTF.ext data: section
 * names of all three segments, plus per-record file name/line strings in
 * line_info and access strings in CO-RE relocations.  Stops and propagates
 * the callback's error on first failure.
 */
int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx)
{
	const struct btf_ext_info *seg;
	struct btf_ext_info_sec *sec;
	int i, err;

	/* func_info records carry no string offsets, only the section name */
	seg = &btf_ext->func_info;
	for_each_btf_ext_sec(seg, sec) {
		err = visit(&sec->sec_name_off, ctx);
		if (err)
			return err;
	}

	seg = &btf_ext->line_info;
	for_each_btf_ext_sec(seg, sec) {
		struct bpf_line_info_min *rec;

		err = visit(&sec->sec_name_off, ctx);
		if (err)
			return err;

		for_each_btf_ext_rec(seg, sec, i, rec) {
			err = visit(&rec->file_name_off, ctx);
			if (err)
				return err;
			err = visit(&rec->line_off, ctx);
			if (err)
				return err;
		}
	}

	seg = &btf_ext->core_relo_info;
	for_each_btf_ext_sec(seg, sec) {
		struct bpf_core_relo *rec;

		err = visit(&sec->sec_name_off, ctx);
		if (err)
			return err;

		for_each_btf_ext_rec(seg, sec, i, rec) {
			err = visit(&rec->access_str_off, ctx);
			if (err)
				return err;
		}
	}

	return 0;
}
5904 
/* State shared by the BTF distillation passes below (building a minimal
 * "distilled" base BTF plus an updated split BTF on top of it).
 */
struct btf_distill {
	struct btf_pipe pipe;		/* src/dst BTF pair used by btf_add_type() copying */
	int *id_map;			/* per original-id mapping; 0 = not referenced/unmapped */
	unsigned int split_start_id;	/* first type id belonging to the original split BTF */
	unsigned int split_start_str;	/* first string offset of split BTF — assumed; not used in this chunk, confirm */
	int diff_id;			/* downward shift applied to unmapped split BTF ids */
};
5912 
/* Recursively mark all base BTF types referenced (directly or transitively)
 * from split BTF type [i] by recording them in dist->id_map.  Returns 0 on
 * success, negative error if an unexpected base type kind is encountered.
 */
static int btf_add_distilled_type_ids(struct btf_distill *dist, __u32 i)
{
	struct btf_type *split_t = btf_type_by_id(dist->pipe.src, i);
	struct btf_field_iter it;
	__u32 *id;
	int err;

	err = btf_field_iter_init(&it, split_t, BTF_FIELD_ITER_IDS);
	if (err)
		return err;
	while ((id = btf_field_iter_next(&it))) {
		struct btf_type *base_t;

		/* id 0 is VOID, nothing to record */
		if (!*id)
			continue;
		/* split BTF id, not needed */
		if (*id >= dist->split_start_id)
			continue;
		/* already added ? */
		if (dist->id_map[*id] > 0)
			continue;

		/* only a subset of base BTF types should be referenced from
		 * split BTF; ensure nothing unexpected is referenced.
		 */
		base_t = btf_type_by_id(dist->pipe.src, *id);
		switch (btf_kind(base_t)) {
		case BTF_KIND_INT:
		case BTF_KIND_FLOAT:
		case BTF_KIND_FWD:
		case BTF_KIND_ARRAY:
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
		case BTF_KIND_PTR:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_FUNC_PROTO:
		case BTF_KIND_TYPE_TAG:
			/* self-mapping for now; remapped to the distilled id
			 * later by btf_add_distilled_types()
			 */
			dist->id_map[*id] = *id;
			break;
		default:
			pr_warn("unexpected reference to base type[%u] of kind [%u] when creating distilled base BTF.\n",
				*id, btf_kind(base_t));
			return -EINVAL;
		}
		/* If a base type is used, ensure types it refers to are
		 * marked as used also; so for example if we find a PTR to INT
		 * we need both the PTR and INT.
		 *
		 * The only exception is named struct/unions, since distilled
		 * base BTF composite types have no members.
		 */
		if (btf_is_composite(base_t) && base_t->name_off)
			continue;
		err = btf_add_distilled_type_ids(dist, *id);
		if (err)
			return err;
	}
	return 0;
}
5977 
/* Copy every referenced base BTF type (marked in dist->id_map) into either
 * the distilled base BTF or the new split BTF, depending on its kind and
 * whether it is named, and record its new id in dist->id_map.  Whether we
 * are populating base or split is inferred from dst's start_id.
 * NOTE(review): on success, err may hold the positive id returned by the
 * last btf__add_*() call — callers should treat only err < 0 as failure;
 * confirm at call sites.
 */
static int btf_add_distilled_types(struct btf_distill *dist)
{
	bool adding_to_base = dist->pipe.dst->start_id == 1;
	int id = btf__type_cnt(dist->pipe.dst);	/* next id dst will hand out */
	struct btf_type *t;
	int i, err = 0;


	/* Add types for each of the required references to either distilled
	 * base or split BTF, depending on type characteristics.
	 */
	for (i = 1; i < dist->split_start_id; i++) {
		const char *name;
		int kind;

		if (!dist->id_map[i])
			continue;
		t = btf_type_by_id(dist->pipe.src, i);
		kind = btf_kind(t);
		name = btf__name_by_offset(dist->pipe.src, t->name_off);

		switch (kind) {
		case BTF_KIND_INT:
		case BTF_KIND_FLOAT:
		case BTF_KIND_FWD:
			/* Named int, float, fwd are added to base. */
			if (!adding_to_base)
				continue;
			err = btf_add_type(&dist->pipe, t);
			break;
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			/* Named struct/union are added to base as 0-vlen
			 * struct/union of same size.  Anonymous struct/unions
			 * are added to split BTF as-is.
			 */
			if (adding_to_base) {
				if (!t->name_off)
					continue;
				err = btf_add_composite(dist->pipe.dst, kind, name, t->size);
			} else {
				if (t->name_off)
					continue;
				err = btf_add_type(&dist->pipe, t);
			}
			break;
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
			/* Named enum[64]s are added to base as a sized
			 * enum; relocation will match with appropriately-named
			 * and sized enum or enum64.
			 *
			 * Anonymous enums are added to split BTF as-is.
			 */
			if (adding_to_base) {
				if (!t->name_off)
					continue;
				err = btf__add_enum(dist->pipe.dst, name, t->size);
			} else {
				if (t->name_off)
					continue;
				err = btf_add_type(&dist->pipe, t);
			}
			break;
		case BTF_KIND_ARRAY:
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_PTR:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_FUNC_PROTO:
		case BTF_KIND_TYPE_TAG:
			/* All other types are added to split BTF. */
			if (adding_to_base)
				continue;
			err = btf_add_type(&dist->pipe, t);
			break;
		default:
			pr_warn("unexpected kind when adding base type '%s'[%u] of kind [%u] to distilled base BTF.\n",
				name, i, kind);
			return -EINVAL;

		}
		if (err < 0)
			break;
		/* record original-id -> new-id mapping for later fixups */
		dist->id_map[i] = id++;
	}
	return err;
}
6067 
6068 /* Split BTF ids without a mapping will be shifted downwards since distilled
6069  * base BTF is smaller than the original base BTF.  For those that have a
6070  * mapping (either to base or updated split BTF), update the id based on
6071  * that mapping.
6072  */
6073 static int btf_update_distilled_type_ids(struct btf_distill *dist, __u32 i)
6074 {
6075 	struct btf_type *t = btf_type_by_id(dist->pipe.dst, i);
6076 	struct btf_field_iter it;
6077 	__u32 *id;
6078 	int err;
6079 
6080 	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
6081 	if (err)
6082 		return err;
6083 	while ((id = btf_field_iter_next(&it))) {
6084 		if (dist->id_map[*id])
6085 			*id = dist->id_map[*id];
6086 		else if (*id >= dist->split_start_id)
6087 			*id -= dist->diff_id;
6088 	}
6089 	return 0;
6090 }
6091 
6092 /* Create updated split BTF with distilled base BTF; distilled base BTF
6093  * consists of BTF information required to clarify the types that split
6094  * BTF refers to, omitting unneeded details.  Specifically it will contain
6095  * base types and memberless definitions of named structs, unions and enumerated
6096  * types. Associated reference types like pointers, arrays and anonymous
6097  * structs, unions and enumerated types will be added to split BTF.
6098  * Size is recorded for named struct/unions to help guide matching to the
6099  * target base BTF during later relocation.
6100  *
6101  * The only case where structs, unions or enumerated types are fully represented
6102  * is when they are anonymous; in such cases, the anonymous type is added to
6103  * split BTF in full.
6104  *
6105  * We return newly-created split BTF where the split BTF refers to a newly-created
6106  * distilled base BTF. Both must be freed separately by the caller.
6107  */
int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf,
		      struct btf **new_split_btf)
{
	struct btf *new_base = NULL, *new_split = NULL;
	const struct btf *old_base;
	unsigned int n = btf__type_cnt(src_btf);
	struct btf_distill dist = {};
	struct btf_type *t;
	int i, err = 0;

	/* src BTF must be split BTF. */
	old_base = btf__base_btf(src_btf);
	if (!new_base_btf || !new_split_btf || !old_base)
		return libbpf_err(-EINVAL);

	new_base = btf__new_empty();
	if (!new_base)
		return libbpf_err(-ENOMEM);

	/* Distilled base must have same byte order as the source. */
	btf__set_endianness(new_base, btf__endianness(src_btf));

	/* id_map[i] will hold the new id for src type i; 0 means "no mapping". */
	dist.id_map = calloc(n, sizeof(*dist.id_map));
	if (!dist.id_map) {
		err = -ENOMEM;
		goto done;
	}
	dist.pipe.src = src_btf;
	dist.pipe.dst = new_base;
	/* NOTE(review): presumably maps src string offsets to dst string
	 * offsets as types are copied through the pipe — confirm against
	 * btf_add_type()'s string handling.
	 */
	dist.pipe.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
	if (IS_ERR(dist.pipe.str_off_map)) {
		err = -ENOMEM;
		goto done;
	}
	dist.split_start_id = btf__type_cnt(old_base);
	dist.split_start_str = old_base->hdr.str_len;

	/* Pass over src split BTF; generate the list of base BTF type ids it
	 * references; these will constitute our distilled BTF set to be
	 * distributed over base and split BTF as appropriate.
	 */
	for (i = src_btf->start_id; i < n; i++) {
		err = btf_add_distilled_type_ids(&dist, i);
		if (err < 0)
			goto done;
	}
	/* Next add types for each of the required references to base BTF and split BTF
	 * in turn.
	 */
	err = btf_add_distilled_types(&dist);
	if (err < 0)
		goto done;

	/* Create new split BTF with distilled base BTF as its base; the final
	 * state is split BTF with distilled base BTF that represents enough
	 * about its base references to allow it to be relocated with the base
	 * BTF available.
	 */
	new_split = btf__new_empty_split(new_base);
	if (!new_split) {
		/* btf__new_empty_split() reports failure via errno. */
		err = -errno;
		goto done;
	}
	/* Retarget the pipe: subsequent additions go into the new split BTF. */
	dist.pipe.dst = new_split;
	/* First add all split types */
	for (i = src_btf->start_id; i < n; i++) {
		t = btf_type_by_id(src_btf, i);
		err = btf_add_type(&dist.pipe, t);
		if (err < 0)
			goto done;
	}
	/* Now add distilled types to split BTF that are not added to base. */
	err = btf_add_distilled_types(&dist);
	if (err < 0)
		goto done;

	/* All split BTF ids will be shifted downwards since there are less base
	 * BTF ids in distilled base BTF.
	 */
	dist.diff_id = dist.split_start_id - btf__type_cnt(new_base);

	/* n is reused here: it now counts types in the new split BTF. */
	n = btf__type_cnt(new_split);
	/* Now update base/split BTF ids. */
	for (i = 1; i < n; i++) {
		err = btf_update_distilled_type_ids(&dist, i);
		if (err < 0)
			break;
	}
done:
	free(dist.id_map);
	hashmap__free(dist.pipe.str_off_map);
	if (err) {
		btf__free(new_split);
		btf__free(new_base);
		return libbpf_err(err);
	}
	/* Success: caller owns both objects and must free them separately. */
	*new_base_btf = new_base;
	*new_split_btf = new_split;

	return 0;
}
6208 
/* Return a pointer to @btf's in-memory header. */
const struct btf_header *btf_header(const struct btf *btf)
{
	return &btf->hdr;
}
6213 
/* Attach @base_btf as the base of split BTF @btf: type ids and string
 * offsets in @btf start where @base_btf's end.  @btf does not take
 * ownership of @base_btf (hence the const cast below).
 */
void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
{
	btf->base_btf = (struct btf *)base_btf;
	btf->start_id = btf__type_cnt(base_btf);
	/* start_str_off accumulates in case base_btf is itself split BTF */
	btf->start_str_off = base_btf->hdr.str_len + base_btf->start_str_off;
}
6220 
6221 int btf__relocate(struct btf *btf, const struct btf *base_btf)
6222 {
6223 	int err = btf_relocate(btf, base_btf, NULL);
6224 
6225 	if (!err)
6226 		btf->owns_base = false;
6227 	return libbpf_err(err);
6228 }
6229 
/* Context shared by btf__permute() and its id-remapping callback. */
struct btf_permute {
	struct btf *btf;	/* BTF object being permuted */
	__u32 *id_map;		/* caller-provided old id -> new id mapping */
	__u32 start_offs;	/* 1 if id_map[0] covers VOID (no base BTF), else 0 */
};
6235 
6236 /* Callback function to remap individual type ID references */
6237 static int btf_permute_remap_type_id(__u32 *type_id, void *ctx)
6238 {
6239 	struct btf_permute *p = ctx;
6240 	__u32 new_id = *type_id;
6241 
6242 	/* refer to the base BTF or VOID type */
6243 	if (new_id < p->btf->start_id)
6244 		return 0;
6245 
6246 	if (new_id >= btf__type_cnt(p->btf))
6247 		return -EINVAL;
6248 
6249 	*type_id = p->id_map[new_id - p->btf->start_id + p->start_offs];
6250 	return 0;
6251 }
6252 
6253 int btf__permute(struct btf *btf, __u32 *id_map, __u32 id_map_cnt,
6254 		 const struct btf_permute_opts *opts)
6255 {
6256 	struct btf_permute p;
6257 	struct btf_ext *btf_ext;
6258 	void *nt, *new_types = NULL;
6259 	__u32 *order_map = NULL;
6260 	int err = 0, i;
6261 	__u32 n, id, start_offs = 0;
6262 
6263 	if (!OPTS_VALID(opts, btf_permute_opts))
6264 		return libbpf_err(-EINVAL);
6265 
6266 	if (btf__base_btf(btf)) {
6267 		n = btf->nr_types;
6268 	} else {
6269 		if (id_map[0] != 0)
6270 			return libbpf_err(-EINVAL);
6271 		n = btf__type_cnt(btf);
6272 		start_offs = 1;
6273 	}
6274 
6275 	if (id_map_cnt != n)
6276 		return libbpf_err(-EINVAL);
6277 
6278 	/* record the sequence of types */
6279 	order_map = calloc(id_map_cnt, sizeof(*id_map));
6280 	if (!order_map) {
6281 		err = -ENOMEM;
6282 		goto done;
6283 	}
6284 
6285 	new_types = calloc(btf->hdr.type_len, 1);
6286 	if (!new_types) {
6287 		err = -ENOMEM;
6288 		goto done;
6289 	}
6290 
6291 	err = btf_ensure_modifiable(btf);
6292 	if (err)
6293 		goto done;
6294 
6295 	for (i = start_offs; i < id_map_cnt; i++) {
6296 		id = id_map[i];
6297 		if (id < btf->start_id || id >= btf__type_cnt(btf)) {
6298 			err = -EINVAL;
6299 			goto done;
6300 		}
6301 		id -= btf->start_id - start_offs;
6302 		/* cannot be mapped to the same ID */
6303 		if (order_map[id]) {
6304 			err = -EINVAL;
6305 			goto done;
6306 		}
6307 		order_map[id] = i + btf->start_id - start_offs;
6308 	}
6309 
6310 	p.btf = btf;
6311 	p.id_map = id_map;
6312 	p.start_offs = start_offs;
6313 	nt = new_types;
6314 	for (i = start_offs; i < id_map_cnt; i++) {
6315 		struct btf_field_iter it;
6316 		const struct btf_type *t;
6317 		__u32 *type_id;
6318 		int type_size;
6319 
6320 		id = order_map[i];
6321 		t = btf__type_by_id(btf, id);
6322 		type_size = btf_type_size(btf, t);
6323 		memcpy(nt, t, type_size);
6324 
6325 		/* fix up referenced IDs for BTF */
6326 		err = btf_field_iter_init(&it, nt, BTF_FIELD_ITER_IDS);
6327 		if (err)
6328 			goto done;
6329 		while ((type_id = btf_field_iter_next(&it))) {
6330 			err = btf_permute_remap_type_id(type_id, &p);
6331 			if (err)
6332 				goto done;
6333 		}
6334 
6335 		nt += type_size;
6336 	}
6337 
6338 	/* fix up referenced IDs for btf_ext */
6339 	btf_ext = OPTS_GET(opts, btf_ext, NULL);
6340 	if (btf_ext) {
6341 		err = btf_ext_visit_type_ids(btf_ext, btf_permute_remap_type_id, &p);
6342 		if (err)
6343 			goto done;
6344 	}
6345 
6346 	for (nt = new_types, i = 0; i < id_map_cnt - start_offs; i++) {
6347 		btf->type_offs[i] = nt - new_types;
6348 		nt += btf_type_size(btf, nt);
6349 	}
6350 
6351 	free(order_map);
6352 	free(btf->types_data);
6353 	btf->types_data = new_types;
6354 	return 0;
6355 
6356 done:
6357 	free(order_map);
6358 	free(new_types);
6359 	return libbpf_err(err);
6360 }
6361