1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 /* Copyright (c) 2018 Facebook */
3 
4 #include <byteswap.h>
5 #include <endian.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <fcntl.h>
10 #include <unistd.h>
11 #include <errno.h>
12 #include <sys/utsname.h>
13 #include <sys/param.h>
14 #include <sys/stat.h>
15 #include <linux/kernel.h>
16 #include <linux/err.h>
17 #include <linux/btf.h>
18 #include <gelf.h>
19 #include "btf.h"
20 #include "bpf.h"
21 #include "libbpf.h"
22 #include "libbpf_internal.h"
23 #include "hashmap.h"
24 #include "strset.h"
25 
26 #define BTF_MAX_NR_TYPES 0x7fffffffU
27 #define BTF_MAX_STR_OFFSET 0x7fffffffU
28 
29 static struct btf_type btf_void;
30 
31 struct btf {
32 	/* raw BTF data in native endianness */
33 	void *raw_data;
34 	/* raw BTF data in non-native endianness */
35 	void *raw_data_swapped;
36 	__u32 raw_size;
37 	/* whether target endianness differs from the native one */
38 	bool swapped_endian;
39 
40 	/*
41 	 * When BTF is loaded from an ELF or raw memory it is stored
42 	 * in a contiguous memory block. The hdr, types_data, and strs_data
43 	 * point inside that memory region to their respective parts of the BTF
44 	 * representation:
45 	 *
46 	 * +--------------------------------+
47 	 * |  Header  |  Types  |  Strings  |
48 	 * +--------------------------------+
49 	 * ^          ^         ^
50 	 * |          |         |
51 	 * hdr        |         |
52 	 * types_data-+         |
53 	 * strs_data------------+
54 	 *
55 	 * If BTF data is later modified, e.g., due to types added or
56 	 * removed, BTF deduplication performed, etc., this contiguous
57 	 * representation is broken up into three independently allocated
58 	 * memory regions to be able to modify them independently.
59 	 * raw_data is nulled out at that point, but can be later allocated
60 	 * and cached again if the user calls btf__raw_data(), at which point
61 	 * raw_data will contain a contiguous copy of header, types, and
62 	 * strings:
63 	 *
64 	 * +----------+  +---------+  +-----------+
65 	 * |  Header  |  |  Types  |  |  Strings  |
66 	 * +----------+  +---------+  +-----------+
67 	 * ^             ^            ^
68 	 * |             |            |
69 	 * hdr           |            |
70 	 * types_data----+            |
71 	 * strset__data(strs_set)-----+
72 	 *
73 	 *               +----------+---------+-----------+
74 	 *               |  Header  |  Types  |  Strings  |
75 	 * raw_data----->+----------+---------+-----------+
76 	 */
77 	struct btf_header *hdr;
78 
79 	void *types_data;
80 	size_t types_data_cap; /* used size stored in hdr->type_len */
81 
82 	/* type ID to `struct btf_type *` lookup index
83 	 * type_offs[0] corresponds to the first non-VOID type:
84 	 *   - for base BTF it's type [1];
85 	 *   - for split BTF it's the first non-base BTF type.
86 	 */
87 	__u32 *type_offs;
88 	size_t type_offs_cap;
89 	/* number of types in this BTF instance:
90 	 *   - doesn't include special [0] void type;
91 	 *   - for split BTF counts number of types added on top of base BTF.
92 	 */
93 	__u32 nr_types;
94 	/* if not NULL, points to the base BTF on top of which the current
95 	 * split BTF is based
96 	 */
97 	struct btf *base_btf;
98 	/* BTF type ID of the first type in this BTF instance:
99 	 *   - for base BTF it's equal to 1;
100 	 *   - for split BTF it's equal to the biggest type ID of base BTF plus 1.
101 	 */
102 	int start_id;
103 	/* logical string offset of this BTF instance:
104 	 *   - for base BTF it's equal to 0;
105 	 *   - for split BTF it's equal to total size of base BTF's string section size.
106 	 */
107 	int start_str_off;
108 
109 	/* only one of strs_data or strs_set can be non-NULL, depending on
110 	 * whether BTF is in a modifiable state (strs_set is used) or not
111 	 * (strs_data points inside raw_data)
112 	 */
113 	void *strs_data;
114 	/* a set of unique strings */
115 	struct strset *strs_set;
116 	/* whether strings are already deduplicated */
117 	bool strs_deduped;
118 
119 	/* BTF object FD, if loaded into kernel */
120 	int fd;
121 
122 	/* Pointer size (in bytes) for a target architecture of this BTF */
123 	int ptr_sz;
124 };
125 
126 static inline __u64 ptr_to_u64(const void *ptr)
127 {
128 	return (__u64) (unsigned long) ptr;
129 }
130 
131 /* Ensure given dynamically allocated memory region pointed to by *data* with
132  * capacity of *cap_cnt* elements each taking *elem_sz* bytes has enough
133  * memory to accommodate *add_cnt* new elements, assuming *cur_cnt* elements
134  * are already used. At most *max_cnt* elements can ever be allocated.
135  * If necessary, memory is reallocated and all existing data is copied over,
136  * the new pointer to the memory region is stored at *data*, and the new
137  * region capacity (in number of elements) is stored in *cap_cnt*.
138  * On success, a pointer to the beginning of the unused memory is returned.
139  * On error, NULL is returned.
140  */
141 void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
142 		     size_t cur_cnt, size_t max_cnt, size_t add_cnt)
143 {
144 	size_t new_cnt;
145 	void *new_data;
146 
147 	if (cur_cnt + add_cnt <= *cap_cnt)
148 		return *data + cur_cnt * elem_sz;
149 
150 	/* requested more than the set limit */
151 	if (cur_cnt + add_cnt > max_cnt)
152 		return NULL;
153 
154 	new_cnt = *cap_cnt;
155 	new_cnt += new_cnt / 4;		  /* expand by 25% */
156 	if (new_cnt < 16)		  /* but at least 16 elements */
157 		new_cnt = 16;
158 	if (new_cnt > max_cnt)		  /* but not exceeding a set limit */
159 		new_cnt = max_cnt;
160 	if (new_cnt < cur_cnt + add_cnt)  /* also ensure we have enough memory */
161 		new_cnt = cur_cnt + add_cnt;
162 
163 	new_data = libbpf_reallocarray(*data, new_cnt, elem_sz);
164 	if (!new_data)
165 		return NULL;
166 
167 	/* zero out newly allocated portion of memory */
168 	memset(new_data + (*cap_cnt) * elem_sz, 0, (new_cnt - *cap_cnt) * elem_sz);
169 
170 	*data = new_data;
171 	*cap_cnt = new_cnt;
172 	return new_data + cur_cnt * elem_sz;
173 }
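
/*
 * A minimal usage sketch (hypothetical caller code, not part of libbpf):
 * growing a dynamically sized array of __u32 values one element at a time.
 * *data* and *cap_cnt* must start zeroed, and the caller tracks the used
 * element count itself:
 *
 *	size_t cap = 0, used = 0;
 *	__u32 *vals = NULL, *slot;
 *
 *	slot = libbpf_add_mem((void **)&vals, &cap, sizeof(*vals),
 *			      used, 1024, 1);	// max_cnt = 1024 is arbitrary
 *	if (!slot)
 *		return -ENOMEM;
 *	*slot = 42;
 *	used++;
 */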
174 
175 /* Ensure given dynamically allocated memory region has enough allocated space
176  * to accommodate *need_cnt* elements of size *elem_sz* bytes each.
177  */
178 int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
179 {
180 	void *p;
181 
182 	if (need_cnt <= *cap_cnt)
183 		return 0;
184 
185 	p = libbpf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt);
186 	if (!p)
187 		return -ENOMEM;
188 
189 	return 0;
190 }
191 
192 static void *btf_add_type_offs_mem(struct btf *btf, size_t add_cnt)
193 {
194 	return libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
195 			      btf->nr_types, BTF_MAX_NR_TYPES, add_cnt);
196 }
197 
198 static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
199 {
200 	__u32 *p;
201 
202 	p = btf_add_type_offs_mem(btf, 1);
203 	if (!p)
204 		return -ENOMEM;
205 
206 	*p = type_off;
207 	return 0;
208 }
209 
210 static void btf_bswap_hdr(struct btf_header *h)
211 {
212 	h->magic = bswap_16(h->magic);
213 	h->hdr_len = bswap_32(h->hdr_len);
214 	h->type_off = bswap_32(h->type_off);
215 	h->type_len = bswap_32(h->type_len);
216 	h->str_off = bswap_32(h->str_off);
217 	h->str_len = bswap_32(h->str_len);
218 }
219 
220 static int btf_parse_hdr(struct btf *btf)
221 {
222 	struct btf_header *hdr = btf->hdr;
223 	__u32 meta_left;
224 
225 	if (btf->raw_size < sizeof(struct btf_header)) {
226 		pr_debug("BTF header not found\n");
227 		return -EINVAL;
228 	}
229 
230 	if (hdr->magic == bswap_16(BTF_MAGIC)) {
231 		btf->swapped_endian = true;
232 		if (bswap_32(hdr->hdr_len) != sizeof(struct btf_header)) {
233 			pr_warn("Can't load BTF with non-native endianness due to unsupported header length %u\n",
234 				bswap_32(hdr->hdr_len));
235 			return -ENOTSUP;
236 		}
237 		btf_bswap_hdr(hdr);
238 	} else if (hdr->magic != BTF_MAGIC) {
239 		pr_debug("Invalid BTF magic: %x\n", hdr->magic);
240 		return -EINVAL;
241 	}
242 
243 	if (btf->raw_size < hdr->hdr_len) {
244 		pr_debug("BTF header len %u larger than data size %u\n",
245 			 hdr->hdr_len, btf->raw_size);
246 		return -EINVAL;
247 	}
248 
249 	meta_left = btf->raw_size - hdr->hdr_len;
250 	if (meta_left < (long long)hdr->str_off + hdr->str_len) {
251 		pr_debug("Invalid BTF total size: %u\n", btf->raw_size);
252 		return -EINVAL;
253 	}
254 
255 	if ((long long)hdr->type_off + hdr->type_len > hdr->str_off) {
256 		pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n",
257 			 hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len);
258 		return -EINVAL;
259 	}
260 
261 	if (hdr->type_off % 4) {
262 		pr_debug("BTF type section is not aligned to 4 bytes\n");
263 		return -EINVAL;
264 	}
265 
266 	return 0;
267 }
268 
269 static int btf_parse_str_sec(struct btf *btf)
270 {
271 	const struct btf_header *hdr = btf->hdr;
272 	const char *start = btf->strs_data;
273 	const char *end = start + btf->hdr->str_len;
274 
275 	if (btf->base_btf && hdr->str_len == 0)
276 		return 0;
277 	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || end[-1]) {
278 		pr_debug("Invalid BTF string section\n");
279 		return -EINVAL;
280 	}
281 	if (!btf->base_btf && start[0]) {
282 		pr_debug("Invalid BTF string section\n");
283 		return -EINVAL;
284 	}
285 	return 0;
286 }
287 
288 static int btf_type_size(const struct btf_type *t)
289 {
290 	const int base_size = sizeof(struct btf_type);
291 	__u16 vlen = btf_vlen(t);
292 
293 	switch (btf_kind(t)) {
294 	case BTF_KIND_FWD:
295 	case BTF_KIND_CONST:
296 	case BTF_KIND_VOLATILE:
297 	case BTF_KIND_RESTRICT:
298 	case BTF_KIND_PTR:
299 	case BTF_KIND_TYPEDEF:
300 	case BTF_KIND_FUNC:
301 	case BTF_KIND_FLOAT:
302 	case BTF_KIND_TYPE_TAG:
303 		return base_size;
304 	case BTF_KIND_INT:
305 		return base_size + sizeof(__u32);
306 	case BTF_KIND_ENUM:
307 		return base_size + vlen * sizeof(struct btf_enum);
308 	case BTF_KIND_ENUM64:
309 		return base_size + vlen * sizeof(struct btf_enum64);
310 	case BTF_KIND_ARRAY:
311 		return base_size + sizeof(struct btf_array);
312 	case BTF_KIND_STRUCT:
313 	case BTF_KIND_UNION:
314 		return base_size + vlen * sizeof(struct btf_member);
315 	case BTF_KIND_FUNC_PROTO:
316 		return base_size + vlen * sizeof(struct btf_param);
317 	case BTF_KIND_VAR:
318 		return base_size + sizeof(struct btf_var);
319 	case BTF_KIND_DATASEC:
320 		return base_size + vlen * sizeof(struct btf_var_secinfo);
321 	case BTF_KIND_DECL_TAG:
322 		return base_size + sizeof(struct btf_decl_tag);
323 	default:
324 		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
325 		return -EINVAL;
326 	}
327 }
328 
329 static void btf_bswap_type_base(struct btf_type *t)
330 {
331 	t->name_off = bswap_32(t->name_off);
332 	t->info = bswap_32(t->info);
333 	t->type = bswap_32(t->type);
334 }
335 
336 static int btf_bswap_type_rest(struct btf_type *t)
337 {
338 	struct btf_var_secinfo *v;
339 	struct btf_enum64 *e64;
340 	struct btf_member *m;
341 	struct btf_array *a;
342 	struct btf_param *p;
343 	struct btf_enum *e;
344 	__u16 vlen = btf_vlen(t);
345 	int i;
346 
347 	switch (btf_kind(t)) {
348 	case BTF_KIND_FWD:
349 	case BTF_KIND_CONST:
350 	case BTF_KIND_VOLATILE:
351 	case BTF_KIND_RESTRICT:
352 	case BTF_KIND_PTR:
353 	case BTF_KIND_TYPEDEF:
354 	case BTF_KIND_FUNC:
355 	case BTF_KIND_FLOAT:
356 	case BTF_KIND_TYPE_TAG:
357 		return 0;
358 	case BTF_KIND_INT:
359 		*(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1));
360 		return 0;
361 	case BTF_KIND_ENUM:
362 		for (i = 0, e = btf_enum(t); i < vlen; i++, e++) {
363 			e->name_off = bswap_32(e->name_off);
364 			e->val = bswap_32(e->val);
365 		}
366 		return 0;
367 	case BTF_KIND_ENUM64:
368 		for (i = 0, e64 = btf_enum64(t); i < vlen; i++, e64++) {
369 			e64->name_off = bswap_32(e64->name_off);
370 			e64->val_lo32 = bswap_32(e64->val_lo32);
371 			e64->val_hi32 = bswap_32(e64->val_hi32);
372 		}
373 		return 0;
374 	case BTF_KIND_ARRAY:
375 		a = btf_array(t);
376 		a->type = bswap_32(a->type);
377 		a->index_type = bswap_32(a->index_type);
378 		a->nelems = bswap_32(a->nelems);
379 		return 0;
380 	case BTF_KIND_STRUCT:
381 	case BTF_KIND_UNION:
382 		for (i = 0, m = btf_members(t); i < vlen; i++, m++) {
383 			m->name_off = bswap_32(m->name_off);
384 			m->type = bswap_32(m->type);
385 			m->offset = bswap_32(m->offset);
386 		}
387 		return 0;
388 	case BTF_KIND_FUNC_PROTO:
389 		for (i = 0, p = btf_params(t); i < vlen; i++, p++) {
390 			p->name_off = bswap_32(p->name_off);
391 			p->type = bswap_32(p->type);
392 		}
393 		return 0;
394 	case BTF_KIND_VAR:
395 		btf_var(t)->linkage = bswap_32(btf_var(t)->linkage);
396 		return 0;
397 	case BTF_KIND_DATASEC:
398 		for (i = 0, v = btf_var_secinfos(t); i < vlen; i++, v++) {
399 			v->type = bswap_32(v->type);
400 			v->offset = bswap_32(v->offset);
401 			v->size = bswap_32(v->size);
402 		}
403 		return 0;
404 	case BTF_KIND_DECL_TAG:
405 		btf_decl_tag(t)->component_idx = bswap_32(btf_decl_tag(t)->component_idx);
406 		return 0;
407 	default:
408 		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
409 		return -EINVAL;
410 	}
411 }
412 
413 static int btf_parse_type_sec(struct btf *btf)
414 {
415 	struct btf_header *hdr = btf->hdr;
416 	void *next_type = btf->types_data;
417 	void *end_type = next_type + hdr->type_len;
418 	int err, type_size;
419 
420 	while (next_type + sizeof(struct btf_type) <= end_type) {
421 		if (btf->swapped_endian)
422 			btf_bswap_type_base(next_type);
423 
424 		type_size = btf_type_size(next_type);
425 		if (type_size < 0)
426 			return type_size;
427 		if (next_type + type_size > end_type) {
428 			pr_warn("BTF type [%d] is malformed\n", btf->start_id + btf->nr_types);
429 			return -EINVAL;
430 		}
431 
432 		if (btf->swapped_endian && btf_bswap_type_rest(next_type))
433 			return -EINVAL;
434 
435 		err = btf_add_type_idx_entry(btf, next_type - btf->types_data);
436 		if (err)
437 			return err;
438 
439 		next_type += type_size;
440 		btf->nr_types++;
441 	}
442 
443 	if (next_type != end_type) {
444 		pr_warn("BTF types data is malformed\n");
445 		return -EINVAL;
446 	}
447 
448 	return 0;
449 }
450 
451 static int btf_validate_str(const struct btf *btf, __u32 str_off, const char *what, __u32 type_id)
452 {
453 	const char *s;
454 
455 	s = btf__str_by_offset(btf, str_off);
456 	if (!s) {
457 		pr_warn("btf: type [%u]: invalid %s (string offset %u)\n", type_id, what, str_off);
458 		return -EINVAL;
459 	}
460 
461 	return 0;
462 }
463 
464 static int btf_validate_id(const struct btf *btf, __u32 id, __u32 ctx_id)
465 {
466 	const struct btf_type *t;
467 
468 	t = btf__type_by_id(btf, id);
469 	if (!t) {
470 		pr_warn("btf: type [%u]: invalid referenced type ID %u\n", ctx_id, id);
471 		return -EINVAL;
472 	}
473 
474 	return 0;
475 }
476 
477 static int btf_validate_type(const struct btf *btf, const struct btf_type *t, __u32 id)
478 {
479 	__u32 kind = btf_kind(t);
480 	int err, i, n;
481 
482 	err = btf_validate_str(btf, t->name_off, "type name", id);
483 	if (err)
484 		return err;
485 
486 	switch (kind) {
487 	case BTF_KIND_UNKN:
488 	case BTF_KIND_INT:
489 	case BTF_KIND_FWD:
490 	case BTF_KIND_FLOAT:
491 		break;
492 	case BTF_KIND_PTR:
493 	case BTF_KIND_TYPEDEF:
494 	case BTF_KIND_VOLATILE:
495 	case BTF_KIND_CONST:
496 	case BTF_KIND_RESTRICT:
497 	case BTF_KIND_VAR:
498 	case BTF_KIND_DECL_TAG:
499 	case BTF_KIND_TYPE_TAG:
500 		err = btf_validate_id(btf, t->type, id);
501 		if (err)
502 			return err;
503 		break;
504 	case BTF_KIND_ARRAY: {
505 		const struct btf_array *a = btf_array(t);
506 
507 		err = btf_validate_id(btf, a->type, id);
508 		err = err ?: btf_validate_id(btf, a->index_type, id);
509 		if (err)
510 			return err;
511 		break;
512 	}
513 	case BTF_KIND_STRUCT:
514 	case BTF_KIND_UNION: {
515 		const struct btf_member *m = btf_members(t);
516 
517 		n = btf_vlen(t);
518 		for (i = 0; i < n; i++, m++) {
519 			err = btf_validate_str(btf, m->name_off, "field name", id);
520 			err = err ?: btf_validate_id(btf, m->type, id);
521 			if (err)
522 				return err;
523 		}
524 		break;
525 	}
526 	case BTF_KIND_ENUM: {
527 		const struct btf_enum *m = btf_enum(t);
528 
529 		n = btf_vlen(t);
530 		for (i = 0; i < n; i++, m++) {
531 			err = btf_validate_str(btf, m->name_off, "enum name", id);
532 			if (err)
533 				return err;
534 		}
535 		break;
536 	}
537 	case BTF_KIND_ENUM64: {
538 		const struct btf_enum64 *m = btf_enum64(t);
539 
540 		n = btf_vlen(t);
541 		for (i = 0; i < n; i++, m++) {
542 			err = btf_validate_str(btf, m->name_off, "enum name", id);
543 			if (err)
544 				return err;
545 		}
546 		break;
547 	}
548 	case BTF_KIND_FUNC: {
549 		const struct btf_type *ft;
550 
551 		err = btf_validate_id(btf, t->type, id);
552 		if (err)
553 			return err;
554 		ft = btf__type_by_id(btf, t->type);
555 		if (btf_kind(ft) != BTF_KIND_FUNC_PROTO) {
556 			pr_warn("btf: type [%u]: referenced type [%u] is not FUNC_PROTO\n", id, t->type);
557 			return -EINVAL;
558 		}
559 		break;
560 	}
561 	case BTF_KIND_FUNC_PROTO: {
562 		const struct btf_param *m = btf_params(t);
563 
564 		n = btf_vlen(t);
565 		for (i = 0; i < n; i++, m++) {
566 			err = btf_validate_str(btf, m->name_off, "param name", id);
567 			err = err ?: btf_validate_id(btf, m->type, id);
568 			if (err)
569 				return err;
570 		}
571 		break;
572 	}
573 	case BTF_KIND_DATASEC: {
574 		const struct btf_var_secinfo *m = btf_var_secinfos(t);
575 
576 		n = btf_vlen(t);
577 		for (i = 0; i < n; i++, m++) {
578 			err = btf_validate_id(btf, m->type, id);
579 			if (err)
580 				return err;
581 		}
582 		break;
583 	}
584 	default:
585 		pr_warn("btf: type [%u]: unrecognized kind %u\n", id, kind);
586 		return -EINVAL;
587 	}
588 	return 0;
589 }
590 
591 /* Validate basic sanity of BTF. It's intentionally less thorough than the
592  * kernel's validation and checks only the properties of BTF that libbpf
593  * relies on to be correct (e.g., valid type IDs, valid string offsets, etc.)
594  */
595 static int btf_sanity_check(const struct btf *btf)
596 {
597 	const struct btf_type *t;
598 	__u32 i, n = btf__type_cnt(btf);
599 	int err;
600 
601 	for (i = 1; i < n; i++) {
602 		t = btf_type_by_id(btf, i);
603 		err = btf_validate_type(btf, t, i);
604 		if (err)
605 			return err;
606 	}
607 	return 0;
608 }
609 
610 __u32 btf__type_cnt(const struct btf *btf)
611 {
612 	return btf->start_id + btf->nr_types;
613 }
614 
615 const struct btf *btf__base_btf(const struct btf *btf)
616 {
617 	return btf->base_btf;
618 }
619 
620 /* internal helper returning non-const pointer to a type */
621 struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id)
622 {
623 	if (type_id == 0)
624 		return &btf_void;
625 	if (type_id < btf->start_id)
626 		return btf_type_by_id(btf->base_btf, type_id);
627 	return btf->types_data + btf->type_offs[type_id - btf->start_id];
628 }
629 
630 const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
631 {
632 	if (type_id >= btf->start_id + btf->nr_types)
633 		return errno = EINVAL, NULL;
634 	return btf_type_by_id((struct btf *)btf, type_id);
635 }
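
/*
 * Illustrative iteration over all types (hypothetical caller code, not part
 * of libbpf): btf__type_cnt() and btf__type_by_id() together walk every type,
 * including base BTF types when *btf* is split BTF. ID 0 is the implicit
 * VOID type, so iteration starts at 1:
 *
 *	__u32 id, n = btf__type_cnt(btf);
 *
 *	for (id = 1; id < n; id++) {
 *		const struct btf_type *t = btf__type_by_id(btf, id);
 *		const char *name = btf__name_by_offset(btf, t->name_off);
 *
 *		printf("[%u] kind=%u name='%s'\n", id, btf_kind(t), name ?: "");
 *	}
 */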
636 
637 static int determine_ptr_size(const struct btf *btf)
638 {
639 	static const char * const long_aliases[] = {
640 		"long",
641 		"long int",
642 		"int long",
643 		"unsigned long",
644 		"long unsigned",
645 		"unsigned long int",
646 		"unsigned int long",
647 		"long unsigned int",
648 		"long int unsigned",
649 		"int unsigned long",
650 		"int long unsigned",
651 	};
652 	const struct btf_type *t;
653 	const char *name;
654 	int i, j, n;
655 
656 	if (btf->base_btf && btf->base_btf->ptr_sz > 0)
657 		return btf->base_btf->ptr_sz;
658 
659 	n = btf__type_cnt(btf);
660 	for (i = 1; i < n; i++) {
661 		t = btf__type_by_id(btf, i);
662 		if (!btf_is_int(t))
663 			continue;
664 
665 		if (t->size != 4 && t->size != 8)
666 			continue;
667 
668 		name = btf__name_by_offset(btf, t->name_off);
669 		if (!name)
670 			continue;
671 
672 		for (j = 0; j < ARRAY_SIZE(long_aliases); j++) {
673 			if (strcmp(name, long_aliases[j]) == 0)
674 				return t->size;
675 		}
676 	}
677 
678 	return -1;
679 }
680 
681 static size_t btf_ptr_sz(const struct btf *btf)
682 {
683 	if (!btf->ptr_sz)
684 		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
685 	return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
686 }
687 
688 /* Return the pointer size this BTF instance assumes. The size is
689  * heuristically determined by looking for a 'long' or 'unsigned long'
690  * integer type and recording its size in bytes. If BTF type information
691  * doesn't have any such type, this function returns 0. In the latter case,
692  * the native architecture's pointer size is assumed, so it will be either 4
693  * or 8, depending on the architecture that libbpf was compiled for. It's
694  * possible to override the guessed value with the btf__set_pointer_size() API.
695  */
696 size_t btf__pointer_size(const struct btf *btf)
697 {
698 	if (!btf->ptr_sz)
699 		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
700 
701 	if (btf->ptr_sz < 0)
702 		/* not enough BTF type info to guess */
703 		return 0;
704 
705 	return btf->ptr_sz;
706 }
707 
708 /* Override or set pointer size in bytes. Only values of 4 and 8 are
709  * supported.
710  */
711 int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
712 {
713 	if (ptr_sz != 4 && ptr_sz != 8)
714 		return libbpf_err(-EINVAL);
715 	btf->ptr_sz = ptr_sz;
716 	return 0;
717 }
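
/*
 * For illustration (hypothetical caller code, not part of libbpf): when BTF
 * for a 32-bit target contains no 'long' type for the heuristic to find, the
 * pointer size can be forced explicitly; the path below is made up:
 *
 *	struct btf *btf = btf__parse_raw("target.btf");
 *
 *	if (!btf)
 *		return -errno;
 *	if (btf__pointer_size(btf) == 0)
 *		btf__set_pointer_size(btf, 4);
 */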
718 
719 static bool is_host_big_endian(void)
720 {
721 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
722 	return false;
723 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
724 	return true;
725 #else
726 # error "Unrecognized __BYTE_ORDER__"
727 #endif
728 }
729 
730 enum btf_endianness btf__endianness(const struct btf *btf)
731 {
732 	if (is_host_big_endian())
733 		return btf->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
734 	else
735 		return btf->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
736 }
737 
738 int btf__set_endianness(struct btf *btf, enum btf_endianness endian)
739 {
740 	if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
741 		return libbpf_err(-EINVAL);
742 
743 	btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
744 	if (!btf->swapped_endian) {
745 		free(btf->raw_data_swapped);
746 		btf->raw_data_swapped = NULL;
747 	}
748 	return 0;
749 }
750 
751 static bool btf_type_is_void(const struct btf_type *t)
752 {
753 	return t == &btf_void || btf_is_fwd(t);
754 }
755 
756 static bool btf_type_is_void_or_null(const struct btf_type *t)
757 {
758 	return !t || btf_type_is_void(t);
759 }
760 
761 #define MAX_RESOLVE_DEPTH 32
762 
763 __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
764 {
765 	const struct btf_array *array;
766 	const struct btf_type *t;
767 	__u32 nelems = 1;
768 	__s64 size = -1;
769 	int i;
770 
771 	t = btf__type_by_id(btf, type_id);
772 	for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) {
773 		switch (btf_kind(t)) {
774 		case BTF_KIND_INT:
775 		case BTF_KIND_STRUCT:
776 		case BTF_KIND_UNION:
777 		case BTF_KIND_ENUM:
778 		case BTF_KIND_ENUM64:
779 		case BTF_KIND_DATASEC:
780 		case BTF_KIND_FLOAT:
781 			size = t->size;
782 			goto done;
783 		case BTF_KIND_PTR:
784 			size = btf_ptr_sz(btf);
785 			goto done;
786 		case BTF_KIND_TYPEDEF:
787 		case BTF_KIND_VOLATILE:
788 		case BTF_KIND_CONST:
789 		case BTF_KIND_RESTRICT:
790 		case BTF_KIND_VAR:
791 		case BTF_KIND_DECL_TAG:
792 		case BTF_KIND_TYPE_TAG:
793 			type_id = t->type;
794 			break;
795 		case BTF_KIND_ARRAY:
796 			array = btf_array(t);
797 			if (nelems && array->nelems > UINT32_MAX / nelems)
798 				return libbpf_err(-E2BIG);
799 			nelems *= array->nelems;
800 			type_id = array->type;
801 			break;
802 		default:
803 			return libbpf_err(-EINVAL);
804 		}
805 
806 		t = btf__type_by_id(btf, type_id);
807 	}
808 
809 done:
810 	if (size < 0)
811 		return libbpf_err(-EINVAL);
812 	if (nelems && size > UINT32_MAX / nelems)
813 		return libbpf_err(-E2BIG);
814 
815 	return nelems * size;
816 }
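
/*
 * Example (hypothetical caller code, not part of libbpf): resolving the byte
 * size of a named type; typedefs, modifiers, and array dimensions are
 * followed and multiplied out as described above. "my_config" is a made-up
 * type name:
 *
 *	__s32 id = btf__find_by_name(btf, "my_config");
 *	__s64 sz;
 *
 *	if (id < 0)
 *		return id;
 *	sz = btf__resolve_size(btf, id);
 *	if (sz < 0)
 *		return (int)sz;
 */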
817 
818 int btf__align_of(const struct btf *btf, __u32 id)
819 {
820 	const struct btf_type *t = btf__type_by_id(btf, id);
821 	__u16 kind = btf_kind(t);
822 
823 	switch (kind) {
824 	case BTF_KIND_INT:
825 	case BTF_KIND_ENUM:
826 	case BTF_KIND_ENUM64:
827 	case BTF_KIND_FLOAT:
828 		return min(btf_ptr_sz(btf), (size_t)t->size);
829 	case BTF_KIND_PTR:
830 		return btf_ptr_sz(btf);
831 	case BTF_KIND_TYPEDEF:
832 	case BTF_KIND_VOLATILE:
833 	case BTF_KIND_CONST:
834 	case BTF_KIND_RESTRICT:
835 	case BTF_KIND_TYPE_TAG:
836 		return btf__align_of(btf, t->type);
837 	case BTF_KIND_ARRAY:
838 		return btf__align_of(btf, btf_array(t)->type);
839 	case BTF_KIND_STRUCT:
840 	case BTF_KIND_UNION: {
841 		const struct btf_member *m = btf_members(t);
842 		__u16 vlen = btf_vlen(t);
843 		int i, max_align = 1, align;
844 
845 		for (i = 0; i < vlen; i++, m++) {
846 			align = btf__align_of(btf, m->type);
847 			if (align <= 0)
848 				return libbpf_err(align);
849 			max_align = max(max_align, align);
850 
851 			/* if field offset isn't aligned according to field
852 			 * type's alignment, then struct must be packed
853 			 */
854 			if (btf_member_bitfield_size(t, i) == 0 &&
855 			    (m->offset % (8 * align)) != 0)
856 				return 1;
857 		}
858 
859 		/* if struct/union size isn't a multiple of its alignment,
860 		 * then struct must be packed
861 		 */
862 		if ((t->size % max_align) != 0)
863 			return 1;
864 
865 		return max_align;
866 	}
867 	default:
868 		pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
869 		return errno = EINVAL, 0;
870 	}
871 }
872 
873 int btf__resolve_type(const struct btf *btf, __u32 type_id)
874 {
875 	const struct btf_type *t;
876 	int depth = 0;
877 
878 	t = btf__type_by_id(btf, type_id);
879 	while (depth < MAX_RESOLVE_DEPTH &&
880 	       !btf_type_is_void_or_null(t) &&
881 	       (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
882 		type_id = t->type;
883 		t = btf__type_by_id(btf, type_id);
884 		depth++;
885 	}
886 
887 	if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
888 		return libbpf_err(-EINVAL);
889 
890 	return type_id;
891 }
892 
893 __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
894 {
895 	__u32 i, nr_types = btf__type_cnt(btf);
896 
897 	if (!strcmp(type_name, "void"))
898 		return 0;
899 
900 	for (i = 1; i < nr_types; i++) {
901 		const struct btf_type *t = btf__type_by_id(btf, i);
902 		const char *name = btf__name_by_offset(btf, t->name_off);
903 
904 		if (name && !strcmp(type_name, name))
905 			return i;
906 	}
907 
908 	return libbpf_err(-ENOENT);
909 }
910 
911 static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
912 				   const char *type_name, __u32 kind)
913 {
914 	__u32 i, nr_types = btf__type_cnt(btf);
915 
916 	if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
917 		return 0;
918 
919 	for (i = start_id; i < nr_types; i++) {
920 		const struct btf_type *t = btf__type_by_id(btf, i);
921 		const char *name;
922 
923 		if (btf_kind(t) != kind)
924 			continue;
925 		name = btf__name_by_offset(btf, t->name_off);
926 		if (name && !strcmp(type_name, name))
927 			return i;
928 	}
929 
930 	return libbpf_err(-ENOENT);
931 }
932 
933 __s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
934 				 __u32 kind)
935 {
936 	return btf_find_by_name_kind(btf, btf->start_id, type_name, kind);
937 }
938 
939 __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
940 			     __u32 kind)
941 {
942 	return btf_find_by_name_kind(btf, 1, type_name, kind);
943 }
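
/*
 * Lookup sketch (hypothetical caller code, not part of libbpf): passing the
 * expected kind disambiguates, e.g., a struct from a typedef of the same
 * name:
 *
 *	__s32 id = btf__find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT);
 *
 *	if (id < 0)
 *		return id;	// -ENOENT if not found
 */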
944 
945 static bool btf_is_modifiable(const struct btf *btf)
946 {
947 	return (void *)btf->hdr != btf->raw_data;
948 }
949 
950 void btf__free(struct btf *btf)
951 {
952 	if (IS_ERR_OR_NULL(btf))
953 		return;
954 
955 	if (btf->fd >= 0)
956 		close(btf->fd);
957 
958 	if (btf_is_modifiable(btf)) {
959 		/* if BTF was modified after loading, it will have a split
960 		 * in-memory representation for header, types, and strings
961 		 * sections, so we need to free all of them individually. It
962 		 * might still have cached contiguous raw data present, which
963 		 * will be unconditionally freed below.
964 		 */
965 		free(btf->hdr);
966 		free(btf->types_data);
967 		strset__free(btf->strs_set);
968 	}
969 	free(btf->raw_data);
970 	free(btf->raw_data_swapped);
971 	free(btf->type_offs);
972 	free(btf);
973 }
974 
975 static struct btf *btf_new_empty(struct btf *base_btf)
976 {
977 	struct btf *btf;
978 
979 	btf = calloc(1, sizeof(*btf));
980 	if (!btf)
981 		return ERR_PTR(-ENOMEM);
982 
983 	btf->nr_types = 0;
984 	btf->start_id = 1;
985 	btf->start_str_off = 0;
986 	btf->fd = -1;
987 	btf->ptr_sz = sizeof(void *);
988 	btf->swapped_endian = false;
989 
990 	if (base_btf) {
991 		btf->base_btf = base_btf;
992 		btf->start_id = btf__type_cnt(base_btf);
993 		btf->start_str_off = base_btf->hdr->str_len;
994 	}
995 
996 	/* +1 for empty string at offset 0 */
997 	btf->raw_size = sizeof(struct btf_header) + (base_btf ? 0 : 1);
998 	btf->raw_data = calloc(1, btf->raw_size);
999 	if (!btf->raw_data) {
1000 		free(btf);
1001 		return ERR_PTR(-ENOMEM);
1002 	}
1003 
1004 	btf->hdr = btf->raw_data;
1005 	btf->hdr->hdr_len = sizeof(struct btf_header);
1006 	btf->hdr->magic = BTF_MAGIC;
1007 	btf->hdr->version = BTF_VERSION;
1008 
1009 	btf->types_data = btf->raw_data + btf->hdr->hdr_len;
1010 	btf->strs_data = btf->raw_data + btf->hdr->hdr_len;
1011 	btf->hdr->str_len = base_btf ? 0 : 1; /* empty string at offset 0 */
1012 
1013 	return btf;
1014 }
1015 
1016 struct btf *btf__new_empty(void)
1017 {
1018 	return libbpf_ptr(btf_new_empty(NULL));
1019 }
1020 
1021 struct btf *btf__new_empty_split(struct btf *base_btf)
1022 {
1023 	return libbpf_ptr(btf_new_empty(base_btf));
1024 }
1025 
1026 static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
1027 {
1028 	struct btf *btf;
1029 	int err;
1030 
1031 	btf = calloc(1, sizeof(struct btf));
1032 	if (!btf)
1033 		return ERR_PTR(-ENOMEM);
1034 
1035 	btf->nr_types = 0;
1036 	btf->start_id = 1;
1037 	btf->start_str_off = 0;
1038 	btf->fd = -1;
1039 
1040 	if (base_btf) {
1041 		btf->base_btf = base_btf;
1042 		btf->start_id = btf__type_cnt(base_btf);
1043 		btf->start_str_off = base_btf->hdr->str_len;
1044 	}
1045 
1046 	btf->raw_data = malloc(size);
1047 	if (!btf->raw_data) {
1048 		err = -ENOMEM;
1049 		goto done;
1050 	}
1051 	memcpy(btf->raw_data, data, size);
1052 	btf->raw_size = size;
1053 
1054 	btf->hdr = btf->raw_data;
1055 	err = btf_parse_hdr(btf);
1056 	if (err)
1057 		goto done;
1058 
1059 	btf->strs_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->str_off;
1060 	btf->types_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->type_off;
1061 
1062 	err = btf_parse_str_sec(btf);
1063 	err = err ?: btf_parse_type_sec(btf);
1064 	err = err ?: btf_sanity_check(btf);
1065 	if (err)
1066 		goto done;
1067 
1068 done:
1069 	if (err) {
1070 		btf__free(btf);
1071 		return ERR_PTR(err);
1072 	}
1073 
1074 	return btf;
1075 }
1076 
1077 struct btf *btf__new(const void *data, __u32 size)
1078 {
1079 	return libbpf_ptr(btf_new(data, size, NULL));
1080 }
1081 
1082 static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
1083 				 struct btf_ext **btf_ext)
1084 {
1085 	Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
1086 	int err = 0, fd = -1, idx = 0;
1087 	struct btf *btf = NULL;
1088 	Elf_Scn *scn = NULL;
1089 	Elf *elf = NULL;
1090 	GElf_Ehdr ehdr;
1091 	size_t shstrndx;
1092 
1093 	if (elf_version(EV_CURRENT) == EV_NONE) {
1094 		pr_warn("failed to init libelf for %s\n", path);
1095 		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
1096 	}
1097 
1098 	fd = open(path, O_RDONLY | O_CLOEXEC);
1099 	if (fd < 0) {
1100 		err = -errno;
1101 		pr_warn("failed to open %s: %s\n", path, strerror(errno));
1102 		return ERR_PTR(err);
1103 	}
1104 
1105 	err = -LIBBPF_ERRNO__FORMAT;
1106 
1107 	elf = elf_begin(fd, ELF_C_READ, NULL);
1108 	if (!elf) {
1109 		pr_warn("failed to open %s as ELF file\n", path);
1110 		goto done;
1111 	}
1112 	if (!gelf_getehdr(elf, &ehdr)) {
1113 		pr_warn("failed to get EHDR from %s\n", path);
1114 		goto done;
1115 	}
1116 
1117 	if (elf_getshdrstrndx(elf, &shstrndx)) {
1118 		pr_warn("failed to get section names section index for %s\n",
1119 			path);
1120 		goto done;
1121 	}
1122 
1123 	if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
1124 		pr_warn("failed to get e_shstrndx from %s\n", path);
1125 		goto done;
1126 	}
1127 
1128 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
1129 		GElf_Shdr sh;
1130 		char *name;
1131 
1132 		idx++;
1133 		if (gelf_getshdr(scn, &sh) != &sh) {
1134 			pr_warn("failed to get section(%d) header from %s\n",
1135 				idx, path);
1136 			goto done;
1137 		}
1138 		name = elf_strptr(elf, shstrndx, sh.sh_name);
1139 		if (!name) {
1140 			pr_warn("failed to get section(%d) name from %s\n",
1141 				idx, path);
1142 			goto done;
1143 		}
1144 		if (strcmp(name, BTF_ELF_SEC) == 0) {
1145 			btf_data = elf_getdata(scn, 0);
1146 			if (!btf_data) {
1147 				pr_warn("failed to get section(%d, %s) data from %s\n",
1148 					idx, name, path);
1149 				goto done;
1150 			}
1151 			continue;
1152 		} else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
1153 			btf_ext_data = elf_getdata(scn, 0);
1154 			if (!btf_ext_data) {
1155 				pr_warn("failed to get section(%d, %s) data from %s\n",
1156 					idx, name, path);
1157 				goto done;
1158 			}
1159 			continue;
1160 		}
1161 	}
1162 
1163 	if (!btf_data) {
1164 		pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path);
1165 		err = -ENODATA;
1166 		goto done;
1167 	}
1168 	btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf);
1169 	err = libbpf_get_error(btf);
1170 	if (err)
1171 		goto done;
1172 
1173 	switch (gelf_getclass(elf)) {
1174 	case ELFCLASS32:
1175 		btf__set_pointer_size(btf, 4);
1176 		break;
1177 	case ELFCLASS64:
1178 		btf__set_pointer_size(btf, 8);
1179 		break;
1180 	default:
1181 		pr_warn("failed to get ELF class (bitness) for %s\n", path);
1182 		break;
1183 	}
1184 
1185 	if (btf_ext && btf_ext_data) {
1186 		*btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
1187 		err = libbpf_get_error(*btf_ext);
1188 		if (err)
1189 			goto done;
1190 	} else if (btf_ext) {
1191 		*btf_ext = NULL;
1192 	}
1193 done:
1194 	if (elf)
1195 		elf_end(elf);
1196 	close(fd);
1197 
1198 	if (!err)
1199 		return btf;
1200 
1201 	if (btf_ext)
1202 		btf_ext__free(*btf_ext);
1203 	btf__free(btf);
1204 
1205 	return ERR_PTR(err);
1206 }
1207 
1208 struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
1209 {
1210 	return libbpf_ptr(btf_parse_elf(path, NULL, btf_ext));
1211 }
1212 
1213 struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf)
1214 {
1215 	return libbpf_ptr(btf_parse_elf(path, base_btf, NULL));
1216 }
1217 
1218 static struct btf *btf_parse_raw(const char *path, struct btf *base_btf)
1219 {
1220 	struct btf *btf = NULL;
1221 	void *data = NULL;
1222 	FILE *f = NULL;
1223 	__u16 magic;
1224 	int err = 0;
1225 	long sz;
1226 
1227 	f = fopen(path, "rbe");
1228 	if (!f) {
1229 		err = -errno;
1230 		goto err_out;
1231 	}
1232 
1233 	/* check BTF magic */
1234 	if (fread(&magic, 1, sizeof(magic), f) < sizeof(magic)) {
1235 		err = -EIO;
1236 		goto err_out;
1237 	}
1238 	if (magic != BTF_MAGIC && magic != bswap_16(BTF_MAGIC)) {
1239 		/* definitely not a raw BTF */
1240 		err = -EPROTO;
1241 		goto err_out;
1242 	}
1243 
1244 	/* get file size */
1245 	if (fseek(f, 0, SEEK_END)) {
1246 		err = -errno;
1247 		goto err_out;
1248 	}
1249 	sz = ftell(f);
1250 	if (sz < 0) {
1251 		err = -errno;
1252 		goto err_out;
1253 	}
1254 	/* rewind to the start */
1255 	if (fseek(f, 0, SEEK_SET)) {
1256 		err = -errno;
1257 		goto err_out;
1258 	}
1259 
1260 	/* pre-alloc memory and read all of BTF data */
1261 	data = malloc(sz);
1262 	if (!data) {
1263 		err = -ENOMEM;
1264 		goto err_out;
1265 	}
1266 	if (fread(data, 1, sz, f) < sz) {
1267 		err = -EIO;
1268 		goto err_out;
1269 	}
1270 
1271 	/* finally parse BTF data */
1272 	btf = btf_new(data, sz, base_btf);
1273 
1274 err_out:
1275 	free(data);
1276 	if (f)
1277 		fclose(f);
1278 	return err ? ERR_PTR(err) : btf;
1279 }
1280 
1281 struct btf *btf__parse_raw(const char *path)
1282 {
1283 	return libbpf_ptr(btf_parse_raw(path, NULL));
1284 }
1285 
1286 struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf)
1287 {
1288 	return libbpf_ptr(btf_parse_raw(path, base_btf));
1289 }
1290 
1291 static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext)
1292 {
1293 	struct btf *btf;
1294 	int err;
1295 
1296 	if (btf_ext)
1297 		*btf_ext = NULL;
1298 
1299 	btf = btf_parse_raw(path, base_btf);
1300 	err = libbpf_get_error(btf);
1301 	if (!err)
1302 		return btf;
1303 	if (err != -EPROTO)
1304 		return ERR_PTR(err);
1305 	return btf_parse_elf(path, base_btf, btf_ext);
1306 }
1307 
1308 struct btf *btf__parse(const char *path, struct btf_ext **btf_ext)
1309 {
1310 	return libbpf_ptr(btf_parse(path, NULL, btf_ext));
1311 }
1312 
1313 struct btf *btf__parse_split(const char *path, struct btf *base_btf)
1314 {
1315 	return libbpf_ptr(btf_parse(path, base_btf, NULL));
1316 }
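
/*
 * Parsing sketch (hypothetical caller code, not part of libbpf): btf__parse()
 * accepts both raw BTF and ELF files; btf__parse_split() does the same while
 * stacking the result on top of a base BTF, e.g. module BTF on top of vmlinux
 * BTF ("my_module" is illustrative):
 *
 *	struct btf *vmlinux_btf, *mod_btf;
 *
 *	vmlinux_btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
 *	if (!vmlinux_btf)
 *		return -errno;
 *	mod_btf = btf__parse_split("/sys/kernel/btf/my_module", vmlinux_btf);
 *	if (!mod_btf) {
 *		btf__free(vmlinux_btf);
 *		return -errno;
 *	}
 */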
1317 
1318 static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);
1319 
1320 int btf_load_into_kernel(struct btf *btf,
1321 			 char *log_buf, size_t log_sz, __u32 log_level,
1322 			 int token_fd)
1323 {
1324 	LIBBPF_OPTS(bpf_btf_load_opts, opts);
1325 	__u32 buf_sz = 0, raw_size;
1326 	char *buf = NULL, *tmp;
1327 	void *raw_data;
1328 	int err = 0;
1329 
1330 	if (btf->fd >= 0)
1331 		return libbpf_err(-EEXIST);
1332 	if (log_sz && !log_buf)
1333 		return libbpf_err(-EINVAL);
1334 
1335 	/* cache native raw data representation */
1336 	raw_data = btf_get_raw_data(btf, &raw_size, false);
1337 	if (!raw_data) {
1338 		err = -ENOMEM;
1339 		goto done;
1340 	}
1341 	btf->raw_size = raw_size;
1342 	btf->raw_data = raw_data;
1343 
1344 retry_load:
1345 	/* if log_level is 0, we won't provide log_buf/log_size to the kernel
1346 	 * initially. Only if BTF loading fails do we bump log_level to 1 and
1347 	 * retry, using either an auto-allocated or the custom log_buf. This way
1348 	 * a non-NULL custom log_buf provides a buffer just in case, while we
1349 	 * hope for a successful load with no need for the log.
1350 	 */
1351 	if (log_level) {
1352 		/* if caller didn't provide custom log_buf, we'll keep
1353 		 * allocating our own progressively bigger buffers for BTF
1354 		 * verification log
1355 		 */
1356 		if (!log_buf) {
1357 			buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2);
1358 			tmp = realloc(buf, buf_sz);
1359 			if (!tmp) {
1360 				err = -ENOMEM;
1361 				goto done;
1362 			}
1363 			buf = tmp;
1364 			buf[0] = '\0';
1365 		}
1366 
1367 		opts.log_buf = log_buf ? log_buf : buf;
1368 		opts.log_size = log_buf ? log_sz : buf_sz;
1369 		opts.log_level = log_level;
1370 	}
1371 
1372 	opts.token_fd = token_fd;
1373 	btf->fd = bpf_btf_load(raw_data, raw_size, &opts);
1374 	if (btf->fd < 0) {
1375 		/* time to turn on verbose mode and try again */
1376 		if (log_level == 0) {
1377 			log_level = 1;
1378 			goto retry_load;
1379 		}
1380 		/* only retry if caller didn't provide custom log_buf, but
1381 		 * make sure we can never overflow buf_sz
1382 		 */
1383 		if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2)
1384 			goto retry_load;
1385 
1386 		err = -errno;
1387 		pr_warn("BTF loading error: %d\n", err);
1388 		/* don't print out contents of custom log_buf */
1389 		if (!log_buf && buf[0])
1390 			pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf);
1391 	}
1392 
1393 done:
1394 	free(buf);
1395 	return libbpf_err(err);
1396 }
1397 
1398 int btf__load_into_kernel(struct btf *btf)
1399 {
1400 	return btf_load_into_kernel(btf, NULL, 0, 0, 0);
1401 }
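
/*
 * Loading sketch (hypothetical caller code, not part of libbpf): on success
 * the kernel-assigned BTF FD is retrievable with btf__fd(); on failure errno
 * is set and the retry-with-log logic above will have emitted the
 * verification log:
 *
 *	if (btf__load_into_kernel(btf))
 *		return -errno;
 *	printf("loaded BTF, fd=%d\n", btf__fd(btf));
 */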
1402 
1403 int btf__fd(const struct btf *btf)
1404 {
1405 	return btf->fd;
1406 }
1407 
1408 void btf__set_fd(struct btf *btf, int fd)
1409 {
1410 	btf->fd = fd;
1411 }
1412 
1413 static const void *btf_strs_data(const struct btf *btf)
1414 {
1415 	return btf->strs_data ? btf->strs_data : strset__data(btf->strs_set);
1416 }
1417 
1418 static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian)
1419 {
1420 	struct btf_header *hdr = btf->hdr;
1421 	struct btf_type *t;
1422 	void *data, *p;
1423 	__u32 data_sz;
1424 	int i;
1425 
1426 	data = swap_endian ? btf->raw_data_swapped : btf->raw_data;
1427 	if (data) {
1428 		*size = btf->raw_size;
1429 		return data;
1430 	}
1431 
1432 	data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len;
1433 	data = calloc(1, data_sz);
1434 	if (!data)
1435 		return NULL;
1436 	p = data;
1437 
1438 	memcpy(p, hdr, hdr->hdr_len);
1439 	if (swap_endian)
1440 		btf_bswap_hdr(p);
1441 	p += hdr->hdr_len;
1442 
1443 	memcpy(p, btf->types_data, hdr->type_len);
1444 	if (swap_endian) {
1445 		for (i = 0; i < btf->nr_types; i++) {
1446 			t = p + btf->type_offs[i];
1447 			/* btf_bswap_type_rest() relies on native t->info, so
1448 			 * we swap base type info after we swapped all the
1449 			 * additional information
1450 			 */
1451 			if (btf_bswap_type_rest(t))
1452 				goto err_out;
1453 			btf_bswap_type_base(t);
1454 		}
1455 	}
1456 	p += hdr->type_len;
1457 
1458 	memcpy(p, btf_strs_data(btf), hdr->str_len);
1459 	p += hdr->str_len;
1460 
1461 	*size = data_sz;
1462 	return data;
1463 err_out:
1464 	free(data);
1465 	return NULL;
1466 }
1467 
1468 const void *btf__raw_data(const struct btf *btf_ro, __u32 *size)
1469 {
1470 	struct btf *btf = (struct btf *)btf_ro;
1471 	__u32 data_sz;
1472 	void *data;
1473 
1474 	data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian);
1475 	if (!data)
1476 		return errno = ENOMEM, NULL;
1477 
1478 	btf->raw_size = data_sz;
1479 	if (btf->swapped_endian)
1480 		btf->raw_data_swapped = data;
1481 	else
1482 		btf->raw_data = data;
1483 	*size = data_sz;
1484 	return data;
1485 }
1486 
1487 __attribute__((alias("btf__raw_data")))
1488 const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
1489 
1490 const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
1491 {
1492 	if (offset < btf->start_str_off)
1493 		return btf__str_by_offset(btf->base_btf, offset);
1494 	else if (offset - btf->start_str_off < btf->hdr->str_len)
1495 		return btf_strs_data(btf) + (offset - btf->start_str_off);
1496 	else
1497 		return errno = EINVAL, NULL;
1498 }
1499 
1500 const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
1501 {
1502 	return btf__str_by_offset(btf, offset);
1503 }
1504 
1505 struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
1506 {
1507 	struct bpf_btf_info btf_info;
1508 	__u32 len = sizeof(btf_info);
1509 	__u32 last_size;
1510 	struct btf *btf;
1511 	void *ptr;
1512 	int err;
1513 
1514 	/* we won't know btf_size until we call bpf_btf_get_info_by_fd(), so
1515 	 * let's start with a sane default - 4KiB here - and resize it only if
1516 	 * bpf_btf_get_info_by_fd() needs a bigger buffer.
1517 	 */
1518 	last_size = 4096;
1519 	ptr = malloc(last_size);
1520 	if (!ptr)
1521 		return ERR_PTR(-ENOMEM);
1522 
1523 	memset(&btf_info, 0, sizeof(btf_info));
1524 	btf_info.btf = ptr_to_u64(ptr);
1525 	btf_info.btf_size = last_size;
1526 	err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
1527 
1528 	if (!err && btf_info.btf_size > last_size) {
1529 		void *temp_ptr;
1530 
1531 		last_size = btf_info.btf_size;
1532 		temp_ptr = realloc(ptr, last_size);
1533 		if (!temp_ptr) {
1534 			btf = ERR_PTR(-ENOMEM);
1535 			goto exit_free;
1536 		}
1537 		ptr = temp_ptr;
1538 
1539 		len = sizeof(btf_info);
1540 		memset(&btf_info, 0, sizeof(btf_info));
1541 		btf_info.btf = ptr_to_u64(ptr);
1542 		btf_info.btf_size = last_size;
1543 
1544 		err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
1545 	}
1546 
1547 	if (err || btf_info.btf_size > last_size) {
1548 		btf = err ? ERR_PTR(-errno) : ERR_PTR(-E2BIG);
1549 		goto exit_free;
1550 	}
1551 
1552 	btf = btf_new(ptr, btf_info.btf_size, base_btf);
1553 
1554 exit_free:
1555 	free(ptr);
1556 	return btf;
1557 }
1558 
1559 struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
1560 {
1561 	struct btf *btf;
1562 	int btf_fd;
1563 
1564 	btf_fd = bpf_btf_get_fd_by_id(id);
1565 	if (btf_fd < 0)
1566 		return libbpf_err_ptr(-errno);
1567 
1568 	btf = btf_get_from_fd(btf_fd, base_btf);
1569 	close(btf_fd);
1570 
1571 	return libbpf_ptr(btf);
1572 }
1573 
1574 struct btf *btf__load_from_kernel_by_id(__u32 id)
1575 {
1576 	return btf__load_from_kernel_by_id_split(id, NULL);
1577 }
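
/*
 * Fetch sketch (hypothetical caller code, not part of libbpf): *btf_id* here
 * would come from, e.g., bpf_prog_info.btf_id of an already-loaded program:
 *
 *	struct btf *btf = btf__load_from_kernel_by_id(btf_id);
 *
 *	if (!btf)
 *		return -errno;
 */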
1578 
1579 static void btf_invalidate_raw_data(struct btf *btf)
1580 {
1581 	if (btf->raw_data) {
1582 		free(btf->raw_data);
1583 		btf->raw_data = NULL;
1584 	}
1585 	if (btf->raw_data_swapped) {
1586 		free(btf->raw_data_swapped);
1587 		btf->raw_data_swapped = NULL;
1588 	}
1589 }
1590 
1591 /* Ensure BTF is ready to be modified (by splitting it into three memory
1592  * regions for header, types, and strings). Also invalidate cached
1593  * raw_data, if any.
1594  */
1595 static int btf_ensure_modifiable(struct btf *btf)
1596 {
1597 	void *hdr, *types;
1598 	struct strset *set = NULL;
1599 	int err = -ENOMEM;
1600 
1601 	if (btf_is_modifiable(btf)) {
1602 		/* any BTF modification invalidates raw_data */
1603 		btf_invalidate_raw_data(btf);
1604 		return 0;
1605 	}
1606 
1607 	/* split raw data into three memory regions */
1608 	hdr = malloc(btf->hdr->hdr_len);
1609 	types = malloc(btf->hdr->type_len);
1610 	if (!hdr || !types)
1611 		goto err_out;
1612 
1613 	memcpy(hdr, btf->hdr, btf->hdr->hdr_len);
1614 	memcpy(types, btf->types_data, btf->hdr->type_len);
1615 
1616 	/* build lookup index for all strings */
1617 	set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr->str_len);
1618 	if (IS_ERR(set)) {
1619 		err = PTR_ERR(set);
1620 		goto err_out;
1621 	}
1622 
1623 	/* only when everything was successful, update internal state */
1624 	btf->hdr = hdr;
1625 	btf->types_data = types;
1626 	btf->types_data_cap = btf->hdr->type_len;
1627 	btf->strs_data = NULL;
1628 	btf->strs_set = set;
1629 	/* if BTF was created from scratch, all strings are guaranteed to be
1630 	 * unique and deduplicated
1631 	 */
1632 	if (btf->hdr->str_len == 0)
1633 		btf->strs_deduped = true;
1634 	if (!btf->base_btf && btf->hdr->str_len == 1)
1635 		btf->strs_deduped = true;
1636 
1637 	/* invalidate raw_data representation */
1638 	btf_invalidate_raw_data(btf);
1639 
1640 	return 0;
1641 
1642 err_out:
1643 	strset__free(set);
1644 	free(hdr);
1645 	free(types);
1646 	return err;
1647 }
1648 
1649 /* Find an offset in BTF string section that corresponds to a given string *s*.
1650  * Returns:
1651  *   - >0 offset into string section, if string is found;
1652  *   - -ENOENT, if string is not in the string section;
1653  *   - <0, on any other error.
1654  */
1655 int btf__find_str(struct btf *btf, const char *s)
1656 {
1657 	int off;
1658 
1659 	if (btf->base_btf) {
1660 		off = btf__find_str(btf->base_btf, s);
1661 		if (off != -ENOENT)
1662 			return off;
1663 	}
1664 
1665 	/* BTF needs to be in a modifiable state to build string lookup index */
1666 	if (btf_ensure_modifiable(btf))
1667 		return libbpf_err(-ENOMEM);
1668 
1669 	off = strset__find_str(btf->strs_set, s);
1670 	if (off < 0)
1671 		return libbpf_err(off);
1672 
1673 	return btf->start_str_off + off;
1674 }
1675 
1676 /* Add a string *s* to the BTF string section.
1677  * Returns:
1678  *   - > 0 offset into string section, on success;
1679  *   - < 0, on error.
1680  */
1681 int btf__add_str(struct btf *btf, const char *s)
1682 {
1683 	int off;
1684 
1685 	if (btf->base_btf) {
1686 		off = btf__find_str(btf->base_btf, s);
1687 		if (off != -ENOENT)
1688 			return off;
1689 	}
1690 
1691 	if (btf_ensure_modifiable(btf))
1692 		return libbpf_err(-ENOMEM);
1693 
1694 	off = strset__add_str(btf->strs_set, s);
1695 	if (off < 0)
1696 		return libbpf_err(off);
1697 
1698 	btf->hdr->str_len = strset__data_size(btf->strs_set);
1699 
1700 	return btf->start_str_off + off;
1701 }
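
/*
 * String section sketch (hypothetical caller code, not part of libbpf):
 * strings are deduplicated on addition, so re-adding an existing string
 * returns the same offset, while btf__find_str() only probes:
 *
 *	int off1 = btf__add_str(btf, "foo");
 *	int off2 = btf__add_str(btf, "foo");	// off2 == off1
 *	int off3 = btf__find_str(btf, "bar");	// -ENOENT if never added
 */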
1702 
1703 static void *btf_add_type_mem(struct btf *btf, size_t add_sz)
1704 {
1705 	return libbpf_add_mem(&btf->types_data, &btf->types_data_cap, 1,
1706 			      btf->hdr->type_len, UINT_MAX, add_sz);
1707 }
1708 
1709 static void btf_type_inc_vlen(struct btf_type *t)
1710 {
1711 	t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t));
1712 }
1713 
1714 static int btf_commit_type(struct btf *btf, int data_sz)
1715 {
1716 	int err;
1717 
1718 	err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
1719 	if (err)
1720 		return libbpf_err(err);
1721 
1722 	btf->hdr->type_len += data_sz;
1723 	btf->hdr->str_off += data_sz;
1724 	btf->nr_types++;
1725 	return btf->start_id + btf->nr_types - 1;
1726 }
1727 
1728 struct btf_pipe {
1729 	const struct btf *src;
1730 	struct btf *dst;
1731 	struct hashmap *str_off_map; /* map string offsets from src to dst */
1732 };
1733 
1734 static int btf_rewrite_str(__u32 *str_off, void *ctx)
1735 {
1736 	struct btf_pipe *p = ctx;
1737 	long mapped_off;
1738 	int off, err;
1739 
1740 	if (!*str_off) /* nothing to do for empty strings */
1741 		return 0;
1742 
1743 	if (p->str_off_map &&
1744 	    hashmap__find(p->str_off_map, *str_off, &mapped_off)) {
1745 		*str_off = mapped_off;
1746 		return 0;
1747 	}
1748 
1749 	off = btf__add_str(p->dst, btf__str_by_offset(p->src, *str_off));
1750 	if (off < 0)
1751 		return off;
1752 
1753 	/* Remember string mapping from src to dst.  It avoids
1754 	 * performing expensive string comparisons.
1755 	 */
1756 	if (p->str_off_map) {
1757 		err = hashmap__append(p->str_off_map, *str_off, off);
1758 		if (err)
1759 			return err;
1760 	}
1761 
1762 	*str_off = off;
1763 	return 0;
1764 }
1765 
1766 int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
1767 {
1768 	struct btf_pipe p = { .src = src_btf, .dst = btf };
1769 	struct btf_type *t;
1770 	int sz, err;
1771 
1772 	sz = btf_type_size(src_type);
1773 	if (sz < 0)
1774 		return libbpf_err(sz);
1775 
1776 	/* deconstruct BTF, if necessary, and invalidate raw_data */
1777 	if (btf_ensure_modifiable(btf))
1778 		return libbpf_err(-ENOMEM);
1779 
1780 	t = btf_add_type_mem(btf, sz);
1781 	if (!t)
1782 		return libbpf_err(-ENOMEM);
1783 
1784 	memcpy(t, src_type, sz);
1785 
1786 	err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
1787 	if (err)
1788 		return libbpf_err(err);
1789 
1790 	return btf_commit_type(btf, sz);
1791 }
1792 
1793 static int btf_rewrite_type_ids(__u32 *type_id, void *ctx)
1794 {
1795 	struct btf *btf = ctx;
1796 
1797 	if (!*type_id) /* nothing to do for VOID references */
1798 		return 0;
1799 
1800 	/* we haven't updated btf's type count yet, so
1801 	 * btf->start_id + btf->nr_types - 1 is the type ID offset we should
1802 	 * add to all newly added BTF types
1803 	 */
1804 	*type_id += btf->start_id + btf->nr_types - 1;
1805 	return 0;
1806 }
1807 
1808 static size_t btf_dedup_identity_hash_fn(long key, void *ctx);
1809 static bool btf_dedup_equal_fn(long k1, long k2, void *ctx);
1810 
1811 int btf__add_btf(struct btf *btf, const struct btf *src_btf)
1812 {
1813 	struct btf_pipe p = { .src = src_btf, .dst = btf };
1814 	int data_sz, sz, cnt, i, err, old_strs_len;
1815 	__u32 *off;
1816 	void *t;
1817 
1818 	/* appending split BTF isn't supported yet */
1819 	if (src_btf->base_btf)
1820 		return libbpf_err(-ENOTSUP);
1821 
1822 	/* deconstruct BTF, if necessary, and invalidate raw_data */
1823 	if (btf_ensure_modifiable(btf))
1824 		return libbpf_err(-ENOMEM);
1825 
1826 	/* remember original strings section size in case we have to roll back
1827 	 * partial strings section changes
1828 	 */
1829 	old_strs_len = btf->hdr->str_len;
1830 
1831 	data_sz = src_btf->hdr->type_len;
1832 	cnt = btf__type_cnt(src_btf) - 1;
1833 
1834 	/* pre-allocate enough memory for new types */
1835 	t = btf_add_type_mem(btf, data_sz);
1836 	if (!t)
1837 		return libbpf_err(-ENOMEM);
1838 
1839 	/* pre-allocate enough memory for type offset index for new types */
1840 	off = btf_add_type_offs_mem(btf, cnt);
1841 	if (!off)
1842 		return libbpf_err(-ENOMEM);
1843 
1844 	/* Map the string offsets from src_btf to the offsets from btf to improve performance */
1845 	p.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
1846 	if (IS_ERR(p.str_off_map))
1847 		return libbpf_err(-ENOMEM);
1848 
1849 	/* bulk copy types data for all types from src_btf */
1850 	memcpy(t, src_btf->types_data, data_sz);
1851 
1852 	for (i = 0; i < cnt; i++) {
1853 		sz = btf_type_size(t);
1854 		if (sz < 0) {
1855 			/* unlikely, has to be corrupted src_btf */
1856 			err = sz;
1857 			goto err_out;
1858 		}
1859 
1860 		/* fill out type ID to type offset mapping for lookups by type ID */
1861 		*off = t - btf->types_data;
1862 
1863 		/* add, dedup, and remap strings referenced by this BTF type */
1864 		err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
1865 		if (err)
1866 			goto err_out;
1867 
1868 		/* remap all type IDs referenced from this BTF type */
1869 		err = btf_type_visit_type_ids(t, btf_rewrite_type_ids, btf);
1870 		if (err)
1871 			goto err_out;
1872 
1873 		/* go to next type data and type offset index entry */
1874 		t += sz;
1875 		off++;
1876 	}
1877 
1878 	/* Up until now any of the copied type data was effectively invisible,
1879 	 * so if we exited early before this point due to error, BTF would be
1880 	 * effectively unmodified. There would be extra internal memory
1881 	 * pre-allocated, but it would not be available for querying.  But now
1882 	 * that we've copied and rewritten all the data successfully, we can
1883 	 * update type count and various internal offsets and sizes to
1884 	 * "commit" the changes and make them visible to the outside world.
1885 	 */
1886 	btf->hdr->type_len += data_sz;
1887 	btf->hdr->str_off += data_sz;
1888 	btf->nr_types += cnt;
1889 
1890 	hashmap__free(p.str_off_map);
1891 
1892 	/* return type ID of the first added BTF type */
1893 	return btf->start_id + btf->nr_types - cnt;
1894 err_out:
1895 	/* zero out preallocated memory as if it was just allocated with
1896 	 * libbpf_add_mem()
1897 	 */
1898 	memset(btf->types_data + btf->hdr->type_len, 0, data_sz);
1899 	memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len);
1900 
1901 	/* and now restore original strings section size; types data size
1902 	 * wasn't modified, so doesn't need restoring, see big comment above
1903 	 */
1904 	btf->hdr->str_len = old_strs_len;
1905 
1906 	hashmap__free(p.str_off_map);
1907 
1908 	return libbpf_err(err);
1909 }
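
/*
 * Merge sketch (hypothetical caller code, not part of libbpf): the returned
 * ID of the first appended type lets a caller translate src_btf type IDs into
 * dst_btf ones:
 *
 *	int first_id = btf__add_btf(dst_btf, src_btf);
 *
 *	if (first_id < 0)
 *		return first_id;
 *	// src_btf type [i] is now dst_btf type [first_id + i - 1]
 */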
1910 
1911 /*
1912  * Append new BTF_KIND_INT type with:
1913  *   - *name* - non-empty, non-NULL type name;
1914  *   - *byte_sz* - power-of-2 (1, 2, 4, ..) size of the type, in bytes;
1915  *   - encoding is a combination of BTF_INT_SIGNED, BTF_INT_CHAR, BTF_INT_BOOL.
1916  * Returns:
1917  *   - >0, type ID of newly added BTF type;
1918  *   - <0, on error.
1919  */
1920 int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding)
1921 {
1922 	struct btf_type *t;
1923 	int sz, name_off;
1924 
1925 	/* non-empty name */
1926 	if (!name || !name[0])
1927 		return libbpf_err(-EINVAL);
1928 	/* byte_sz must be power of 2 */
1929 	if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16)
1930 		return libbpf_err(-EINVAL);
1931 	if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL))
1932 		return libbpf_err(-EINVAL);
1933 
1934 	/* deconstruct BTF, if necessary, and invalidate raw_data */
1935 	if (btf_ensure_modifiable(btf))
1936 		return libbpf_err(-ENOMEM);
1937 
1938 	sz = sizeof(struct btf_type) + sizeof(int);
1939 	t = btf_add_type_mem(btf, sz);
1940 	if (!t)
1941 		return libbpf_err(-ENOMEM);
1942 
1943 	/* if something goes wrong later, we might end up with an extra string,
1944 	 * but that shouldn't be a problem, because the BTF can't be constructed
1945 	 * completely anyway and will most probably just be discarded
1946 	 */
1947 	name_off = btf__add_str(btf, name);
1948 	if (name_off < 0)
1949 		return name_off;
1950 
1951 	t->name_off = name_off;
1952 	t->info = btf_type_info(BTF_KIND_INT, 0, 0);
1953 	t->size = byte_sz;
1954 	/* set INT info, we don't allow setting legacy bit offset/size */
1955 	*(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8);
1956 
1957 	return btf_commit_type(btf, sz);
1958 }
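
/*
 * Construction sketch (hypothetical caller code, not part of libbpf):
 * building a tiny BTF from scratch with an 'int' and a pointer to it:
 *
 *	struct btf *btf = btf__new_empty();
 *	int int_id, ptr_id;
 *
 *	if (!btf)
 *		return -errno;
 *	int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);	// type [1]
 *	ptr_id = btf__add_ptr(btf, int_id);			// type [2]
 */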
1959 
1960 /*
1961  * Append new BTF_KIND_FLOAT type with:
1962  *   - *name* - non-empty, non-NULL type name;
1963  *   - *byte_sz* - size of the type (2, 4, 8, 12 or 16), in bytes;
1964  * Returns:
1965  *   - >0, type ID of newly added BTF type;
1966  *   - <0, on error.
1967  */
1968 int btf__add_float(struct btf *btf, const char *name, size_t byte_sz)
1969 {
1970 	struct btf_type *t;
1971 	int sz, name_off;
1972 
1973 	/* non-empty name */
1974 	if (!name || !name[0])
1975 		return libbpf_err(-EINVAL);
1976 
1977 	/* byte_sz must be one of the explicitly allowed values */
1978 	if (byte_sz != 2 && byte_sz != 4 && byte_sz != 8 && byte_sz != 12 &&
1979 	    byte_sz != 16)
1980 		return libbpf_err(-EINVAL);
1981 
1982 	if (btf_ensure_modifiable(btf))
1983 		return libbpf_err(-ENOMEM);
1984 
1985 	sz = sizeof(struct btf_type);
1986 	t = btf_add_type_mem(btf, sz);
1987 	if (!t)
1988 		return libbpf_err(-ENOMEM);
1989 
1990 	name_off = btf__add_str(btf, name);
1991 	if (name_off < 0)
1992 		return name_off;
1993 
1994 	t->name_off = name_off;
1995 	t->info = btf_type_info(BTF_KIND_FLOAT, 0, 0);
1996 	t->size = byte_sz;
1997 
1998 	return btf_commit_type(btf, sz);
1999 }
2000 
2001 /* it's completely legal to append BTF types with type IDs pointing forward to
2002  * types that haven't been appended yet, so we only make sure that the ID
2003  * looks sane; we can't guarantee that it will always be valid
2004  */
2005 static int validate_type_id(int id)
2006 {
2007 	if (id < 0 || id > BTF_MAX_NR_TYPES)
2008 		return -EINVAL;
2009 	return 0;
2010 }
2011 
2012 /* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT */
2013 static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id)
2014 {
2015 	struct btf_type *t;
2016 	int sz, name_off = 0;
2017 
2018 	if (validate_type_id(ref_type_id))
2019 		return libbpf_err(-EINVAL);
2020 
2021 	if (btf_ensure_modifiable(btf))
2022 		return libbpf_err(-ENOMEM);
2023 
2024 	sz = sizeof(struct btf_type);
2025 	t = btf_add_type_mem(btf, sz);
2026 	if (!t)
2027 		return libbpf_err(-ENOMEM);
2028 
2029 	if (name && name[0]) {
2030 		name_off = btf__add_str(btf, name);
2031 		if (name_off < 0)
2032 			return name_off;
2033 	}
2034 
2035 	t->name_off = name_off;
2036 	t->info = btf_type_info(kind, 0, 0);
2037 	t->type = ref_type_id;
2038 
2039 	return btf_commit_type(btf, sz);
2040 }
2041 
2042 /*
2043  * Append new BTF_KIND_PTR type with:
2044  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2045  * Returns:
2046  *   - >0, type ID of newly added BTF type;
2047  *   - <0, on error.
2048  */
2049 int btf__add_ptr(struct btf *btf, int ref_type_id)
2050 {
2051 	return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id);
2052 }
2053 
2054 /*
2055  * Append new BTF_KIND_ARRAY type with:
2056  *   - *index_type_id* - type ID of the type describing array index;
2057  *   - *elem_type_id* - type ID of the type describing array element;
2058  *   - *nr_elems* - the number of elements in the array;
2059  * Returns:
2060  *   - >0, type ID of newly added BTF type;
2061  *   - <0, on error.
2062  */
2063 int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems)
2064 {
2065 	struct btf_type *t;
2066 	struct btf_array *a;
2067 	int sz;
2068 
2069 	if (validate_type_id(index_type_id) || validate_type_id(elem_type_id))
2070 		return libbpf_err(-EINVAL);
2071 
2072 	if (btf_ensure_modifiable(btf))
2073 		return libbpf_err(-ENOMEM);
2074 
2075 	sz = sizeof(struct btf_type) + sizeof(struct btf_array);
2076 	t = btf_add_type_mem(btf, sz);
2077 	if (!t)
2078 		return libbpf_err(-ENOMEM);
2079 
2080 	t->name_off = 0;
2081 	t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0);
2082 	t->size = 0;
2083 
2084 	a = btf_array(t);
2085 	a->type = elem_type_id;
2086 	a->index_type = index_type_id;
2087 	a->nelems = nr_elems;
2088 
2089 	return btf_commit_type(btf, sz);
2090 }
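
/* Usage sketch (illustrative only): an anonymous `int[10]` array, where
 * `int_id` is assumed to be a previously added BTF_KIND_INT type:
 *
 *	int arr_id = btf__add_array(btf, int_id, int_id, 10);
 *
 * Here the same `int` type doubles as the index type, which is how compilers
 * typically emit arrays.
 */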
2091 
2092 /* generic STRUCT/UNION append function */
2093 static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz)
2094 {
2095 	struct btf_type *t;
2096 	int sz, name_off = 0;
2097 
2098 	if (btf_ensure_modifiable(btf))
2099 		return libbpf_err(-ENOMEM);
2100 
2101 	sz = sizeof(struct btf_type);
2102 	t = btf_add_type_mem(btf, sz);
2103 	if (!t)
2104 		return libbpf_err(-ENOMEM);
2105 
2106 	if (name && name[0]) {
2107 		name_off = btf__add_str(btf, name);
2108 		if (name_off < 0)
2109 			return name_off;
2110 	}
2111 
2112 	/* start out with vlen=0 and no kflag; this will be adjusted when
2113 	 * adding each member
2114 	 */
2115 	t->name_off = name_off;
2116 	t->info = btf_type_info(kind, 0, 0);
2117 	t->size = bytes_sz;
2118 
2119 	return btf_commit_type(btf, sz);
2120 }
2121 
2122 /*
2123  * Append new BTF_KIND_STRUCT type with:
2124  *   - *name* - name of the struct, can be NULL or empty for anonymous structs;
2125  *   - *byte_sz* - size of the struct, in bytes;
2126  *
2127  * Struct initially has no fields in it. Fields can be added by
2128  * btf__add_field() right after btf__add_struct() succeeds.
2129  *
2130  * Returns:
2131  *   - >0, type ID of newly added BTF type;
2132  *   - <0, on error.
2133  */
2134 int btf__add_struct(struct btf *btf, const char *name, __u32 byte_sz)
2135 {
2136 	return btf_add_composite(btf, BTF_KIND_STRUCT, name, byte_sz);
2137 }
2138 
2139 /*
2140  * Append new BTF_KIND_UNION type with:
2141  *   - *name* - name of the union, can be NULL or empty for anonymous union;
2142  *   - *byte_sz* - size of the union, in bytes;
2143  *
2144  * Union initially has no fields in it. Fields can be added by
2145  * btf__add_field() right after btf__add_union() succeeds. All fields
2146  * should have *bit_offset* of 0.
2147  *
2148  * Returns:
2149  *   - >0, type ID of newly added BTF type;
2150  *   - <0, on error.
2151  */
2152 int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz)
2153 {
2154 	return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz);
2155 }
2156 
2157 static struct btf_type *btf_last_type(struct btf *btf)
2158 {
2159 	return btf_type_by_id(btf, btf__type_cnt(btf) - 1);
2160 }
2161 
2162 /*
2163  * Append new field for the current STRUCT/UNION type with:
2164  *   - *name* - name of the field, can be NULL or empty for anonymous field;
2165  *   - *type_id* - type ID for the type describing field type;
2166  *   - *bit_offset* - bit offset of the start of the field within struct/union;
2167  *   - *bit_size* - bit size of a bitfield, 0 for non-bitfield fields;
2168  * Returns:
2169  *   -  0, on success;
2170  *   - <0, on error.
2171  */
2172 int btf__add_field(struct btf *btf, const char *name, int type_id,
2173 		   __u32 bit_offset, __u32 bit_size)
2174 {
2175 	struct btf_type *t;
2176 	struct btf_member *m;
2177 	bool is_bitfield;
2178 	int sz, name_off = 0;
2179 
2180 	/* last type should be union/struct */
2181 	if (btf->nr_types == 0)
2182 		return libbpf_err(-EINVAL);
2183 	t = btf_last_type(btf);
2184 	if (!btf_is_composite(t))
2185 		return libbpf_err(-EINVAL);
2186 
2187 	if (validate_type_id(type_id))
2188 		return libbpf_err(-EINVAL);
2189 	/* best-effort bit field offset/size enforcement */
2190 	is_bitfield = bit_size || (bit_offset % 8 != 0);
2191 	if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff))
2192 		return libbpf_err(-EINVAL);
2193 
2194 	/* only offset 0 is allowed for unions */
2195 	if (btf_is_union(t) && bit_offset)
2196 		return libbpf_err(-EINVAL);
2197 
2198 	/* decompose and invalidate raw data */
2199 	if (btf_ensure_modifiable(btf))
2200 		return libbpf_err(-ENOMEM);
2201 
2202 	sz = sizeof(struct btf_member);
2203 	m = btf_add_type_mem(btf, sz);
2204 	if (!m)
2205 		return libbpf_err(-ENOMEM);
2206 
2207 	if (name && name[0]) {
2208 		name_off = btf__add_str(btf, name);
2209 		if (name_off < 0)
2210 			return name_off;
2211 	}
2212 
2213 	m->name_off = name_off;
2214 	m->type = type_id;
2215 	m->offset = bit_offset | (bit_size << 24);
2216 
2217 	/* btf_add_type_mem can invalidate t pointer */
2218 	t = btf_last_type(btf);
2219 	/* update parent type's vlen and kflag */
2220 	t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t));
2221 
2222 	btf->hdr->type_len += sz;
2223 	btf->hdr->str_off += sz;
2224 	return 0;
2225 }
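
/* Usage sketch (illustrative only): an 8-byte struct with two 4-byte int
 * fields, assuming `int_id` refers to a previously added 4-byte int:
 *
 *	int sid = btf__add_struct(btf, "point", 8);
 *	btf__add_field(btf, "x", int_id, 0, 0);   (bit offset 0)
 *	btf__add_field(btf, "y", int_id, 32, 0);  (bit offset 32)
 *
 * Passing a non-zero last argument would instead describe a bitfield of that
 * bit size.
 */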
2226 
2227 static int btf_add_enum_common(struct btf *btf, const char *name, __u32 byte_sz,
2228 			       bool is_signed, __u8 kind)
2229 {
2230 	struct btf_type *t;
2231 	int sz, name_off = 0;
2232 
2233 	/* byte_sz must be power of 2 */
2234 	if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8)
2235 		return libbpf_err(-EINVAL);
2236 
2237 	if (btf_ensure_modifiable(btf))
2238 		return libbpf_err(-ENOMEM);
2239 
2240 	sz = sizeof(struct btf_type);
2241 	t = btf_add_type_mem(btf, sz);
2242 	if (!t)
2243 		return libbpf_err(-ENOMEM);
2244 
2245 	if (name && name[0]) {
2246 		name_off = btf__add_str(btf, name);
2247 		if (name_off < 0)
2248 			return name_off;
2249 	}
2250 
2251 	/* start out with vlen=0; it will be adjusted when adding enum values */
2252 	t->name_off = name_off;
2253 	t->info = btf_type_info(kind, 0, is_signed);
2254 	t->size = byte_sz;
2255 
2256 	return btf_commit_type(btf, sz);
2257 }
2258 
2259 /*
2260  * Append new BTF_KIND_ENUM type with:
2261  *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
2262  *   - *byte_sz* - size of the enum, in bytes.
2263  *
2264  * Enum initially has no enum values in it (and corresponds to enum forward
2265  * declaration). Enumerator values can be added by btf__add_enum_value()
2266  * immediately after btf__add_enum() succeeds.
2267  *
2268  * Returns:
2269  *   - >0, type ID of newly added BTF type;
2270  *   - <0, on error.
2271  */
2272 int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
2273 {
2274 	/*
2275 	 * set the signedness to unsigned; it will change to signed
2276 	 * if any later enumerator is negative.
2277 	 */
2278 	return btf_add_enum_common(btf, name, byte_sz, false, BTF_KIND_ENUM);
2279 }
2280 
2281 /*
2282  * Append new enum value for the current ENUM type with:
2283  *   - *name* - name of the enumerator value, can't be NULL or empty;
2284  *   - *value* - integer value corresponding to enum value *name*;
2285  * Returns:
2286  *   -  0, on success;
2287  *   - <0, on error.
2288  */
2289 int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
2290 {
2291 	struct btf_type *t;
2292 	struct btf_enum *v;
2293 	int sz, name_off;
2294 
2295 	/* last type should be BTF_KIND_ENUM */
2296 	if (btf->nr_types == 0)
2297 		return libbpf_err(-EINVAL);
2298 	t = btf_last_type(btf);
2299 	if (!btf_is_enum(t))
2300 		return libbpf_err(-EINVAL);
2301 
2302 	/* non-empty name */
2303 	if (!name || !name[0])
2304 		return libbpf_err(-EINVAL);
2305 	if (value < INT_MIN || value > UINT_MAX)
2306 		return libbpf_err(-E2BIG);
2307 
2308 	/* decompose and invalidate raw data */
2309 	if (btf_ensure_modifiable(btf))
2310 		return libbpf_err(-ENOMEM);
2311 
2312 	sz = sizeof(struct btf_enum);
2313 	v = btf_add_type_mem(btf, sz);
2314 	if (!v)
2315 		return libbpf_err(-ENOMEM);
2316 
2317 	name_off = btf__add_str(btf, name);
2318 	if (name_off < 0)
2319 		return name_off;
2320 
2321 	v->name_off = name_off;
2322 	v->val = value;
2323 
2324 	/* update parent type's vlen */
2325 	t = btf_last_type(btf);
2326 	btf_type_inc_vlen(t);
2327 
2328 	/* if negative value, set signedness to signed */
2329 	if (value < 0)
2330 		t->info = btf_type_info(btf_kind(t), btf_vlen(t), true);
2331 
2332 	btf->hdr->type_len += sz;
2333 	btf->hdr->str_off += sz;
2334 	return 0;
2335 }
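
/* Usage sketch (illustrative only): a 4-byte enum with two enumerators:
 *
 *	int eid = btf__add_enum(btf, "color", 4);
 *	btf__add_enum_value(btf, "RED", 0);
 *	btf__add_enum_value(btf, "BLUE", 1);
 *
 * Adding a negative value, e.g. btf__add_enum_value(btf, "ERR", -1), would
 * also flip the enum's signedness to signed.
 */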
2336 
2337 /*
2338  * Append new BTF_KIND_ENUM64 type with:
2339  *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
2340  *   - *byte_sz* - size of the enum, in bytes.
2341  *   - *is_signed* - whether the enum values are signed or not;
2342  *
2343  * Enum initially has no enum values in it (and corresponds to enum forward
2344  * declaration). Enumerator values can be added by btf__add_enum64_value()
2345  * immediately after btf__add_enum64() succeeds.
2346  *
2347  * Returns:
2348  *   - >0, type ID of newly added BTF type;
2349  *   - <0, on error.
2350  */
2351 int btf__add_enum64(struct btf *btf, const char *name, __u32 byte_sz,
2352 		    bool is_signed)
2353 {
2354 	return btf_add_enum_common(btf, name, byte_sz, is_signed,
2355 				   BTF_KIND_ENUM64);
2356 }
2357 
2358 /*
2359  * Append new enum value for the current ENUM64 type with:
2360  *   - *name* - name of the enumerator value, can't be NULL or empty;
2361  *   - *value* - integer value corresponding to enum value *name*;
2362  * Returns:
2363  *   -  0, on success;
2364  *   - <0, on error.
2365  */
2366 int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value)
2367 {
2368 	struct btf_enum64 *v;
2369 	struct btf_type *t;
2370 	int sz, name_off;
2371 
2372 	/* last type should be BTF_KIND_ENUM64 */
2373 	if (btf->nr_types == 0)
2374 		return libbpf_err(-EINVAL);
2375 	t = btf_last_type(btf);
2376 	if (!btf_is_enum64(t))
2377 		return libbpf_err(-EINVAL);
2378 
2379 	/* non-empty name */
2380 	if (!name || !name[0])
2381 		return libbpf_err(-EINVAL);
2382 
2383 	/* decompose and invalidate raw data */
2384 	if (btf_ensure_modifiable(btf))
2385 		return libbpf_err(-ENOMEM);
2386 
2387 	sz = sizeof(struct btf_enum64);
2388 	v = btf_add_type_mem(btf, sz);
2389 	if (!v)
2390 		return libbpf_err(-ENOMEM);
2391 
2392 	name_off = btf__add_str(btf, name);
2393 	if (name_off < 0)
2394 		return name_off;
2395 
2396 	v->name_off = name_off;
2397 	v->val_lo32 = (__u32)value;
2398 	v->val_hi32 = value >> 32;
2399 
2400 	/* update parent type's vlen */
2401 	t = btf_last_type(btf);
2402 	btf_type_inc_vlen(t);
2403 
2404 	btf->hdr->type_len += sz;
2405 	btf->hdr->str_off += sz;
2406 	return 0;
2407 }
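
/* Usage sketch (illustrative only): an 8-byte unsigned ENUM64 whose
 * enumerator value doesn't fit into 32 bits:
 *
 *	int eid = btf__add_enum64(btf, "big_flags", 8, false);
 *	btf__add_enum64_value(btf, "FLAG_HI", 1ULL << 40);
 */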
2408 
2409 /*
2410  * Append new BTF_KIND_FWD type with:
2411  *   - *name*, non-empty/non-NULL name;
2412  *   - *fwd_kind*, kind of forward declaration, one of BTF_FWD_STRUCT,
2413  *     BTF_FWD_UNION, or BTF_FWD_ENUM;
2414  * Returns:
2415  *   - >0, type ID of newly added BTF type;
2416  *   - <0, on error.
2417  */
2418 int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind)
2419 {
2420 	if (!name || !name[0])
2421 		return libbpf_err(-EINVAL);
2422 
2423 	switch (fwd_kind) {
2424 	case BTF_FWD_STRUCT:
2425 	case BTF_FWD_UNION: {
2426 		struct btf_type *t;
2427 		int id;
2428 
2429 		id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0);
2430 		if (id <= 0)
2431 			return id;
2432 		t = btf_type_by_id(btf, id);
2433 		t->info = btf_type_info(BTF_KIND_FWD, 0, fwd_kind == BTF_FWD_UNION);
2434 		return id;
2435 	}
2436 	case BTF_FWD_ENUM:
2437 		/* enum forward in BTF currently is just an enum with no enum
2438 		 * values; we also assume a standard 4-byte size for it
2439 		 */
2440 		return btf__add_enum(btf, name, sizeof(int));
2441 	default:
2442 		return libbpf_err(-EINVAL);
2443 	}
2444 }
2445 
2446 /*
2447  * Append new BTF_KIND_TYPEDEF type with:
2448  *   - *name*, non-empty/non-NULL name;
2449  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2450  * Returns:
2451  *   - >0, type ID of newly added BTF type;
2452  *   - <0, on error.
2453  */
2454 int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id)
2455 {
2456 	if (!name || !name[0])
2457 		return libbpf_err(-EINVAL);
2458 
2459 	return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id);
2460 }
2461 
2462 /*
2463  * Append new BTF_KIND_VOLATILE type with:
2464  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2465  * Returns:
2466  *   - >0, type ID of newly added BTF type;
2467  *   - <0, on error.
2468  */
2469 int btf__add_volatile(struct btf *btf, int ref_type_id)
2470 {
2471 	return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id);
2472 }
2473 
2474 /*
2475  * Append new BTF_KIND_CONST type with:
2476  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2477  * Returns:
2478  *   - >0, type ID of newly added BTF type;
2479  *   - <0, on error.
2480  */
2481 int btf__add_const(struct btf *btf, int ref_type_id)
2482 {
2483 	return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id);
2484 }
2485 
2486 /*
2487  * Append new BTF_KIND_RESTRICT type with:
2488  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2489  * Returns:
2490  *   - >0, type ID of newly added BTF type;
2491  *   - <0, on error.
2492  */
2493 int btf__add_restrict(struct btf *btf, int ref_type_id)
2494 {
2495 	return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id);
2496 }
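
/* Usage sketch (illustrative only): modifier kinds are typically chained;
 * e.g., describing a pointer to const char, assuming `char_id` is an
 * existing BTF_KIND_INT describing `char`:
 *
 *	int const_id = btf__add_const(btf, char_id);
 *	int ptr_id = btf__add_ptr(btf, const_id);
 */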
2497 
2498 /*
2499  * Append new BTF_KIND_TYPE_TAG type with:
2500  *   - *value*, non-empty/non-NULL tag value;
2501  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2502  * Returns:
2503  *   - >0, type ID of newly added BTF type;
2504  *   - <0, on error.
2505  */
2506 int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
2507 {
2508 	if (!value || !value[0])
2509 		return libbpf_err(-EINVAL);
2510 
2511 	return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id);
2512 }
2513 
2514 /*
2515  * Append new BTF_KIND_FUNC type with:
2516  *   - *name*, non-empty/non-NULL name;
2517  *   - *linkage* - function linkage, one of BTF_FUNC_STATIC,
 *     BTF_FUNC_GLOBAL, or BTF_FUNC_EXTERN;
 *   - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet;
2518  * Returns:
2519  *   - >0, type ID of newly added BTF type;
2520  *   - <0, on error.
2521  */
2522 int btf__add_func(struct btf *btf, const char *name,
2523 		  enum btf_func_linkage linkage, int proto_type_id)
2524 {
2525 	int id;
2526 
2527 	if (!name || !name[0])
2528 		return libbpf_err(-EINVAL);
2529 	if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL &&
2530 	    linkage != BTF_FUNC_EXTERN)
2531 		return libbpf_err(-EINVAL);
2532 
2533 	id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id);
2534 	if (id > 0) {
2535 		struct btf_type *t = btf_type_by_id(btf, id);
2536 
2537 		t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0);
2538 	}
2539 	return libbpf_err(id);
2540 }
2541 
2542 /*
2543  * Append new BTF_KIND_FUNC_PROTO with:
2544  *   - *ret_type_id* - type ID for return result of a function.
2545  *
2546  * Function prototype initially has no arguments, but they can be added by
2547  * btf__add_func_param() one by one, immediately after
2548  * btf__add_func_proto() succeeded.
2549  *
2550  * Returns:
2551  *   - >0, type ID of newly added BTF type;
2552  *   - <0, on error.
2553  */
2554 int btf__add_func_proto(struct btf *btf, int ret_type_id)
2555 {
2556 	struct btf_type *t;
2557 	int sz;
2558 
2559 	if (validate_type_id(ret_type_id))
2560 		return libbpf_err(-EINVAL);
2561 
2562 	if (btf_ensure_modifiable(btf))
2563 		return libbpf_err(-ENOMEM);
2564 
2565 	sz = sizeof(struct btf_type);
2566 	t = btf_add_type_mem(btf, sz);
2567 	if (!t)
2568 		return libbpf_err(-ENOMEM);
2569 
2570 	/* start out with vlen=0; this will be adjusted when adding function
2571 	 * parameters, if necessary
2572 	 */
2573 	t->name_off = 0;
2574 	t->info = btf_type_info(BTF_KIND_FUNC_PROTO, 0, 0);
2575 	t->type = ret_type_id;
2576 
2577 	return btf_commit_type(btf, sz);
2578 }
2579 
2580 /*
2581  * Append new function parameter for current FUNC_PROTO type with:
2582  *   - *name* - parameter name, can be NULL or empty;
2583  *   - *type_id* - type ID describing the type of the parameter.
2584  * Returns:
2585  *   -  0, on success;
2586  *   - <0, on error.
2587  */
2588 int btf__add_func_param(struct btf *btf, const char *name, int type_id)
2589 {
2590 	struct btf_type *t;
2591 	struct btf_param *p;
2592 	int sz, name_off = 0;
2593 
2594 	if (validate_type_id(type_id))
2595 		return libbpf_err(-EINVAL);
2596 
2597 	/* last type should be BTF_KIND_FUNC_PROTO */
2598 	if (btf->nr_types == 0)
2599 		return libbpf_err(-EINVAL);
2600 	t = btf_last_type(btf);
2601 	if (!btf_is_func_proto(t))
2602 		return libbpf_err(-EINVAL);
2603 
2604 	/* decompose and invalidate raw data */
2605 	if (btf_ensure_modifiable(btf))
2606 		return libbpf_err(-ENOMEM);
2607 
2608 	sz = sizeof(struct btf_param);
2609 	p = btf_add_type_mem(btf, sz);
2610 	if (!p)
2611 		return libbpf_err(-ENOMEM);
2612 
2613 	if (name && name[0]) {
2614 		name_off = btf__add_str(btf, name);
2615 		if (name_off < 0)
2616 			return name_off;
2617 	}
2618 
2619 	p->name_off = name_off;
2620 	p->type = type_id;
2621 
2622 	/* update parent type's vlen */
2623 	t = btf_last_type(btf);
2624 	btf_type_inc_vlen(t);
2625 
2626 	btf->hdr->type_len += sz;
2627 	btf->hdr->str_off += sz;
2628 	return 0;
2629 }
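
/* Usage sketch (illustrative only): describing `int sum(int a, int b)`,
 * assuming `int_id` is an existing int type:
 *
 *	int proto_id = btf__add_func_proto(btf, int_id);
 *	btf__add_func_param(btf, "a", int_id);
 *	btf__add_func_param(btf, "b", int_id);
 *	int func_id = btf__add_func(btf, "sum", BTF_FUNC_GLOBAL, proto_id);
 */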
2630 
2631 /*
2632  * Append new BTF_KIND_VAR type with:
2633  *   - *name* - non-empty/non-NULL name;
2634  *   - *linkage* - variable linkage, one of BTF_VAR_STATIC,
2635  *     BTF_VAR_GLOBAL_ALLOCATED, or BTF_VAR_GLOBAL_EXTERN;
2636  *   - *type_id* - type ID of the type describing the type of the variable.
2637  * Returns:
2638  *   - >0, type ID of newly added BTF type;
2639  *   - <0, on error.
2640  */
2641 int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id)
2642 {
2643 	struct btf_type *t;
2644 	struct btf_var *v;
2645 	int sz, name_off;
2646 
2647 	/* non-empty name */
2648 	if (!name || !name[0])
2649 		return libbpf_err(-EINVAL);
2650 	if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED &&
2651 	    linkage != BTF_VAR_GLOBAL_EXTERN)
2652 		return libbpf_err(-EINVAL);
2653 	if (validate_type_id(type_id))
2654 		return libbpf_err(-EINVAL);
2655 
2656 	/* deconstruct BTF, if necessary, and invalidate raw_data */
2657 	if (btf_ensure_modifiable(btf))
2658 		return libbpf_err(-ENOMEM);
2659 
2660 	sz = sizeof(struct btf_type) + sizeof(struct btf_var);
2661 	t = btf_add_type_mem(btf, sz);
2662 	if (!t)
2663 		return libbpf_err(-ENOMEM);
2664 
2665 	name_off = btf__add_str(btf, name);
2666 	if (name_off < 0)
2667 		return name_off;
2668 
2669 	t->name_off = name_off;
2670 	t->info = btf_type_info(BTF_KIND_VAR, 0, 0);
2671 	t->type = type_id;
2672 
2673 	v = btf_var(t);
2674 	v->linkage = linkage;
2675 
2676 	return btf_commit_type(btf, sz);
2677 }
2678 
2679 /*
2680  * Append new BTF_KIND_DATASEC type with:
2681  *   - *name* - non-empty/non-NULL name;
2682  *   - *byte_sz* - data section size, in bytes.
2683  *
2684  * Data section is initially empty. Variables info can be added with
2685  * btf__add_datasec_var_info() calls, after btf__add_datasec() succeeds.
2686  *
2687  * Returns:
2688  *   - >0, type ID of newly added BTF type;
2689  *   - <0, on error.
2690  */
2691 int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz)
2692 {
2693 	struct btf_type *t;
2694 	int sz, name_off;
2695 
2696 	/* non-empty name */
2697 	if (!name || !name[0])
2698 		return libbpf_err(-EINVAL);
2699 
2700 	if (btf_ensure_modifiable(btf))
2701 		return libbpf_err(-ENOMEM);
2702 
2703 	sz = sizeof(struct btf_type);
2704 	t = btf_add_type_mem(btf, sz);
2705 	if (!t)
2706 		return libbpf_err(-ENOMEM);
2707 
2708 	name_off = btf__add_str(btf, name);
2709 	if (name_off < 0)
2710 		return name_off;
2711 
2712 	/* start with vlen=0, which will be updated as var_secinfos are added */
2713 	t->name_off = name_off;
2714 	t->info = btf_type_info(BTF_KIND_DATASEC, 0, 0);
2715 	t->size = byte_sz;
2716 
2717 	return btf_commit_type(btf, sz);
2718 }
2719 
2720 /*
2721  * Append new data section variable information entry for current DATASEC type:
2722  *   - *var_type_id* - type ID, describing type of the variable;
2723  *   - *offset* - variable offset within data section, in bytes;
2724  *   - *byte_sz* - variable size, in bytes.
2725  *
2726  * Returns:
2727  *   -  0, on success;
2728  *   - <0, on error.
2729  */
2730 int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz)
2731 {
2732 	struct btf_type *t;
2733 	struct btf_var_secinfo *v;
2734 	int sz;
2735 
2736 	/* last type should be BTF_KIND_DATASEC */
2737 	if (btf->nr_types == 0)
2738 		return libbpf_err(-EINVAL);
2739 	t = btf_last_type(btf);
2740 	if (!btf_is_datasec(t))
2741 		return libbpf_err(-EINVAL);
2742 
2743 	if (validate_type_id(var_type_id))
2744 		return libbpf_err(-EINVAL);
2745 
2746 	/* decompose and invalidate raw data */
2747 	if (btf_ensure_modifiable(btf))
2748 		return libbpf_err(-ENOMEM);
2749 
2750 	sz = sizeof(struct btf_var_secinfo);
2751 	v = btf_add_type_mem(btf, sz);
2752 	if (!v)
2753 		return libbpf_err(-ENOMEM);
2754 
2755 	v->type = var_type_id;
2756 	v->offset = offset;
2757 	v->size = byte_sz;
2758 
2759 	/* update parent type's vlen */
2760 	t = btf_last_type(btf);
2761 	btf_type_inc_vlen(t);
2762 
2763 	btf->hdr->type_len += sz;
2764 	btf->hdr->str_off += sz;
2765 	return 0;
2766 }
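
/* Usage sketch (illustrative only): a 4-byte ".data" section containing one
 * global variable, assuming `int_id` is an existing 4-byte int type:
 *
 *	int var_id = btf__add_var(btf, "cnt", BTF_VAR_GLOBAL_ALLOCATED, int_id);
 *	int sec_id = btf__add_datasec(btf, ".data", 4);
 *	btf__add_datasec_var_info(btf, var_id, 0, 4);
 */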
2767 
2768 /*
2769  * Append new BTF_KIND_DECL_TAG type with:
2770  *   - *value* - non-empty/non-NULL string;
2771  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2772  *   - *component_idx* - -1 for tagging reference type, otherwise struct/union
2773  *     member or function argument index;
2774  * Returns:
2775  *   - >0, type ID of newly added BTF type;
2776  *   - <0, on error.
2777  */
2778 int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
2779 		 int component_idx)
2780 {
2781 	struct btf_type *t;
2782 	int sz, value_off;
2783 
2784 	if (!value || !value[0] || component_idx < -1)
2785 		return libbpf_err(-EINVAL);
2786 
2787 	if (validate_type_id(ref_type_id))
2788 		return libbpf_err(-EINVAL);
2789 
2790 	if (btf_ensure_modifiable(btf))
2791 		return libbpf_err(-ENOMEM);
2792 
2793 	sz = sizeof(struct btf_type) + sizeof(struct btf_decl_tag);
2794 	t = btf_add_type_mem(btf, sz);
2795 	if (!t)
2796 		return libbpf_err(-ENOMEM);
2797 
2798 	value_off = btf__add_str(btf, value);
2799 	if (value_off < 0)
2800 		return value_off;
2801 
2802 	t->name_off = value_off;
2803 	t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, false);
2804 	t->type = ref_type_id;
2805 	btf_decl_tag(t)->component_idx = component_idx;
2806 
2807 	return btf_commit_type(btf, sz);
2808 }
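
/* Usage sketch (illustrative only): tagging a struct and its first member,
 * assuming `sid` refers to a previously added struct:
 *
 *	btf__add_decl_tag(btf, "user", sid, -1);  (tags the struct itself)
 *	btf__add_decl_tag(btf, "percpu", sid, 0); (tags member #0)
 */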
2809 
2810 struct btf_ext_sec_setup_param {
2811 	__u32 off;
2812 	__u32 len;
2813 	__u32 min_rec_size;
2814 	struct btf_ext_info *ext_info;
2815 	const char *desc;
2816 };
2817 
2818 static int btf_ext_setup_info(struct btf_ext *btf_ext,
2819 			      struct btf_ext_sec_setup_param *ext_sec)
2820 {
2821 	const struct btf_ext_info_sec *sinfo;
2822 	struct btf_ext_info *ext_info;
2823 	__u32 info_left, record_size;
2824 	size_t sec_cnt = 0;
2825 	/* The start of the info sec (including the __u32 record_size). */
2826 	void *info;
2827 
2828 	if (ext_sec->len == 0)
2829 		return 0;
2830 
2831 	if (ext_sec->off & 0x03) {
2832 		pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
2833 		     ext_sec->desc);
2834 		return -EINVAL;
2835 	}
2836 
2837 	info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
2838 	info_left = ext_sec->len;
2839 
2840 	if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
2841 		pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
2842 			 ext_sec->desc, ext_sec->off, ext_sec->len);
2843 		return -EINVAL;
2844 	}
2845 
2846 	/* At least a record size */
2847 	if (info_left < sizeof(__u32)) {
2848 		pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
2849 		return -EINVAL;
2850 	}
2851 
2852 	/* The record size needs to meet the minimum standard */
2853 	record_size = *(__u32 *)info;
2854 	if (record_size < ext_sec->min_rec_size ||
2855 	    record_size & 0x03) {
2856 		pr_debug("%s section in .BTF.ext has invalid record size %u\n",
2857 			 ext_sec->desc, record_size);
2858 		return -EINVAL;
2859 	}
2860 
2861 	sinfo = info + sizeof(__u32);
2862 	info_left -= sizeof(__u32);
2863 
2864 	/* If no records, return failure now so .BTF.ext won't be used. */
2865 	if (!info_left) {
2866 		pr_debug("%s section in .BTF.ext has no records\n", ext_sec->desc);
2867 		return -EINVAL;
2868 	}
2869 
2870 	while (info_left) {
2871 		unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
2872 		__u64 total_record_size;
2873 		__u32 num_records;
2874 
2875 		if (info_left < sec_hdrlen) {
2876 			pr_debug("%s section header is not found in .BTF.ext\n",
2877 			     ext_sec->desc);
2878 			return -EINVAL;
2879 		}
2880 
2881 		num_records = sinfo->num_info;
2882 		if (num_records == 0) {
2883 			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
2884 			     ext_sec->desc);
2885 			return -EINVAL;
2886 		}
2887 
2888 		total_record_size = sec_hdrlen + (__u64)num_records * record_size;
2889 		if (info_left < total_record_size) {
2890 			pr_debug("%s section in .BTF.ext is too short for its records\n",
2891 			     ext_sec->desc);
2892 			return -EINVAL;
2893 		}
2894 
2895 		info_left -= total_record_size;
2896 		sinfo = (void *)sinfo + total_record_size;
2897 		sec_cnt++;
2898 	}
2899 
2900 	ext_info = ext_sec->ext_info;
2901 	ext_info->len = ext_sec->len - sizeof(__u32);
2902 	ext_info->rec_size = record_size;
2903 	ext_info->info = info + sizeof(__u32);
2904 	ext_info->sec_cnt = sec_cnt;
2905 
2906 	return 0;
2907 }
2908 
2909 static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
2910 {
2911 	struct btf_ext_sec_setup_param param = {
2912 		.off = btf_ext->hdr->func_info_off,
2913 		.len = btf_ext->hdr->func_info_len,
2914 		.min_rec_size = sizeof(struct bpf_func_info_min),
2915 		.ext_info = &btf_ext->func_info,
2916 		.desc = "func_info"
2917 	};
2918 
2919 	return btf_ext_setup_info(btf_ext, &param);
2920 }
2921 
2922 static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
2923 {
2924 	struct btf_ext_sec_setup_param param = {
2925 		.off = btf_ext->hdr->line_info_off,
2926 		.len = btf_ext->hdr->line_info_len,
2927 		.min_rec_size = sizeof(struct bpf_line_info_min),
2928 		.ext_info = &btf_ext->line_info,
2929 		.desc = "line_info",
2930 	};
2931 
2932 	return btf_ext_setup_info(btf_ext, &param);
2933 }
2934 
2935 static int btf_ext_setup_core_relos(struct btf_ext *btf_ext)
2936 {
2937 	struct btf_ext_sec_setup_param param = {
2938 		.off = btf_ext->hdr->core_relo_off,
2939 		.len = btf_ext->hdr->core_relo_len,
2940 		.min_rec_size = sizeof(struct bpf_core_relo),
2941 		.ext_info = &btf_ext->core_relo_info,
2942 		.desc = "core_relo",
2943 	};
2944 
2945 	return btf_ext_setup_info(btf_ext, &param);
2946 }
2947 
2948 static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
2949 {
2950 	const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
2951 
2952 	if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
2953 	    data_size < hdr->hdr_len) {
2954 		pr_debug("BTF.ext header not found\n");
2955 		return -EINVAL;
2956 	}
2957 
2958 	if (hdr->magic == bswap_16(BTF_MAGIC)) {
2959 		pr_warn("BTF.ext in non-native endianness is not supported\n");
2960 		return -ENOTSUP;
2961 	} else if (hdr->magic != BTF_MAGIC) {
2962 		pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
2963 		return -EINVAL;
2964 	}
2965 
2966 	if (hdr->version != BTF_VERSION) {
2967 		pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
2968 		return -ENOTSUP;
2969 	}
2970 
2971 	if (hdr->flags) {
2972 		pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
2973 		return -ENOTSUP;
2974 	}
2975 
2976 	if (data_size == hdr->hdr_len) {
2977 		pr_debug("BTF.ext has no data\n");
2978 		return -EINVAL;
2979 	}
2980 
2981 	return 0;
2982 }
2983 
2984 void btf_ext__free(struct btf_ext *btf_ext)
2985 {
2986 	if (IS_ERR_OR_NULL(btf_ext))
2987 		return;
2988 	free(btf_ext->func_info.sec_idxs);
2989 	free(btf_ext->line_info.sec_idxs);
2990 	free(btf_ext->core_relo_info.sec_idxs);
2991 	free(btf_ext->data);
2992 	free(btf_ext);
2993 }
2994 
2995 struct btf_ext *btf_ext__new(const __u8 *data, __u32 size)
2996 {
2997 	struct btf_ext *btf_ext;
2998 	int err;
2999 
3000 	btf_ext = calloc(1, sizeof(struct btf_ext));
3001 	if (!btf_ext)
3002 		return libbpf_err_ptr(-ENOMEM);
3003 
3004 	btf_ext->data_size = size;
3005 	btf_ext->data = malloc(size);
3006 	if (!btf_ext->data) {
3007 		err = -ENOMEM;
3008 		goto done;
3009 	}
3010 	memcpy(btf_ext->data, data, size);
3011 
3012 	err = btf_ext_parse_hdr(btf_ext->data, size);
3013 	if (err)
3014 		goto done;
3015 
3016 	if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
3017 		err = -EINVAL;
3018 		goto done;
3019 	}
3020 
3021 	err = btf_ext_setup_func_info(btf_ext);
3022 	if (err)
3023 		goto done;
3024 
3025 	err = btf_ext_setup_line_info(btf_ext);
3026 	if (err)
3027 		goto done;
3028 
3029 	if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
3030 		goto done; /* skip core relos parsing */
3031 
3032 	err = btf_ext_setup_core_relos(btf_ext);
3033 	if (err)
3034 		goto done;
3035 
3036 done:
3037 	if (err) {
3038 		btf_ext__free(btf_ext);
3039 		return libbpf_err_ptr(err);
3040 	}
3041 
3042 	return btf_ext;
3043 }
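
/* Usage sketch (illustrative only): `data`/`size` are assumed to hold the
 * raw contents of an ELF object's .BTF.ext section, read out beforehand:
 *
 *	struct btf_ext *ext = btf_ext__new(data, size);
 *	if (libbpf_get_error(ext))
 *		... handle error ...
 *	...
 *	btf_ext__free(ext);
 */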
3044 
3045 const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
3046 {
3047 	*size = btf_ext->data_size;
3048 	return btf_ext->data;
3049 }
3050 
3051 struct btf_dedup;
3052 
3053 static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
3054 static void btf_dedup_free(struct btf_dedup *d);
3055 static int btf_dedup_prep(struct btf_dedup *d);
3056 static int btf_dedup_strings(struct btf_dedup *d);
3057 static int btf_dedup_prim_types(struct btf_dedup *d);
3058 static int btf_dedup_struct_types(struct btf_dedup *d);
3059 static int btf_dedup_ref_types(struct btf_dedup *d);
3060 static int btf_dedup_resolve_fwds(struct btf_dedup *d);
3061 static int btf_dedup_compact_types(struct btf_dedup *d);
3062 static int btf_dedup_remap_types(struct btf_dedup *d);
3063 
3064 /*
3065  * Deduplicate BTF types and strings.
3066  *
3067  * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF
3068  * section with all BTF type descriptors and string data. It overwrites that
3069  * memory in-place with deduplicated types and strings without any loss of
3070  * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section
3071  * is provided, all the strings referenced from .BTF.ext section are honored
3072  * and updated to point to the right offsets after deduplication.
3073  *
3074  * If the function returns an error, type/string data might be garbled and should
3075  * be discarded.
3076  *
3077  * A more verbose and detailed description of both the problem btf_dedup is
3078  * solving and its solution can be found at:
3079  * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
3080  *
3081  * Problem description and justification
3082  * =====================================
3083  *
3084  * BTF type information is typically emitted either as a result of conversion
3085  * from DWARF to BTF or directly by the compiler. In both cases, each compilation
3086  * unit contains information about a subset of all the types that are used
3087  * in an application. These subsets are frequently overlapping and contain a lot
3088  * of duplicated information when later concatenated together into a single
3089  * binary. This algorithm ensures that each unique type is represented by single
3090  * binary. This algorithm ensures that each unique type is represented by a
3091  * single BTF type descriptor, greatly reducing the resulting size of BTF data.
3092  * Compilation unit isolation and subsequent duplication of data is not the only
3093  * problem. The same type hierarchy (e.g., a struct and all the types that the
3094  * struct references) in different compilation units can be represented in BTF to
3095  * various degrees of completeness (or, rather, incompleteness) due to
3096  * struct/union forward declarations.
3097  *
3098  * Let's take a look at an example that we'll use to better understand the
3099  * problem (and solution). Suppose we have two compilation units, each using
3100  * the same `struct S`, but each of them having incomplete type information about
3101  * struct's fields:
3102  *
3103  * // CU #1:
3104  * struct S;
3105  * struct A {
3106  *	int a;
3107  *	struct A* self;
3108  *	struct S* parent;
3109  * };
3110  * struct B;
3111  * struct S {
3112  *	struct A* a_ptr;
3113  *	struct B* b_ptr;
3114  * };
3115  *
3116  * // CU #2:
3117  * struct S;
3118  * struct A;
3119  * struct B {
3120  *	int b;
3121  *	struct B* self;
3122  *	struct S* parent;
3123  * };
3124  * struct S {
3125  *	struct A* a_ptr;
3126  *	struct B* b_ptr;
3127  * };
3128  *
3129  * In case of CU #1, BTF data will know only that `struct B` exists (but no
3130  * more), but will know the complete type information about `struct A`. While
3131  * for CU #2, it will know full type information about `struct B`, but will
3132  * only know about forward declaration of `struct A` (in BTF terms, it will
3133  * have a `BTF_KIND_FWD` type descriptor with name `A`).
3134  *
3135  * This compilation unit isolation means that it's possible that there is no
3136  * single CU with complete type information describing structs `S`, `A`, and
3137  * `B`. Also, we might get tons of duplicated and redundant type information.
3138  *
3139  * Additional complication we need to keep in mind comes from the fact that
3140  * types, in general, can form graphs containing cycles, not just DAGs.
3141  *
3142  * While the algorithm does deduplication, it also merges and resolves type
3143  * information (unless disabled through `struct btf_dedup_opts`), whenever possible.
3144  * E.g., in the example above with two compilation units having partial type
3145  * information for structs `A` and `B`, the algorithm will emit
3146  * a single copy of each BTF type that describes structs `A`, `B`, and `S`
3147  * (as well as type information for `int` and pointers), as if they were defined
3148  * in a single compilation unit as:
3149  *
3150  * struct A {
3151  *	int a;
3152  *	struct A* self;
3153  *	struct S* parent;
3154  * };
3155  * struct B {
3156  *	int b;
3157  *	struct B* self;
3158  *	struct S* parent;
3159  * };
3160  * struct S {
3161  *	struct A* a_ptr;
3162  *	struct B* b_ptr;
3163  * };
3164  *
3165  * Algorithm summary
3166  * =================
3167  *
3168  * The algorithm completes its work in 7 separate passes:
3169  *
3170  * 1. Strings deduplication.
3171  * 2. Primitive types deduplication (int, enum, fwd).
3172  * 3. Struct/union types deduplication.
3173  * 4. Resolve unambiguous forward declarations.
3174  * 5. Reference types deduplication (pointers, typedefs, arrays, funcs, func
3175  *    protos, and const/volatile/restrict modifiers).
3176  * 6. Types compaction.
3177  * 7. Types remapping.
3178  *
3179  * The algorithm determines a canonical type descriptor, which is a single
3180  * representative type for each truly unique type. This canonical type is the
3181  * one that will go into final deduplicated BTF type information. For
3182  * struct/unions, it is also the type that algorithm will merge additional type
3183  * information into (while resolving FWDs), as it discovers it from data in
3184  * other CUs. Each input BTF type eventually gets either mapped to itself, if
3185  * that type is canonical, or to some other type, if that type is equivalent
3186  * and was chosen as canonical representative. This mapping is stored in
3187  * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that
3188  * FWD type got resolved to.
3189  *
3190  * To facilitate fast discovery of canonical types, we also maintain canonical
3191  * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash
3192  * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types
3193  * that match that signature. With sufficiently good choice of type signature
3194  * hashing function, we can limit the number of canonical types for each unique
3195  * type signature to a very small number, allowing us to find the canonical type
3196  * for any duplicated type very quickly.
3197  *
3198  * Struct/union deduplication is the most critical part, and the algorithm for
3199  * deduplicating structs/unions is described in greater detail in comments for
3200  * `btf_dedup_is_equiv` function.
3201  */
3202 int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
3203 {
3204 	struct btf_dedup *d;
3205 	int err;
3206 
3207 	if (!OPTS_VALID(opts, btf_dedup_opts))
3208 		return libbpf_err(-EINVAL);
3209 
3210 	d = btf_dedup_new(btf, opts);
3211 	if (IS_ERR(d)) {
3212 		pr_debug("btf_dedup_new failed: %ld\n", PTR_ERR(d));
3213 		return libbpf_err(-EINVAL);
3214 	}
3215 
3216 	if (btf_ensure_modifiable(btf)) {
3217 		err = -ENOMEM;
3218 		goto done;
3219 	}
3220 
3221 	err = btf_dedup_prep(d);
3222 	if (err) {
3223 		pr_debug("btf_dedup_prep failed:%d\n", err);
3224 		goto done;
3225 	}
3226 	err = btf_dedup_strings(d);
3227 	if (err < 0) {
3228 		pr_debug("btf_dedup_strings failed:%d\n", err);
3229 		goto done;
3230 	}
3231 	err = btf_dedup_prim_types(d);
3232 	if (err < 0) {
3233 		pr_debug("btf_dedup_prim_types failed:%d\n", err);
3234 		goto done;
3235 	}
3236 	err = btf_dedup_struct_types(d);
3237 	if (err < 0) {
3238 		pr_debug("btf_dedup_struct_types failed:%d\n", err);
3239 		goto done;
3240 	}
3241 	err = btf_dedup_resolve_fwds(d);
3242 	if (err < 0) {
3243 		pr_debug("btf_dedup_resolve_fwds failed:%d\n", err);
3244 		goto done;
3245 	}
3246 	err = btf_dedup_ref_types(d);
3247 	if (err < 0) {
3248 		pr_debug("btf_dedup_ref_types failed:%d\n", err);
3249 		goto done;
3250 	}
3251 	err = btf_dedup_compact_types(d);
3252 	if (err < 0) {
3253 		pr_debug("btf_dedup_compact_types failed:%d\n", err);
3254 		goto done;
3255 	}
3256 	err = btf_dedup_remap_types(d);
3257 	if (err < 0) {
3258 		pr_debug("btf_dedup_remap_types failed:%d\n", err);
3259 		goto done;
3260 	}
3261 
3262 done:
3263 	btf_dedup_free(d);
3264 	return libbpf_err(err);
3265 }
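
/* Usage sketch (illustrative only): deduplicating a BTF object together with
 * its optional .BTF.ext data, assuming `btf` and `ext` were loaded earlier:
 *
 *	LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = ext);
 *	int err = btf__dedup(btf, &opts);
 *	if (err)
 *		... BTF data may be garbled and should be discarded ...
 */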
3266 
3267 #define BTF_UNPROCESSED_ID ((__u32)-1)
3268 #define BTF_IN_PROGRESS_ID ((__u32)-2)
3269 
3270 struct btf_dedup {
3271 	/* .BTF section to be deduped in-place */
3272 	struct btf *btf;
3273 	/*
3274 	 * Optional .BTF.ext section. When provided, any strings referenced
3275 	 * from it will be taken into account when deduping strings
3276 	 */
3277 	struct btf_ext *btf_ext;
3278 	/*
3279 	 * This is a map from any type's signature hash to a list of possible
3280 	 * canonical representative type candidates. Hash collisions are
3281 	 * ignored, so even types of various kinds can share the same list of
3282 	 * candidates, which is fine because we rely on subsequent
3283 	 * btf_xxx_equal() checks to authoritatively verify type equality.
3284 	 */
3285 	struct hashmap *dedup_table;
3286 	/* Canonical types map */
3287 	__u32 *map;
3288 	/* Hypothetical mapping, used during type graph equivalence checks */
3289 	__u32 *hypot_map;
3290 	__u32 *hypot_list;
3291 	size_t hypot_cnt;
3292 	size_t hypot_cap;
3293 	/* Whether hypothetical mapping, if successful, would need to adjust
3294 	 * already canonicalized types (due to a new forward declaration to
3295 	 * concrete type resolution). In such a case, during split BTF dedup the
3296 	 * candidate type would still be considered different, because base
3297 	 * BTF is considered immutable.
3298 	 */
3299 	bool hypot_adjust_canon;
3300 	/* Various options modifying the behavior of the algorithm */
3301 	struct btf_dedup_opts opts;
3302 	/* temporary strings deduplication state */
3303 	struct strset *strs_set;
3304 };
3305 
3306 static long hash_combine(long h, long value)
3307 {
3308 	return h * 31 + value;
3309 }
3310 
3311 #define for_each_dedup_cand(d, node, hash) \
3312 	hashmap__for_each_key_entry(d->dedup_table, node, hash)
3313 
3314 static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
3315 {
3316 	return hashmap__append(d->dedup_table, hash, type_id);
3317 }
3318 
3319 static int btf_dedup_hypot_map_add(struct btf_dedup *d,
3320 				   __u32 from_id, __u32 to_id)
3321 {
3322 	if (d->hypot_cnt == d->hypot_cap) {
3323 		__u32 *new_list;
3324 
3325 		d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
3326 		new_list = libbpf_reallocarray(d->hypot_list, d->hypot_cap, sizeof(__u32));
3327 		if (!new_list)
3328 			return -ENOMEM;
3329 		d->hypot_list = new_list;
3330 	}
3331 	d->hypot_list[d->hypot_cnt++] = from_id;
3332 	d->hypot_map[from_id] = to_id;
3333 	return 0;
3334 }
3335 
3336 static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
3337 {
3338 	int i;
3339 
3340 	for (i = 0; i < d->hypot_cnt; i++)
3341 		d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
3342 	d->hypot_cnt = 0;
3343 	d->hypot_adjust_canon = false;
3344 }
3345 
3346 static void btf_dedup_free(struct btf_dedup *d)
3347 {
3348 	hashmap__free(d->dedup_table);
3349 	d->dedup_table = NULL;
3350 
3351 	free(d->map);
3352 	d->map = NULL;
3353 
3354 	free(d->hypot_map);
3355 	d->hypot_map = NULL;
3356 
3357 	free(d->hypot_list);
3358 	d->hypot_list = NULL;
3359 
3360 	free(d);
3361 }
3362 
3363 static size_t btf_dedup_identity_hash_fn(long key, void *ctx)
3364 {
3365 	return key;
3366 }
3367 
3368 static size_t btf_dedup_collision_hash_fn(long key, void *ctx)
3369 {
3370 	return 0;
3371 }
3372 
3373 static bool btf_dedup_equal_fn(long k1, long k2, void *ctx)
3374 {
3375 	return k1 == k2;
3376 }
3377 
3378 static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts)
3379 {
3380 	struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
3381 	hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
3382 	int i, err = 0, type_cnt;
3383 
3384 	if (!d)
3385 		return ERR_PTR(-ENOMEM);
3386 
3387 	if (OPTS_GET(opts, force_collisions, false))
3388 		hash_fn = btf_dedup_collision_hash_fn;
3389 
3390 	d->btf = btf;
3391 	d->btf_ext = OPTS_GET(opts, btf_ext, NULL);
3392 
3393 	d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
3394 	if (IS_ERR(d->dedup_table)) {
3395 		err = PTR_ERR(d->dedup_table);
3396 		d->dedup_table = NULL;
3397 		goto done;
3398 	}
3399 
3400 	type_cnt = btf__type_cnt(btf);
3401 	d->map = malloc(sizeof(__u32) * type_cnt);
3402 	if (!d->map) {
3403 		err = -ENOMEM;
3404 		goto done;
3405 	}
3406 	/* special BTF "void" type is made canonical immediately */
3407 	d->map[0] = 0;
3408 	for (i = 1; i < type_cnt; i++) {
3409 		struct btf_type *t = btf_type_by_id(d->btf, i);
3410 
3411 		/* VAR and DATASEC are never deduped and are self-canonical */
3412 		if (btf_is_var(t) || btf_is_datasec(t))
3413 			d->map[i] = i;
3414 		else
3415 			d->map[i] = BTF_UNPROCESSED_ID;
3416 	}
3417 
3418 	d->hypot_map = malloc(sizeof(__u32) * type_cnt);
3419 	if (!d->hypot_map) {
3420 		err = -ENOMEM;
3421 		goto done;
3422 	}
3423 	for (i = 0; i < type_cnt; i++)
3424 		d->hypot_map[i] = BTF_UNPROCESSED_ID;
3425 
3426 done:
3427 	if (err) {
3428 		btf_dedup_free(d);
3429 		return ERR_PTR(err);
3430 	}
3431 
3432 	return d;
3433 }
3434 
3435 /*
3436  * Iterate over all possible places in .BTF and .BTF.ext that can reference
3437  * a string and pass a pointer to it to the provided callback `fn`.
3438  */
3439 static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *ctx)
3440 {
3441 	int i, r;
3442 
3443 	for (i = 0; i < d->btf->nr_types; i++) {
3444 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
3445 
3446 		r = btf_type_visit_str_offs(t, fn, ctx);
3447 		if (r)
3448 			return r;
3449 	}
3450 
3451 	if (!d->btf_ext)
3452 		return 0;
3453 
3454 	r = btf_ext_visit_str_offs(d->btf_ext, fn, ctx);
3455 	if (r)
3456 		return r;
3457 
3458 	return 0;
3459 }
3460 
3461 static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx)
3462 {
3463 	struct btf_dedup *d = ctx;
3464 	__u32 str_off = *str_off_ptr;
3465 	const char *s;
3466 	int off, err;
3467 
3468 	/* don't touch empty string or string in main BTF */
3469 	if (str_off == 0 || str_off < d->btf->start_str_off)
3470 		return 0;
3471 
3472 	s = btf__str_by_offset(d->btf, str_off);
3473 	if (d->btf->base_btf) {
3474 		err = btf__find_str(d->btf->base_btf, s);
3475 		if (err >= 0) {
3476 			*str_off_ptr = err;
3477 			return 0;
3478 		}
3479 		if (err != -ENOENT)
3480 			return err;
3481 	}
3482 
3483 	off = strset__add_str(d->strs_set, s);
3484 	if (off < 0)
3485 		return off;
3486 
3487 	*str_off_ptr = d->btf->start_str_off + off;
3488 	return 0;
3489 }
3490 
3491 /*
3492  * Dedup strings and filter out those that are not referenced from either .BTF
3493  * or .BTF.ext (if provided) sections.
3494  *
3495  * This is done by building an index of all strings in BTF's string section,
3496  * then iterating over all entities that can reference strings (e.g., type
3497  * names, struct field names, .BTF.ext line info, etc) and marking corresponding
3498  * strings as used. After that, all used strings are deduped and compacted into
3499  * a sequential blob of memory and new offsets are calculated. Then all string
3500  * references are iterated again and rewritten using the new offsets.
3501  */
3502 static int btf_dedup_strings(struct btf_dedup *d)
3503 {
3504 	int err;
3505 
3506 	if (d->btf->strs_deduped)
3507 		return 0;
3508 
3509 	d->strs_set = strset__new(BTF_MAX_STR_OFFSET, NULL, 0);
3510 	if (IS_ERR(d->strs_set)) {
3511 		err = PTR_ERR(d->strs_set);
3512 		goto err_out;
3513 	}
3514 
3515 	if (!d->btf->base_btf) {
3516 		/* insert empty string; we won't be looking it up during strings
3517 		 * dedup, but it's good to have it for generic BTF string lookups
3518 		 */
3519 		err = strset__add_str(d->strs_set, "");
3520 		if (err < 0)
3521 			goto err_out;
3522 	}
3523 
3524 	/* remap string offsets */
3525 	err = btf_for_each_str_off(d, strs_dedup_remap_str_off, d);
3526 	if (err)
3527 		goto err_out;
3528 
3529 	/* replace BTF string data and hash with deduped ones */
3530 	strset__free(d->btf->strs_set);
3531 	d->btf->hdr->str_len = strset__data_size(d->strs_set);
3532 	d->btf->strs_set = d->strs_set;
3533 	d->strs_set = NULL;
3534 	d->btf->strs_deduped = true;
3535 	return 0;
3536 
3537 err_out:
3538 	strset__free(d->strs_set);
3539 	d->strs_set = NULL;
3540 
3541 	return err;
3542 }
3543 
3544 static long btf_hash_common(struct btf_type *t)
3545 {
3546 	long h;
3547 
3548 	h = hash_combine(0, t->name_off);
3549 	h = hash_combine(h, t->info);
3550 	h = hash_combine(h, t->size);
3551 	return h;
3552 }
3553 
3554 static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
3555 {
3556 	return t1->name_off == t2->name_off &&
3557 	       t1->info == t2->info &&
3558 	       t1->size == t2->size;
3559 }
3560 
3561 /* Calculate type signature hash of INT or DECL_TAG. */
3562 static long btf_hash_int_decl_tag(struct btf_type *t)
3563 {
3564 	__u32 info = *(__u32 *)(t + 1);
3565 	long h;
3566 
3567 	h = btf_hash_common(t);
3568 	h = hash_combine(h, info);
3569 	return h;
3570 }
3571 
3572 /* Check structural equality of two INTs or TAGs. */
3573 /* Check structural equality of two INTs or DECL_TAGs. */
3574 {
3575 	__u32 info1, info2;
3576 
3577 	if (!btf_equal_common(t1, t2))
3578 		return false;
3579 	info1 = *(__u32 *)(t1 + 1);
3580 	info2 = *(__u32 *)(t2 + 1);
3581 	return info1 == info2;
3582 }
3583 
3584 /* Calculate type signature hash of ENUM/ENUM64. */
3585 static long btf_hash_enum(struct btf_type *t)
3586 {
3587 	long h;
3588 
3589 	/* don't hash vlen, enum members and size to support enum fwd resolving */
3590 	h = hash_combine(0, t->name_off);
3591 	return h;
3592 }
3593 
3594 static bool btf_equal_enum_members(struct btf_type *t1, struct btf_type *t2)
3595 {
3596 	const struct btf_enum *m1, *m2;
3597 	__u16 vlen;
3598 	int i;
3599 
3600 	vlen = btf_vlen(t1);
3601 	m1 = btf_enum(t1);
3602 	m2 = btf_enum(t2);
3603 	for (i = 0; i < vlen; i++) {
3604 		if (m1->name_off != m2->name_off || m1->val != m2->val)
3605 			return false;
3606 		m1++;
3607 		m2++;
3608 	}
3609 	return true;
3610 }
3611 
3612 static bool btf_equal_enum64_members(struct btf_type *t1, struct btf_type *t2)
3613 {
3614 	const struct btf_enum64 *m1, *m2;
3615 	__u16 vlen;
3616 	int i;
3617 
3618 	vlen = btf_vlen(t1);
3619 	m1 = btf_enum64(t1);
3620 	m2 = btf_enum64(t2);
3621 	for (i = 0; i < vlen; i++) {
3622 		if (m1->name_off != m2->name_off || m1->val_lo32 != m2->val_lo32 ||
3623 		    m1->val_hi32 != m2->val_hi32)
3624 			return false;
3625 		m1++;
3626 		m2++;
3627 	}
3628 	return true;
3629 }
3630 
3631 /* Check structural equality of two ENUMs or ENUM64s. */
3632 static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
3633 {
3634 	if (!btf_equal_common(t1, t2))
3635 		return false;
3636 
3637 	/* t1 & t2 kinds are identical because of btf_equal_common */
3638 	if (btf_kind(t1) == BTF_KIND_ENUM)
3639 		return btf_equal_enum_members(t1, t2);
3640 	else
3641 		return btf_equal_enum64_members(t1, t2);
3642 }
3643 
3644 static inline bool btf_is_enum_fwd(struct btf_type *t)
3645 {
3646 	return btf_is_any_enum(t) && btf_vlen(t) == 0;
3647 }
3648 
3649 static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
3650 {
3651 	if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
3652 		return btf_equal_enum(t1, t2);
3653 	/* At this point either t1 or t2 or both are forward declarations, thus:
3654 	 * - skip comparing vlen because it is zero for forward declarations;
3655 	 * - skip comparing size to allow enum forward declarations
3656 	 *   to be compatible with enum64 full declarations;
3657 	 * - skip comparing kind for the same reason.
3658 	 */
3659 	return t1->name_off == t2->name_off &&
3660 	       btf_is_any_enum(t1) && btf_is_any_enum(t2);
3661 }
3662 
3663 /*
3664  * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
3665  * as referenced type IDs equivalence is established separately during type
3666  * graph equivalence check algorithm.
3667  */
3668 static long btf_hash_struct(struct btf_type *t)
3669 {
3670 	const struct btf_member *member = btf_members(t);
3671 	__u32 vlen = btf_vlen(t);
3672 	long h = btf_hash_common(t);
3673 	int i;
3674 
3675 	for (i = 0; i < vlen; i++) {
3676 		h = hash_combine(h, member->name_off);
3677 		h = hash_combine(h, member->offset);
3678 		/* no hashing of referenced type ID, it can be unresolved yet */
3679 		member++;
3680 	}
3681 	return h;
3682 }
3683 
3684 /*
3685  * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced
3686  * type IDs. This check is performed during type graph equivalence check and
3687  * referenced types equivalence is checked separately.
3688  */
3689 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
3690 {
3691 	const struct btf_member *m1, *m2;
3692 	__u16 vlen;
3693 	int i;
3694 
3695 	if (!btf_equal_common(t1, t2))
3696 		return false;
3697 
3698 	vlen = btf_vlen(t1);
3699 	m1 = btf_members(t1);
3700 	m2 = btf_members(t2);
3701 	for (i = 0; i < vlen; i++) {
3702 		if (m1->name_off != m2->name_off || m1->offset != m2->offset)
3703 			return false;
3704 		m1++;
3705 		m2++;
3706 	}
3707 	return true;
3708 }
3709 
3710 /*
3711  * Calculate type signature hash of ARRAY, including referenced type IDs,
3712  * under the assumption that they were already resolved to canonical type IDs and
3713  * are not going to change.
3714  */
3715 static long btf_hash_array(struct btf_type *t)
3716 {
3717 	const struct btf_array *info = btf_array(t);
3718 	long h = btf_hash_common(t);
3719 
3720 	h = hash_combine(h, info->type);
3721 	h = hash_combine(h, info->index_type);
3722 	h = hash_combine(h, info->nelems);
3723 	return h;
3724 }
3725 
3726 /*
3727  * Check exact equality of two ARRAYs, taking into account referenced
3728  * type IDs, under assumption that they were already resolved to canonical
3729  * type IDs, under the assumption that they were already resolved to canonical
3730  * This function is called during reference types deduplication to compare
3731  * ARRAY to potential canonical representative.
3732  */
3733 static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
3734 {
3735 	const struct btf_array *info1, *info2;
3736 
3737 	if (!btf_equal_common(t1, t2))
3738 		return false;
3739 
3740 	info1 = btf_array(t1);
3741 	info2 = btf_array(t2);
3742 	return info1->type == info2->type &&
3743 	       info1->index_type == info2->index_type &&
3744 	       info1->nelems == info2->nelems;
3745 }
3746 
3747 /*
3748  * Check structural compatibility of two ARRAYs, ignoring referenced type
3749  * IDs. This check is performed during type graph equivalence check and
3750  * referenced types equivalence is checked separately.
3751  */
3752 static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
3753 {
3754 	if (!btf_equal_common(t1, t2))
3755 		return false;
3756 
3757 	return btf_array(t1)->nelems == btf_array(t2)->nelems;
3758 }
3759 
3760 /*
3761  * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
3762  * under assumption that they were already resolved to canonical type IDs and
3763  * under the assumption that they were already resolved to canonical type IDs and
3764  */
3765 static long btf_hash_fnproto(struct btf_type *t)
3766 {
3767 	const struct btf_param *member = btf_params(t);
3768 	__u16 vlen = btf_vlen(t);
3769 	long h = btf_hash_common(t);
3770 	int i;
3771 
3772 	for (i = 0; i < vlen; i++) {
3773 		h = hash_combine(h, member->name_off);
3774 		h = hash_combine(h, member->type);
3775 		member++;
3776 	}
3777 	return h;
3778 }
3779 
3780 /*
3781  * Check exact equality of two FUNC_PROTOs, taking into account referenced
3782  * type IDs, under the assumption that they were already resolved to
3783  * canonical type IDs and are not going to change.
3784  * This function is called during reference type deduplication to compare a
3785  * FUNC_PROTO to a potential canonical representative.
3786  */
3787 static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
3788 {
3789 	const struct btf_param *m1, *m2;
3790 	__u16 vlen;
3791 	int i;
3792 
3793 	if (!btf_equal_common(t1, t2))
3794 		return false;
3795 
3796 	vlen = btf_vlen(t1);
3797 	m1 = btf_params(t1);
3798 	m2 = btf_params(t2);
3799 	for (i = 0; i < vlen; i++) {
3800 		if (m1->name_off != m2->name_off || m1->type != m2->type)
3801 			return false;
3802 		m1++;
3803 		m2++;
3804 	}
3805 	return true;
3806 }
3807 
3808 /*
3809  * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
3810  * IDs. This check is performed during the type graph equivalence check;
3811  * referenced type equivalence is checked separately.
3812  */
3813 static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
3814 {
3815 	const struct btf_param *m1, *m2;
3816 	__u16 vlen;
3817 	int i;
3818 
3819 	/* skip return type ID */
3820 	if (t1->name_off != t2->name_off || t1->info != t2->info)
3821 		return false;
3822 
3823 	vlen = btf_vlen(t1);
3824 	m1 = btf_params(t1);
3825 	m2 = btf_params(t2);
3826 	for (i = 0; i < vlen; i++) {
3827 		if (m1->name_off != m2->name_off)
3828 			return false;
3829 		m1++;
3830 		m2++;
3831 	}
3832 	return true;
3833 }
3834 
3835 /* Prepare split BTF for deduplication by calculating hashes of base BTF's
3836  * types and initializing the rest of the state (canonical type mapping) for
3837  * the fixed base BTF part.
3838  */
3839 static int btf_dedup_prep(struct btf_dedup *d)
3840 {
3841 	struct btf_type *t;
3842 	int type_id;
3843 	long h;
3844 
3845 	if (!d->btf->base_btf)
3846 		return 0;
3847 
3848 	for (type_id = 1; type_id < d->btf->start_id; type_id++) {
3849 		t = btf_type_by_id(d->btf, type_id);
3850 
3851 		/* all base BTF types are self-canonical by definition */
3852 		d->map[type_id] = type_id;
3853 
3854 		switch (btf_kind(t)) {
3855 		case BTF_KIND_VAR:
3856 		case BTF_KIND_DATASEC:
3857 			/* VAR and DATASEC are never hashed/deduplicated */
3858 			continue;
3859 		case BTF_KIND_CONST:
3860 		case BTF_KIND_VOLATILE:
3861 		case BTF_KIND_RESTRICT:
3862 		case BTF_KIND_PTR:
3863 		case BTF_KIND_FWD:
3864 		case BTF_KIND_TYPEDEF:
3865 		case BTF_KIND_FUNC:
3866 		case BTF_KIND_FLOAT:
3867 		case BTF_KIND_TYPE_TAG:
3868 			h = btf_hash_common(t);
3869 			break;
3870 		case BTF_KIND_INT:
3871 		case BTF_KIND_DECL_TAG:
3872 			h = btf_hash_int_decl_tag(t);
3873 			break;
3874 		case BTF_KIND_ENUM:
3875 		case BTF_KIND_ENUM64:
3876 			h = btf_hash_enum(t);
3877 			break;
3878 		case BTF_KIND_STRUCT:
3879 		case BTF_KIND_UNION:
3880 			h = btf_hash_struct(t);
3881 			break;
3882 		case BTF_KIND_ARRAY:
3883 			h = btf_hash_array(t);
3884 			break;
3885 		case BTF_KIND_FUNC_PROTO:
3886 			h = btf_hash_fnproto(t);
3887 			break;
3888 		default:
3889 			pr_debug("unknown kind %d for type [%d]\n", btf_kind(t), type_id);
3890 			return -EINVAL;
3891 		}
3892 		if (btf_dedup_table_add(d, h, type_id))
3893 			return -ENOMEM;
3894 	}
3895 
3896 	return 0;
3897 }
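
/* Illustrative sketch, not part of libbpf (error handling omitted): for split
 * BTF, btf_dedup_prep() hashes all base types and marks them self-canonical,
 * so a duplicate added on the split side resolves to the base type and is
 * dropped, while the base BTF itself stays untouched.
 */
static void __attribute__((unused)) example_split_btf_dedup(void)
{
	struct btf *base = btf__new_empty();
	struct btf *split;

	btf__add_int(base, "int", 4, BTF_INT_SIGNED);	/* base [1] */

	split = btf__new_empty_split(base);
	btf__add_int(split, "int", 4, BTF_INT_SIGNED);	/* split dup of base [1] */

	/* the split copy of 'int' maps to base [1] and is removed */
	btf__dedup(split, NULL);

	btf__free(split);
	btf__free(base);
}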
3898 
3899 /*
3900  * Deduplicate primitive types, which can't reference other types, by
3901  * calculating their type signature hash and comparing it with any possible
3902  * canonical candidate. If no canonical candidate matches, the type itself is
3903  * marked as canonical and added into `btf_dedup->dedup_table` as a candidate.
3904  */
3905 static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
3906 {
3907 	struct btf_type *t = btf_type_by_id(d->btf, type_id);
3908 	struct hashmap_entry *hash_entry;
3909 	struct btf_type *cand;
3910 	/* if we don't find an equivalent type, then we are canonical */
3911 	__u32 new_id = type_id;
3912 	__u32 cand_id;
3913 	long h;
3914 
3915 	switch (btf_kind(t)) {
3916 	case BTF_KIND_CONST:
3917 	case BTF_KIND_VOLATILE:
3918 	case BTF_KIND_RESTRICT:
3919 	case BTF_KIND_PTR:
3920 	case BTF_KIND_TYPEDEF:
3921 	case BTF_KIND_ARRAY:
3922 	case BTF_KIND_STRUCT:
3923 	case BTF_KIND_UNION:
3924 	case BTF_KIND_FUNC:
3925 	case BTF_KIND_FUNC_PROTO:
3926 	case BTF_KIND_VAR:
3927 	case BTF_KIND_DATASEC:
3928 	case BTF_KIND_DECL_TAG:
3929 	case BTF_KIND_TYPE_TAG:
3930 		return 0;
3931 
3932 	case BTF_KIND_INT:
3933 		h = btf_hash_int_decl_tag(t);
3934 		for_each_dedup_cand(d, hash_entry, h) {
3935 			cand_id = hash_entry->value;
3936 			cand = btf_type_by_id(d->btf, cand_id);
3937 			if (btf_equal_int_tag(t, cand)) {
3938 				new_id = cand_id;
3939 				break;
3940 			}
3941 		}
3942 		break;
3943 
3944 	case BTF_KIND_ENUM:
3945 	case BTF_KIND_ENUM64:
3946 		h = btf_hash_enum(t);
3947 		for_each_dedup_cand(d, hash_entry, h) {
3948 			cand_id = hash_entry->value;
3949 			cand = btf_type_by_id(d->btf, cand_id);
3950 			if (btf_equal_enum(t, cand)) {
3951 				new_id = cand_id;
3952 				break;
3953 			}
3954 			if (btf_compat_enum(t, cand)) {
3955 				if (btf_is_enum_fwd(t)) {
3956 					/* resolve fwd to full enum */
3957 					new_id = cand_id;
3958 					break;
3959 				}
3960 				/* resolve canonical enum fwd to full enum */
3961 				d->map[cand_id] = type_id;
3962 			}
3963 		}
3964 		break;
3965 
3966 	case BTF_KIND_FWD:
3967 	case BTF_KIND_FLOAT:
3968 		h = btf_hash_common(t);
3969 		for_each_dedup_cand(d, hash_entry, h) {
3970 			cand_id = hash_entry->value;
3971 			cand = btf_type_by_id(d->btf, cand_id);
3972 			if (btf_equal_common(t, cand)) {
3973 				new_id = cand_id;
3974 				break;
3975 			}
3976 		}
3977 		break;
3978 
3979 	default:
3980 		return -EINVAL;
3981 	}
3982 
3983 	d->map[type_id] = new_id;
3984 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
3985 		return -ENOMEM;
3986 
3987 	return 0;
3988 }
3989 
3990 static int btf_dedup_prim_types(struct btf_dedup *d)
3991 {
3992 	int i, err;
3993 
3994 	for (i = 0; i < d->btf->nr_types; i++) {
3995 		err = btf_dedup_prim_type(d, d->btf->start_id + i);
3996 		if (err)
3997 			return err;
3998 	}
3999 	return 0;
4000 }
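
/* Illustrative sketch, not part of libbpf (error handling omitted): an enum
 * forward declaration (an ENUM with vlen == 0, as produced by btf__add_fwd()
 * with BTF_FWD_ENUM) is btf_compat_enum()-compatible with the full definition
 * and is resolved to it by this primitive type pass.
 */
static void __attribute__((unused)) example_enum_fwd_dedup(void)
{
	struct btf *btf = btf__new_empty();

	btf__add_fwd(btf, "color", BTF_FWD_ENUM);	/* [1], enum fwd */
	btf__add_enum(btf, "color", 4);			/* [2], full enum */
	btf__add_enum_value(btf, "RED", 0);

	/* [1] is mapped to [2] and dropped during compaction; only the
	 * full 'color' enum survives
	 */
	btf__dedup(btf, NULL);

	btf__free(btf);
}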
4001 
4002 /*
4003  * Check whether a type is already mapped into a canonical one (possibly to itself).
4004  */
4005 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
4006 {
4007 	return d->map[type_id] <= BTF_MAX_NR_TYPES;
4008 }
4009 
4010 /*
4011  * Resolve type ID into its canonical type ID, if any; otherwise return the
4012  * original type ID. If the type is a FWD already resolved to a STRUCT/UNION,
4013  * follow that link and resolve it into a canonical type ID as well.
4014  */
4015 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
4016 {
4017 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
4018 		type_id = d->map[type_id];
4019 	return type_id;
4020 }
4021 
4022 /*
4023  * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
4024  * type ID.
4025  */
4026 static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
4027 {
4028 	__u32 orig_type_id = type_id;
4029 
4030 	if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
4031 		return type_id;
4032 
4033 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
4034 		type_id = d->map[type_id];
4035 
4036 	if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
4037 		return type_id;
4038 
4039 	return orig_type_id;
4040 }
4041 
4042 
4043 static inline __u16 btf_fwd_kind(struct btf_type *t)
4044 {
4045 	return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
4046 }
4047 
4048 /* Check if given two types are identical ARRAY definitions */
4049 static bool btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
4050 {
4051 	struct btf_type *t1, *t2;
4052 
4053 	t1 = btf_type_by_id(d->btf, id1);
4054 	t2 = btf_type_by_id(d->btf, id2);
4055 	if (!btf_is_array(t1) || !btf_is_array(t2))
4056 		return false;
4057 
4058 	return btf_equal_array(t1, t2);
4059 }
4060 
4061 /* Check if given two types are identical STRUCT/UNION definitions */
4062 static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id2)
4063 {
4064 	const struct btf_member *m1, *m2;
4065 	struct btf_type *t1, *t2;
4066 	int n, i;
4067 
4068 	t1 = btf_type_by_id(d->btf, id1);
4069 	t2 = btf_type_by_id(d->btf, id2);
4070 
4071 	if (!btf_is_composite(t1) || btf_kind(t1) != btf_kind(t2))
4072 		return false;
4073 
4074 	if (!btf_shallow_equal_struct(t1, t2))
4075 		return false;
4076 
4077 	m1 = btf_members(t1);
4078 	m2 = btf_members(t2);
4079 	for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
4080 		if (m1->type != m2->type &&
4081 		    !btf_dedup_identical_arrays(d, m1->type, m2->type) &&
4082 		    !btf_dedup_identical_structs(d, m1->type, m2->type))
4083 			return false;
4084 	}
4085 	return true;
4086 }
4087 
4088 /*
4089  * Check equivalence of BTF type graph formed by candidate struct/union (we'll
4090  * call it "candidate graph" in this description for brevity) to a type graph
4091  * formed by (potential) canonical struct/union ("canonical graph" for brevity
4092  * here, though keep in mind that not all types in canonical graph are
4093  * necessarily canonical representatives themselves, some of them might be
4094  * duplicates or their uniqueness might not have been established yet).
4095  * Returns:
4096  *  - >0, if type graphs are equivalent;
4097  *  -  0, if not equivalent;
4098  *  - <0, on error.
4099  *
4100  * The algorithm performs a side-by-side DFS traversal of both type graphs,
4101  * checking the equivalence of BTF types at each step. If at any point the
4102  * types in the candidate and canonical graphs are structurally incompatible,
4103  * the whole graphs are incompatible. If the types are structurally
4104  * equivalent (i.e., everything except referenced type IDs matches exactly),
4105  * a `canon_id` -> `cand_id` mapping is recorded in `btf_dedup->hypot_map`.
4106  * If a type references other types, then those referenced types are checked
4107  * for equivalence recursively.
4108  *
4109  * During DFS traversal, if we find that for the current `canon_id` type we
4110  * already have some mapping in the hypothetical map, we check for two possible
4111  * situations:
4112  *   - `canon_id` is mapped to exactly the same type as `cand_id`. This will
4113  *     happen when type graphs have cycles. In this case we assume those two
4114  *     types are equivalent.
4115  *   - `canon_id` is mapped to a different type. This is a contradiction in
4116  *     our hypothetical mapping, because the same type in the canonical graph
4117  *     corresponds to two different types in the candidate graph, which
4118  *     shouldn't happen for equivalent type graphs. This condition terminates
4119  *     the equivalence check with a negative result.
4120  *
4121  * If type graph traversal exhausts the types to check and finds no
4122  * contradiction, then the type graphs are equivalent.
4123  *
4124  * When checking types for equivalence, there is one special case: FWD types.
4125  * If FWD type resolution is allowed and one of the types (from either the
4126  * canonical or the candidate graph) is a FWD while the other is a
4127  * STRUCT/UNION matching the FWD's kind flag, and their names match, the
4128  * hypothetical mapping is updated to point from FWD to STRUCT/UNION. If the
4129  * graphs are then determined equivalent, that mapping is recorded permanently.
4130  *
4131  * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
4132  * if there are two identically named (or both anonymous) structs/unions that
4133  * are structurally compatible, one of which has a FWD field while the other
4134  * has a concrete STRUCT/UNION field, but according to the C sources they are
4135  * different structs/unions referencing different types that happen to share
4136  * a name. This is extremely unlikely to happen, but the btf_dedup API allows
4137  * disabling FWD resolution altogether if this logic is causing problems.
4138  *
4139  * Doing FWD resolution means that both the candidate and canonical graphs
4140  * can consist of portions that come from multiple compilation units. This is
4141  * because types within a single compilation unit are always deduplicated and
4142  * FWDs are already resolved, if the referenced struct/union definition is
4143  * available. So, if we had an unresolved FWD and found a corresponding
4144  * STRUCT/UNION, they must come from different compilation units. This
4145  * consequently means that when we "link" a FWD to its corresponding
4146  * STRUCT/UNION, the type graph will likely have at least two different BTF
4147  * types describing the same type (e.g., most probably there will be two
4148  * different BTF types for the same 'int' primitive type) and could even have
4149  * "overlapping" parts of the type graph describing the same subset of types.
4150  *
4151  * This in turn means that our assumption that each type in the canonical
4152  * graph must correspond to exactly one type in the candidate graph might not
4153  * hold anymore, which would make it harder to detect contradictions using the
4154  * hypothetical map. To handle this problem, we follow FWD -> STRUCT/UNION
4155  * resolution only in the canonical graph; FWDs in candidate graphs are never
4156  * resolved. To see why that's OK, let's check all possible situations w.r.t.
4157  * FWDs that can occur:
4158  *   - Both types in canonical and candidate graphs are FWDs. If they are
4159  *     structurally equivalent, then they can either be both resolved to the
4160  *     same STRUCT/UNION or not resolved at all. In both cases they are
4161  *     equivalent and there is no need to resolve FWD on candidate side.
4162  *   - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
4163  *     so nothing to resolve there either; the algorithm checks equivalence anyway.
4164  *   - Type in canonical graph is FWD, while type in candidate is concrete
4165  *     STRUCT/UNION. In this case the candidate graph comes from a single
4166  *     compilation unit, so there is exactly one BTF type for each unique C
4167  *     type. After resolving FWD into STRUCT/UNION, there might be more than
4168  *     one BTF type in the canonical graph mapping to a single BTF type in
4169  *     the candidate graph, but because the hypothetical mapping maps from
4170  *     canonical to candidate types, that's alright; we still maintain the
4171  *     property of a single `canon_id` mapping to a single `cand_id` (two
4172  *     different `canon_id` mapped to the same `cand_id` is not contradictory).
4173  *   - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
4174  *     graph is FWD. In this case we are just going to check compatibility of
4175  *     STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
4176  *     assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
4177  *     a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
4178  *     turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
4179  *     canonical graph.
4180  */
4181 static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
4182 			      __u32 canon_id)
4183 {
4184 	struct btf_type *cand_type;
4185 	struct btf_type *canon_type;
4186 	__u32 hypot_type_id;
4187 	__u16 cand_kind;
4188 	__u16 canon_kind;
4189 	int i, eq;
4190 
4191 	/* if both resolve to the same canonical, they must be equivalent */
4192 	if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
4193 		return 1;
4194 
4195 	canon_id = resolve_fwd_id(d, canon_id);
4196 
4197 	hypot_type_id = d->hypot_map[canon_id];
4198 	if (hypot_type_id <= BTF_MAX_NR_TYPES) {
4199 		if (hypot_type_id == cand_id)
4200 			return 1;
4201 		/* In some cases the compiler will generate different DWARF
4202 		 * types for *identical* array type definitions and use them
4203 		 * for different fields within the *same* struct. This breaks
4204 		 * the type equivalence check, which assumes that the candidate
4205 		 * type sub-graph contains consistent, compiler-deduplicated
4206 		 * types within a single CU. So work around that by explicitly
4207 		 * allowing identical array types here.
4208 		 */
4209 		if (btf_dedup_identical_arrays(d, hypot_type_id, cand_id))
4210 			return 1;
4211 		/* It turns out that a similar situation can sometimes happen
4212 		 * with structs/unions, sigh... Handle the case where
4213 		 * structs/unions are exactly the same, down to the referenced
4214 		 * type IDs. Anything beyond that (e.g., referenced types that
4215 		 * are different, but equivalent) is *way more* complicated and
4216 		 * would require a many-to-many equivalence mapping.
4217 		 */
4218 		if (btf_dedup_identical_structs(d, hypot_type_id, cand_id))
4219 			return 1;
4220 		return 0;
4221 	}
4222 
4223 	if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
4224 		return -ENOMEM;
4225 
4226 	cand_type = btf_type_by_id(d->btf, cand_id);
4227 	canon_type = btf_type_by_id(d->btf, canon_id);
4228 	cand_kind = btf_kind(cand_type);
4229 	canon_kind = btf_kind(canon_type);
4230 
4231 	if (cand_type->name_off != canon_type->name_off)
4232 		return 0;
4233 
4234 	/* FWD <--> STRUCT/UNION equivalence check, if enabled */
4235 	if ((cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
4236 	    && cand_kind != canon_kind) {
4237 		__u16 real_kind;
4238 		__u16 fwd_kind;
4239 
4240 		if (cand_kind == BTF_KIND_FWD) {
4241 			real_kind = canon_kind;
4242 			fwd_kind = btf_fwd_kind(cand_type);
4243 		} else {
4244 			real_kind = cand_kind;
4245 			fwd_kind = btf_fwd_kind(canon_type);
4246 			/* we'd need to resolve base FWD to STRUCT/UNION */
4247 			if (fwd_kind == real_kind && canon_id < d->btf->start_id)
4248 				d->hypot_adjust_canon = true;
4249 		}
4250 		return fwd_kind == real_kind;
4251 	}
4252 
4253 	if (cand_kind != canon_kind)
4254 		return 0;
4255 
4256 	switch (cand_kind) {
4257 	case BTF_KIND_INT:
4258 		return btf_equal_int_tag(cand_type, canon_type);
4259 
4260 	case BTF_KIND_ENUM:
4261 	case BTF_KIND_ENUM64:
4262 		return btf_compat_enum(cand_type, canon_type);
4263 
4264 	case BTF_KIND_FWD:
4265 	case BTF_KIND_FLOAT:
4266 		return btf_equal_common(cand_type, canon_type);
4267 
4268 	case BTF_KIND_CONST:
4269 	case BTF_KIND_VOLATILE:
4270 	case BTF_KIND_RESTRICT:
4271 	case BTF_KIND_PTR:
4272 	case BTF_KIND_TYPEDEF:
4273 	case BTF_KIND_FUNC:
4274 	case BTF_KIND_TYPE_TAG:
4275 		if (cand_type->info != canon_type->info)
4276 			return 0;
4277 		return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
4278 
4279 	case BTF_KIND_ARRAY: {
4280 		const struct btf_array *cand_arr, *canon_arr;
4281 
4282 		if (!btf_compat_array(cand_type, canon_type))
4283 			return 0;
4284 		cand_arr = btf_array(cand_type);
4285 		canon_arr = btf_array(canon_type);
4286 		eq = btf_dedup_is_equiv(d, cand_arr->index_type, canon_arr->index_type);
4287 		if (eq <= 0)
4288 			return eq;
4289 		return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
4290 	}
4291 
4292 	case BTF_KIND_STRUCT:
4293 	case BTF_KIND_UNION: {
4294 		const struct btf_member *cand_m, *canon_m;
4295 		__u16 vlen;
4296 
4297 		if (!btf_shallow_equal_struct(cand_type, canon_type))
4298 			return 0;
4299 		vlen = btf_vlen(cand_type);
4300 		cand_m = btf_members(cand_type);
4301 		canon_m = btf_members(canon_type);
4302 		for (i = 0; i < vlen; i++) {
4303 			eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
4304 			if (eq <= 0)
4305 				return eq;
4306 			cand_m++;
4307 			canon_m++;
4308 		}
4309 
4310 		return 1;
4311 	}
4312 
4313 	case BTF_KIND_FUNC_PROTO: {
4314 		const struct btf_param *cand_p, *canon_p;
4315 		__u16 vlen;
4316 
4317 		if (!btf_compat_fnproto(cand_type, canon_type))
4318 			return 0;
4319 		eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
4320 		if (eq <= 0)
4321 			return eq;
4322 		vlen = btf_vlen(cand_type);
4323 		cand_p = btf_params(cand_type);
4324 		canon_p = btf_params(canon_type);
4325 		for (i = 0; i < vlen; i++) {
4326 			eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
4327 			if (eq <= 0)
4328 				return eq;
4329 			cand_p++;
4330 			canon_p++;
4331 		}
4332 		return 1;
4333 	}
4334 
4335 	default:
4336 		return -EINVAL;
4337 	}
4338 	return 0;
4339 }
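
/* Illustrative sketch, not part of libbpf (error handling omitted): two
 * copies of 'struct foo', one referencing only a FWD of 'struct bar' (as in
 * one CU), the other referencing the concrete definition (as in another CU).
 * The type graphs are judged equivalent, so the 'foo' copies merge and the
 * FWD gets resolved to the concrete 'bar'.
 */
static void __attribute__((unused)) example_graph_equivalence(void)
{
	struct btf *btf = btf__new_empty();
	int fwd, ptr1, ival, bar, ptr2;

	/* CU #1: struct foo { struct bar *b; }; with 'bar' only fwd-declared */
	fwd = btf__add_fwd(btf, "bar", BTF_FWD_STRUCT);		/* [1] */
	ptr1 = btf__add_ptr(btf, fwd);				/* [2] */
	btf__add_struct(btf, "foo", 8);				/* [3] */
	btf__add_field(btf, "b", ptr1, 0, 0);

	/* CU #2: same struct foo, plus concrete struct bar { int x; }; */
	ival = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);	/* [4] */
	bar = btf__add_struct(btf, "bar", 4);			/* [5] */
	btf__add_field(btf, "x", ival, 0, 0);
	ptr2 = btf__add_ptr(btf, bar);				/* [6] */
	btf__add_struct(btf, "foo", 8);				/* [7] */
	btf__add_field(btf, "b", ptr2, 0, 0);

	/* the equivalence check maps canonical [3] <-> candidate [7] and
	 * hypothetically maps FWD [1] -> STRUCT [5]; on success, [7] dedups
	 * into [3] and FWD [1] is resolved to [5]
	 */
	btf__dedup(btf, NULL);

	btf__free(btf);
}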
4340 
4341 /*
4342  * Use the hypothetical mapping produced by a successful type graph
4343  * equivalence check to augment the existing struct/union canonical mapping,
4344  * where possible.
4345  *
4346  * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
4347  * the FWD -> STRUCT/UNION correspondence. FWD resolution is bidirectional:
4348  * it doesn't matter whether the FWD type was part of the canonical graph or
4349  * the candidate one, we record the mapping anyway. Unlike struct/union
4350  * correspondence mapping (described below), this requires no special care:
4351  * by the time FWD (reference) types are deduped, all structs/unions are too.
4352  *
4353  * Recording the STRUCT/UNION mapping is purely a performance optimization
4354  * and is not required for correctness. It needs to be done carefully to
4355  * ensure that a struct/union from the candidate's type graph is not mapped
4356  * onto a corresponding struct/union from the canonical type graph that
4357  * itself hasn't been resolved into a canonical representative. The only
4358  * guarantee we have is that the canonical struct/union was determined as
4359  * canonical and that won't change. But any types referenced through that
4360  * struct/union's fields could still be unresolved, in which case it's too
4361  * early to establish any kind of correspondence between structs/unions.
4362  *
4363  * No canonical correspondence is derived for primitive types (they are
4364  * already completely deduplicated anyway) or reference types (they rely on
4365  * the stability of struct/union canonical relationships for equivalence checks).
4366  */
4367 static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
4368 {
4369 	__u32 canon_type_id, targ_type_id;
4370 	__u16 t_kind, c_kind;
4371 	__u32 t_id, c_id;
4372 	int i;
4373 
4374 	for (i = 0; i < d->hypot_cnt; i++) {
4375 		canon_type_id = d->hypot_list[i];
4376 		targ_type_id = d->hypot_map[canon_type_id];
4377 		t_id = resolve_type_id(d, targ_type_id);
4378 		c_id = resolve_type_id(d, canon_type_id);
4379 		t_kind = btf_kind(btf__type_by_id(d->btf, t_id));
4380 		c_kind = btf_kind(btf__type_by_id(d->btf, c_id));
4381 		/*
4382 		 * Resolve FWD into STRUCT/UNION.
4383 		 * It's ok to resolve FWD into STRUCT/UNION that's not yet
4384 		 * mapped to canonical representative (as opposed to
4385 		 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
4386 		 * eventually that struct is going to be mapped and all resolved
4387 		 * FWDs will automatically resolve to the correct canonical
4388 		 * representative. This will happen before ref type deduping,
4389 		 * which critically depends on the stability of these mappings.
4390 		 * This stability is not a requirement for STRUCT/UNION
4391 		 * equivalence checks, though.
4392 		 */
4393 
4394 		/* if it's the split BTF case, we still need to point base FWD
4395 		 * to STRUCT/UNION in a split BTF, because FWDs from split BTF
4396 		 * will be resolved against base FWD. If we don't point base
4397 		 * canonical FWD to the resolved STRUCT/UNION, then all the
4398 		 * FWDs in split BTF won't be correctly resolved to a proper
4399 		 * STRUCT/UNION.
4400 		 */
4401 		if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
4402 			d->map[c_id] = t_id;
4403 
4404 		/* if graph equivalence determined that we'd need to adjust
4405 		 * base canonical types, then we only need to point base FWDs
4406 		 * to STRUCTs/UNIONs and make no further modifications. For all
4407 		 * other purposes the type graphs were not equivalent.
4408 		 */
4409 		if (d->hypot_adjust_canon)
4410 			continue;
4411 
4412 		if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
4413 			d->map[t_id] = c_id;
4414 
4415 		if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
4416 		    c_kind != BTF_KIND_FWD &&
4417 		    is_type_mapped(d, c_id) &&
4418 		    !is_type_mapped(d, t_id)) {
4419 			/*
4420 			 * as a perf optimization, we can map a struct/union
4421 			 * that's part of the type graph we just verified for
4422 			 * equivalence. We can only do that for a struct/union
4423 			 * whose canonical counterpart is already mapped, though.
4424 			 */
4425 			d->map[t_id] = c_id;
4426 		}
4427 	}
4428 }
4429 
4430 /*
4431  * Deduplicate struct/union types.
4432  *
4433  * For each struct/union type its type signature hash is calculated, taking
4434  * into account the type's name, size, and the number, order, and names of
4435  * its fields, while ignoring the type IDs referenced from fields, as those
4436  * might not be fully deduped until after the reference type deduplication
4437  * phase. This type hash is used to iterate over all potential canonical
4438  * types sharing the same hash. For each canonical candidate we check
4439  * whether the type graphs they form (through referenced types in fields and
4440  * so on) are equivalent, using the algorithm implemented in
4441  * `btf_dedup_is_equiv`. If such equivalence is found and BTF_KIND_FWD
4442  * resolution is allowed, then the hypothetical mapping
4443  * (btf_dedup->hypot_map) produced by the aforementioned type graph
4444  * equivalence algorithm is used to record the FWD -> STRUCT/UNION mapping.
4445  * It's also used to potentially map other structs/unions to their canonical
4446  * representatives, if such a relationship hasn't yet been established. This
4447  * speeds up the algorithm by eliminating some of the duplicate work.
4448  * If no matching canonical representative is found, the struct/union is
4449  * marked as its own canonical representative and is added into the
4450  * btf_dedup->dedup_table hash map for further lookups.
4451  */
4452 static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
4453 {
4454 	struct btf_type *cand_type, *t;
4455 	struct hashmap_entry *hash_entry;
4456 	/* if we don't find an equivalent type, then we are canonical */
4457 	__u32 new_id = type_id;
4458 	__u16 kind;
4459 	long h;
4460 
4461 	/* already deduped or in the process of deduping (loop detected) */
4462 	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
4463 		return 0;
4464 
4465 	t = btf_type_by_id(d->btf, type_id);
4466 	kind = btf_kind(t);
4467 
4468 	if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
4469 		return 0;
4470 
4471 	h = btf_hash_struct(t);
4472 	for_each_dedup_cand(d, hash_entry, h) {
4473 		__u32 cand_id = hash_entry->value;
4474 		int eq;
4475 
4476 		/*
4477 		 * Even though btf_dedup_is_equiv() checks for
4478 		 * btf_shallow_equal_struct() internally when checking two
4479 		 * structs (unions) for equivalence, we need to guard here
4480 		 * against picking a matching FWD type as a dedup candidate.
4481 		 * This can happen due to a hash collision. In such a case,
4482 		 * relying on btf_dedup_is_equiv() alone could potentially
4483 		 * create a loop (FWD -> STRUCT and STRUCT -> FWD), because a
4484 		 * FWD and a compatible STRUCT/UNION are considered equivalent.
4485 		 */
4486 		cand_type = btf_type_by_id(d->btf, cand_id);
4487 		if (!btf_shallow_equal_struct(t, cand_type))
4488 			continue;
4489 
4490 		btf_dedup_clear_hypot_map(d);
4491 		eq = btf_dedup_is_equiv(d, type_id, cand_id);
4492 		if (eq < 0)
4493 			return eq;
4494 		if (!eq)
4495 			continue;
4496 		btf_dedup_merge_hypot_map(d);
4497 		if (d->hypot_adjust_canon) /* not really equivalent */
4498 			continue;
4499 		new_id = cand_id;
4500 		break;
4501 	}
4502 
4503 	d->map[type_id] = new_id;
4504 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
4505 		return -ENOMEM;
4506 
4507 	return 0;
4508 }
4509 
4510 static int btf_dedup_struct_types(struct btf_dedup *d)
4511 {
4512 	int i, err;
4513 
4514 	for (i = 0; i < d->btf->nr_types; i++) {
4515 		err = btf_dedup_struct_type(d, d->btf->start_id + i);
4516 		if (err)
4517 			return err;
4518 	}
4519 	return 0;
4520 }
4521 
4522 /*
4523  * Deduplicate reference type.
4524  *
4525  * Once all primitive and struct/union types are deduplicated, we can easily
4526  * deduplicate all other (reference) BTF types. This is done in two steps:
4527  *
4528  * 1. Resolve all referenced type IDs into their canonical type IDs. This
4529  * resolution can be done either immediately for primitive or struct/union
4530  * types (because they were deduped in the previous two phases) or recursively
4531  * for reference types. Recursion will always terminate at either a primitive
4532  * or a struct/union type, at which point we can "unwind" the chain of
4533  * reference types one by one. There is no danger of encountering cycles,
4534  * because in the C type system the only way to form a type cycle is through a
4535  * struct/union, so any chain of reference types, even those taking part in a
4536  * type cycle, will inevitably reach a struct/union at some point.
4537  *
4538  * 2. Once all referenced type IDs are resolved into canonical ones, the BTF
4539  * type becomes "stable", in the sense that no further deduplication will
4540  * cause any changes to it. With that, it's now possible to calculate the
4541  * type's signature hash (this time taking into account referenced type IDs)
4542  * and loop over all potential canonical representatives. If no match is
4543  * found, the current type becomes its own canonical representative and is
4544  * added into btf_dedup->dedup_table as another possible candidate.
4545  */
4546 static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
4547 {
4548 	struct hashmap_entry *hash_entry;
4549 	/* if we don't find an equivalent type, then we are the representative type */
4550 	__u32 new_id = type_id, cand_id;
4551 	struct btf_type *t, *cand;
4552 	int ref_type_id;
4553 	long h;
4554 
4555 	if (d->map[type_id] == BTF_IN_PROGRESS_ID)
4556 		return -ELOOP;
4557 	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
4558 		return resolve_type_id(d, type_id);
4559 
4560 	t = btf_type_by_id(d->btf, type_id);
4561 	d->map[type_id] = BTF_IN_PROGRESS_ID;
4562 
4563 	switch (btf_kind(t)) {
4564 	case BTF_KIND_CONST:
4565 	case BTF_KIND_VOLATILE:
4566 	case BTF_KIND_RESTRICT:
4567 	case BTF_KIND_PTR:
4568 	case BTF_KIND_TYPEDEF:
4569 	case BTF_KIND_FUNC:
4570 	case BTF_KIND_TYPE_TAG:
4571 		ref_type_id = btf_dedup_ref_type(d, t->type);
4572 		if (ref_type_id < 0)
4573 			return ref_type_id;
4574 		t->type = ref_type_id;
4575 
4576 		h = btf_hash_common(t);
4577 		for_each_dedup_cand(d, hash_entry, h) {
4578 			cand_id = hash_entry->value;
4579 			cand = btf_type_by_id(d->btf, cand_id);
4580 			if (btf_equal_common(t, cand)) {
4581 				new_id = cand_id;
4582 				break;
4583 			}
4584 		}
4585 		break;
4586 
4587 	case BTF_KIND_DECL_TAG:
4588 		ref_type_id = btf_dedup_ref_type(d, t->type);
4589 		if (ref_type_id < 0)
4590 			return ref_type_id;
4591 		t->type = ref_type_id;
4592 
4593 		h = btf_hash_int_decl_tag(t);
4594 		for_each_dedup_cand(d, hash_entry, h) {
4595 			cand_id = hash_entry->value;
4596 			cand = btf_type_by_id(d->btf, cand_id);
4597 			if (btf_equal_int_tag(t, cand)) {
4598 				new_id = cand_id;
4599 				break;
4600 			}
4601 		}
4602 		break;
4603 
4604 	case BTF_KIND_ARRAY: {
4605 		struct btf_array *info = btf_array(t);
4606 
4607 		ref_type_id = btf_dedup_ref_type(d, info->type);
4608 		if (ref_type_id < 0)
4609 			return ref_type_id;
4610 		info->type = ref_type_id;
4611 
4612 		ref_type_id = btf_dedup_ref_type(d, info->index_type);
4613 		if (ref_type_id < 0)
4614 			return ref_type_id;
4615 		info->index_type = ref_type_id;
4616 
4617 		h = btf_hash_array(t);
4618 		for_each_dedup_cand(d, hash_entry, h) {
4619 			cand_id = hash_entry->value;
4620 			cand = btf_type_by_id(d->btf, cand_id);
4621 			if (btf_equal_array(t, cand)) {
4622 				new_id = cand_id;
4623 				break;
4624 			}
4625 		}
4626 		break;
4627 	}
4628 
4629 	case BTF_KIND_FUNC_PROTO: {
4630 		struct btf_param *param;
4631 		__u16 vlen;
4632 		int i;
4633 
4634 		ref_type_id = btf_dedup_ref_type(d, t->type);
4635 		if (ref_type_id < 0)
4636 			return ref_type_id;
4637 		t->type = ref_type_id;
4638 
4639 		vlen = btf_vlen(t);
4640 		param = btf_params(t);
4641 		for (i = 0; i < vlen; i++) {
4642 			ref_type_id = btf_dedup_ref_type(d, param->type);
4643 			if (ref_type_id < 0)
4644 				return ref_type_id;
4645 			param->type = ref_type_id;
4646 			param++;
4647 		}
4648 
4649 		h = btf_hash_fnproto(t);
4650 		for_each_dedup_cand(d, hash_entry, h) {
4651 			cand_id = hash_entry->value;
4652 			cand = btf_type_by_id(d->btf, cand_id);
4653 			if (btf_equal_fnproto(t, cand)) {
4654 				new_id = cand_id;
4655 				break;
4656 			}
4657 		}
4658 		break;
4659 	}
4660 
4661 	default:
4662 		return -EINVAL;
4663 	}
4664 
4665 	d->map[type_id] = new_id;
4666 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
4667 		return -ENOMEM;
4668 
4669 	return new_id;
4670 }
4671 
4672 static int btf_dedup_ref_types(struct btf_dedup *d)
4673 {
4674 	int i, err;
4675 
4676 	for (i = 0; i < d->btf->nr_types; i++) {
4677 		err = btf_dedup_ref_type(d, d->btf->start_id + i);
4678 		if (err < 0)
4679 			return err;
4680 	}
4681 	/* we won't need d->dedup_table anymore */
4682 	hashmap__free(d->dedup_table);
4683 	d->dedup_table = NULL;
4684 	return 0;
4685 }
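
/* Illustrative sketch, not part of libbpf (error handling omitted): once the
 * duplicated INTs are merged by the primitive pass, both PTRs reference the
 * same canonical INT, hash identically, and are merged by this pass.
 */
static void __attribute__((unused)) example_ref_type_dedup(void)
{
	struct btf *btf = btf__new_empty();
	int int1, int2;

	int1 = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);	/* [1] */
	int2 = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);	/* [2], dup of [1] */
	btf__add_ptr(btf, int1);				/* [3] */
	btf__add_ptr(btf, int2);				/* [4], dup of [3] once [2] -> [1] */

	/* after dedup and compaction only one INT and one PTR remain */
	if (!btf__dedup(btf, NULL))
		pr_debug("%u types left\n", btf__type_cnt(btf));

	btf__free(btf);
}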
4686 
4687 /*
4688  * Collect a map from type names to type IDs for all canonical structs
4689  * and unions. If the same name is shared by several canonical types,
4690  * use the special value 0 to indicate this fact.
4691  */
4692 static int btf_dedup_fill_unique_names_map(struct btf_dedup *d, struct hashmap *names_map)
4693 {
4694 	__u32 nr_types = btf__type_cnt(d->btf);
4695 	struct btf_type *t;
4696 	__u32 type_id;
4697 	__u16 kind;
4698 	int err;
4699 
4700 	/*
4701 	 * Iterate over base and split module type IDs in order to get all
4702 	 * available structs into the map.
4703 	 */
4704 	for (type_id = 1; type_id < nr_types; ++type_id) {
4705 		t = btf_type_by_id(d->btf, type_id);
4706 		kind = btf_kind(t);
4707 
4708 		if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
4709 			continue;
4710 
4711 		/* Skip non-canonical types */
4712 		if (type_id != d->map[type_id])
4713 			continue;
4714 
4715 		err = hashmap__add(names_map, t->name_off, type_id);
4716 		if (err == -EEXIST)
4717 			err = hashmap__set(names_map, t->name_off, 0, NULL, NULL);
4718 
4719 		if (err)
4720 			return err;
4721 	}
4722 
4723 	return 0;
4724 }
4725 
4726 static int btf_dedup_resolve_fwd(struct btf_dedup *d, struct hashmap *names_map, __u32 type_id)
4727 {
4728 	struct btf_type *t = btf_type_by_id(d->btf, type_id);
4729 	enum btf_fwd_kind fwd_kind = btf_kflag(t);
4730 	__u16 cand_kind, kind = btf_kind(t);
4731 	struct btf_type *cand_t;
4732 	uintptr_t cand_id;
4733 
4734 	if (kind != BTF_KIND_FWD)
4735 		return 0;
4736 
4737 	/* Skip if this FWD already has a mapping */
4738 	if (type_id != d->map[type_id])
4739 		return 0;
4740 
4741 	if (!hashmap__find(names_map, t->name_off, &cand_id))
4742 		return 0;
4743 
4744 	/* Zero is a special value indicating that the name is not unique */
4745 	if (!cand_id)
4746 		return 0;
4747 
4748 	cand_t = btf_type_by_id(d->btf, cand_id);
4749 	cand_kind = btf_kind(cand_t);
4750 	if ((cand_kind == BTF_KIND_STRUCT && fwd_kind != BTF_FWD_STRUCT) ||
4751 	    (cand_kind == BTF_KIND_UNION && fwd_kind != BTF_FWD_UNION))
4752 		return 0;
4753 
4754 	d->map[type_id] = cand_id;
4755 
4756 	return 0;
4757 }
4758 
4759 /*
4760  * Resolve unambiguous forward declarations.
4761  *
4762  * The lion's share of all FWD declarations is resolved during the
4763  * `btf_dedup_struct_types` phase, when different type graphs are
4764  * compared against each other. However, if in some compilation unit a
4765  * FWD declaration is not part of a type graph compared against
4766  * another type graph, that declaration's canonical type would not be
4767  * changed. Example:
4768  *
4769  * CU #1:
4770  *
4771  * struct foo;
4772  * struct foo *some_global;
4773  *
4774  * CU #2:
4775  *
4776  * struct foo { int u; };
4777  * struct foo *another_global;
4778  *
4779  * After `btf_dedup_struct_types` the BTF looks as follows:
4780  *
4781  * [1] STRUCT 'foo' size=4 vlen=1 ...
4782  * [2] INT 'int' size=4 ...
4783  * [3] PTR '(anon)' type_id=1
4784  * [4] FWD 'foo' fwd_kind=struct
4785  * [5] PTR '(anon)' type_id=4
4786  *
4787  * This pass assumes that such FWD declarations should be mapped to
4788  * structs or unions with an identical name, provided that the name is
4789  * not ambiguous.
4790  */
4791 static int btf_dedup_resolve_fwds(struct btf_dedup *d)
4792 {
4793 	int i, err;
4794 	struct hashmap *names_map;
4795 
4796 	names_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
4797 	if (IS_ERR(names_map))
4798 		return PTR_ERR(names_map);
4799 
4800 	err = btf_dedup_fill_unique_names_map(d, names_map);
4801 	if (err < 0)
4802 		goto exit;
4803 
4804 	for (i = 0; i < d->btf->nr_types; i++) {
4805 		err = btf_dedup_resolve_fwd(d, names_map, d->btf->start_id + i);
4806 		if (err < 0)
4807 			break;
4808 	}
4809 
4810 exit:
4811 	hashmap__free(names_map);
4812 	return err;
4813 }
4814 
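
/* Illustrative sketch, not part of libbpf (error handling omitted), mirroring
 * the CU #1 / CU #2 example above: the FWD is never compared to the concrete
 * 'foo' as part of a type graph, yet this pass still maps it to the single
 * canonical struct with the matching (unambiguous) name.
 */
static void __attribute__((unused)) example_resolve_fwd(void)
{
	struct btf *btf = btf__new_empty();
	int ival, fwd;

	ival = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);	/* [1] */
	btf__add_struct(btf, "foo", 4);				/* [2] */
	btf__add_field(btf, "u", ival, 0, 0);
	fwd = btf__add_fwd(btf, "foo", BTF_FWD_STRUCT);		/* [3] */
	btf__add_ptr(btf, fwd);					/* [4] */

	/* [3] maps to [2], so PTR [4] ends up pointing at struct 'foo' */
	btf__dedup(btf, NULL);

	btf__free(btf);
}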
4815 /*
4816  * Compact types.
4817  *
4818  * After we've established for each type its corresponding canonical
4819  * representative type, we can now eliminate types that are not canonical and
4820  * leave only canonical ones laid out sequentially in memory by copying them
4821  * over duplicates. During compaction, btf_dedup->hypot_map is reused to store
4822  * a map from original type ID to a new compacted type ID, which will be used
4823  * during next phase to "fix up" type IDs, referenced from struct/union and
4824  * reference types.
4825  */
4826 static int btf_dedup_compact_types(struct btf_dedup *d)
4827 {
4828 	__u32 *new_offs;
4829 	__u32 next_type_id = d->btf->start_id;
4830 	const struct btf_type *t;
4831 	void *p;
4832 	int i, id, len;
4833 
4834 	/* we are going to reuse hypot_map to store compaction remapping */
4835 	d->hypot_map[0] = 0;
4836 	/* base BTF types are not renumbered */
4837 	for (id = 1; id < d->btf->start_id; id++)
4838 		d->hypot_map[id] = id;
4839 	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++)
4840 		d->hypot_map[id] = BTF_UNPROCESSED_ID;
4841 
4842 	p = d->btf->types_data;
4843 
4844 	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) {
4845 		if (d->map[id] != id)
4846 			continue;
4847 
4848 		t = btf__type_by_id(d->btf, id);
4849 		len = btf_type_size(t);
4850 		if (len < 0)
4851 			return len;
4852 
4853 		memmove(p, t, len);
4854 		d->hypot_map[id] = next_type_id;
4855 		d->btf->type_offs[next_type_id - d->btf->start_id] = p - d->btf->types_data;
4856 		p += len;
4857 		next_type_id++;
4858 	}
4859 
4860 	/* shrink struct btf's internal types index and update btf_header */
4861 	d->btf->nr_types = next_type_id - d->btf->start_id;
4862 	d->btf->type_offs_cap = d->btf->nr_types;
4863 	d->btf->hdr->type_len = p - d->btf->types_data;
4864 	new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap,
4865 				       sizeof(*new_offs));
4866 	if (d->btf->type_offs_cap && !new_offs)
4867 		return -ENOMEM;
4868 	d->btf->type_offs = new_offs;
4869 	d->btf->hdr->str_off = d->btf->hdr->type_len;
4870 	d->btf->raw_size = d->btf->hdr->hdr_len + d->btf->hdr->type_len + d->btf->hdr->str_len;
4871 	return 0;
4872 }
4873 
4874 /*
4875  * Figure out the final (deduplicated and compacted) type ID for the provided
4876  * original `type_id` by first resolving it into the corresponding canonical
4877  * type ID and then mapping that to a deduplicated type ID stored in
4878  * btf_dedup->hypot_map, which is populated during the compaction phase.
4879  */
4880 static int btf_dedup_remap_type_id(__u32 *type_id, void *ctx)
4881 {
4882 	struct btf_dedup *d = ctx;
4883 	__u32 resolved_type_id, new_type_id;
4884 
4885 	resolved_type_id = resolve_type_id(d, *type_id);
4886 	new_type_id = d->hypot_map[resolved_type_id];
4887 	if (new_type_id > BTF_MAX_NR_TYPES)
4888 		return -EINVAL;
4889 
4890 	*type_id = new_type_id;
4891 	return 0;
4892 }
4893 
4894 /*
4895  * Remap referenced type IDs into deduped type IDs.
4896  *
4897  * After BTF types are deduplicated and compacted, their final type IDs may
4898  * differ from the original ones. The map from an original type ID to its
4899  * corresponding deduped type ID is stored in btf_dedup->hypot_map and is
4900  * populated during the compaction phase. During the remapping phase we
4901  * rewrite all type IDs referenced from any BTF type (e.g., struct fields,
4902  * func proto args, etc) to their final deduped type IDs.
4903  */
4904 static int btf_dedup_remap_types(struct btf_dedup *d)
4905 {
4906 	int i, r;
4907 
4908 	for (i = 0; i < d->btf->nr_types; i++) {
4909 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
4910 
4911 		r = btf_type_visit_type_ids(t, btf_dedup_remap_type_id, d);
4912 		if (r)
4913 			return r;
4914 	}
4915 
4916 	if (!d->btf_ext)
4917 		return 0;
4918 
4919 	r = btf_ext_visit_type_ids(d->btf_ext, btf_dedup_remap_type_id, d);
4920 	if (r)
4921 		return r;
4922 
4923 	return 0;
4924 }
4925 
4926 /*
4927  * Probe a few well-known locations for the vmlinux kernel image and try to
4928  * load BTF data out of it to use as the target BTF.
4929  */
4930 struct btf *btf__load_vmlinux_btf(void)
4931 {
4932 	const char *locations[] = {
4933 		/* try canonical vmlinux BTF through sysfs first */
4934 		"/sys/kernel/btf/vmlinux",
4935 		/* fall back to trying to find vmlinux on disk otherwise */
4936 		"/boot/vmlinux-%1$s",
4937 		"/lib/modules/%1$s/vmlinux-%1$s",
4938 		"/lib/modules/%1$s/build/vmlinux",
4939 		"/usr/lib/modules/%1$s/kernel/vmlinux",
4940 		"/usr/lib/debug/boot/vmlinux-%1$s",
4941 		"/usr/lib/debug/boot/vmlinux-%1$s.debug",
4942 		"/usr/lib/debug/lib/modules/%1$s/vmlinux",
4943 	};
4944 	char path[PATH_MAX + 1];
4945 	struct utsname buf;
4946 	struct btf *btf;
4947 	int i, err;
4948 
4949 	uname(&buf);
4950 
4951 	for (i = 0; i < ARRAY_SIZE(locations); i++) {
4952 		snprintf(path, PATH_MAX, locations[i], buf.release);
4953 
4954 		if (faccessat(AT_FDCWD, path, R_OK, AT_EACCESS))
4955 			continue;
4956 
4957 		btf = btf__parse(path, NULL);
4958 		err = libbpf_get_error(btf);
4959 		pr_debug("loading kernel BTF '%s': %d\n", path, err);
4960 		if (err)
4961 			continue;
4962 
4963 		return btf;
4964 	}
4965 
4966 	pr_warn("failed to find valid kernel BTF\n");
4967 	return libbpf_err_ptr(-ESRCH);
4968 }
4969 
4970 struct btf *libbpf_find_kernel_btf(void) __attribute__((alias("btf__load_vmlinux_btf")));
4971 
4972 struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf)
4973 {
4974 	char path[80];
4975 
4976 	snprintf(path, sizeof(path), "/sys/kernel/btf/%s", module_name);
4977 	return btf__parse_split(path, vmlinux_btf);
4978 }
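
/* Illustrative usage sketch, not part of libbpf (error handling abbreviated):
 * load vmlinux BTF, parse a module's BTF split on top of it, and look a type
 * up by name. The module name "nf_conntrack" and type name "nf_conn" are
 * hypothetical examples.
 */
static void __attribute__((unused)) example_load_kernel_btf(void)
{
	struct btf *vmlinux_btf, *module_btf;
	__s32 id;

	vmlinux_btf = btf__load_vmlinux_btf();
	if (libbpf_get_error(vmlinux_btf))
		return;

	module_btf = btf__load_module_btf("nf_conntrack", vmlinux_btf);
	if (!libbpf_get_error(module_btf)) {
		id = btf__find_by_name_kind(module_btf, "nf_conn", BTF_KIND_STRUCT);
		pr_debug("'nf_conn' type ID: %d\n", id);
		btf__free(module_btf);
	}
	btf__free(vmlinux_btf);
}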
4979 
4980 int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
4981 {
4982 	int i, n, err;
4983 
4984 	switch (btf_kind(t)) {
4985 	case BTF_KIND_INT:
4986 	case BTF_KIND_FLOAT:
4987 	case BTF_KIND_ENUM:
4988 	case BTF_KIND_ENUM64:
4989 		return 0;
4990 
4991 	case BTF_KIND_FWD:
4992 	case BTF_KIND_CONST:
4993 	case BTF_KIND_VOLATILE:
4994 	case BTF_KIND_RESTRICT:
4995 	case BTF_KIND_PTR:
4996 	case BTF_KIND_TYPEDEF:
4997 	case BTF_KIND_FUNC:
4998 	case BTF_KIND_VAR:
4999 	case BTF_KIND_DECL_TAG:
5000 	case BTF_KIND_TYPE_TAG:
5001 		return visit(&t->type, ctx);
5002 
5003 	case BTF_KIND_ARRAY: {
5004 		struct btf_array *a = btf_array(t);
5005 
5006 		err = visit(&a->type, ctx);
5007 		err = err ?: visit(&a->index_type, ctx);
5008 		return err;
5009 	}
5010 
5011 	case BTF_KIND_STRUCT:
5012 	case BTF_KIND_UNION: {
5013 		struct btf_member *m = btf_members(t);
5014 
5015 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
5016 			err = visit(&m->type, ctx);
5017 			if (err)
5018 				return err;
5019 		}
5020 		return 0;
5021 	}
5022 
5023 	case BTF_KIND_FUNC_PROTO: {
5024 		struct btf_param *m = btf_params(t);
5025 
5026 		err = visit(&t->type, ctx);
5027 		if (err)
5028 			return err;
5029 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
5030 			err = visit(&m->type, ctx);
5031 			if (err)
5032 				return err;
5033 		}
5034 		return 0;
5035 	}
5036 
5037 	case BTF_KIND_DATASEC: {
5038 		struct btf_var_secinfo *m = btf_var_secinfos(t);
5039 
5040 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
5041 			err = visit(&m->type, ctx);
5042 			if (err)
5043 				return err;
5044 		}
5045 		return 0;
5046 	}
5047 
5048 	default:
5049 		return -EINVAL;
5050 	}
5051 }
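
/* Illustrative sketch, not part of libbpf: a visitor that counts how many
 * type ID references a single BTF type carries (return type, member types,
 * parameter types, etc.), demonstrating the visit callback contract above.
 */
static int example_count_ref(__u32 *type_id, void *ctx)
{
	(*(int *)ctx)++;	/* a non-zero return would abort the walk */
	return 0;
}

static void __attribute__((unused)) example_visit_type_ids(struct btf_type *t)
{
	int nr_refs = 0;

	if (!btf_type_visit_type_ids(t, example_count_ref, &nr_refs))
		pr_debug("type references %d other type(s)\n", nr_refs);
}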
5052 
5053 int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx)
5054 {
5055 	int i, n, err;
5056 
5057 	err = visit(&t->name_off, ctx);
5058 	if (err)
5059 		return err;
5060 
5061 	switch (btf_kind(t)) {
5062 	case BTF_KIND_STRUCT:
5063 	case BTF_KIND_UNION: {
5064 		struct btf_member *m = btf_members(t);
5065 
5066 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
5067 			err = visit(&m->name_off, ctx);
5068 			if (err)
5069 				return err;
5070 		}
5071 		break;
5072 	}
5073 	case BTF_KIND_ENUM: {
5074 		struct btf_enum *m = btf_enum(t);
5075 
5076 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
5077 			err = visit(&m->name_off, ctx);
5078 			if (err)
5079 				return err;
5080 		}
5081 		break;
5082 	}
5083 	case BTF_KIND_ENUM64: {
5084 		struct btf_enum64 *m = btf_enum64(t);
5085 
5086 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
5087 			err = visit(&m->name_off, ctx);
5088 			if (err)
5089 				return err;
5090 		}
5091 		break;
5092 	}
5093 	case BTF_KIND_FUNC_PROTO: {
5094 		struct btf_param *m = btf_params(t);
5095 
5096 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
5097 			err = visit(&m->name_off, ctx);
5098 			if (err)
5099 				return err;
5100 		}
5101 		break;
5102 	}
5103 	default:
5104 		break;
5105 	}
5106 
5107 	return 0;
5108 }
5109 
5110 int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
5111 {
5112 	const struct btf_ext_info *seg;
5113 	struct btf_ext_info_sec *sec;
5114 	int i, err;
5115 
5116 	seg = &btf_ext->func_info;
5117 	for_each_btf_ext_sec(seg, sec) {
5118 		struct bpf_func_info_min *rec;
5119 
5120 		for_each_btf_ext_rec(seg, sec, i, rec) {
5121 			err = visit(&rec->type_id, ctx);
5122 			if (err < 0)
5123 				return err;
5124 		}
5125 	}
5126 
5127 	seg = &btf_ext->core_relo_info;
5128 	for_each_btf_ext_sec(seg, sec) {
5129 		struct bpf_core_relo *rec;
5130 
5131 		for_each_btf_ext_rec(seg, sec, i, rec) {
5132 			err = visit(&rec->type_id, ctx);
5133 			if (err < 0)
5134 				return err;
5135 		}
5136 	}
5137 
5138 	return 0;
5139 }
5140 
5141 int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx)
5142 {
5143 	const struct btf_ext_info *seg;
5144 	struct btf_ext_info_sec *sec;
5145 	int i, err;
5146 
5147 	seg = &btf_ext->func_info;
5148 	for_each_btf_ext_sec(seg, sec) {
5149 		err = visit(&sec->sec_name_off, ctx);
5150 		if (err)
5151 			return err;
5152 	}
5153 
5154 	seg = &btf_ext->line_info;
5155 	for_each_btf_ext_sec(seg, sec) {
5156 		struct bpf_line_info_min *rec;
5157 
5158 		err = visit(&sec->sec_name_off, ctx);
5159 		if (err)
5160 			return err;
5161 
5162 		for_each_btf_ext_rec(seg, sec, i, rec) {
5163 			err = visit(&rec->file_name_off, ctx);
5164 			if (err)
5165 				return err;
5166 			err = visit(&rec->line_off, ctx);
5167 			if (err)
5168 				return err;
5169 		}
5170 	}
5171 
5172 	seg = &btf_ext->core_relo_info;
5173 	for_each_btf_ext_sec(seg, sec) {
5174 		struct bpf_core_relo *rec;
5175 
5176 		err = visit(&sec->sec_name_off, ctx);
5177 		if (err)
5178 			return err;
5179 
5180 		for_each_btf_ext_rec(seg, sec, i, rec) {
5181 			err = visit(&rec->access_str_off, ctx);
5182 			if (err)
5183 				return err;
5184 		}
5185 	}
5186 
5187 	return 0;
5188 }
5189