xref: /linux/tools/lib/bpf/btf.c (revision bdce82e960d1205d118662f575cec39379984e34)
1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 /* Copyright (c) 2018 Facebook */
3 
4 #include <byteswap.h>
5 #include <endian.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <fcntl.h>
10 #include <unistd.h>
11 #include <errno.h>
12 #include <sys/utsname.h>
13 #include <sys/param.h>
14 #include <sys/stat.h>
15 #include <linux/kernel.h>
16 #include <linux/err.h>
17 #include <linux/btf.h>
18 #include <gelf.h>
19 #include "btf.h"
20 #include "bpf.h"
21 #include "libbpf.h"
22 #include "libbpf_internal.h"
23 #include "hashmap.h"
24 #include "strset.h"
25 
26 #define BTF_MAX_NR_TYPES 0x7fffffffU
27 #define BTF_MAX_STR_OFFSET 0x7fffffffU
28 
29 static struct btf_type btf_void;
30 
struct btf {
	/* raw BTF data in native endianness */
	void *raw_data;
	/* raw BTF data in non-native endianness */
	void *raw_data_swapped;
	__u32 raw_size;
	/* whether target endianness differs from the native one */
	bool swapped_endian;

	/*
	 * When BTF is loaded from an ELF or raw memory it is stored
	 * in a contiguous memory block. The hdr, types_data, and strs_data
	 * point inside that memory region to their respective parts of BTF
	 * representation:
	 *
	 * +--------------------------------+
	 * |  Header  |  Types  |  Strings  |
	 * +--------------------------------+
	 * ^          ^         ^
	 * |          |         |
	 * hdr        |         |
	 * types_data-+         |
	 * strs_data------------+
	 *
	 * If BTF data is later modified, e.g., due to types added or
	 * removed, BTF deduplication performed, etc, this contiguous
	 * representation is broken up into three independently allocated
	 * memory regions to be able to modify them independently.
	 * raw_data is nulled out at that point, but can be later allocated
	 * and cached again if user calls btf__raw_data(), at which point
	 * raw_data will contain a contiguous copy of header, types, and
	 * strings:
	 *
	 * +----------+  +---------+  +-----------+
	 * |  Header  |  |  Types  |  |  Strings  |
	 * +----------+  +---------+  +-----------+
	 * ^             ^            ^
	 * |             |            |
	 * hdr           |            |
	 * types_data----+            |
	 * strset__data(strs_set)-----+
	 *
	 *               +----------+---------+-----------+
	 *               |  Header  |  Types  |  Strings  |
	 * raw_data----->+----------+---------+-----------+
	 */
	struct btf_header *hdr;

	void *types_data;
	size_t types_data_cap; /* used size stored in hdr->type_len */

	/* type ID to `struct btf_type *` lookup index
	 * type_offs[0] corresponds to the first non-VOID type:
	 *   - for base BTF it's type [1];
	 *   - for split BTF it's the first non-base BTF type.
	 */
	__u32 *type_offs;
	size_t type_offs_cap;
	/* number of types in this BTF instance:
	 *   - doesn't include special [0] void type;
	 *   - for split BTF counts number of types added on top of base BTF.
	 */
	__u32 nr_types;
	/* if not NULL, points to the base BTF on top of which the current
	 * split BTF is based
	 */
	struct btf *base_btf;
	/* BTF type ID of the first type in this BTF instance:
	 *   - for base BTF it's equal to 1;
	 *   - for split BTF it's equal to biggest type ID of base BTF plus 1.
	 */
	int start_id;
	/* logical string offset of this BTF instance:
	 *   - for base BTF it's equal to 0;
	 *   - for split BTF it's equal to total size of base BTF's string section size.
	 */
	int start_str_off;

	/* only one of strs_data or strs_set can be non-NULL, depending on
	 * whether BTF is in a modifiable state (strs_set is used) or not
	 * (strs_data points inside raw_data)
	 */
	void *strs_data;
	/* a set of unique strings */
	struct strset *strs_set;
	/* whether strings are already deduplicated */
	bool strs_deduped;

	/* BTF object FD, if loaded into kernel; -1 otherwise */
	int fd;

	/* Pointer size (in bytes) for a target architecture of this BTF;
	 * 0 means "not determined yet", negative means it couldn't be
	 * guessed from BTF type info (see btf_ptr_sz())
	 */
	int ptr_sz;
};
125 
/* Losslessly widen a pointer into a __u64, as expected by kernel UAPI */
static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64)(unsigned long)ptr;
}
130 
131 /* Ensure given dynamically allocated memory region pointed to by *data* with
132  * capacity of *cap_cnt* elements each taking *elem_sz* bytes has enough
133  * memory to accommodate *add_cnt* new elements, assuming *cur_cnt* elements
134  * are already used. At most *max_cnt* elements can be ever allocated.
135  * If necessary, memory is reallocated and all existing data is copied over,
136  * new pointer to the memory region is stored at *data, new memory region
137  * capacity (in number of elements) is stored in *cap.
138  * On success, memory pointer to the beginning of unused memory is returned.
139  * On error, NULL is returned.
140  */
void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
		     size_t cur_cnt, size_t max_cnt, size_t add_cnt)
{
	size_t new_cnt;
	void *new_data;

	/* Reject over-limit requests first, in an overflow-safe form:
	 * cur_cnt + add_cnt could wrap around in size_t arithmetic and
	 * spuriously satisfy both the capacity and the limit checks.
	 * Since grown capacity is always clamped to max_cnt below, doing
	 * this check first doesn't change behavior for in-range inputs.
	 */
	if (cur_cnt > max_cnt || add_cnt > max_cnt - cur_cnt)
		return NULL;

	/* enough spare capacity already? */
	if (cur_cnt + add_cnt <= *cap_cnt)
		return *data + cur_cnt * elem_sz;

	new_cnt = *cap_cnt;
	new_cnt += new_cnt / 4;		  /* expand by 25% */
	if (new_cnt < 16)		  /* but at least 16 elements */
		new_cnt = 16;
	if (new_cnt > max_cnt)		  /* but not exceeding a set limit */
		new_cnt = max_cnt;
	if (new_cnt < cur_cnt + add_cnt)  /* also ensure we have enough memory */
		new_cnt = cur_cnt + add_cnt;

	new_data = libbpf_reallocarray(*data, new_cnt, elem_sz);
	if (!new_data)
		return NULL;

	/* zero out newly allocated portion of memory */
	memset(new_data + (*cap_cnt) * elem_sz, 0, (new_cnt - *cap_cnt) * elem_sz);

	*data = new_data;
	*cap_cnt = new_cnt;
	return new_data + cur_cnt * elem_sz;
}
174 
175 /* Ensure given dynamically allocated memory region has enough allocated space
176  * to accommodate *need_cnt* elements of size *elem_sz* bytes each
177  */
int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
{
	size_t grow_by;

	/* nothing to do if capacity already covers the need */
	if (need_cnt <= *cap_cnt)
		return 0;

	grow_by = need_cnt - *cap_cnt;
	if (!libbpf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, grow_by))
		return -ENOMEM;

	return 0;
}
191 
192 static void *btf_add_type_offs_mem(struct btf *btf, size_t add_cnt)
193 {
194 	return libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
195 			      btf->nr_types, BTF_MAX_NR_TYPES, add_cnt);
196 }
197 
198 static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
199 {
200 	__u32 *p;
201 
202 	p = btf_add_type_offs_mem(btf, 1);
203 	if (!p)
204 		return -ENOMEM;
205 
206 	*p = type_off;
207 	return 0;
208 }
209 
/* Byte-swap every multi-byte field of the BTF header in place */
static void btf_bswap_hdr(struct btf_header *h)
{
	__u32 *fields[] = {
		&h->hdr_len, &h->type_off, &h->type_len,
		&h->str_off, &h->str_len,
	};
	int i;

	h->magic = bswap_16(h->magic);
	for (i = 0; i < 5; i++)
		*fields[i] = bswap_32(*fields[i]);
}
219 
/* Validate BTF header and, if needed, convert it to native endianness
 * in place. Returns 0 on success, negative error otherwise.
 */
static int btf_parse_hdr(struct btf *btf)
{
	struct btf_header *hdr = btf->hdr;
	__u32 meta_left;

	/* need at least a full fixed-size header to interpret anything */
	if (btf->raw_size < sizeof(struct btf_header)) {
		pr_debug("BTF header not found\n");
		return -EINVAL;
	}

	/* byte-swapped magic means the data is in non-native endianness */
	if (hdr->magic == bswap_16(BTF_MAGIC)) {
		btf->swapped_endian = true;
		if (bswap_32(hdr->hdr_len) != sizeof(struct btf_header)) {
			pr_warn("Can't load BTF with non-native endianness due to unsupported header length %u\n",
				bswap_32(hdr->hdr_len));
			return -ENOTSUP;
		}
		/* convert header fields to native endianness in place */
		btf_bswap_hdr(hdr);
	} else if (hdr->magic != BTF_MAGIC) {
		pr_debug("Invalid BTF magic: %x\n", hdr->magic);
		return -EINVAL;
	}

	/* buffer must cover the advertised (possibly extended) header */
	if (btf->raw_size < hdr->hdr_len) {
		pr_debug("BTF header len %u larger than data size %u\n",
			 hdr->hdr_len, btf->raw_size);
		return -EINVAL;
	}

	/* string section must fit in the data past the header; the
	 * (long long) cast keeps the sum from wrapping around in __u32
	 */
	meta_left = btf->raw_size - hdr->hdr_len;
	if (meta_left < (long long)hdr->str_off + hdr->str_len) {
		pr_debug("Invalid BTF total size: %u\n", btf->raw_size);
		return -EINVAL;
	}

	/* type section must come before and not overlap string section */
	if ((long long)hdr->type_off + hdr->type_len > hdr->str_off) {
		pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n",
			 hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len);
		return -EINVAL;
	}

	/* type descriptors are 4-byte aligned structures */
	if (hdr->type_off % 4) {
		pr_debug("BTF type section is not aligned to 4 bytes\n");
		return -EINVAL;
	}

	return 0;
}
268 
/* Validate the BTF string section layout. Assumes btf->strs_data and
 * hdr->str_len were already set up by btf_parse_hdr()/btf_new().
 */
static int btf_parse_str_sec(struct btf *btf)
{
	const struct btf_header *hdr = btf->hdr;
	const char *start = btf->strs_data;
	const char *end = start + btf->hdr->str_len;

	/* split BTF may have an empty string section: all its strings can
	 * live in the base BTF's section
	 */
	if (btf->base_btf && hdr->str_len == 0)
		return 0;
	/* section must be non-empty, within the max offset limit, and its
	 * very last byte must be a NUL terminator
	 */
	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || end[-1]) {
		pr_debug("Invalid BTF string section\n");
		return -EINVAL;
	}
	/* base BTF's string section always starts with the empty string */
	if (!btf->base_btf && start[0]) {
		pr_debug("Invalid BTF string section\n");
		return -EINVAL;
	}
	return 0;
}
287 
288 static int btf_type_size(const struct btf_type *t)
289 {
290 	const int base_size = sizeof(struct btf_type);
291 	__u16 vlen = btf_vlen(t);
292 
293 	switch (btf_kind(t)) {
294 	case BTF_KIND_FWD:
295 	case BTF_KIND_CONST:
296 	case BTF_KIND_VOLATILE:
297 	case BTF_KIND_RESTRICT:
298 	case BTF_KIND_PTR:
299 	case BTF_KIND_TYPEDEF:
300 	case BTF_KIND_FUNC:
301 	case BTF_KIND_FLOAT:
302 	case BTF_KIND_TYPE_TAG:
303 		return base_size;
304 	case BTF_KIND_INT:
305 		return base_size + sizeof(__u32);
306 	case BTF_KIND_ENUM:
307 		return base_size + vlen * sizeof(struct btf_enum);
308 	case BTF_KIND_ENUM64:
309 		return base_size + vlen * sizeof(struct btf_enum64);
310 	case BTF_KIND_ARRAY:
311 		return base_size + sizeof(struct btf_array);
312 	case BTF_KIND_STRUCT:
313 	case BTF_KIND_UNION:
314 		return base_size + vlen * sizeof(struct btf_member);
315 	case BTF_KIND_FUNC_PROTO:
316 		return base_size + vlen * sizeof(struct btf_param);
317 	case BTF_KIND_VAR:
318 		return base_size + sizeof(struct btf_var);
319 	case BTF_KIND_DATASEC:
320 		return base_size + vlen * sizeof(struct btf_var_secinfo);
321 	case BTF_KIND_DECL_TAG:
322 		return base_size + sizeof(struct btf_decl_tag);
323 	default:
324 		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
325 		return -EINVAL;
326 	}
327 }
328 
/* Byte-swap the fields shared by all BTF type kinds */
static void btf_bswap_type_base(struct btf_type *t)
{
	__u32 *fields[] = { &t->name_off, &t->info, &t->type };
	int i;

	for (i = 0; i < 3; i++)
		*fields[i] = bswap_32(*fields[i]);
}
335 
/* Byte-swap the kind-specific tail of type descriptor t in place.
 * Assumes the common fields (info in particular) were already converted
 * to native endianness by btf_bswap_type_base(), so btf_kind()/btf_vlen()
 * return meaningful values. Returns -EINVAL for unknown kinds.
 */
static int btf_bswap_type_rest(struct btf_type *t)
{
	struct btf_var_secinfo *v;
	struct btf_enum64 *e64;
	struct btf_member *m;
	struct btf_array *a;
	struct btf_param *p;
	struct btf_enum *e;
	__u16 vlen = btf_vlen(t);
	int i;

	switch (btf_kind(t)) {
	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_FLOAT:
	case BTF_KIND_TYPE_TAG:
		/* these kinds have no extra data to swap */
		return 0;
	case BTF_KIND_INT:
		/* single __u32 of encoding/offset/bits metadata */
		*(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1));
		return 0;
	case BTF_KIND_ENUM:
		for (i = 0, e = btf_enum(t); i < vlen; i++, e++) {
			e->name_off = bswap_32(e->name_off);
			e->val = bswap_32(e->val);
		}
		return 0;
	case BTF_KIND_ENUM64:
		for (i = 0, e64 = btf_enum64(t); i < vlen; i++, e64++) {
			e64->name_off = bswap_32(e64->name_off);
			/* 64-bit value is stored as two 32-bit halves */
			e64->val_lo32 = bswap_32(e64->val_lo32);
			e64->val_hi32 = bswap_32(e64->val_hi32);
		}
		return 0;
	case BTF_KIND_ARRAY:
		a = btf_array(t);
		a->type = bswap_32(a->type);
		a->index_type = bswap_32(a->index_type);
		a->nelems = bswap_32(a->nelems);
		return 0;
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		for (i = 0, m = btf_members(t); i < vlen; i++, m++) {
			m->name_off = bswap_32(m->name_off);
			m->type = bswap_32(m->type);
			m->offset = bswap_32(m->offset);
		}
		return 0;
	case BTF_KIND_FUNC_PROTO:
		for (i = 0, p = btf_params(t); i < vlen; i++, p++) {
			p->name_off = bswap_32(p->name_off);
			p->type = bswap_32(p->type);
		}
		return 0;
	case BTF_KIND_VAR:
		btf_var(t)->linkage = bswap_32(btf_var(t)->linkage);
		return 0;
	case BTF_KIND_DATASEC:
		for (i = 0, v = btf_var_secinfos(t); i < vlen; i++, v++) {
			v->type = bswap_32(v->type);
			v->offset = bswap_32(v->offset);
			v->size = bswap_32(v->size);
		}
		return 0;
	case BTF_KIND_DECL_TAG:
		btf_decl_tag(t)->component_idx = bswap_32(btf_decl_tag(t)->component_idx);
		return 0;
	default:
		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
		return -EINVAL;
	}
}
412 
/* Walk the type section, validating each type descriptor's size and
 * building the ID -> offset lookup index. Converts descriptors to native
 * endianness on the fly for non-native input.
 */
static int btf_parse_type_sec(struct btf *btf)
{
	struct btf_header *hdr = btf->hdr;
	void *next_type = btf->types_data;
	void *end_type = next_type + hdr->type_len;
	int err, type_size;

	while (next_type + sizeof(struct btf_type) <= end_type) {
		/* swap common fields first so that btf_type_size() can
		 * interpret kind/vlen of non-native-endian descriptors
		 */
		if (btf->swapped_endian)
			btf_bswap_type_base(next_type);

		type_size = btf_type_size(next_type);
		if (type_size < 0)
			return type_size;
		/* kind-specific tail must not run past the section end */
		if (next_type + type_size > end_type) {
			pr_warn("BTF type [%d] is malformed\n", btf->start_id + btf->nr_types);
			return -EINVAL;
		}

		/* now that the tail is known to be in bounds, swap it too */
		if (btf->swapped_endian && btf_bswap_type_rest(next_type))
			return -EINVAL;

		/* record this type's offset for ID-based lookups */
		err = btf_add_type_idx_entry(btf, next_type - btf->types_data);
		if (err)
			return err;

		next_type += type_size;
		btf->nr_types++;
	}

	/* leftover bytes too small to be a type descriptor mean corruption */
	if (next_type != end_type) {
		pr_warn("BTF types data is malformed\n");
		return -EINVAL;
	}

	return 0;
}
450 
451 static int btf_validate_str(const struct btf *btf, __u32 str_off, const char *what, __u32 type_id)
452 {
453 	const char *s;
454 
455 	s = btf__str_by_offset(btf, str_off);
456 	if (!s) {
457 		pr_warn("btf: type [%u]: invalid %s (string offset %u)\n", type_id, what, str_off);
458 		return -EINVAL;
459 	}
460 
461 	return 0;
462 }
463 
464 static int btf_validate_id(const struct btf *btf, __u32 id, __u32 ctx_id)
465 {
466 	const struct btf_type *t;
467 
468 	t = btf__type_by_id(btf, id);
469 	if (!t) {
470 		pr_warn("btf: type [%u]: invalid referenced type ID %u\n", ctx_id, id);
471 		return -EINVAL;
472 	}
473 
474 	return 0;
475 }
476 
/* Validate one type descriptor: its name, all string offsets it embeds,
 * and all type IDs it references. Returns 0 if valid, -EINVAL otherwise.
 */
static int btf_validate_type(const struct btf *btf, const struct btf_type *t, __u32 id)
{
	__u32 kind = btf_kind(t);
	int err, i, n;

	/* every kind has a (possibly empty) name */
	err = btf_validate_str(btf, t->name_off, "type name", id);
	if (err)
		return err;

	switch (kind) {
	case BTF_KIND_UNKN:
	case BTF_KIND_INT:
	case BTF_KIND_FWD:
	case BTF_KIND_FLOAT:
		/* no referenced type IDs to check */
		break;
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_VAR:
	case BTF_KIND_DECL_TAG:
	case BTF_KIND_TYPE_TAG:
		/* single referenced type in t->type */
		err = btf_validate_id(btf, t->type, id);
		if (err)
			return err;
		break;
	case BTF_KIND_ARRAY: {
		const struct btf_array *a = btf_array(t);

		err = btf_validate_id(btf, a->type, id);
		err = err ?: btf_validate_id(btf, a->index_type, id);
		if (err)
			return err;
		break;
	}
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *m = btf_members(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "field name", id);
			err = err ?: btf_validate_id(btf, m->type, id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_ENUM: {
		const struct btf_enum *m = btf_enum(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "enum name", id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_ENUM64: {
		const struct btf_enum64 *m = btf_enum64(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "enum name", id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_FUNC: {
		const struct btf_type *ft;

		err = btf_validate_id(btf, t->type, id);
		if (err)
			return err;
		/* FUNC must point at a FUNC_PROTO */
		ft = btf__type_by_id(btf, t->type);
		if (btf_kind(ft) != BTF_KIND_FUNC_PROTO) {
			pr_warn("btf: type [%u]: referenced type [%u] is not FUNC_PROTO\n", id, t->type);
			return -EINVAL;
		}
		break;
	}
	case BTF_KIND_FUNC_PROTO: {
		const struct btf_param *m = btf_params(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "param name", id);
			err = err ?: btf_validate_id(btf, m->type, id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_DATASEC: {
		const struct btf_var_secinfo *m = btf_var_secinfos(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_id(btf, m->type, id);
			if (err)
				return err;
		}
		break;
	}
	default:
		pr_warn("btf: type [%u]: unrecognized kind %u\n", id, kind);
		return -EINVAL;
	}
	return 0;
}
590 
591 /* Validate basic sanity of BTF. It's intentionally less thorough than
592  * kernel's validation and validates only properties of BTF that libbpf relies
593  * on to be correct (e.g., valid type IDs, valid string offsets, etc)
594  */
595 static int btf_sanity_check(const struct btf *btf)
596 {
597 	const struct btf_type *t;
598 	__u32 i, n = btf__type_cnt(btf);
599 	int err;
600 
601 	for (i = 1; i < n; i++) {
602 		t = btf_type_by_id(btf, i);
603 		err = btf_validate_type(btf, t, i);
604 		if (err)
605 			return err;
606 	}
607 	return 0;
608 }
609 
610 __u32 btf__type_cnt(const struct btf *btf)
611 {
612 	return btf->start_id + btf->nr_types;
613 }
614 
615 const struct btf *btf__base_btf(const struct btf *btf)
616 {
617 	return btf->base_btf;
618 }
619 
620 /* internal helper returning non-const pointer to a type */
621 struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id)
622 {
623 	if (type_id == 0)
624 		return &btf_void;
625 	if (type_id < btf->start_id)
626 		return btf_type_by_id(btf->base_btf, type_id);
627 	return btf->types_data + btf->type_offs[type_id - btf->start_id];
628 }
629 
630 const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
631 {
632 	if (type_id >= btf->start_id + btf->nr_types)
633 		return errno = EINVAL, NULL;
634 	return btf_type_by_id((struct btf *)btf, type_id);
635 }
636 
637 static int determine_ptr_size(const struct btf *btf)
638 {
639 	static const char * const long_aliases[] = {
640 		"long",
641 		"long int",
642 		"int long",
643 		"unsigned long",
644 		"long unsigned",
645 		"unsigned long int",
646 		"unsigned int long",
647 		"long unsigned int",
648 		"long int unsigned",
649 		"int unsigned long",
650 		"int long unsigned",
651 	};
652 	const struct btf_type *t;
653 	const char *name;
654 	int i, j, n;
655 
656 	if (btf->base_btf && btf->base_btf->ptr_sz > 0)
657 		return btf->base_btf->ptr_sz;
658 
659 	n = btf__type_cnt(btf);
660 	for (i = 1; i < n; i++) {
661 		t = btf__type_by_id(btf, i);
662 		if (!btf_is_int(t))
663 			continue;
664 
665 		if (t->size != 4 && t->size != 8)
666 			continue;
667 
668 		name = btf__name_by_offset(btf, t->name_off);
669 		if (!name)
670 			continue;
671 
672 		for (j = 0; j < ARRAY_SIZE(long_aliases); j++) {
673 			if (strcmp(name, long_aliases[j]) == 0)
674 				return t->size;
675 		}
676 	}
677 
678 	return -1;
679 }
680 
681 static size_t btf_ptr_sz(const struct btf *btf)
682 {
683 	if (!btf->ptr_sz)
684 		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
685 	return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
686 }
687 
688 /* Return pointer size this BTF instance assumes. The size is heuristically
689  * determined by looking for 'long' or 'unsigned long' integer type and
690  * recording its size in bytes. If BTF type information doesn't have any such
691  * type, this function returns 0. In the latter case, native architecture's
692  * pointer size is assumed, so will be either 4 or 8, depending on
693  * architecture that libbpf was compiled for. It's possible to override
694  * guessed value by using btf__set_pointer_size() API.
695  */
696 size_t btf__pointer_size(const struct btf *btf)
697 {
698 	if (!btf->ptr_sz)
699 		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
700 
701 	if (btf->ptr_sz < 0)
702 		/* not enough BTF type info to guess */
703 		return 0;
704 
705 	return btf->ptr_sz;
706 }
707 
708 /* Override or set pointer size in bytes. Only values of 4 and 8 are
709  * supported.
710  */
711 int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
712 {
713 	if (ptr_sz != 4 && ptr_sz != 8)
714 		return libbpf_err(-EINVAL);
715 	btf->ptr_sz = ptr_sz;
716 	return 0;
717 }
718 
/* Compile-time detection of host byte order */
static bool is_host_big_endian(void)
{
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return true;
#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return false;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
}
729 
730 enum btf_endianness btf__endianness(const struct btf *btf)
731 {
732 	if (is_host_big_endian())
733 		return btf->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
734 	else
735 		return btf->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
736 }
737 
738 int btf__set_endianness(struct btf *btf, enum btf_endianness endian)
739 {
740 	if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
741 		return libbpf_err(-EINVAL);
742 
743 	btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
744 	if (!btf->swapped_endian) {
745 		free(btf->raw_data_swapped);
746 		btf->raw_data_swapped = NULL;
747 	}
748 	return 0;
749 }
750 
751 static bool btf_type_is_void(const struct btf_type *t)
752 {
753 	return t == &btf_void || btf_is_fwd(t);
754 }
755 
static bool btf_type_is_void_or_null(const struct btf_type *t)
{
	if (!t)
		return true;
	return btf_type_is_void(t);
}
760 
761 #define MAX_RESOLVE_DEPTH 32
762 
/* Resolve the byte size of type *type_id*, following modifier, typedef,
 * var, and array chains. Returns the size on success, negative error on
 * unsized/invalid types or arithmetic overflow.
 */
__s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
{
	const struct btf_array *array;
	const struct btf_type *t;
	__u32 nelems = 1;   /* accumulated array element count */
	__s64 size = -1;    /* stays negative until a sized type is found */
	int i;

	t = btf__type_by_id(btf, type_id);
	/* bounded by MAX_RESOLVE_DEPTH to protect against type ID loops */
	for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) {
		switch (btf_kind(t)) {
		case BTF_KIND_INT:
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
		case BTF_KIND_DATASEC:
		case BTF_KIND_FLOAT:
			/* these kinds carry an explicit byte size */
			size = t->size;
			goto done;
		case BTF_KIND_PTR:
			size = btf_ptr_sz(btf);
			goto done;
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_VAR:
		case BTF_KIND_DECL_TAG:
		case BTF_KIND_TYPE_TAG:
			/* size-transparent wrappers: follow t->type */
			type_id = t->type;
			break;
		case BTF_KIND_ARRAY:
			array = btf_array(t);
			/* guard element count multiplication overflow */
			if (nelems && array->nelems > UINT32_MAX / nelems)
				return libbpf_err(-E2BIG);
			nelems *= array->nelems;
			type_id = array->type;
			break;
		default:
			return libbpf_err(-EINVAL);
		}

		t = btf__type_by_id(btf, type_id);
	}

done:
	if (size < 0)
		return libbpf_err(-EINVAL);
	/* guard total size (nelems * size) overflow */
	if (nelems && size > UINT32_MAX / nelems)
		return libbpf_err(-E2BIG);

	return nelems * size;
}
817 
/* Determine alignment, in bytes, of type *id*. Returns a positive
 * alignment on success, 0 with errno set on unsupported kinds.
 */
int btf__align_of(const struct btf *btf, __u32 id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);
	__u16 kind = btf_kind(t);

	switch (kind) {
	case BTF_KIND_INT:
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
	case BTF_KIND_FLOAT:
		/* scalar alignment is its size, capped at pointer size */
		return min(btf_ptr_sz(btf), (size_t)t->size);
	case BTF_KIND_PTR:
		return btf_ptr_sz(btf);
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPE_TAG:
		/* modifiers don't affect alignment of the wrapped type */
		return btf__align_of(btf, t->type);
	case BTF_KIND_ARRAY:
		return btf__align_of(btf, btf_array(t)->type);
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *m = btf_members(t);
		__u16 vlen = btf_vlen(t);
		int i, max_align = 1, align;

		for (i = 0; i < vlen; i++, m++) {
			align = btf__align_of(btf, m->type);
			if (align <= 0)
				return libbpf_err(align);
			max_align = max(max_align, align);

			/* if field offset isn't aligned according to field
			 * type's alignment, then struct must be packed
			 */
			if (btf_member_bitfield_size(t, i) == 0 &&
			    (m->offset % (8 * align)) != 0)
				return 1;
		}

		/* if struct/union size isn't a multiple of its alignment,
		 * then struct must be packed
		 */
		if ((t->size % max_align) != 0)
			return 1;

		return max_align;
	}
	default:
		pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
		return errno = EINVAL, 0;
	}
}
872 
873 int btf__resolve_type(const struct btf *btf, __u32 type_id)
874 {
875 	const struct btf_type *t;
876 	int depth = 0;
877 
878 	t = btf__type_by_id(btf, type_id);
879 	while (depth < MAX_RESOLVE_DEPTH &&
880 	       !btf_type_is_void_or_null(t) &&
881 	       (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
882 		type_id = t->type;
883 		t = btf__type_by_id(btf, type_id);
884 		depth++;
885 	}
886 
887 	if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
888 		return libbpf_err(-EINVAL);
889 
890 	return type_id;
891 }
892 
893 __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
894 {
895 	__u32 i, nr_types = btf__type_cnt(btf);
896 
897 	if (!strcmp(type_name, "void"))
898 		return 0;
899 
900 	for (i = 1; i < nr_types; i++) {
901 		const struct btf_type *t = btf__type_by_id(btf, i);
902 		const char *name = btf__name_by_offset(btf, t->name_off);
903 
904 		if (name && !strcmp(type_name, name))
905 			return i;
906 	}
907 
908 	return libbpf_err(-ENOENT);
909 }
910 
911 static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
912 				   const char *type_name, __u32 kind)
913 {
914 	__u32 i, nr_types = btf__type_cnt(btf);
915 
916 	if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
917 		return 0;
918 
919 	for (i = start_id; i < nr_types; i++) {
920 		const struct btf_type *t = btf__type_by_id(btf, i);
921 		const char *name;
922 
923 		if (btf_kind(t) != kind)
924 			continue;
925 		name = btf__name_by_offset(btf, t->name_off);
926 		if (name && !strcmp(type_name, name))
927 			return i;
928 	}
929 
930 	return libbpf_err(-ENOENT);
931 }
932 
933 __s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
934 				 __u32 kind)
935 {
936 	return btf_find_by_name_kind(btf, btf->start_id, type_name, kind);
937 }
938 
939 __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
940 			     __u32 kind)
941 {
942 	return btf_find_by_name_kind(btf, 1, type_name, kind);
943 }
944 
945 static bool btf_is_modifiable(const struct btf *btf)
946 {
947 	return (void *)btf->hdr != btf->raw_data;
948 }
949 
/* Free BTF object and all memory it owns; accepts NULL and ERR_PTR
 * values for convenience, like free(NULL)
 */
void btf__free(struct btf *btf)
{
	if (IS_ERR_OR_NULL(btf))
		return;

	/* fd is -1 unless BTF was loaded into the kernel */
	if (btf->fd >= 0)
		close(btf->fd);

	if (btf_is_modifiable(btf)) {
		/* if BTF was modified after loading, it will have a split
		 * in-memory representation for header, types, and strings
		 * sections, so we need to free all of them individually. It
		 * might still have a cached contiguous raw data present,
		 * which will be unconditionally freed below.
		 */
		free(btf->hdr);
		free(btf->types_data);
		strset__free(btf->strs_set);
	}
	free(btf->raw_data);
	free(btf->raw_data_swapped);
	free(btf->type_offs);
	free(btf);
}
974 
/* Allocate an empty BTF object, optionally as split BTF on top of
 * base_btf. Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct btf *btf_new_empty(struct btf *base_btf)
{
	struct btf *btf;

	btf = calloc(1, sizeof(*btf));
	if (!btf)
		return ERR_PTR(-ENOMEM);

	btf->nr_types = 0;
	btf->start_id = 1;
	btf->start_str_off = 0;
	btf->fd = -1;
	btf->ptr_sz = sizeof(void *);
	btf->swapped_endian = false;

	if (base_btf) {
		/* split BTF: IDs and string offsets continue where the
		 * base BTF's leave off
		 */
		btf->base_btf = base_btf;
		btf->start_id = btf__type_cnt(base_btf);
		btf->start_str_off = base_btf->hdr->str_len;
	}

	/* +1 for empty string at offset 0 */
	btf->raw_size = sizeof(struct btf_header) + (base_btf ? 0 : 1);
	btf->raw_data = calloc(1, btf->raw_size);
	if (!btf->raw_data) {
		free(btf);
		return ERR_PTR(-ENOMEM);
	}

	btf->hdr = btf->raw_data;
	btf->hdr->hdr_len = sizeof(struct btf_header);
	btf->hdr->magic = BTF_MAGIC;
	btf->hdr->version = BTF_VERSION;

	/* both sections are empty, so they start right after the header */
	btf->types_data = btf->raw_data + btf->hdr->hdr_len;
	btf->strs_data = btf->raw_data + btf->hdr->hdr_len;
	btf->hdr->str_len = base_btf ? 0 : 1; /* empty string at offset 0 */

	return btf;
}
1015 
1016 struct btf *btf__new_empty(void)
1017 {
1018 	return libbpf_ptr(btf_new_empty(NULL));
1019 }
1020 
/* Create a new empty split BTF object on top of base_btf */
struct btf *btf__new_empty_split(struct btf *base_btf)
{
	return libbpf_ptr(btf_new_empty(base_btf));
}
1025 
/* Parse raw BTF data of *size* bytes into a new btf object, optionally
 * as split BTF on top of base_btf. The data is copied, so the caller's
 * buffer doesn't need to outlive the returned object. Returns ERR_PTR
 * on failure.
 */
static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
{
	struct btf *btf;
	int err;

	btf = calloc(1, sizeof(struct btf));
	if (!btf)
		return ERR_PTR(-ENOMEM);

	btf->nr_types = 0;
	btf->start_id = 1;
	btf->start_str_off = 0;
	btf->fd = -1;

	if (base_btf) {
		/* split BTF: IDs and string offsets continue where the
		 * base BTF's leave off
		 */
		btf->base_btf = base_btf;
		btf->start_id = btf__type_cnt(base_btf);
		btf->start_str_off = base_btf->hdr->str_len;
	}

	btf->raw_data = malloc(size);
	if (!btf->raw_data) {
		err = -ENOMEM;
		goto done;
	}
	memcpy(btf->raw_data, data, size);
	btf->raw_size = size;

	btf->hdr = btf->raw_data;
	/* validates header and converts it to native endianness, if needed */
	err = btf_parse_hdr(btf);
	if (err)
		goto done;

	btf->strs_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->str_off;
	btf->types_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->type_off;

	err = btf_parse_str_sec(btf);
	err = err ?: btf_parse_type_sec(btf);
	err = err ?: btf_sanity_check(btf);
	if (err)
		goto done;

	/* note: falls through with err == 0 on success */
done:
	if (err) {
		btf__free(btf);
		return ERR_PTR(err);
	}

	return btf;
}
1076 
1077 struct btf *btf__new(const void *data, __u32 size)
1078 {
1079 	return libbpf_ptr(btf_new(data, size, NULL));
1080 }
1081 
1082 static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
1083 				 struct btf_ext **btf_ext)
1084 {
1085 	Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
1086 	int err = 0, fd = -1, idx = 0;
1087 	struct btf *btf = NULL;
1088 	Elf_Scn *scn = NULL;
1089 	Elf *elf = NULL;
1090 	GElf_Ehdr ehdr;
1091 	size_t shstrndx;
1092 
1093 	if (elf_version(EV_CURRENT) == EV_NONE) {
1094 		pr_warn("failed to init libelf for %s\n", path);
1095 		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
1096 	}
1097 
1098 	fd = open(path, O_RDONLY | O_CLOEXEC);
1099 	if (fd < 0) {
1100 		err = -errno;
1101 		pr_warn("failed to open %s: %s\n", path, strerror(errno));
1102 		return ERR_PTR(err);
1103 	}
1104 
1105 	err = -LIBBPF_ERRNO__FORMAT;
1106 
1107 	elf = elf_begin(fd, ELF_C_READ, NULL);
1108 	if (!elf) {
1109 		pr_warn("failed to open %s as ELF file\n", path);
1110 		goto done;
1111 	}
1112 	if (!gelf_getehdr(elf, &ehdr)) {
1113 		pr_warn("failed to get EHDR from %s\n", path);
1114 		goto done;
1115 	}
1116 
1117 	if (elf_getshdrstrndx(elf, &shstrndx)) {
1118 		pr_warn("failed to get section names section index for %s\n",
1119 			path);
1120 		goto done;
1121 	}
1122 
1123 	if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
1124 		pr_warn("failed to get e_shstrndx from %s\n", path);
1125 		goto done;
1126 	}
1127 
1128 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
1129 		GElf_Shdr sh;
1130 		char *name;
1131 
1132 		idx++;
1133 		if (gelf_getshdr(scn, &sh) != &sh) {
1134 			pr_warn("failed to get section(%d) header from %s\n",
1135 				idx, path);
1136 			goto done;
1137 		}
1138 		name = elf_strptr(elf, shstrndx, sh.sh_name);
1139 		if (!name) {
1140 			pr_warn("failed to get section(%d) name from %s\n",
1141 				idx, path);
1142 			goto done;
1143 		}
1144 		if (strcmp(name, BTF_ELF_SEC) == 0) {
1145 			btf_data = elf_getdata(scn, 0);
1146 			if (!btf_data) {
1147 				pr_warn("failed to get section(%d, %s) data from %s\n",
1148 					idx, name, path);
1149 				goto done;
1150 			}
1151 			continue;
1152 		} else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
1153 			btf_ext_data = elf_getdata(scn, 0);
1154 			if (!btf_ext_data) {
1155 				pr_warn("failed to get section(%d, %s) data from %s\n",
1156 					idx, name, path);
1157 				goto done;
1158 			}
1159 			continue;
1160 		}
1161 	}
1162 
1163 	if (!btf_data) {
1164 		pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path);
1165 		err = -ENODATA;
1166 		goto done;
1167 	}
1168 	btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf);
1169 	err = libbpf_get_error(btf);
1170 	if (err)
1171 		goto done;
1172 
1173 	switch (gelf_getclass(elf)) {
1174 	case ELFCLASS32:
1175 		btf__set_pointer_size(btf, 4);
1176 		break;
1177 	case ELFCLASS64:
1178 		btf__set_pointer_size(btf, 8);
1179 		break;
1180 	default:
1181 		pr_warn("failed to get ELF class (bitness) for %s\n", path);
1182 		break;
1183 	}
1184 
1185 	if (btf_ext && btf_ext_data) {
1186 		*btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
1187 		err = libbpf_get_error(*btf_ext);
1188 		if (err)
1189 			goto done;
1190 	} else if (btf_ext) {
1191 		*btf_ext = NULL;
1192 	}
1193 done:
1194 	if (elf)
1195 		elf_end(elf);
1196 	close(fd);
1197 
1198 	if (!err)
1199 		return btf;
1200 
1201 	if (btf_ext)
1202 		btf_ext__free(*btf_ext);
1203 	btf__free(btf);
1204 
1205 	return ERR_PTR(err);
1206 }
1207 
1208 struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
1209 {
1210 	return libbpf_ptr(btf_parse_elf(path, NULL, btf_ext));
1211 }
1212 
1213 struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf)
1214 {
1215 	return libbpf_ptr(btf_parse_elf(path, base_btf, NULL));
1216 }
1217 
/* Parse a file at *path* containing raw (non-ELF-wrapped) BTF data.
 * Returns ERR_PTR(-EPROTO) if the file doesn't start with BTF magic, which
 * callers (see btf_parse()) use as a cue to fall back to ELF parsing.
 */
static struct btf *btf_parse_raw(const char *path, struct btf *base_btf)
{
	struct btf *btf = NULL;
	void *data = NULL;
	FILE *f = NULL;
	__u16 magic;
	int err = 0;
	long sz;

	/* "e" mode flag requests O_CLOEXEC (glibc extension) */
	f = fopen(path, "rbe");
	if (!f) {
		err = -errno;
		goto err_out;
	}

	/* check BTF magic */
	if (fread(&magic, 1, sizeof(magic), f) < sizeof(magic)) {
		err = -EIO;
		goto err_out;
	}
	/* accept both native and byte-swapped magic; endianness is sorted
	 * out later during full parsing
	 */
	if (magic != BTF_MAGIC && magic != bswap_16(BTF_MAGIC)) {
		/* definitely not a raw BTF */
		err = -EPROTO;
		goto err_out;
	}

	/* get file size */
	if (fseek(f, 0, SEEK_END)) {
		err = -errno;
		goto err_out;
	}
	sz = ftell(f);
	if (sz < 0) {
		err = -errno;
		goto err_out;
	}
	/* rewind to the start */
	if (fseek(f, 0, SEEK_SET)) {
		err = -errno;
		goto err_out;
	}

	/* pre-alloc memory and read all of BTF data */
	data = malloc(sz);
	if (!data) {
		err = -ENOMEM;
		goto err_out;
	}
	if (fread(data, 1, sz, f) < sz) {
		err = -EIO;
		goto err_out;
	}

	/* finally parse BTF data; note btf_new() copies the buffer, so
	 * *data* is freed below in both success and error cases; a failed
	 * btf_new() yields an ERR_PTR which is returned as-is (err == 0)
	 */
	btf = btf_new(data, sz, base_btf);

err_out:
	free(data);
	if (f)
		fclose(f);
	return err ? ERR_PTR(err) : btf;
}
1280 
1281 struct btf *btf__parse_raw(const char *path)
1282 {
1283 	return libbpf_ptr(btf_parse_raw(path, NULL));
1284 }
1285 
/* Parse a raw split BTF file on top of the given base BTF. */
struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf)
{
	struct btf *res = btf_parse_raw(path, base_btf);

	return libbpf_ptr(res);
}
1290 
1291 static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext)
1292 {
1293 	struct btf *btf;
1294 	int err;
1295 
1296 	if (btf_ext)
1297 		*btf_ext = NULL;
1298 
1299 	btf = btf_parse_raw(path, base_btf);
1300 	err = libbpf_get_error(btf);
1301 	if (!err)
1302 		return btf;
1303 	if (err != -EPROTO)
1304 		return ERR_PTR(err);
1305 	return btf_parse_elf(path, base_btf, btf_ext);
1306 }
1307 
1308 struct btf *btf__parse(const char *path, struct btf_ext **btf_ext)
1309 {
1310 	return libbpf_ptr(btf_parse(path, NULL, btf_ext));
1311 }
1312 
1313 struct btf *btf__parse_split(const char *path, struct btf *base_btf)
1314 {
1315 	return libbpf_ptr(btf_parse(path, base_btf, NULL));
1316 }
1317 
1318 static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);
1319 
/* Load BTF into the kernel, storing the resulting FD in btf->fd.
 * If log_level is 0, the first load attempt is made without any verifier
 * log; on failure the load is retried with log_level 1 using either the
 * caller-provided log_buf (of log_sz bytes) or an internally-grown buffer.
 * A non-zero token_fd is passed to the kernel with the BPF_F_TOKEN_FD flag.
 * Returns 0 on success, negative error code otherwise.
 */
int btf_load_into_kernel(struct btf *btf,
			 char *log_buf, size_t log_sz, __u32 log_level,
			 int token_fd)
{
	LIBBPF_OPTS(bpf_btf_load_opts, opts);
	__u32 buf_sz = 0, raw_size;
	char *buf = NULL, *tmp;
	void *raw_data;
	int err = 0;

	/* refuse to load the same BTF object twice */
	if (btf->fd >= 0)
		return libbpf_err(-EEXIST);
	/* a log size without a log buffer makes no sense */
	if (log_sz && !log_buf)
		return libbpf_err(-EINVAL);

	/* cache native raw data representation */
	raw_data = btf_get_raw_data(btf, &raw_size, false);
	if (!raw_data) {
		err = -ENOMEM;
		goto done;
	}
	btf->raw_size = raw_size;
	btf->raw_data = raw_data;

retry_load:
	/* if log_level is 0, we won't provide log_buf/log_size to the kernel,
	 * initially. Only if BTF loading fails, we bump log_level to 1 and
	 * retry, using either auto-allocated or custom log_buf. This way
	 * non-NULL custom log_buf provides a buffer just in case, but hopes
	 * for successful load and no need for log_buf.
	 */
	if (log_level) {
		/* if caller didn't provide custom log_buf, we'll keep
		 * allocating our own progressively bigger buffers for BTF
		 * verification log
		 */
		if (!log_buf) {
			/* start at BPF_LOG_BUF_SIZE, double on each retry */
			buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2);
			tmp = realloc(buf, buf_sz);
			if (!tmp) {
				err = -ENOMEM;
				goto done;
			}
			buf = tmp;
			buf[0] = '\0';
		}

		opts.log_buf = log_buf ? log_buf : buf;
		opts.log_size = log_buf ? log_sz : buf_sz;
		opts.log_level = log_level;
	}

	opts.token_fd = token_fd;
	if (token_fd)
		opts.btf_flags |= BPF_F_TOKEN_FD;

	btf->fd = bpf_btf_load(raw_data, raw_size, &opts);
	if (btf->fd < 0) {
		/* time to turn on verbose mode and try again */
		if (log_level == 0) {
			log_level = 1;
			goto retry_load;
		}
		/* only retry if caller didn't provide custom log_buf, but
		 * make sure we can never overflow buf_sz
		 */
		if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2)
			goto retry_load;

		/* capture errno before any other call can clobber it */
		err = -errno;
		pr_warn("BTF loading error: %d\n", err);
		/* don't print out contents of custom log_buf */
		if (!log_buf && buf[0])
			pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf);
	}

done:
	free(buf);
	return libbpf_err(err);
}
1400 
/* Load BTF into the kernel with defaults: no user log buffer, log_level 0
 * (quiet first attempt), no BPF token FD.
 */
int btf__load_into_kernel(struct btf *btf)
{
	return btf_load_into_kernel(btf, NULL, 0, 0, 0);
}
1405 
/* Return kernel FD associated with this BTF object; -1 if not loaded */
int btf__fd(const struct btf *btf)
{
	return btf->fd;
}
1410 
/* Associate a kernel BTF FD with this BTF object */
void btf__set_fd(struct btf *btf, int fd)
{
	btf->fd = fd;
}
1415 
1416 static const void *btf_strs_data(const struct btf *btf)
1417 {
1418 	return btf->strs_data ? btf->strs_data : strset__data(btf->strs_set);
1419 }
1420 
/* Produce a single contiguous blob (header + types + strings) representing
 * this BTF, in native or non-native endianness per *swap_endian*. If a
 * matching cached blob already exists, it is returned as-is; otherwise a
 * freshly allocated blob is returned (callers are responsible for storing
 * it back into btf for caching/ownership). *size is set to the blob size.
 * Returns NULL on allocation failure or corrupted type data.
 */
static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian)
{
	struct btf_header *hdr = btf->hdr;
	struct btf_type *t;
	void *data, *p;
	__u32 data_sz;
	int i;

	/* reuse cached representation of the requested endianness, if any */
	data = swap_endian ? btf->raw_data_swapped : btf->raw_data;
	if (data) {
		*size = btf->raw_size;
		return data;
	}

	data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len;
	data = calloc(1, data_sz);
	if (!data)
		return NULL;
	p = data;

	memcpy(p, hdr, hdr->hdr_len);
	if (swap_endian)
		btf_bswap_hdr(p);
	p += hdr->hdr_len;

	memcpy(p, btf->types_data, hdr->type_len);
	if (swap_endian) {
		for (i = 0; i < btf->nr_types; i++) {
			t = p + btf->type_offs[i];
			/* btf_bswap_type_rest() relies on native t->info, so
			 * we swap base type info after we swapped all the
			 * additional information
			 */
			if (btf_bswap_type_rest(t))
				goto err_out;
			btf_bswap_type_base(t);
		}
	}
	p += hdr->type_len;

	/* strings are endianness-agnostic, plain copy */
	memcpy(p, btf_strs_data(btf), hdr->str_len);
	p += hdr->str_len;

	*size = data_sz;
	return data;
err_out:
	free(data);
	return NULL;
}
1470 
1471 const void *btf__raw_data(const struct btf *btf_ro, __u32 *size)
1472 {
1473 	struct btf *btf = (struct btf *)btf_ro;
1474 	__u32 data_sz;
1475 	void *data;
1476 
1477 	data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian);
1478 	if (!data)
1479 		return errno = ENOMEM, NULL;
1480 
1481 	btf->raw_size = data_sz;
1482 	if (btf->swapped_endian)
1483 		btf->raw_data_swapped = data;
1484 	else
1485 		btf->raw_data = data;
1486 	*size = data_sz;
1487 	return data;
1488 }
1489 
1490 __attribute__((alias("btf__raw_data")))
1491 const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
1492 
1493 const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
1494 {
1495 	if (offset < btf->start_str_off)
1496 		return btf__str_by_offset(btf->base_btf, offset);
1497 	else if (offset - btf->start_str_off < btf->hdr->str_len)
1498 		return btf_strs_data(btf) + (offset - btf->start_str_off);
1499 	else
1500 		return errno = EINVAL, NULL;
1501 }
1502 
/* Alias of btf__str_by_offset() */
const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
{
	return btf__str_by_offset(btf, offset);
}
1507 
/* Fetch the BTF data of the kernel object behind *btf_fd* via
 * bpf_btf_get_info_by_fd() and parse it into a new struct btf.
 * Returns new BTF object or ERR_PTR() encoded error (not a
 * libbpf_err_ptr(); callers convert as needed).
 */
struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
{
	struct bpf_btf_info btf_info;
	__u32 len = sizeof(btf_info);
	__u32 last_size;
	struct btf *btf;
	void *ptr;
	int err;

	/* we won't know btf_size until we call bpf_btf_get_info_by_fd(). so
	 * let's start with a sane default - 4KiB here - and resize it only if
	 * bpf_btf_get_info_by_fd() needs a bigger buffer.
	 */
	last_size = 4096;
	ptr = malloc(last_size);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	memset(&btf_info, 0, sizeof(btf_info));
	btf_info.btf = ptr_to_u64(ptr);
	btf_info.btf_size = last_size;
	err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);

	/* first attempt succeeded but buffer was too small: grow it to the
	 * size the kernel reported and query again
	 */
	if (!err && btf_info.btf_size > last_size) {
		void *temp_ptr;

		last_size = btf_info.btf_size;
		temp_ptr = realloc(ptr, last_size);
		if (!temp_ptr) {
			btf = ERR_PTR(-ENOMEM);
			goto exit_free;
		}
		ptr = temp_ptr;

		/* reset the info struct; the kernel fills it in anew */
		len = sizeof(btf_info);
		memset(&btf_info, 0, sizeof(btf_info));
		btf_info.btf = ptr_to_u64(ptr);
		btf_info.btf_size = last_size;

		err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
	}

	/* BTF could still have grown between the two queries */
	if (err || btf_info.btf_size > last_size) {
		btf = err ? ERR_PTR(-errno) : ERR_PTR(-E2BIG);
		goto exit_free;
	}

	btf = btf_new(ptr, btf_info.btf_size, base_btf);

exit_free:
	free(ptr);
	return btf;
}
1561 
1562 struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
1563 {
1564 	struct btf *btf;
1565 	int btf_fd;
1566 
1567 	btf_fd = bpf_btf_get_fd_by_id(id);
1568 	if (btf_fd < 0)
1569 		return libbpf_err_ptr(-errno);
1570 
1571 	btf = btf_get_from_fd(btf_fd, base_btf);
1572 	close(btf_fd);
1573 
1574 	return libbpf_ptr(btf);
1575 }
1576 
/* Load BTF of a kernel object by its ID, without a base BTF */
struct btf *btf__load_from_kernel_by_id(__u32 id)
{
	return btf__load_from_kernel_by_id_split(id, NULL);
}
1581 
1582 static void btf_invalidate_raw_data(struct btf *btf)
1583 {
1584 	if (btf->raw_data) {
1585 		free(btf->raw_data);
1586 		btf->raw_data = NULL;
1587 	}
1588 	if (btf->raw_data_swapped) {
1589 		free(btf->raw_data_swapped);
1590 		btf->raw_data_swapped = NULL;
1591 	}
1592 }
1593 
/* Ensure BTF is ready to be modified (by splitting into a three memory
 * regions for header, types, and strings). Also invalidate cached
 * raw_data, if any. All new state is built first; the btf instance is
 * only updated once everything succeeded, so failure leaves it intact.
 * Returns 0 on success, negative error code otherwise.
 */
static int btf_ensure_modifiable(struct btf *btf)
{
	void *hdr, *types;
	struct strset *set = NULL;
	int err = -ENOMEM;

	if (btf_is_modifiable(btf)) {
		/* any BTF modification invalidates raw_data */
		btf_invalidate_raw_data(btf);
		return 0;
	}

	/* split raw data into three memory regions */
	hdr = malloc(btf->hdr->hdr_len);
	types = malloc(btf->hdr->type_len);
	if (!hdr || !types)
		goto err_out;

	memcpy(hdr, btf->hdr, btf->hdr->hdr_len);
	memcpy(types, btf->types_data, btf->hdr->type_len);

	/* build lookup index for all strings */
	set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr->str_len);
	if (IS_ERR(set)) {
		err = PTR_ERR(set);
		goto err_out;
	}

	/* only when everything was successful, update internal state */
	btf->hdr = hdr;
	btf->types_data = types;
	btf->types_data_cap = btf->hdr->type_len;
	btf->strs_data = NULL;	/* strings now live in strs_set */
	btf->strs_set = set;
	/* if BTF was created from scratch, all strings are guaranteed to be
	 * unique and deduplicated
	 */
	if (btf->hdr->str_len == 0)
		btf->strs_deduped = true;
	if (!btf->base_btf && btf->hdr->str_len == 1)
		btf->strs_deduped = true;

	/* invalidate raw_data representation */
	btf_invalidate_raw_data(btf);

	return 0;

err_out:
	strset__free(set);
	free(hdr);
	free(types);
	return err;
}
1651 
1652 /* Find an offset in BTF string section that corresponds to a given string *s*.
1653  * Returns:
1654  *   - >0 offset into string section, if string is found;
1655  *   - -ENOENT, if string is not in the string section;
1656  *   - <0, on any other error.
1657  */
1658 int btf__find_str(struct btf *btf, const char *s)
1659 {
1660 	int off;
1661 
1662 	if (btf->base_btf) {
1663 		off = btf__find_str(btf->base_btf, s);
1664 		if (off != -ENOENT)
1665 			return off;
1666 	}
1667 
1668 	/* BTF needs to be in a modifiable state to build string lookup index */
1669 	if (btf_ensure_modifiable(btf))
1670 		return libbpf_err(-ENOMEM);
1671 
1672 	off = strset__find_str(btf->strs_set, s);
1673 	if (off < 0)
1674 		return libbpf_err(off);
1675 
1676 	return btf->start_str_off + off;
1677 }
1678 
/* Add a string s to the BTF string section. If an identical string already
 * exists (in this BTF or, for split BTF, in its base), its existing offset
 * is returned instead of adding a duplicate.
 * Returns:
 *   - > 0 offset into string section, on success;
 *   - < 0, on error.
 */
int btf__add_str(struct btf *btf, const char *s)
{
	int off;

	/* reuse an identical string from base BTF rather than duplicating */
	if (btf->base_btf) {
		off = btf__find_str(btf->base_btf, s);
		if (off != -ENOENT)
			return off;
	}

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	off = strset__add_str(btf->strs_set, s);
	if (off < 0)
		return libbpf_err(off);

	/* strset may have grown; keep header's section size in sync */
	btf->hdr->str_len = strset__data_size(btf->strs_set);

	return btf->start_str_off + off;
}
1705 
/* Reserve *add_sz* more bytes at the end of the types data, growing its
 * capacity as needed; returns pointer to the reserved space, or NULL on
 * allocation failure.
 */
static void *btf_add_type_mem(struct btf *btf, size_t add_sz)
{
	return libbpf_add_mem(&btf->types_data, &btf->types_data_cap, 1,
			      btf->hdr->type_len, UINT_MAX, add_sz);
}
1711 
/* Bump t's vlen (e.g., member/param count) by one, preserving kind and kflag */
static void btf_type_inc_vlen(struct btf_type *t)
{
	t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t));
}
1716 
/* Finalize a type of *data_sz* bytes just written at the end of types data:
 * record its offset in the type index, grow the type section, shift the
 * string section offset accordingly, and bump the type count.
 * Returns the new type's ID, or negative error.
 */
static int btf_commit_type(struct btf *btf, int data_sz)
{
	int err;

	err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
	if (err)
		return libbpf_err(err);

	btf->hdr->type_len += data_sz;
	btf->hdr->str_off += data_sz;	/* strings follow types in the layout */
	btf->nr_types++;
	return btf->start_id + btf->nr_types - 1;
}
1730 
/* Context for copying types and their strings from one BTF (src) into
 * another (dst)
 */
struct btf_pipe {
	const struct btf *src;
	struct btf *dst;
	struct hashmap *str_off_map; /* map string offsets from src to dst */
};
1736 
1737 static int btf_rewrite_str(__u32 *str_off, void *ctx)
1738 {
1739 	struct btf_pipe *p = ctx;
1740 	long mapped_off;
1741 	int off, err;
1742 
1743 	if (!*str_off) /* nothing to do for empty strings */
1744 		return 0;
1745 
1746 	if (p->str_off_map &&
1747 	    hashmap__find(p->str_off_map, *str_off, &mapped_off)) {
1748 		*str_off = mapped_off;
1749 		return 0;
1750 	}
1751 
1752 	off = btf__add_str(p->dst, btf__str_by_offset(p->src, *str_off));
1753 	if (off < 0)
1754 		return off;
1755 
1756 	/* Remember string mapping from src to dst.  It avoids
1757 	 * performing expensive string comparisons.
1758 	 */
1759 	if (p->str_off_map) {
1760 		err = hashmap__append(p->str_off_map, *str_off, off);
1761 		if (err)
1762 			return err;
1763 	}
1764 
1765 	*str_off = off;
1766 	return 0;
1767 }
1768 
/* Copy a single type *src_type* from *src_btf* into *btf*, rewriting all
 * referenced string offsets to btf's string section. Note: referenced type
 * IDs are copied as-is, without remapping (cf. btf__add_btf()).
 * Returns new type's ID on success, negative error otherwise.
 */
int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
{
	struct btf_pipe p = { .src = src_btf, .dst = btf };
	struct btf_type *t;
	int sz, err;

	sz = btf_type_size(src_type);
	if (sz < 0)
		return libbpf_err(sz);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	memcpy(t, src_type, sz);

	/* btf_rewrite_str() only grows the string set, so t stays valid */
	err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
	if (err)
		return libbpf_err(err);

	return btf_commit_type(btf, sz);
}
1795 
1796 static int btf_rewrite_type_ids(__u32 *type_id, void *ctx)
1797 {
1798 	struct btf *btf = ctx;
1799 
1800 	if (!*type_id) /* nothing to do for VOID references */
1801 		return 0;
1802 
1803 	/* we haven't updated btf's type count yet, so
1804 	 * btf->start_id + btf->nr_types - 1 is the type ID offset we should
1805 	 * add to all newly added BTF types
1806 	 */
1807 	*type_id += btf->start_id + btf->nr_types - 1;
1808 	return 0;
1809 }
1810 
1811 static size_t btf_dedup_identity_hash_fn(long key, void *ctx);
1812 static bool btf_dedup_equal_fn(long k1, long k2, void *ctx);
1813 
/* Append all types from *src_btf* into *btf*, remapping all referenced
 * type IDs and deduplicating/remapping all referenced strings.
 * Returns the ID that src_btf's first type received in *btf*, or a
 * negative error code. On failure, all partial modifications are rolled
 * back, leaving *btf* logically unmodified.
 */
int btf__add_btf(struct btf *btf, const struct btf *src_btf)
{
	struct btf_pipe p = { .src = src_btf, .dst = btf };
	int data_sz, sz, cnt, i, err, old_strs_len;
	__u32 *off;
	void *t;

	/* appending split BTF isn't supported yet */
	if (src_btf->base_btf)
		return libbpf_err(-ENOTSUP);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	/* remember original strings section size if we have to roll back
	 * partial strings section changes
	 */
	old_strs_len = btf->hdr->str_len;

	data_sz = src_btf->hdr->type_len;
	cnt = btf__type_cnt(src_btf) - 1;	/* exclude implicit VOID */

	/* pre-allocate enough memory for new types */
	t = btf_add_type_mem(btf, data_sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* pre-allocate enough memory for type offset index for new types */
	off = btf_add_type_offs_mem(btf, cnt);
	if (!off)
		return libbpf_err(-ENOMEM);

	/* Map the string offsets from src_btf to the offsets from btf to improve performance */
	p.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
	if (IS_ERR(p.str_off_map))
		return libbpf_err(-ENOMEM);

	/* bulk copy types data for all types from src_btf */
	memcpy(t, src_btf->types_data, data_sz);

	for (i = 0; i < cnt; i++) {
		sz = btf_type_size(t);
		if (sz < 0) {
			/* unlikely, has to be corrupted src_btf */
			err = sz;
			goto err_out;
		}

		/* fill out type ID to type offset mapping for lookups by type ID */
		*off = t - btf->types_data;

		/* add, dedup, and remap strings referenced by this BTF type */
		err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
		if (err)
			goto err_out;

		/* remap all type IDs referenced from this BTF type */
		err = btf_type_visit_type_ids(t, btf_rewrite_type_ids, btf);
		if (err)
			goto err_out;

		/* go to next type data and type offset index entry */
		t += sz;
		off++;
	}

	/* Up until now any of the copied type data was effectively invisible,
	 * so if we exited early before this point due to error, BTF would be
	 * effectively unmodified. There would be extra internal memory
	 * pre-allocated, but it would not be available for querying.  But now
	 * that we've copied and rewritten all the data successfully, we can
	 * update type count and various internal offsets and sizes to
	 * "commit" the changes and made them visible to the outside world.
	 */
	btf->hdr->type_len += data_sz;
	btf->hdr->str_off += data_sz;
	btf->nr_types += cnt;

	hashmap__free(p.str_off_map);

	/* return type ID of the first added BTF type */
	return btf->start_id + btf->nr_types - cnt;
err_out:
	/* zero out preallocated memory as if it was just allocated with
	 * libbpf_add_mem()
	 */
	memset(btf->types_data + btf->hdr->type_len, 0, data_sz);
	memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len);

	/* and now restore original strings section size; types data size
	 * wasn't modified, so doesn't need restoring, see big comment above
	 */
	btf->hdr->str_len = old_strs_len;

	hashmap__free(p.str_off_map);

	return libbpf_err(err);
}
1913 
1914 /*
1915  * Append new BTF_KIND_INT type with:
1916  *   - *name* - non-empty, non-NULL type name;
1917  *   - *sz* - power-of-2 (1, 2, 4, ..) size of the type, in bytes;
1918  *   - encoding is a combination of BTF_INT_SIGNED, BTF_INT_CHAR, BTF_INT_BOOL.
1919  * Returns:
1920  *   - >0, type ID of newly added BTF type;
1921  *   - <0, on error.
1922  */
1923 int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding)
1924 {
1925 	struct btf_type *t;
1926 	int sz, name_off;
1927 
1928 	/* non-empty name */
1929 	if (!name || !name[0])
1930 		return libbpf_err(-EINVAL);
1931 	/* byte_sz must be power of 2 */
1932 	if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16)
1933 		return libbpf_err(-EINVAL);
1934 	if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL))
1935 		return libbpf_err(-EINVAL);
1936 
1937 	/* deconstruct BTF, if necessary, and invalidate raw_data */
1938 	if (btf_ensure_modifiable(btf))
1939 		return libbpf_err(-ENOMEM);
1940 
1941 	sz = sizeof(struct btf_type) + sizeof(int);
1942 	t = btf_add_type_mem(btf, sz);
1943 	if (!t)
1944 		return libbpf_err(-ENOMEM);
1945 
1946 	/* if something goes wrong later, we might end up with an extra string,
1947 	 * but that shouldn't be a problem, because BTF can't be constructed
1948 	 * completely anyway and will most probably be just discarded
1949 	 */
1950 	name_off = btf__add_str(btf, name);
1951 	if (name_off < 0)
1952 		return name_off;
1953 
1954 	t->name_off = name_off;
1955 	t->info = btf_type_info(BTF_KIND_INT, 0, 0);
1956 	t->size = byte_sz;
1957 	/* set INT info, we don't allow setting legacy bit offset/size */
1958 	*(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8);
1959 
1960 	return btf_commit_type(btf, sz);
1961 }
1962 
1963 /*
1964  * Append new BTF_KIND_FLOAT type with:
1965  *   - *name* - non-empty, non-NULL type name;
1966  *   - *sz* - size of the type, in bytes;
1967  * Returns:
1968  *   - >0, type ID of newly added BTF type;
1969  *   - <0, on error.
1970  */
1971 int btf__add_float(struct btf *btf, const char *name, size_t byte_sz)
1972 {
1973 	struct btf_type *t;
1974 	int sz, name_off;
1975 
1976 	/* non-empty name */
1977 	if (!name || !name[0])
1978 		return libbpf_err(-EINVAL);
1979 
1980 	/* byte_sz must be one of the explicitly allowed values */
1981 	if (byte_sz != 2 && byte_sz != 4 && byte_sz != 8 && byte_sz != 12 &&
1982 	    byte_sz != 16)
1983 		return libbpf_err(-EINVAL);
1984 
1985 	if (btf_ensure_modifiable(btf))
1986 		return libbpf_err(-ENOMEM);
1987 
1988 	sz = sizeof(struct btf_type);
1989 	t = btf_add_type_mem(btf, sz);
1990 	if (!t)
1991 		return libbpf_err(-ENOMEM);
1992 
1993 	name_off = btf__add_str(btf, name);
1994 	if (name_off < 0)
1995 		return name_off;
1996 
1997 	t->name_off = name_off;
1998 	t->info = btf_type_info(BTF_KIND_FLOAT, 0, 0);
1999 	t->size = byte_sz;
2000 
2001 	return btf_commit_type(btf, sz);
2002 }
2003 
2004 /* it's completely legal to append BTF types with type IDs pointing forward to
2005  * types that haven't been appended yet, so we only make sure that id looks
2006  * sane, we can't guarantee that ID will always be valid
2007  */
2008 static int validate_type_id(int id)
2009 {
2010 	if (id < 0 || id > BTF_MAX_NR_TYPES)
2011 		return -EINVAL;
2012 	return 0;
2013 }
2014 
/* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT:
 * appends a type of the given *kind* referencing *ref_type_id*, with an
 * optional *name* (NULL or empty string means anonymous).
 * Returns new type's ID, or negative error.
 */
static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id)
{
	struct btf_type *t;
	int sz, name_off = 0;

	if (validate_type_id(ref_type_id))
		return libbpf_err(-EINVAL);

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* btf__add_str() only affects the string set, so t stays valid */
	if (name && name[0]) {
		name_off = btf__add_str(btf, name);
		if (name_off < 0)
			return name_off;
	}

	t->name_off = name_off;
	t->info = btf_type_info(kind, 0, 0);
	t->type = ref_type_id;

	return btf_commit_type(btf, sz);
}
2044 
2045 /*
2046  * Append new BTF_KIND_PTR type with:
2047  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2048  * Returns:
2049  *   - >0, type ID of newly added BTF type;
2050  *   - <0, on error.
2051  */
2052 int btf__add_ptr(struct btf *btf, int ref_type_id)
2053 {
2054 	return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id);
2055 }
2056 
2057 /*
2058  * Append new BTF_KIND_ARRAY type with:
2059  *   - *index_type_id* - type ID of the type describing array index;
2060  *   - *elem_type_id* - type ID of the type describing array element;
2061  *   - *nr_elems* - the size of the array;
2062  * Returns:
2063  *   - >0, type ID of newly added BTF type;
2064  *   - <0, on error.
2065  */
2066 int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems)
2067 {
2068 	struct btf_type *t;
2069 	struct btf_array *a;
2070 	int sz;
2071 
2072 	if (validate_type_id(index_type_id) || validate_type_id(elem_type_id))
2073 		return libbpf_err(-EINVAL);
2074 
2075 	if (btf_ensure_modifiable(btf))
2076 		return libbpf_err(-ENOMEM);
2077 
2078 	sz = sizeof(struct btf_type) + sizeof(struct btf_array);
2079 	t = btf_add_type_mem(btf, sz);
2080 	if (!t)
2081 		return libbpf_err(-ENOMEM);
2082 
2083 	t->name_off = 0;
2084 	t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0);
2085 	t->size = 0;
2086 
2087 	a = btf_array(t);
2088 	a->type = elem_type_id;
2089 	a->index_type = index_type_id;
2090 	a->nelems = nr_elems;
2091 
2092 	return btf_commit_type(btf, sz);
2093 }
2094 
/* generic STRUCT/UNION append function: appends an empty composite type of
 * the given *kind* and *bytes_sz*, with an optional *name* (NULL or empty
 * means anonymous). Members are added afterwards via btf__add_field().
 * Returns new type's ID, or negative error.
 */
static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz)
{
	struct btf_type *t;
	int sz, name_off = 0;

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* btf__add_str() only affects the string set, so t stays valid */
	if (name && name[0]) {
		name_off = btf__add_str(btf, name);
		if (name_off < 0)
			return name_off;
	}

	/* start out with vlen=0 and no kflag; this will be adjusted when
	 * adding each member
	 */
	t->name_off = name_off;
	t->info = btf_type_info(kind, 0, 0);
	t->size = bytes_sz;

	return btf_commit_type(btf, sz);
}
2124 
2125 /*
2126  * Append new BTF_KIND_STRUCT type with:
2127  *   - *name* - name of the struct, can be NULL or empty for anonymous structs;
2128  *   - *byte_sz* - size of the struct, in bytes;
2129  *
2130  * Struct initially has no fields in it. Fields can be added by
2131  * btf__add_field() right after btf__add_struct() succeeds.
2132  *
2133  * Returns:
2134  *   - >0, type ID of newly added BTF type;
2135  *   - <0, on error.
2136  */
2137 int btf__add_struct(struct btf *btf, const char *name, __u32 byte_sz)
2138 {
2139 	return btf_add_composite(btf, BTF_KIND_STRUCT, name, byte_sz);
2140 }
2141 
2142 /*
2143  * Append new BTF_KIND_UNION type with:
2144  *   - *name* - name of the union, can be NULL or empty for anonymous union;
2145  *   - *byte_sz* - size of the union, in bytes;
2146  *
2147  * Union initially has no fields in it. Fields can be added by
2148  * btf__add_field() right after btf__add_union() succeeds. All fields
2149  * should have *bit_offset* of 0.
2150  *
2151  * Returns:
2152  *   - >0, type ID of newly added BTF type;
2153  *   - <0, on error.
2154  */
2155 int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz)
2156 {
2157 	return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz);
2158 }
2159 
/* Return the most recently appended type; callers must ensure at least one
 * type was added to this BTF (btf->nr_types > 0)
 */
static struct btf_type *btf_last_type(struct btf *btf)
{
	return btf_type_by_id(btf, btf__type_cnt(btf) - 1);
}
2164 
2165 /*
2166  * Append new field for the current STRUCT/UNION type with:
2167  *   - *name* - name of the field, can be NULL or empty for anonymous field;
2168  *   - *type_id* - type ID for the type describing field type;
2169  *   - *bit_offset* - bit offset of the start of the field within struct/union;
2170  *   - *bit_size* - bit size of a bitfield, 0 for non-bitfield fields;
2171  * Returns:
2172  *   -  0, on success;
2173  *   - <0, on error.
2174  */
2175 int btf__add_field(struct btf *btf, const char *name, int type_id,
2176 		   __u32 bit_offset, __u32 bit_size)
2177 {
2178 	struct btf_type *t;
2179 	struct btf_member *m;
2180 	bool is_bitfield;
2181 	int sz, name_off = 0;
2182 
2183 	/* last type should be union/struct */
2184 	if (btf->nr_types == 0)
2185 		return libbpf_err(-EINVAL);
2186 	t = btf_last_type(btf);
2187 	if (!btf_is_composite(t))
2188 		return libbpf_err(-EINVAL);
2189 
2190 	if (validate_type_id(type_id))
2191 		return libbpf_err(-EINVAL);
2192 	/* best-effort bit field offset/size enforcement */
2193 	is_bitfield = bit_size || (bit_offset % 8 != 0);
2194 	if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff))
2195 		return libbpf_err(-EINVAL);
2196 
2197 	/* only offset 0 is allowed for unions */
2198 	if (btf_is_union(t) && bit_offset)
2199 		return libbpf_err(-EINVAL);
2200 
2201 	/* decompose and invalidate raw data */
2202 	if (btf_ensure_modifiable(btf))
2203 		return libbpf_err(-ENOMEM);
2204 
2205 	sz = sizeof(struct btf_member);
2206 	m = btf_add_type_mem(btf, sz);
2207 	if (!m)
2208 		return libbpf_err(-ENOMEM);
2209 
2210 	if (name && name[0]) {
2211 		name_off = btf__add_str(btf, name);
2212 		if (name_off < 0)
2213 			return name_off;
2214 	}
2215 
2216 	m->name_off = name_off;
2217 	m->type = type_id;
2218 	m->offset = bit_offset | (bit_size << 24);
2219 
2220 	/* btf_add_type_mem can invalidate t pointer */
2221 	t = btf_last_type(btf);
2222 	/* update parent type's vlen and kflag */
2223 	t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t));
2224 
2225 	btf->hdr->type_len += sz;
2226 	btf->hdr->str_off += sz;
2227 	return 0;
2228 }
2229 
/*
 * Common part of appending BTF_KIND_ENUM/BTF_KIND_ENUM64 type with:
 *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
 *   - *byte_sz* - size of the enum, in bytes; must be a power of 2, at most 8;
 *   - *is_signed* - signedness of enumerator values, recorded in kflag;
 *   - *kind* - BTF_KIND_ENUM or BTF_KIND_ENUM64.
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
static int btf_add_enum_common(struct btf *btf, const char *name, __u32 byte_sz,
			       bool is_signed, __u8 kind)
{
	struct btf_type *t;
	int sz, name_off = 0;

	/* byte_sz must be power of 2 */
	if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8)
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	if (name && name[0]) {
		name_off = btf__add_str(btf, name);
		if (name_off < 0)
			return name_off;
	}

	/* start out with vlen=0; it will be adjusted when adding enum values */
	t->name_off = name_off;
	t->info = btf_type_info(kind, 0, is_signed);
	t->size = byte_sz;

	return btf_commit_type(btf, sz);
}
2261 
2262 /*
2263  * Append new BTF_KIND_ENUM type with:
2264  *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
2265  *   - *byte_sz* - size of the enum, in bytes.
2266  *
2267  * Enum initially has no enum values in it (and corresponds to enum forward
2268  * declaration). Enumerator values can be added by btf__add_enum_value()
2269  * immediately after btf__add_enum() succeeds.
2270  *
2271  * Returns:
2272  *   - >0, type ID of newly added BTF type;
2273  *   - <0, on error.
2274  */
2275 int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
2276 {
2277 	/*
2278 	 * set the signedness to be unsigned, it will change to signed
2279 	 * if any later enumerator is negative.
2280 	 */
2281 	return btf_add_enum_common(btf, name, byte_sz, false, BTF_KIND_ENUM);
2282 }
2283 
/*
 * Append new enum value for the current ENUM type with:
 *   - *name* - name of the enumerator value, can't be NULL or empty;
 *   - *value* - integer value corresponding to enum value *name*;
 * Returns:
 *   -  0, on success;
 *   - <0, on error.
 */
int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
{
	struct btf_type *t;
	struct btf_enum *v;
	int sz, name_off;

	/* last type should be BTF_KIND_ENUM */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_enum(t))
		return libbpf_err(-EINVAL);

	/* non-empty name */
	if (!name || !name[0])
		return libbpf_err(-EINVAL);
	/* btf_enum stores a 32-bit value, so only [INT_MIN, UINT_MAX] fits;
	 * use btf__add_enum64_value() for wider enumerators
	 */
	if (value < INT_MIN || value > UINT_MAX)
		return libbpf_err(-E2BIG);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_enum);
	v = btf_add_type_mem(btf, sz);
	if (!v)
		return libbpf_err(-ENOMEM);

	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	v->name_off = name_off;
	v->val = value;

	/* update parent type's vlen; re-fetch t, as btf_add_type_mem can
	 * invalidate the old pointer
	 */
	t = btf_last_type(btf);
	btf_type_inc_vlen(t);

	/* if negative value, set signedness to signed */
	if (value < 0)
		t->info = btf_type_info(btf_kind(t), btf_vlen(t), true);

	btf->hdr->type_len += sz;
	btf->hdr->str_off += sz;
	return 0;
}
2339 
2340 /*
2341  * Append new BTF_KIND_ENUM64 type with:
2342  *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
2343  *   - *byte_sz* - size of the enum, in bytes.
2344  *   - *is_signed* - whether the enum values are signed or not;
2345  *
2346  * Enum initially has no enum values in it (and corresponds to enum forward
2347  * declaration). Enumerator values can be added by btf__add_enum64_value()
2348  * immediately after btf__add_enum64() succeeds.
2349  *
2350  * Returns:
2351  *   - >0, type ID of newly added BTF type;
2352  *   - <0, on error.
2353  */
2354 int btf__add_enum64(struct btf *btf, const char *name, __u32 byte_sz,
2355 		    bool is_signed)
2356 {
2357 	return btf_add_enum_common(btf, name, byte_sz, is_signed,
2358 				   BTF_KIND_ENUM64);
2359 }
2360 
/*
 * Append new enum value for the current ENUM64 type with:
 *   - *name* - name of the enumerator value, can't be NULL or empty;
 *   - *value* - integer value corresponding to enum value *name*;
 * Returns:
 *   -  0, on success;
 *   - <0, on error.
 */
int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value)
{
	struct btf_enum64 *v;
	struct btf_type *t;
	int sz, name_off;

	/* last type should be BTF_KIND_ENUM64 */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_enum64(t))
		return libbpf_err(-EINVAL);

	/* non-empty name */
	if (!name || !name[0])
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_enum64);
	v = btf_add_type_mem(btf, sz);
	if (!v)
		return libbpf_err(-ENOMEM);

	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	v->name_off = name_off;
	/* 64-bit value is stored as two 32-bit halves */
	v->val_lo32 = (__u32)value;
	v->val_hi32 = value >> 32;

	/* update parent type's vlen; re-fetch t, as btf_add_type_mem can
	 * invalidate the old pointer
	 */
	t = btf_last_type(btf);
	btf_type_inc_vlen(t);

	btf->hdr->type_len += sz;
	btf->hdr->str_off += sz;
	return 0;
}
2411 
2412 /*
2413  * Append new BTF_KIND_FWD type with:
2414  *   - *name*, non-empty/non-NULL name;
2415  *   - *fwd_kind*, kind of forward declaration, one of BTF_FWD_STRUCT,
2416  *     BTF_FWD_UNION, or BTF_FWD_ENUM;
2417  * Returns:
2418  *   - >0, type ID of newly added BTF type;
2419  *   - <0, on error.
2420  */
2421 int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind)
2422 {
2423 	if (!name || !name[0])
2424 		return libbpf_err(-EINVAL);
2425 
2426 	switch (fwd_kind) {
2427 	case BTF_FWD_STRUCT:
2428 	case BTF_FWD_UNION: {
2429 		struct btf_type *t;
2430 		int id;
2431 
2432 		id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0);
2433 		if (id <= 0)
2434 			return id;
2435 		t = btf_type_by_id(btf, id);
2436 		t->info = btf_type_info(BTF_KIND_FWD, 0, fwd_kind == BTF_FWD_UNION);
2437 		return id;
2438 	}
2439 	case BTF_FWD_ENUM:
2440 		/* enum forward in BTF currently is just an enum with no enum
2441 		 * values; we also assume a standard 4-byte size for it
2442 		 */
2443 		return btf__add_enum(btf, name, sizeof(int));
2444 	default:
2445 		return libbpf_err(-EINVAL);
2446 	}
2447 }
2448 
2449 /*
2450  * Append new BTF_KING_TYPEDEF type with:
2451  *   - *name*, non-empty/non-NULL name;
2452  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2453  * Returns:
2454  *   - >0, type ID of newly added BTF type;
2455  *   - <0, on error.
2456  */
2457 int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id)
2458 {
2459 	if (!name || !name[0])
2460 		return libbpf_err(-EINVAL);
2461 
2462 	return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id);
2463 }
2464 
2465 /*
2466  * Append new BTF_KIND_VOLATILE type with:
2467  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2468  * Returns:
2469  *   - >0, type ID of newly added BTF type;
2470  *   - <0, on error.
2471  */
2472 int btf__add_volatile(struct btf *btf, int ref_type_id)
2473 {
2474 	return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id);
2475 }
2476 
2477 /*
2478  * Append new BTF_KIND_CONST type with:
2479  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2480  * Returns:
2481  *   - >0, type ID of newly added BTF type;
2482  *   - <0, on error.
2483  */
2484 int btf__add_const(struct btf *btf, int ref_type_id)
2485 {
2486 	return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id);
2487 }
2488 
2489 /*
2490  * Append new BTF_KIND_RESTRICT type with:
2491  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2492  * Returns:
2493  *   - >0, type ID of newly added BTF type;
2494  *   - <0, on error.
2495  */
2496 int btf__add_restrict(struct btf *btf, int ref_type_id)
2497 {
2498 	return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id);
2499 }
2500 
2501 /*
2502  * Append new BTF_KIND_TYPE_TAG type with:
2503  *   - *value*, non-empty/non-NULL tag value;
2504  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2505  * Returns:
2506  *   - >0, type ID of newly added BTF type;
2507  *   - <0, on error.
2508  */
2509 int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
2510 {
2511 	if (!value || !value[0])
2512 		return libbpf_err(-EINVAL);
2513 
2514 	return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id);
2515 }
2516 
2517 /*
2518  * Append new BTF_KIND_FUNC type with:
2519  *   - *name*, non-empty/non-NULL name;
2520  *   - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet;
2521  * Returns:
2522  *   - >0, type ID of newly added BTF type;
2523  *   - <0, on error.
2524  */
2525 int btf__add_func(struct btf *btf, const char *name,
2526 		  enum btf_func_linkage linkage, int proto_type_id)
2527 {
2528 	int id;
2529 
2530 	if (!name || !name[0])
2531 		return libbpf_err(-EINVAL);
2532 	if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL &&
2533 	    linkage != BTF_FUNC_EXTERN)
2534 		return libbpf_err(-EINVAL);
2535 
2536 	id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id);
2537 	if (id > 0) {
2538 		struct btf_type *t = btf_type_by_id(btf, id);
2539 
2540 		t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0);
2541 	}
2542 	return libbpf_err(id);
2543 }
2544 
/*
 * Append new BTF_KIND_FUNC_PROTO with:
 *   - *ret_type_id* - type ID for return result of a function.
 *
 * Function prototype initially has no arguments, but they can be added by
 * btf__add_func_param() one by one, immediately after
 * btf__add_func_proto() succeeded.
 *
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_func_proto(struct btf *btf, int ret_type_id)
{
	struct btf_type *t;
	int sz;

	if (validate_type_id(ret_type_id))
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* start out with vlen=0; this will be adjusted when adding function
	 * parameters, if necessary
	 */
	t->name_off = 0;
	t->info = btf_type_info(BTF_KIND_FUNC_PROTO, 0, 0);
	t->type = ret_type_id;

	return btf_commit_type(btf, sz);
}
2582 
/*
 * Append new function parameter for current FUNC_PROTO type with:
 *   - *name* - parameter name, can be NULL or empty;
 *   - *type_id* - type ID describing the type of the parameter.
 * Returns:
 *   -  0, on success;
 *   - <0, on error.
 */
int btf__add_func_param(struct btf *btf, const char *name, int type_id)
{
	struct btf_type *t;
	struct btf_param *p;
	int sz, name_off = 0;

	if (validate_type_id(type_id))
		return libbpf_err(-EINVAL);

	/* last type should be BTF_KIND_FUNC_PROTO */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_func_proto(t))
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_param);
	p = btf_add_type_mem(btf, sz);
	if (!p)
		return libbpf_err(-ENOMEM);

	if (name && name[0]) {
		name_off = btf__add_str(btf, name);
		if (name_off < 0)
			return name_off;
	}

	p->name_off = name_off;
	p->type = type_id;

	/* update parent type's vlen; re-fetch t, as btf_add_type_mem can
	 * invalidate the old pointer
	 */
	t = btf_last_type(btf);
	btf_type_inc_vlen(t);

	/* param was appended to the type section, so both type section
	 * length and the strings section offset grow by sz bytes
	 */
	btf->hdr->type_len += sz;
	btf->hdr->str_off += sz;
	return 0;
}
2633 
/*
 * Append new BTF_KIND_VAR type with:
 *   - *name* - non-empty/non-NULL name;
 *   - *linkage* - variable linkage, one of BTF_VAR_STATIC,
 *     BTF_VAR_GLOBAL_ALLOCATED, or BTF_VAR_GLOBAL_EXTERN;
 *   - *type_id* - type ID of the type describing the type of the variable.
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id)
{
	struct btf_type *t;
	struct btf_var *v;
	int sz, name_off;

	/* non-empty name */
	if (!name || !name[0])
		return libbpf_err(-EINVAL);
	if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED &&
	    linkage != BTF_VAR_GLOBAL_EXTERN)
		return libbpf_err(-EINVAL);
	if (validate_type_id(type_id))
		return libbpf_err(-EINVAL);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	/* VAR type is btf_type followed by a btf_var trailer */
	sz = sizeof(struct btf_type) + sizeof(struct btf_var);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	t->name_off = name_off;
	t->info = btf_type_info(BTF_KIND_VAR, 0, 0);
	t->type = type_id;

	v = btf_var(t);
	v->linkage = linkage;

	return btf_commit_type(btf, sz);
}
2681 
/*
 * Append new BTF_KIND_DATASEC type with:
 *   - *name* - non-empty/non-NULL name;
 *   - *byte_sz* - data section size, in bytes.
 *
 * Data section is initially empty. Variables info can be added with
 * btf__add_datasec_var_info() calls, after btf__add_datasec() succeeds.
 *
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz)
{
	struct btf_type *t;
	int sz, name_off;

	/* non-empty name */
	if (!name || !name[0])
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	/* start with vlen=0, which will be updated as var_secinfos are added */
	t->name_off = name_off;
	t->info = btf_type_info(BTF_KIND_DATASEC, 0, 0);
	t->size = byte_sz;

	return btf_commit_type(btf, sz);
}
2722 
/*
 * Append new data section variable information entry for current DATASEC type:
 *   - *var_type_id* - type ID, describing type of the variable;
 *   - *offset* - variable offset within data section, in bytes;
 *   - *byte_sz* - variable size, in bytes.
 *
 * Returns:
 *   -  0, on success;
 *   - <0, on error.
 */
int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz)
{
	struct btf_type *t;
	struct btf_var_secinfo *v;
	int sz;

	/* last type should be BTF_KIND_DATASEC */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_datasec(t))
		return libbpf_err(-EINVAL);

	if (validate_type_id(var_type_id))
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_var_secinfo);
	v = btf_add_type_mem(btf, sz);
	if (!v)
		return libbpf_err(-ENOMEM);

	v->type = var_type_id;
	v->offset = offset;
	v->size = byte_sz;

	/* update parent type's vlen; re-fetch t, as btf_add_type_mem can
	 * invalidate the old pointer
	 */
	t = btf_last_type(btf);
	btf_type_inc_vlen(t);

	/* secinfo was appended to the type section, so both type section
	 * length and the strings section offset grow by sz bytes
	 */
	btf->hdr->type_len += sz;
	btf->hdr->str_off += sz;
	return 0;
}
2770 
/*
 * Append new BTF_KIND_DECL_TAG type with:
 *   - *value* - non-empty/non-NULL string;
 *   - *ref_type_id* - referenced type ID, it might not exist yet;
 *   - *component_idx* - -1 for tagging reference type, otherwise struct/union
 *     member or function argument index;
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
		 int component_idx)
{
	struct btf_type *t;
	int sz, value_off;

	if (!value || !value[0] || component_idx < -1)
		return libbpf_err(-EINVAL);

	if (validate_type_id(ref_type_id))
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	/* DECL_TAG type is btf_type followed by a btf_decl_tag trailer */
	sz = sizeof(struct btf_type) + sizeof(struct btf_decl_tag);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	value_off = btf__add_str(btf, value);
	if (value_off < 0)
		return value_off;

	t->name_off = value_off;
	t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, false);
	t->type = ref_type_id;
	btf_decl_tag(t)->component_idx = component_idx;

	return btf_commit_type(btf, sz);
}
2812 
/* parameters describing one .BTF.ext metadata section to be parsed
 * (func_info, line_info, or core_relo)
 */
struct btf_ext_sec_setup_param {
	__u32 off;			/* section offset within .BTF.ext data area */
	__u32 len;			/* section length, in bytes */
	__u32 min_rec_size;		/* minimal valid per-record size */
	struct btf_ext_info *ext_info;	/* destination filled in on success */
	const char *desc;		/* section name used in log messages */
};
2820 
2821 static int btf_ext_setup_info(struct btf_ext *btf_ext,
2822 			      struct btf_ext_sec_setup_param *ext_sec)
2823 {
2824 	const struct btf_ext_info_sec *sinfo;
2825 	struct btf_ext_info *ext_info;
2826 	__u32 info_left, record_size;
2827 	size_t sec_cnt = 0;
2828 	/* The start of the info sec (including the __u32 record_size). */
2829 	void *info;
2830 
2831 	if (ext_sec->len == 0)
2832 		return 0;
2833 
2834 	if (ext_sec->off & 0x03) {
2835 		pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
2836 		     ext_sec->desc);
2837 		return -EINVAL;
2838 	}
2839 
2840 	info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
2841 	info_left = ext_sec->len;
2842 
2843 	if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
2844 		pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
2845 			 ext_sec->desc, ext_sec->off, ext_sec->len);
2846 		return -EINVAL;
2847 	}
2848 
2849 	/* At least a record size */
2850 	if (info_left < sizeof(__u32)) {
2851 		pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
2852 		return -EINVAL;
2853 	}
2854 
2855 	/* The record size needs to meet the minimum standard */
2856 	record_size = *(__u32 *)info;
2857 	if (record_size < ext_sec->min_rec_size ||
2858 	    record_size & 0x03) {
2859 		pr_debug("%s section in .BTF.ext has invalid record size %u\n",
2860 			 ext_sec->desc, record_size);
2861 		return -EINVAL;
2862 	}
2863 
2864 	sinfo = info + sizeof(__u32);
2865 	info_left -= sizeof(__u32);
2866 
2867 	/* If no records, return failure now so .BTF.ext won't be used. */
2868 	if (!info_left) {
2869 		pr_debug("%s section in .BTF.ext has no records", ext_sec->desc);
2870 		return -EINVAL;
2871 	}
2872 
2873 	while (info_left) {
2874 		unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
2875 		__u64 total_record_size;
2876 		__u32 num_records;
2877 
2878 		if (info_left < sec_hdrlen) {
2879 			pr_debug("%s section header is not found in .BTF.ext\n",
2880 			     ext_sec->desc);
2881 			return -EINVAL;
2882 		}
2883 
2884 		num_records = sinfo->num_info;
2885 		if (num_records == 0) {
2886 			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
2887 			     ext_sec->desc);
2888 			return -EINVAL;
2889 		}
2890 
2891 		total_record_size = sec_hdrlen + (__u64)num_records * record_size;
2892 		if (info_left < total_record_size) {
2893 			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
2894 			     ext_sec->desc);
2895 			return -EINVAL;
2896 		}
2897 
2898 		info_left -= total_record_size;
2899 		sinfo = (void *)sinfo + total_record_size;
2900 		sec_cnt++;
2901 	}
2902 
2903 	ext_info = ext_sec->ext_info;
2904 	ext_info->len = ext_sec->len - sizeof(__u32);
2905 	ext_info->rec_size = record_size;
2906 	ext_info->info = info + sizeof(__u32);
2907 	ext_info->sec_cnt = sec_cnt;
2908 
2909 	return 0;
2910 }
2911 
2912 static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
2913 {
2914 	struct btf_ext_sec_setup_param param = {
2915 		.off = btf_ext->hdr->func_info_off,
2916 		.len = btf_ext->hdr->func_info_len,
2917 		.min_rec_size = sizeof(struct bpf_func_info_min),
2918 		.ext_info = &btf_ext->func_info,
2919 		.desc = "func_info"
2920 	};
2921 
2922 	return btf_ext_setup_info(btf_ext, &param);
2923 }
2924 
2925 static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
2926 {
2927 	struct btf_ext_sec_setup_param param = {
2928 		.off = btf_ext->hdr->line_info_off,
2929 		.len = btf_ext->hdr->line_info_len,
2930 		.min_rec_size = sizeof(struct bpf_line_info_min),
2931 		.ext_info = &btf_ext->line_info,
2932 		.desc = "line_info",
2933 	};
2934 
2935 	return btf_ext_setup_info(btf_ext, &param);
2936 }
2937 
2938 static int btf_ext_setup_core_relos(struct btf_ext *btf_ext)
2939 {
2940 	struct btf_ext_sec_setup_param param = {
2941 		.off = btf_ext->hdr->core_relo_off,
2942 		.len = btf_ext->hdr->core_relo_len,
2943 		.min_rec_size = sizeof(struct bpf_core_relo),
2944 		.ext_info = &btf_ext->core_relo_info,
2945 		.desc = "core_relo",
2946 	};
2947 
2948 	return btf_ext_setup_info(btf_ext, &param);
2949 }
2950 
2951 static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
2952 {
2953 	const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
2954 
2955 	if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
2956 	    data_size < hdr->hdr_len) {
2957 		pr_debug("BTF.ext header not found");
2958 		return -EINVAL;
2959 	}
2960 
2961 	if (hdr->magic == bswap_16(BTF_MAGIC)) {
2962 		pr_warn("BTF.ext in non-native endianness is not supported\n");
2963 		return -ENOTSUP;
2964 	} else if (hdr->magic != BTF_MAGIC) {
2965 		pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
2966 		return -EINVAL;
2967 	}
2968 
2969 	if (hdr->version != BTF_VERSION) {
2970 		pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
2971 		return -ENOTSUP;
2972 	}
2973 
2974 	if (hdr->flags) {
2975 		pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
2976 		return -ENOTSUP;
2977 	}
2978 
2979 	if (data_size == hdr->hdr_len) {
2980 		pr_debug("BTF.ext has no data\n");
2981 		return -EINVAL;
2982 	}
2983 
2984 	return 0;
2985 }
2986 
2987 void btf_ext__free(struct btf_ext *btf_ext)
2988 {
2989 	if (IS_ERR_OR_NULL(btf_ext))
2990 		return;
2991 	free(btf_ext->func_info.sec_idxs);
2992 	free(btf_ext->line_info.sec_idxs);
2993 	free(btf_ext->core_relo_info.sec_idxs);
2994 	free(btf_ext->data);
2995 	free(btf_ext);
2996 }
2997 
/*
 * Parse raw .BTF.ext *data* of given *size* into a new struct btf_ext.
 * The input buffer is copied, so the caller may free it afterwards.
 * On success returns the new btf_ext (to be freed with btf_ext__free());
 * on error returns an error-encoded pointer via libbpf_err_ptr().
 */
struct btf_ext *btf_ext__new(const __u8 *data, __u32 size)
{
	struct btf_ext *btf_ext;
	int err;

	btf_ext = calloc(1, sizeof(struct btf_ext));
	if (!btf_ext)
		return libbpf_err_ptr(-ENOMEM);

	btf_ext->data_size = size;
	btf_ext->data = malloc(size);
	if (!btf_ext->data) {
		err = -ENOMEM;
		goto done;
	}
	memcpy(btf_ext->data, data, size);

	err = btf_ext_parse_hdr(btf_ext->data, size);
	if (err)
		goto done;

	/* header must be new enough to describe func/line info sections */
	if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
		err = -EINVAL;
		goto done;
	}

	err = btf_ext_setup_func_info(btf_ext);
	if (err)
		goto done;

	err = btf_ext_setup_line_info(btf_ext);
	if (err)
		goto done;

	/* older .BTF.ext headers predate the CO-RE relocation section */
	if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
		goto done; /* skip core relos parsing */

	err = btf_ext_setup_core_relos(btf_ext);
	if (err)
		goto done;

done:
	/* err is 0 on every success path reaching this label */
	if (err) {
		btf_ext__free(btf_ext);
		return libbpf_err_ptr(err);
	}

	return btf_ext;
}
3047 
3048 const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
3049 {
3050 	*size = btf_ext->data_size;
3051 	return btf_ext->data;
3052 }
3053 
3054 struct btf_dedup;
3055 
3056 static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
3057 static void btf_dedup_free(struct btf_dedup *d);
3058 static int btf_dedup_prep(struct btf_dedup *d);
3059 static int btf_dedup_strings(struct btf_dedup *d);
3060 static int btf_dedup_prim_types(struct btf_dedup *d);
3061 static int btf_dedup_struct_types(struct btf_dedup *d);
3062 static int btf_dedup_ref_types(struct btf_dedup *d);
3063 static int btf_dedup_resolve_fwds(struct btf_dedup *d);
3064 static int btf_dedup_compact_types(struct btf_dedup *d);
3065 static int btf_dedup_remap_types(struct btf_dedup *d);
3066 
3067 /*
3068  * Deduplicate BTF types and strings.
3069  *
3070  * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF
3071  * section with all BTF type descriptors and string data. It overwrites that
3072  * memory in-place with deduplicated types and strings without any loss of
3073  * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section
3074  * is provided, all the strings referenced from .BTF.ext section are honored
3075  * and updated to point to the right offsets after deduplication.
3076  *
3077  * If function returns with error, type/string data might be garbled and should
3078  * be discarded.
3079  *
3080  * More verbose and detailed description of both problem btf_dedup is solving,
3081  * as well as solution could be found at:
3082  * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
3083  *
3084  * Problem description and justification
3085  * =====================================
3086  *
3087  * BTF type information is typically emitted either as a result of conversion
3088  * from DWARF to BTF or directly by compiler. In both cases, each compilation
3089  * unit contains information about a subset of all the types that are used
3090  * in an application. These subsets are frequently overlapping and contain a lot
3091  * of duplicated information when later concatenated together into a single
3092  * binary. This algorithm ensures that each unique type is represented by single
3093  * BTF type descriptor, greatly reducing resulting size of BTF data.
3094  *
3095  * Compilation unit isolation and subsequent duplication of data is not the only
3096  * problem. The same type hierarchy (e.g., struct and all the type that struct
3097  * references) in different compilation units can be represented in BTF to
3098  * various degrees of completeness (or, rather, incompleteness) due to
3099  * struct/union forward declarations.
3100  *
3101  * Let's take a look at an example, that we'll use to better understand the
3102  * problem (and solution). Suppose we have two compilation units, each using
3103  * same `struct S`, but each of them having incomplete type information about
3104  * struct's fields:
3105  *
3106  * // CU #1:
3107  * struct S;
3108  * struct A {
3109  *	int a;
3110  *	struct A* self;
3111  *	struct S* parent;
3112  * };
3113  * struct B;
3114  * struct S {
3115  *	struct A* a_ptr;
3116  *	struct B* b_ptr;
3117  * };
3118  *
3119  * // CU #2:
3120  * struct S;
3121  * struct A;
3122  * struct B {
3123  *	int b;
3124  *	struct B* self;
3125  *	struct S* parent;
3126  * };
3127  * struct S {
3128  *	struct A* a_ptr;
3129  *	struct B* b_ptr;
3130  * };
3131  *
3132  * In case of CU #1, BTF data will know only that `struct B` exist (but no
3133  * more), but will know the complete type information about `struct A`. While
3134  * for CU #2, it will know full type information about `struct B`, but will
3135  * only know about forward declaration of `struct A` (in BTF terms, it will
3136  * have `BTF_KIND_FWD` type descriptor with name `B`).
3137  *
3138  * This compilation unit isolation means that it's possible that there is no
3139  * single CU with complete type information describing structs `S`, `A`, and
3140  * `B`. Also, we might get tons of duplicated and redundant type information.
3141  *
3142  * Additional complication we need to keep in mind comes from the fact that
3143  * types, in general, can form graphs containing cycles, not just DAGs.
3144  *
3145  * While algorithm does deduplication, it also merges and resolves type
 * information (unless disabled through `struct btf_dedup_opts`), whenever possible.
3147  * E.g., in the example above with two compilation units having partial type
3148  * information for structs `A` and `B`, the output of algorithm will emit
3149  * a single copy of each BTF type that describes structs `A`, `B`, and `S`
3150  * (as well as type information for `int` and pointers), as if they were defined
3151  * in a single compilation unit as:
3152  *
3153  * struct A {
3154  *	int a;
3155  *	struct A* self;
3156  *	struct S* parent;
3157  * };
3158  * struct B {
3159  *	int b;
3160  *	struct B* self;
3161  *	struct S* parent;
3162  * };
3163  * struct S {
3164  *	struct A* a_ptr;
3165  *	struct B* b_ptr;
3166  * };
3167  *
3168  * Algorithm summary
3169  * =================
3170  *
3171  * Algorithm completes its work in 7 separate passes:
3172  *
3173  * 1. Strings deduplication.
3174  * 2. Primitive types deduplication (int, enum, fwd).
3175  * 3. Struct/union types deduplication.
3176  * 4. Resolve unambiguous forward declarations.
3177  * 5. Reference types deduplication (pointers, typedefs, arrays, funcs, func
3178  *    protos, and const/volatile/restrict modifiers).
3179  * 6. Types compaction.
3180  * 7. Types remapping.
3181  *
3182  * Algorithm determines canonical type descriptor, which is a single
3183  * representative type for each truly unique type. This canonical type is the
3184  * one that will go into final deduplicated BTF type information. For
3185  * struct/unions, it is also the type that algorithm will merge additional type
3186  * information into (while resolving FWDs), as it discovers it from data in
3187  * other CUs. Each input BTF type eventually gets either mapped to itself, if
3188  * that type is canonical, or to some other type, if that type is equivalent
3189  * and was chosen as canonical representative. This mapping is stored in
3190  * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that
3191  * FWD type got resolved to.
3192  *
3193  * To facilitate fast discovery of canonical types, we also maintain canonical
3194  * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash
3195  * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types
3196  * that match that signature. With sufficiently good choice of type signature
3197  * hashing function, we can limit number of canonical types for each unique type
3198  * signature to a very small number, allowing to find canonical type for any
3199  * duplicated type very quickly.
3200  *
3201  * Struct/union deduplication is the most critical part and algorithm for
3202  * deduplicating structs/unions is described in greater details in comments for
3203  * `btf_dedup_is_equiv` function.
3204  */
int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
{
	struct btf_dedup *d;
	int err;

	if (!OPTS_VALID(opts, btf_dedup_opts))
		return libbpf_err(-EINVAL);

	d = btf_dedup_new(btf, opts);
	if (IS_ERR(d)) {
		pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
		return libbpf_err(-EINVAL);
	}

	/* dedup modifies BTF in place; break read-only contiguous raw data
	 * into independently modifiable sections first
	 */
	if (btf_ensure_modifiable(btf)) {
		err = -ENOMEM;
		goto done;
	}

	/* for split BTF: hash base BTF types so they can serve as canonical
	 * candidates for split types
	 */
	err = btf_dedup_prep(d);
	if (err) {
		pr_debug("btf_dedup_prep failed:%d\n", err);
		goto done;
	}
	/* pass 1: dedup and compact the string section */
	err = btf_dedup_strings(d);
	if (err < 0) {
		pr_debug("btf_dedup_strings failed:%d\n", err);
		goto done;
	}
	/* pass 2: primitive types (int, enum, fwd) that reference no others */
	err = btf_dedup_prim_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_prim_types failed:%d\n", err);
		goto done;
	}
	/* pass 3: struct/union dedup via type graph equivalence */
	err = btf_dedup_struct_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_struct_types failed:%d\n", err);
		goto done;
	}
	/* pass 4: map remaining FWDs to unambiguous struct/union definitions */
	err = btf_dedup_resolve_fwds(d);
	if (err < 0) {
		pr_debug("btf_dedup_resolve_fwds failed:%d\n", err);
		goto done;
	}
	/* pass 5: reference types (ptr, typedef, array, func/proto, modifiers) */
	err = btf_dedup_ref_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_ref_types failed:%d\n", err);
		goto done;
	}
	/* pass 6: drop duplicated types, renumbering the survivors */
	err = btf_dedup_compact_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_compact_types failed:%d\n", err);
		goto done;
	}
	/* pass 7: rewrite all type ID references to the new numbering */
	err = btf_dedup_remap_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_remap_types failed:%d\n", err);
		goto done;
	}

done:
	btf_dedup_free(d);
	return libbpf_err(err);
}
3269 
3270 #define BTF_UNPROCESSED_ID ((__u32)-1)
3271 #define BTF_IN_PROGRESS_ID ((__u32)-2)
3272 
struct btf_dedup {
	/* .BTF section to be deduped in-place */
	struct btf *btf;
	/*
	 * Optional .BTF.ext section. When provided, any strings referenced
	 * from it will be taken into account when deduping strings
	 */
	struct btf_ext *btf_ext;
	/*
	 * This is a map from any type's signature hash to a list of possible
	 * canonical representative type candidates. Hash collisions are
	 * ignored, so even types of various kinds can share same list of
	 * candidates, which is fine because we rely on subsequent
	 * btf_xxx_equal() checks to authoritatively verify type equality.
	 */
	struct hashmap *dedup_table;
	/* Canonical types map */
	__u32 *map;
	/* Hypothetical mapping, used during type graph equivalence checks */
	__u32 *hypot_map;
	/* list of type IDs that currently have an entry in hypot_map; lets
	 * btf_dedup_clear_hypot_map() reset only touched entries instead of
	 * scanning the whole map
	 */
	__u32 *hypot_list;
	/* number of used entries in hypot_list */
	size_t hypot_cnt;
	/* allocated capacity of hypot_list */
	size_t hypot_cap;
	/* Whether hypothetical mapping, if successful, would need to adjust
	 * already canonicalized types (due to a new forward declaration to
	 * concrete type resolution). In such case, during split BTF dedup
	 * candidate type would still be considered as different, because base
	 * BTF is considered to be immutable.
	 */
	bool hypot_adjust_canon;
	/* Various option modifying behavior of algorithm */
	struct btf_dedup_opts opts;
	/* temporary strings deduplication state */
	struct strset *strs_set;
};
3308 
/* Fold one more value into an accumulated hash (classic h*31 + value). */
static long hash_combine(long h, long value)
{
	long acc = h;

	acc *= 31;
	acc += value;
	return acc;
}
3313 
/* Iterate over all dedup table entries whose key equals the given hash,
 * i.e., all canonical-candidate type IDs sharing that signature hash.
 */
#define for_each_dedup_cand(d, node, hash) \
	hashmap__for_each_key_entry(d->dedup_table, node, hash)
3316 
3317 static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
3318 {
3319 	return hashmap__append(d->dedup_table, hash, type_id);
3320 }
3321 
3322 static int btf_dedup_hypot_map_add(struct btf_dedup *d,
3323 				   __u32 from_id, __u32 to_id)
3324 {
3325 	if (d->hypot_cnt == d->hypot_cap) {
3326 		__u32 *new_list;
3327 
3328 		d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
3329 		new_list = libbpf_reallocarray(d->hypot_list, d->hypot_cap, sizeof(__u32));
3330 		if (!new_list)
3331 			return -ENOMEM;
3332 		d->hypot_list = new_list;
3333 	}
3334 	d->hypot_list[d->hypot_cnt++] = from_id;
3335 	d->hypot_map[from_id] = to_id;
3336 	return 0;
3337 }
3338 
3339 static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
3340 {
3341 	int i;
3342 
3343 	for (i = 0; i < d->hypot_cnt; i++)
3344 		d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
3345 	d->hypot_cnt = 0;
3346 	d->hypot_adjust_canon = false;
3347 }
3348 
3349 static void btf_dedup_free(struct btf_dedup *d)
3350 {
3351 	hashmap__free(d->dedup_table);
3352 	d->dedup_table = NULL;
3353 
3354 	free(d->map);
3355 	d->map = NULL;
3356 
3357 	free(d->hypot_map);
3358 	d->hypot_map = NULL;
3359 
3360 	free(d->hypot_list);
3361 	d->hypot_list = NULL;
3362 
3363 	free(d);
3364 }
3365 
static size_t btf_dedup_identity_hash_fn(long key, void *ctx)
{
	/* keys are precomputed type signature hashes: use them verbatim */
	return (size_t)key;
}
3370 
static size_t btf_dedup_collision_hash_fn(long key, void *ctx)
{
	/* constant hash: every key collides, so all entries share one bucket */
	return (size_t)0;
}
3375 
static bool btf_dedup_equal_fn(long k1, long k2, void *ctx)
{
	/* keys are full hash values; only exact matches are equal */
	bool same = (k1 == k2);

	return same;
}
3380 
static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts)
{
	/* calloc zero-initializes all fields, making btf_dedup_free() safe
	 * to call on a partially constructed object in the error path
	 */
	struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
	hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
	int i, err = 0, type_cnt;

	if (!d)
		return ERR_PTR(-ENOMEM);

	/* collision hash fn maps every key to bucket 0, putting all types
	 * onto one shared candidate list
	 */
	if (OPTS_GET(opts, force_collisions, false))
		hash_fn = btf_dedup_collision_hash_fn;

	d->btf = btf;
	d->btf_ext = OPTS_GET(opts, btf_ext, NULL);

	d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
	if (IS_ERR(d->dedup_table)) {
		err = PTR_ERR(d->dedup_table);
		/* null out so btf_dedup_free() doesn't free an ERR_PTR */
		d->dedup_table = NULL;
		goto done;
	}

	type_cnt = btf__type_cnt(btf);
	d->map = malloc(sizeof(__u32) * type_cnt);
	if (!d->map) {
		err = -ENOMEM;
		goto done;
	}
	/* special BTF "void" type is made canonical immediately */
	d->map[0] = 0;
	for (i = 1; i < type_cnt; i++) {
		struct btf_type *t = btf_type_by_id(d->btf, i);

		/* VAR and DATASEC are never deduped and are self-canonical */
		if (btf_is_var(t) || btf_is_datasec(t))
			d->map[i] = i;
		else
			d->map[i] = BTF_UNPROCESSED_ID;
	}

	d->hypot_map = malloc(sizeof(__u32) * type_cnt);
	if (!d->hypot_map) {
		err = -ENOMEM;
		goto done;
	}
	for (i = 0; i < type_cnt; i++)
		d->hypot_map[i] = BTF_UNPROCESSED_ID;

done:
	if (err) {
		btf_dedup_free(d);
		return ERR_PTR(err);
	}

	return d;
}
3437 
3438 /*
3439  * Iterate over all possible places in .BTF and .BTF.ext that can reference
3440  * string and pass pointer to it to a provided callback `fn`.
3441  */
3442 static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *ctx)
3443 {
3444 	int i, r;
3445 
3446 	for (i = 0; i < d->btf->nr_types; i++) {
3447 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
3448 
3449 		r = btf_type_visit_str_offs(t, fn, ctx);
3450 		if (r)
3451 			return r;
3452 	}
3453 
3454 	if (!d->btf_ext)
3455 		return 0;
3456 
3457 	r = btf_ext_visit_str_offs(d->btf_ext, fn, ctx);
3458 	if (r)
3459 		return r;
3460 
3461 	return 0;
3462 }
3463 
3464 static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx)
3465 {
3466 	struct btf_dedup *d = ctx;
3467 	__u32 str_off = *str_off_ptr;
3468 	const char *s;
3469 	int off, err;
3470 
3471 	/* don't touch empty string or string in main BTF */
3472 	if (str_off == 0 || str_off < d->btf->start_str_off)
3473 		return 0;
3474 
3475 	s = btf__str_by_offset(d->btf, str_off);
3476 	if (d->btf->base_btf) {
3477 		err = btf__find_str(d->btf->base_btf, s);
3478 		if (err >= 0) {
3479 			*str_off_ptr = err;
3480 			return 0;
3481 		}
3482 		if (err != -ENOENT)
3483 			return err;
3484 	}
3485 
3486 	off = strset__add_str(d->strs_set, s);
3487 	if (off < 0)
3488 		return off;
3489 
3490 	*str_off_ptr = d->btf->start_str_off + off;
3491 	return 0;
3492 }
3493 
3494 /*
3495  * Dedup string and filter out those that are not referenced from either .BTF
3496  * or .BTF.ext (if provided) sections.
3497  *
3498  * This is done by building index of all strings in BTF's string section,
3499  * then iterating over all entities that can reference strings (e.g., type
3500  * names, struct field names, .BTF.ext line info, etc) and marking corresponding
3501  * strings as used. After that all used strings are deduped and compacted into
3502  * sequential blob of memory and new offsets are calculated. Then all the string
3503  * references are iterated again and rewritten using new offsets.
3504  */
static int btf_dedup_strings(struct btf_dedup *d)
{
	int err;

	/* nothing to do if this BTF's strings were already deduped */
	if (d->btf->strs_deduped)
		return 0;

	d->strs_set = strset__new(BTF_MAX_STR_OFFSET, NULL, 0);
	if (IS_ERR(d->strs_set)) {
		err = PTR_ERR(d->strs_set);
		goto err_out;
	}

	if (!d->btf->base_btf) {
		/* insert empty string; we won't be looking it up during strings
		 * dedup, but it's good to have it for generic BTF string lookups
		 */
		err = strset__add_str(d->strs_set, "");
		if (err < 0)
			goto err_out;
	}

	/* remap string offsets */
	err = btf_for_each_str_off(d, strs_dedup_remap_str_off, d);
	if (err)
		goto err_out;

	/* replace BTF string data and hash with deduped ones; ownership of
	 * d->strs_set transfers to the btf object, so null it out here
	 */
	strset__free(d->btf->strs_set);
	d->btf->hdr->str_len = strset__data_size(d->strs_set);
	d->btf->strs_set = d->strs_set;
	d->strs_set = NULL;
	d->btf->strs_deduped = true;
	return 0;

err_out:
	strset__free(d->strs_set);
	d->strs_set = NULL;

	return err;
}
3546 
3547 static long btf_hash_common(struct btf_type *t)
3548 {
3549 	long h;
3550 
3551 	h = hash_combine(0, t->name_off);
3552 	h = hash_combine(h, t->info);
3553 	h = hash_combine(h, t->size);
3554 	return h;
3555 }
3556 
3557 static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
3558 {
3559 	return t1->name_off == t2->name_off &&
3560 	       t1->info == t2->info &&
3561 	       t1->size == t2->size;
3562 }
3563 
3564 /* Calculate type signature hash of INT or TAG. */
3565 static long btf_hash_int_decl_tag(struct btf_type *t)
3566 {
3567 	__u32 info = *(__u32 *)(t + 1);
3568 	long h;
3569 
3570 	h = btf_hash_common(t);
3571 	h = hash_combine(h, info);
3572 	return h;
3573 }
3574 
3575 /* Check structural equality of two INTs or TAGs. */
3576 static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2)
3577 {
3578 	__u32 info1, info2;
3579 
3580 	if (!btf_equal_common(t1, t2))
3581 		return false;
3582 	info1 = *(__u32 *)(t1 + 1);
3583 	info2 = *(__u32 *)(t2 + 1);
3584 	return info1 == info2;
3585 }
3586 
3587 /* Calculate type signature hash of ENUM/ENUM64. */
3588 static long btf_hash_enum(struct btf_type *t)
3589 {
3590 	long h;
3591 
3592 	/* don't hash vlen, enum members and size to support enum fwd resolving */
3593 	h = hash_combine(0, t->name_off);
3594 	return h;
3595 }
3596 
3597 static bool btf_equal_enum_members(struct btf_type *t1, struct btf_type *t2)
3598 {
3599 	const struct btf_enum *m1, *m2;
3600 	__u16 vlen;
3601 	int i;
3602 
3603 	vlen = btf_vlen(t1);
3604 	m1 = btf_enum(t1);
3605 	m2 = btf_enum(t2);
3606 	for (i = 0; i < vlen; i++) {
3607 		if (m1->name_off != m2->name_off || m1->val != m2->val)
3608 			return false;
3609 		m1++;
3610 		m2++;
3611 	}
3612 	return true;
3613 }
3614 
3615 static bool btf_equal_enum64_members(struct btf_type *t1, struct btf_type *t2)
3616 {
3617 	const struct btf_enum64 *m1, *m2;
3618 	__u16 vlen;
3619 	int i;
3620 
3621 	vlen = btf_vlen(t1);
3622 	m1 = btf_enum64(t1);
3623 	m2 = btf_enum64(t2);
3624 	for (i = 0; i < vlen; i++) {
3625 		if (m1->name_off != m2->name_off || m1->val_lo32 != m2->val_lo32 ||
3626 		    m1->val_hi32 != m2->val_hi32)
3627 			return false;
3628 		m1++;
3629 		m2++;
3630 	}
3631 	return true;
3632 }
3633 
3634 /* Check structural equality of two ENUMs or ENUM64s. */
3635 static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
3636 {
3637 	if (!btf_equal_common(t1, t2))
3638 		return false;
3639 
3640 	/* t1 & t2 kinds are identical because of btf_equal_common */
3641 	if (btf_kind(t1) == BTF_KIND_ENUM)
3642 		return btf_equal_enum_members(t1, t2);
3643 	else
3644 		return btf_equal_enum64_members(t1, t2);
3645 }
3646 
3647 static inline bool btf_is_enum_fwd(struct btf_type *t)
3648 {
3649 	return btf_is_any_enum(t) && btf_vlen(t) == 0;
3650 }
3651 
3652 static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
3653 {
3654 	if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
3655 		return btf_equal_enum(t1, t2);
3656 	/* At this point either t1 or t2 or both are forward declarations, thus:
3657 	 * - skip comparing vlen because it is zero for forward declarations;
3658 	 * - skip comparing size to allow enum forward declarations
3659 	 *   to be compatible with enum64 full declarations;
3660 	 * - skip comparing kind for the same reason.
3661 	 */
3662 	return t1->name_off == t2->name_off &&
3663 	       btf_is_any_enum(t1) && btf_is_any_enum(t2);
3664 }
3665 
3666 /*
3667  * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
3668  * as referenced type IDs equivalence is established separately during type
3669  * graph equivalence check algorithm.
3670  */
3671 static long btf_hash_struct(struct btf_type *t)
3672 {
3673 	const struct btf_member *member = btf_members(t);
3674 	__u32 vlen = btf_vlen(t);
3675 	long h = btf_hash_common(t);
3676 	int i;
3677 
3678 	for (i = 0; i < vlen; i++) {
3679 		h = hash_combine(h, member->name_off);
3680 		h = hash_combine(h, member->offset);
3681 		/* no hashing of referenced type ID, it can be unresolved yet */
3682 		member++;
3683 	}
3684 	return h;
3685 }
3686 
3687 /*
3688  * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced
3689  * type IDs. This check is performed during type graph equivalence check and
3690  * referenced types equivalence is checked separately.
3691  */
3692 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
3693 {
3694 	const struct btf_member *m1, *m2;
3695 	__u16 vlen;
3696 	int i;
3697 
3698 	if (!btf_equal_common(t1, t2))
3699 		return false;
3700 
3701 	vlen = btf_vlen(t1);
3702 	m1 = btf_members(t1);
3703 	m2 = btf_members(t2);
3704 	for (i = 0; i < vlen; i++) {
3705 		if (m1->name_off != m2->name_off || m1->offset != m2->offset)
3706 			return false;
3707 		m1++;
3708 		m2++;
3709 	}
3710 	return true;
3711 }
3712 
3713 /*
3714  * Calculate type signature hash of ARRAY, including referenced type IDs,
3715  * under assumption that they were already resolved to canonical type IDs and
3716  * are not going to change.
3717  */
3718 static long btf_hash_array(struct btf_type *t)
3719 {
3720 	const struct btf_array *info = btf_array(t);
3721 	long h = btf_hash_common(t);
3722 
3723 	h = hash_combine(h, info->type);
3724 	h = hash_combine(h, info->index_type);
3725 	h = hash_combine(h, info->nelems);
3726 	return h;
3727 }
3728 
3729 /*
3730  * Check exact equality of two ARRAYs, taking into account referenced
3731  * type IDs, under assumption that they were already resolved to canonical
3732  * type IDs and are not going to change.
3733  * This function is called during reference types deduplication to compare
3734  * ARRAY to potential canonical representative.
3735  */
3736 static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
3737 {
3738 	const struct btf_array *info1, *info2;
3739 
3740 	if (!btf_equal_common(t1, t2))
3741 		return false;
3742 
3743 	info1 = btf_array(t1);
3744 	info2 = btf_array(t2);
3745 	return info1->type == info2->type &&
3746 	       info1->index_type == info2->index_type &&
3747 	       info1->nelems == info2->nelems;
3748 }
3749 
3750 /*
3751  * Check structural compatibility of two ARRAYs, ignoring referenced type
3752  * IDs. This check is performed during type graph equivalence check and
3753  * referenced types equivalence is checked separately.
3754  */
3755 static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
3756 {
3757 	if (!btf_equal_common(t1, t2))
3758 		return false;
3759 
3760 	return btf_array(t1)->nelems == btf_array(t2)->nelems;
3761 }
3762 
3763 /*
3764  * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
3765  * under assumption that they were already resolved to canonical type IDs and
3766  * are not going to change.
3767  */
3768 static long btf_hash_fnproto(struct btf_type *t)
3769 {
3770 	const struct btf_param *member = btf_params(t);
3771 	__u16 vlen = btf_vlen(t);
3772 	long h = btf_hash_common(t);
3773 	int i;
3774 
3775 	for (i = 0; i < vlen; i++) {
3776 		h = hash_combine(h, member->name_off);
3777 		h = hash_combine(h, member->type);
3778 		member++;
3779 	}
3780 	return h;
3781 }
3782 
3783 /*
3784  * Check exact equality of two FUNC_PROTOs, taking into account referenced
3785  * type IDs, under assumption that they were already resolved to canonical
3786  * type IDs and are not going to change.
3787  * This function is called during reference types deduplication to compare
3788  * FUNC_PROTO to potential canonical representative.
3789  */
3790 static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
3791 {
3792 	const struct btf_param *m1, *m2;
3793 	__u16 vlen;
3794 	int i;
3795 
3796 	if (!btf_equal_common(t1, t2))
3797 		return false;
3798 
3799 	vlen = btf_vlen(t1);
3800 	m1 = btf_params(t1);
3801 	m2 = btf_params(t2);
3802 	for (i = 0; i < vlen; i++) {
3803 		if (m1->name_off != m2->name_off || m1->type != m2->type)
3804 			return false;
3805 		m1++;
3806 		m2++;
3807 	}
3808 	return true;
3809 }
3810 
3811 /*
3812  * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
3813  * IDs. This check is performed during type graph equivalence check and
3814  * referenced types equivalence is checked separately.
3815  */
3816 static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
3817 {
3818 	const struct btf_param *m1, *m2;
3819 	__u16 vlen;
3820 	int i;
3821 
3822 	/* skip return type ID */
3823 	if (t1->name_off != t2->name_off || t1->info != t2->info)
3824 		return false;
3825 
3826 	vlen = btf_vlen(t1);
3827 	m1 = btf_params(t1);
3828 	m2 = btf_params(t2);
3829 	for (i = 0; i < vlen; i++) {
3830 		if (m1->name_off != m2->name_off)
3831 			return false;
3832 		m1++;
3833 		m2++;
3834 	}
3835 	return true;
3836 }
3837 
3838 /* Prepare split BTF for deduplication by calculating hashes of base BTF's
3839  * types and initializing the rest of the state (canonical type mapping) for
3840  * the fixed base BTF part.
3841  */
static int btf_dedup_prep(struct btf_dedup *d)
{
	struct btf_type *t;
	int type_id;
	long h;

	/* non-split BTF has no fixed base part to prepare */
	if (!d->btf->base_btf)
		return 0;

	for (type_id = 1; type_id < d->btf->start_id; type_id++) {
		t = btf_type_by_id(d->btf, type_id);

		/* all base BTF types are self-canonical by definition */
		d->map[type_id] = type_id;

		switch (btf_kind(t)) {
		case BTF_KIND_VAR:
		case BTF_KIND_DATASEC:
			/* VAR and DATASEC are never hash/deduplicated */
			continue;
		case BTF_KIND_CONST:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_PTR:
		case BTF_KIND_FWD:
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_FUNC:
		case BTF_KIND_FLOAT:
		case BTF_KIND_TYPE_TAG:
			h = btf_hash_common(t);
			break;
		case BTF_KIND_INT:
		case BTF_KIND_DECL_TAG:
			h = btf_hash_int_decl_tag(t);
			break;
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
			h = btf_hash_enum(t);
			break;
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			h = btf_hash_struct(t);
			break;
		case BTF_KIND_ARRAY:
			h = btf_hash_array(t);
			break;
		case BTF_KIND_FUNC_PROTO:
			h = btf_hash_fnproto(t);
			break;
		default:
			pr_debug("unknown kind %d for type [%d]\n", btf_kind(t), type_id);
			return -EINVAL;
		}
		/* register base type as a dedup candidate so split BTF types
		 * can be matched against it
		 */
		if (btf_dedup_table_add(d, h, type_id))
			return -ENOMEM;
	}

	return 0;
}
3901 
3902 /*
3903  * Deduplicate primitive types, that can't reference other types, by calculating
3904  * their type signature hash and comparing them with any possible canonical
3905  * candidate. If no canonical candidate matches, type itself is marked as
3906  * canonical and is added into `btf_dedup->dedup_table` as another candidate.
3907  */
static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
{
	struct btf_type *t = btf_type_by_id(d->btf, type_id);
	struct hashmap_entry *hash_entry;
	struct btf_type *cand;
	/* if we don't find equivalent type, then we are canonical */
	__u32 new_id = type_id;
	__u32 cand_id;
	long h;

	switch (btf_kind(t)) {
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_ARRAY:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_FUNC:
	case BTF_KIND_FUNC_PROTO:
	case BTF_KIND_VAR:
	case BTF_KIND_DATASEC:
	case BTF_KIND_DECL_TAG:
	case BTF_KIND_TYPE_TAG:
		/* non-primitive kinds are handled by later dedup passes */
		return 0;

	case BTF_KIND_INT:
		h = btf_hash_int_decl_tag(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_int_tag(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		/* enum hash is name-only, so fwd and full definitions with
		 * the same name share a candidate list
		 */
		h = btf_hash_enum(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_enum(t, cand)) {
				new_id = cand_id;
				break;
			}
			if (btf_compat_enum(t, cand)) {
				if (btf_is_enum_fwd(t)) {
					/* resolve fwd to full enum */
					new_id = cand_id;
					break;
				}
				/* resolve canonical enum fwd to full enum */
				d->map[cand_id] = type_id;
			}
		}
		break;

	case BTF_KIND_FWD:
	case BTF_KIND_FLOAT:
		h = btf_hash_common(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_common(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	default:
		return -EINVAL;
	}

	/* record canonical mapping; register self as a new candidate only
	 * when no existing canonical type matched
	 */
	d->map[type_id] = new_id;
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return 0;
}
3992 
3993 static int btf_dedup_prim_types(struct btf_dedup *d)
3994 {
3995 	int i, err;
3996 
3997 	for (i = 0; i < d->btf->nr_types; i++) {
3998 		err = btf_dedup_prim_type(d, d->btf->start_id + i);
3999 		if (err)
4000 			return err;
4001 	}
4002 	return 0;
4003 }
4004 
4005 /*
4006  * Check whether type is already mapped into canonical one (could be to itself).
4007  */
4008 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
4009 {
4010 	return d->map[type_id] <= BTF_MAX_NR_TYPES;
4011 }
4012 
4013 /*
4014  * Resolve type ID into its canonical type ID, if any; otherwise return original
4015  * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
4016  * STRUCT/UNION link and resolve it into canonical type ID as well.
4017  */
4018 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
4019 {
4020 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
4021 		type_id = d->map[type_id];
4022 	return type_id;
4023 }
4024 
4025 /*
4026  * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
4027  * type ID.
4028  */
4029 static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
4030 {
4031 	__u32 orig_type_id = type_id;
4032 
4033 	if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
4034 		return type_id;
4035 
4036 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
4037 		type_id = d->map[type_id];
4038 
4039 	if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
4040 		return type_id;
4041 
4042 	return orig_type_id;
4043 }
4044 
4045 
4046 static inline __u16 btf_fwd_kind(struct btf_type *t)
4047 {
4048 	return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
4049 }
4050 
4051 /* Check if given two types are identical ARRAY definitions */
4052 static bool btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
4053 {
4054 	struct btf_type *t1, *t2;
4055 
4056 	t1 = btf_type_by_id(d->btf, id1);
4057 	t2 = btf_type_by_id(d->btf, id2);
4058 	if (!btf_is_array(t1) || !btf_is_array(t2))
4059 		return false;
4060 
4061 	return btf_equal_array(t1, t2);
4062 }
4063 
4064 /* Check if given two types are identical STRUCT/UNION definitions */
4065 static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id2)
4066 {
4067 	const struct btf_member *m1, *m2;
4068 	struct btf_type *t1, *t2;
4069 	int n, i;
4070 
4071 	t1 = btf_type_by_id(d->btf, id1);
4072 	t2 = btf_type_by_id(d->btf, id2);
4073 
4074 	if (!btf_is_composite(t1) || btf_kind(t1) != btf_kind(t2))
4075 		return false;
4076 
4077 	if (!btf_shallow_equal_struct(t1, t2))
4078 		return false;
4079 
4080 	m1 = btf_members(t1);
4081 	m2 = btf_members(t2);
4082 	for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
4083 		if (m1->type != m2->type &&
4084 		    !btf_dedup_identical_arrays(d, m1->type, m2->type) &&
4085 		    !btf_dedup_identical_structs(d, m1->type, m2->type))
4086 			return false;
4087 	}
4088 	return true;
4089 }
4090 
4091 /*
4092  * Check equivalence of BTF type graph formed by candidate struct/union (we'll
4093  * call it "candidate graph" in this description for brevity) to a type graph
4094  * formed by (potential) canonical struct/union ("canonical graph" for brevity
4095  * here, though keep in mind that not all types in canonical graph are
4096  * necessarily canonical representatives themselves, some of them might be
4097  * duplicates or its uniqueness might not have been established yet).
4098  * Returns:
4099  *  - >0, if type graphs are equivalent;
4100  *  -  0, if not equivalent;
4101  *  - <0, on error.
4102  *
4103  * Algorithm performs side-by-side DFS traversal of both type graphs and checks
4104  * equivalence of BTF types at each step. If at any point BTF types in candidate
4105  * and canonical graphs are not compatible structurally, whole graphs are
4106  * incompatible. If types are structurally equivalent (i.e., all information
4107  * except referenced type IDs is exactly the same), a mapping from `canon_id` to
 * a `cand_id` is recorded in hypothetical mapping (`btf_dedup->hypot_map`).
4109  * If a type references other types, then those referenced types are checked
4110  * for equivalence recursively.
4111  *
4112  * During DFS traversal, if we find that for current `canon_id` type we
4113  * already have some mapping in hypothetical map, we check for two possible
4114  * situations:
4115  *   - `canon_id` is mapped to exactly the same type as `cand_id`. This will
4116  *     happen when type graphs have cycles. In this case we assume those two
4117  *     types are equivalent.
4118  *   - `canon_id` is mapped to different type. This is contradiction in our
4119  *     hypothetical mapping, because same graph in canonical graph corresponds
4120  *     to two different types in candidate graph, which for equivalent type
4121  *     graphs shouldn't happen. This condition terminates equivalence check
4122  *     with negative result.
4123  *
4124  * If type graphs traversal exhausts types to check and find no contradiction,
4125  * then type graphs are equivalent.
4126  *
4127  * When checking types for equivalence, there is one special case: FWD types.
4128  * If FWD type resolution is allowed and one of the types (either from canonical
4129  * or candidate graph) is FWD and other is STRUCT/UNION (depending on FWD's kind
4130  * flag) and their names match, hypothetical mapping is updated to point from
4131  * FWD to STRUCT/UNION. If graphs will be determined as equivalent successfully,
4132  * this mapping will be used to record FWD -> STRUCT/UNION mapping permanently.
4133  *
4134  * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
4135  * if there are two exactly named (or anonymous) structs/unions that are
4136  * compatible structurally, one of which has FWD field, while other is concrete
4137  * STRUCT/UNION, but according to C sources they are different structs/unions
4138  * that are referencing different types with the same name. This is extremely
4139  * unlikely to happen, but btf_dedup API allows to disable FWD resolution if
4140  * this logic is causing problems.
4141  *
4142  * Doing FWD resolution means that both candidate and/or canonical graphs can
 * consist of portions of the graph that come from multiple compilation units.
4144  * This is due to the fact that types within single compilation unit are always
4145  * deduplicated and FWDs are already resolved, if referenced struct/union
 * definition is available. So, if we had unresolved FWD and found corresponding
4147  * STRUCT/UNION, they will be from different compilation units. This
4148  * consequently means that when we "link" FWD to corresponding STRUCT/UNION,
4149  * type graph will likely have at least two different BTF types that describe
4150  * same type (e.g., most probably there will be two different BTF types for the
4151  * same 'int' primitive type) and could even have "overlapping" parts of type
4152  * graph that describe same subset of types.
4153  *
4154  * This in turn means that our assumption that each type in canonical graph
4155  * must correspond to exactly one type in candidate graph might not hold
4156  * anymore and will make it harder to detect contradictions using hypothetical
4157  * map. To handle this problem, we allow to follow FWD -> STRUCT/UNION
4158  * resolution only in canonical graph. FWDs in candidate graphs are never
4159  * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs
4160  * that can occur:
4161  *   - Both types in canonical and candidate graphs are FWDs. If they are
4162  *     structurally equivalent, then they can either be both resolved to the
4163  *     same STRUCT/UNION or not resolved at all. In both cases they are
4164  *     equivalent and there is no need to resolve FWD on candidate side.
4165  *   - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
4166  *     so nothing to resolve as well, algorithm will check equivalence anyway.
4167  *   - Type in canonical graph is FWD, while type in candidate is concrete
4168  *     STRUCT/UNION. In this case candidate graph comes from single compilation
4169  *     unit, so there is exactly one BTF type for each unique C type. After
4170  *     resolving FWD into STRUCT/UNION, there might be more than one BTF type
4171  *     in canonical graph mapping to single BTF type in candidate graph, but
4172  *     because hypothetical mapping maps from canonical to candidate types, it's
4173  *     alright, and we still maintain the property of having single `canon_id`
4174  *     mapping to single `cand_id` (there could be two different `canon_id`
4175  *     mapped to the same `cand_id`, but it's not contradictory).
4176  *   - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
4177  *     graph is FWD. In this case we are just going to check compatibility of
4178  *     STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
4179  *     assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
4180  *     a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
4181  *     turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
4182  *     canonical graph.
4183  */
static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
			      __u32 canon_id)
{
	struct btf_type *cand_type;
	struct btf_type *canon_type;
	__u32 hypot_type_id;
	__u16 cand_kind;
	__u16 canon_kind;
	int i, eq;

	/* if both resolve to the same canonical, they must be equivalent */
	if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
		return 1;

	/* follow FWD -> STRUCT/UNION resolution, but only on the canonical
	 * side (see the comment above for why candidate FWDs are never
	 * resolved)
	 */
	canon_id = resolve_fwd_id(d, canon_id);

	hypot_type_id = d->hypot_map[canon_id];
	if (hypot_type_id <= BTF_MAX_NR_TYPES) {
		/* canon_id already has a hypothetical mapping; to stay
		 * contradiction-free it has to map to this very candidate,
		 * modulo the "identical types" work-arounds below
		 */
		if (hypot_type_id == cand_id)
			return 1;
		/* In some cases compiler will generate different DWARF types
		 * for *identical* array type definitions and use them for
		 * different fields within the *same* struct. This breaks type
		 * equivalence check, which makes an assumption that candidate
		 * types sub-graph has a consistent and deduped-by-compiler
		 * types within a single CU. So work around that by explicitly
		 * allowing identical array types here.
		 */
		if (btf_dedup_identical_arrays(d, hypot_type_id, cand_id))
			return 1;
		/* It turns out that similar situation can happen with
		 * struct/union sometimes, sigh... Handle the case where
		 * structs/unions are exactly the same, down to the referenced
		 * type IDs. Anything more complicated (e.g., if referenced
		 * types are different, but equivalent) is *way more*
		 * complicated and requires a many-to-many equivalence mapping.
		 */
		if (btf_dedup_identical_structs(d, hypot_type_id, cand_id))
			return 1;
		return 0;
	}

	/* record hypothetical canon_id -> cand_id mapping; contradictions
	 * against it are detected above on subsequent recursive visits
	 */
	if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
		return -ENOMEM;

	cand_type = btf_type_by_id(d->btf, cand_id);
	canon_type = btf_type_by_id(d->btf, canon_id);
	cand_kind = btf_kind(cand_type);
	canon_kind = btf_kind(canon_type);

	/* names have to match */
	if (cand_type->name_off != canon_type->name_off)
		return 0;

	/* FWD <--> STRUCT/UNION equivalence check, if enabled */
	if ((cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
	    && cand_kind != canon_kind) {
		__u16 real_kind;
		__u16 fwd_kind;

		if (cand_kind == BTF_KIND_FWD) {
			real_kind = canon_kind;
			fwd_kind = btf_fwd_kind(cand_type);
		} else {
			real_kind = cand_kind;
			fwd_kind = btf_fwd_kind(canon_type);
			/* we'd need to resolve base FWD to STRUCT/UNION */
			if (fwd_kind == real_kind && canon_id < d->btf->start_id)
				d->hypot_adjust_canon = true;
		}
		/* FWD is only compatible with struct/union of the same kind */
		return fwd_kind == real_kind;
	}

	if (cand_kind != canon_kind)
		return 0;

	switch (cand_kind) {
	case BTF_KIND_INT:
		return btf_equal_int_tag(cand_type, canon_type);

	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		return btf_compat_enum(cand_type, canon_type);

	case BTF_KIND_FWD:
	case BTF_KIND_FLOAT:
		return btf_equal_common(cand_type, canon_type);

	/* reference kinds: compare info word, then recurse into the
	 * referenced type
	 */
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_TYPE_TAG:
		if (cand_type->info != canon_type->info)
			return 0;
		return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);

	case BTF_KIND_ARRAY: {
		const struct btf_array *cand_arr, *canon_arr;

		if (!btf_compat_array(cand_type, canon_type))
			return 0;
		cand_arr = btf_array(cand_type);
		canon_arr = btf_array(canon_type);
		/* index and element types are checked recursively */
		eq = btf_dedup_is_equiv(d, cand_arr->index_type, canon_arr->index_type);
		if (eq <= 0)
			return eq;
		return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
	}

	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *cand_m, *canon_m;
		__u16 vlen;

		if (!btf_shallow_equal_struct(cand_type, canon_type))
			return 0;
		/* member-by-member recursive equivalence check */
		vlen = btf_vlen(cand_type);
		cand_m = btf_members(cand_type);
		canon_m = btf_members(canon_type);
		for (i = 0; i < vlen; i++) {
			eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
			if (eq <= 0)
				return eq;
			cand_m++;
			canon_m++;
		}

		return 1;
	}

	case BTF_KIND_FUNC_PROTO: {
		const struct btf_param *cand_p, *canon_p;
		__u16 vlen;

		if (!btf_compat_fnproto(cand_type, canon_type))
			return 0;
		/* check return type first, then each parameter type */
		eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
		if (eq <= 0)
			return eq;
		vlen = btf_vlen(cand_type);
		cand_p = btf_params(cand_type);
		canon_p = btf_params(canon_type);
		for (i = 0; i < vlen; i++) {
			eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
			if (eq <= 0)
				return eq;
			cand_p++;
			canon_p++;
		}
		return 1;
	}

	default:
		return -EINVAL;
	}
	return 0;
}
4343 
4344 /*
4345  * Use hypothetical mapping, produced by successful type graph equivalence
4346  * check, to augment existing struct/union canonical mapping, where possible.
4347  *
4348  * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
4349  * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional:
4350  * it doesn't matter if FWD type was part of canonical graph or candidate one,
4351  * we are recording the mapping anyway. As opposed to carefulness required
4352  * for struct/union correspondence mapping (described below), for FWD resolution
4353  * it's not important, as by the time that FWD type (reference type) will be
4354  * deduplicated all structs/unions will be deduped already anyway.
4355  *
4356  * Recording STRUCT/UNION mapping is purely a performance optimization and is
4357  * not required for correctness. It needs to be done carefully to ensure that
4358  * struct/union from candidate's type graph is not mapped into corresponding
4359  * struct/union from canonical type graph that itself hasn't been resolved into
4360  * canonical representative. The only guarantee we have is that canonical
4361  * struct/union was determined as canonical and that won't change. But any
4362  * types referenced through that struct/union fields could have been not yet
4363  * resolved, so in case like that it's too early to establish any kind of
4364  * correspondence between structs/unions.
4365  *
 * No canonical correspondence is derived for primitive types (they are
 * already fully deduplicated anyway) or reference types (they rely on
4368  * stability of struct/union canonical relationship for equivalence checks).
4369  */
static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
{
	__u32 canon_type_id, targ_type_id;
	__u16 t_kind, c_kind;
	__u32 t_id, c_id;
	int i;

	for (i = 0; i < d->hypot_cnt; i++) {
		canon_type_id = d->hypot_list[i];
		targ_type_id = d->hypot_map[canon_type_id];
		t_id = resolve_type_id(d, targ_type_id);
		c_id = resolve_type_id(d, canon_type_id);
		t_kind = btf_kind(btf__type_by_id(d->btf, t_id));
		c_kind = btf_kind(btf__type_by_id(d->btf, c_id));
		/*
		 * Resolve FWD into STRUCT/UNION.
		 * It's ok to resolve FWD into STRUCT/UNION that's not yet
		 * mapped to canonical representative (as opposed to
		 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
		 * eventually that struct is going to be mapped and all resolved
		 * FWDs will automatically resolve to correct canonical
		 * representative. This will happen before ref type deduping,
		 * which critically depends on stability of these mappings. This
		 * stability is not a requirement for STRUCT/UNION equivalence
		 * checks, though.
		 */

		/* if it's the split BTF case, we still need to point base FWD
		 * to STRUCT/UNION in a split BTF, because FWDs from split BTF
		 * will be resolved against base FWD. If we don't point base
		 * canonical FWD to the resolved STRUCT/UNION, then all the
		 * FWDs in split BTF won't be correctly resolved to a proper
		 * STRUCT/UNION.
		 */
		if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
			d->map[c_id] = t_id;

		/* if graph equivalence determined that we'd need to adjust
		 * base canonical types, then we need to only point base FWDs
		 * to STRUCTs/UNIONs and do no more modifications. For all
		 * other purposes the type graphs were not equivalent.
		 */
		if (d->hypot_adjust_canon)
			continue;

		/* candidate-side FWD resolves to canonical STRUCT/UNION */
		if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
			d->map[t_id] = c_id;

		if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
		    c_kind != BTF_KIND_FWD &&
		    is_type_mapped(d, c_id) &&
		    !is_type_mapped(d, t_id)) {
			/*
			 * as a perf optimization, we can map struct/union
			 * that's part of type graph we just verified for
			 * equivalence. We can do that for struct/union that has
			 * canonical representative only, though.
			 */
			d->map[t_id] = c_id;
		}
	}
}
4432 
4433 /*
4434  * Deduplicate struct/union types.
4435  *
4436  * For each struct/union type its type signature hash is calculated, taking
4437  * into account type's name, size, number, order and names of fields, but
4438  * ignoring type ID's referenced from fields, because they might not be deduped
4439  * completely until after reference types deduplication phase. This type hash
4440  * is used to iterate over all potential canonical types, sharing same hash.
4441  * For each canonical candidate we check whether type graphs that they form
4442  * (through referenced types in fields and so on) are equivalent using algorithm
4443  * implemented in `btf_dedup_is_equiv`. If such equivalence is found and
4444  * BTF_KIND_FWD resolution is allowed, then hypothetical mapping
4445  * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence
4446  * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to
4447  * potentially map other structs/unions to their canonical representatives,
4448  * if such relationship hasn't yet been established. This speeds up algorithm
4449  * by eliminating some of the duplicate work.
4450  *
4451  * If no matching canonical representative was found, struct/union is marked
4452  * as canonical for itself and is added into btf_dedup->dedup_table hash map
4453  * for further look ups.
4454  */
static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
{
	struct btf_type *cand_type, *t;
	struct hashmap_entry *hash_entry;
	/* if we don't find equivalent type, then we are canonical */
	__u32 new_id = type_id;
	__u16 kind;
	long h;

	/* already deduped or is in process of deduping (loop detected) */
	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
		return 0;

	t = btf_type_by_id(d->btf, type_id);
	kind = btf_kind(t);

	/* this pass only handles structs and unions */
	if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
		return 0;

	h = btf_hash_struct(t);
	for_each_dedup_cand(d, hash_entry, h) {
		__u32 cand_id = hash_entry->value;
		int eq;

		/*
		 * Even though btf_dedup_is_equiv() checks for
		 * btf_shallow_equal_struct() internally when checking two
		 * structs (unions) for equivalence, we need to guard here
		 * from picking matching FWD type as a dedup candidate.
		 * This can happen due to hash collision. In such case just
		 * relying on btf_dedup_is_equiv() would lead to potentially
		 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
		 * FWD and compatible STRUCT/UNION are considered equivalent.
		 */
		cand_type = btf_type_by_id(d->btf, cand_id);
		if (!btf_shallow_equal_struct(t, cand_type))
			continue;

		/* start with clean hypothetical mapping for each candidate */
		btf_dedup_clear_hypot_map(d);
		eq = btf_dedup_is_equiv(d, type_id, cand_id);
		if (eq < 0)
			return eq;
		if (!eq)
			continue;
		btf_dedup_merge_hypot_map(d);
		if (d->hypot_adjust_canon) /* not really equivalent */
			continue;
		new_id = cand_id;
		break;
	}

	d->map[type_id] = new_id;
	/* if we are our own canonical type, register as dedup candidate */
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return 0;
}
4512 
4513 static int btf_dedup_struct_types(struct btf_dedup *d)
4514 {
4515 	int i, err;
4516 
4517 	for (i = 0; i < d->btf->nr_types; i++) {
4518 		err = btf_dedup_struct_type(d, d->btf->start_id + i);
4519 		if (err)
4520 			return err;
4521 	}
4522 	return 0;
4523 }
4524 
4525 /*
4526  * Deduplicate reference type.
4527  *
4528  * Once all primitive and struct/union types got deduplicated, we can easily
4529  * deduplicate all other (reference) BTF types. This is done in two steps:
4530  *
4531  * 1. Resolve all referenced type IDs into their canonical type IDs. This
4532  * resolution can be done either immediately for primitive or struct/union types
4533  * (because they were deduped in previous two phases) or recursively for
4534  * reference types. Recursion will always terminate at either primitive or
4535  * struct/union type, at which point we can "unwind" chain of reference types
4536  * one by one. There is no danger of encountering cycles because in C type
4537  * system the only way to form type cycle is through struct/union, so any chain
4538  * of reference types, even those taking part in a type cycle, will inevitably
4539  * reach struct/union at some point.
4540  *
4541  * 2. Once all referenced type IDs are resolved into canonical ones, BTF type
4542  * becomes "stable", in the sense that no further deduplication will cause
4543  * any changes to it. With that, it's now possible to calculate type's signature
4544  * hash (this time taking into account referenced type IDs) and loop over all
4545  * potential canonical representatives. If no match was found, current type
4546  * will become canonical representative of itself and will be added into
4547  * btf_dedup->dedup_table as another possible canonical representative.
4548  */
static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
{
	struct hashmap_entry *hash_entry;
	__u32 new_id = type_id, cand_id;
	struct btf_type *t, *cand;
	/* if we don't find equivalent type, then we are representative type */
	int ref_type_id;
	long h;

	/* revisiting an in-progress type means there is a reference cycle
	 * not going through a struct/union, which is malformed BTF (see the
	 * comment above about why reference chains can't be cyclic)
	 */
	if (d->map[type_id] == BTF_IN_PROGRESS_ID)
		return -ELOOP;
	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
		return resolve_type_id(d, type_id);

	t = btf_type_by_id(d->btf, type_id);
	/* mark as in-progress to detect reference cycles */
	d->map[type_id] = BTF_IN_PROGRESS_ID;

	switch (btf_kind(t)) {
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_TYPE_TAG:
		/* dedup referenced type first, then look for a match */
		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		h = btf_hash_common(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_common(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	case BTF_KIND_DECL_TAG:
		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		h = btf_hash_int_decl_tag(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_int_tag(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	case BTF_KIND_ARRAY: {
		struct btf_array *info = btf_array(t);

		/* dedup both element and index types */
		ref_type_id = btf_dedup_ref_type(d, info->type);
		if (ref_type_id < 0)
			return ref_type_id;
		info->type = ref_type_id;

		ref_type_id = btf_dedup_ref_type(d, info->index_type);
		if (ref_type_id < 0)
			return ref_type_id;
		info->index_type = ref_type_id;

		h = btf_hash_array(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_array(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;
	}

	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *param;
		__u16 vlen;
		int i;

		/* dedup return type first, then every parameter type */
		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		vlen = btf_vlen(t);
		param = btf_params(t);
		for (i = 0; i < vlen; i++) {
			ref_type_id = btf_dedup_ref_type(d, param->type);
			if (ref_type_id < 0)
				return ref_type_id;
			param->type = ref_type_id;
			param++;
		}

		h = btf_hash_fnproto(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_fnproto(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;
	}

	default:
		return -EINVAL;
	}

	d->map[type_id] = new_id;
	/* if we are our own canonical type, register as dedup candidate */
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return new_id;
}
4674 
4675 static int btf_dedup_ref_types(struct btf_dedup *d)
4676 {
4677 	int i, err;
4678 
4679 	for (i = 0; i < d->btf->nr_types; i++) {
4680 		err = btf_dedup_ref_type(d, d->btf->start_id + i);
4681 		if (err < 0)
4682 			return err;
4683 	}
4684 	/* we won't need d->dedup_table anymore */
4685 	hashmap__free(d->dedup_table);
4686 	d->dedup_table = NULL;
4687 	return 0;
4688 }
4689 
4690 /*
4691  * Collect a map from type names to type ids for all canonical structs
4692  * and unions. If the same name is shared by several canonical types
4693  * use a special value 0 to indicate this fact.
4694  */
static int btf_dedup_fill_unique_names_map(struct btf_dedup *d, struct hashmap *names_map)
{
	__u32 nr_types = btf__type_cnt(d->btf);
	struct btf_type *t;
	__u32 type_id;
	__u16 kind;
	int err;

	/*
	 * Iterate over base and split module ids in order to get all
	 * available structs in the map.
	 */
	for (type_id = 1; type_id < nr_types; ++type_id) {
		t = btf_type_by_id(d->btf, type_id);
		kind = btf_kind(t);

		if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
			continue;

		/* Skip non-canonical types */
		if (type_id != d->map[type_id])
			continue;

		/* a second canonical type with the same name makes the name
		 * ambiguous; mark that with the special value 0
		 */
		err = hashmap__add(names_map, t->name_off, type_id);
		if (err == -EEXIST)
			err = hashmap__set(names_map, t->name_off, 0, NULL, NULL);

		if (err)
			return err;
	}

	return 0;
}
4728 
static int btf_dedup_resolve_fwd(struct btf_dedup *d, struct hashmap *names_map, __u32 type_id)
{
	struct btf_type *t = btf_type_by_id(d->btf, type_id);
	enum btf_fwd_kind fwd_kind = btf_kflag(t);
	__u16 cand_kind, kind = btf_kind(t);
	struct btf_type *cand_t;
	uintptr_t cand_id;

	if (kind != BTF_KIND_FWD)
		return 0;

	/* Skip if this FWD already has a mapping */
	if (type_id != d->map[type_id])
		return 0;

	/* no canonical struct/union with a matching name */
	if (!hashmap__find(names_map, t->name_off, &cand_id))
		return 0;

	/* Zero is a special value indicating that name is not unique */
	if (!cand_id)
		return 0;

	/* FWD kind (struct vs union) has to agree with candidate's kind */
	cand_t = btf_type_by_id(d->btf, cand_id);
	cand_kind = btf_kind(cand_t);
	if ((cand_kind == BTF_KIND_STRUCT && fwd_kind != BTF_FWD_STRUCT) ||
	    (cand_kind == BTF_KIND_UNION && fwd_kind != BTF_FWD_UNION))
		return 0;

	d->map[type_id] = cand_id;

	return 0;
}
4761 
4762 /*
4763  * Resolve unambiguous forward declarations.
4764  *
4765  * The lion's share of all FWD declarations is resolved during
4766  * `btf_dedup_struct_types` phase when different type graphs are
4767  * compared against each other. However, if in some compilation unit a
4768  * FWD declaration is not a part of a type graph compared against
4769  * another type graph that declaration's canonical type would not be
4770  * changed. Example:
4771  *
4772  * CU #1:
4773  *
4774  * struct foo;
4775  * struct foo *some_global;
4776  *
4777  * CU #2:
4778  *
4779  * struct foo { int u; };
4780  * struct foo *another_global;
4781  *
4782  * After `btf_dedup_struct_types` the BTF looks as follows:
4783  *
4784  * [1] STRUCT 'foo' size=4 vlen=1 ...
4785  * [2] INT 'int' size=4 ...
4786  * [3] PTR '(anon)' type_id=1
4787  * [4] FWD 'foo' fwd_kind=struct
4788  * [5] PTR '(anon)' type_id=4
4789  *
4790  * This pass assumes that such FWD declarations should be mapped to
4791  * structs or unions with identical name in case if the name is not
4792  * ambiguous.
4793  */
4794 static int btf_dedup_resolve_fwds(struct btf_dedup *d)
4795 {
4796 	int i, err;
4797 	struct hashmap *names_map;
4798 
4799 	names_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
4800 	if (IS_ERR(names_map))
4801 		return PTR_ERR(names_map);
4802 
4803 	err = btf_dedup_fill_unique_names_map(d, names_map);
4804 	if (err < 0)
4805 		goto exit;
4806 
4807 	for (i = 0; i < d->btf->nr_types; i++) {
4808 		err = btf_dedup_resolve_fwd(d, names_map, d->btf->start_id + i);
4809 		if (err < 0)
4810 			break;
4811 	}
4812 
4813 exit:
4814 	hashmap__free(names_map);
4815 	return err;
4816 }
4817 
4818 /*
4819  * Compact types.
4820  *
4821  * After we established for each type its corresponding canonical representative
4822  * type, we now can eliminate types that are not canonical and leave only
 * canonical ones laid out sequentially in memory by copying them over
4824  * duplicates. During compaction btf_dedup->hypot_map array is reused to store
4825  * a map from original type ID to a new compacted type ID, which will be used
4826  * during next phase to "fix up" type IDs, referenced from struct/union and
4827  * reference types.
4828  */
static int btf_dedup_compact_types(struct btf_dedup *d)
{
	__u32 *new_offs;
	__u32 next_type_id = d->btf->start_id;
	const struct btf_type *t;
	void *p;
	int i, id, len;

	/* we are going to reuse hypot_map to store compaction remapping */
	d->hypot_map[0] = 0;
	/* base BTF types are not renumbered */
	for (id = 1; id < d->btf->start_id; id++)
		d->hypot_map[id] = id;
	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++)
		d->hypot_map[id] = BTF_UNPROCESSED_ID;

	p = d->btf->types_data;

	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) {
		/* skip types that aren't their own canonical representative */
		if (d->map[id] != id)
			continue;

		t = btf__type_by_id(d->btf, id);
		len = btf_type_size(t);
		if (len < 0)
			return len;

		/* copy canonical type into its new spot; source and
		 * destination regions can overlap, hence memmove
		 */
		memmove(p, t, len);
		d->hypot_map[id] = next_type_id;
		d->btf->type_offs[next_type_id - d->btf->start_id] = p - d->btf->types_data;
		p += len;
		next_type_id++;
	}

	/* shrink struct btf's internal types index and update btf_header */
	d->btf->nr_types = next_type_id - d->btf->start_id;
	d->btf->type_offs_cap = d->btf->nr_types;
	d->btf->hdr->type_len = p - d->btf->types_data;
	new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap,
				       sizeof(*new_offs));
	if (d->btf->type_offs_cap && !new_offs)
		return -ENOMEM;
	d->btf->type_offs = new_offs;
	d->btf->hdr->str_off = d->btf->hdr->type_len;
	d->btf->raw_size = d->btf->hdr->hdr_len + d->btf->hdr->type_len + d->btf->hdr->str_len;
	return 0;
}
4876 
4877 /*
4878  * Figure out final (deduplicated and compacted) type ID for provided original
4879  * `type_id` by first resolving it into corresponding canonical type ID and
4880  * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
4881  * which is populated during compaction phase.
4882  */
4883 static int btf_dedup_remap_type_id(__u32 *type_id, void *ctx)
4884 {
4885 	struct btf_dedup *d = ctx;
4886 	__u32 resolved_type_id, new_type_id;
4887 
4888 	resolved_type_id = resolve_type_id(d, *type_id);
4889 	new_type_id = d->hypot_map[resolved_type_id];
4890 	if (new_type_id > BTF_MAX_NR_TYPES)
4891 		return -EINVAL;
4892 
4893 	*type_id = new_type_id;
4894 	return 0;
4895 }
4896 
4897 /*
4898  * Remap referenced type IDs into deduped type IDs.
4899  *
4900  * After BTF types are deduplicated and compacted, their final type IDs may
4901  * differ from original ones. The map from original to a corresponding
4902  * deduped type ID is stored in btf_dedup->hypot_map and is populated during
4903  * compaction phase. During remapping phase we are rewriting all type IDs
4904  * referenced from any BTF type (e.g., struct fields, func proto args, etc) to
4905  * their final deduped type IDs.
4906  */
4907 static int btf_dedup_remap_types(struct btf_dedup *d)
4908 {
4909 	int i, r;
4910 
4911 	for (i = 0; i < d->btf->nr_types; i++) {
4912 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
4913 
4914 		r = btf_type_visit_type_ids(t, btf_dedup_remap_type_id, d);
4915 		if (r)
4916 			return r;
4917 	}
4918 
4919 	if (!d->btf_ext)
4920 		return 0;
4921 
4922 	r = btf_ext_visit_type_ids(d->btf_ext, btf_dedup_remap_type_id, d);
4923 	if (r)
4924 		return r;
4925 
4926 	return 0;
4927 }
4928 
4929 /*
4930  * Probe few well-known locations for vmlinux kernel image and try to load BTF
4931  * data out of it to use for target BTF.
4932  */
4933 struct btf *btf__load_vmlinux_btf(void)
4934 {
4935 	const char *locations[] = {
4936 		/* try canonical vmlinux BTF through sysfs first */
4937 		"/sys/kernel/btf/vmlinux",
4938 		/* fall back to trying to find vmlinux on disk otherwise */
4939 		"/boot/vmlinux-%1$s",
4940 		"/lib/modules/%1$s/vmlinux-%1$s",
4941 		"/lib/modules/%1$s/build/vmlinux",
4942 		"/usr/lib/modules/%1$s/kernel/vmlinux",
4943 		"/usr/lib/debug/boot/vmlinux-%1$s",
4944 		"/usr/lib/debug/boot/vmlinux-%1$s.debug",
4945 		"/usr/lib/debug/lib/modules/%1$s/vmlinux",
4946 	};
4947 	char path[PATH_MAX + 1];
4948 	struct utsname buf;
4949 	struct btf *btf;
4950 	int i, err;
4951 
4952 	uname(&buf);
4953 
4954 	for (i = 0; i < ARRAY_SIZE(locations); i++) {
4955 		snprintf(path, PATH_MAX, locations[i], buf.release);
4956 
4957 		if (faccessat(AT_FDCWD, path, R_OK, AT_EACCESS))
4958 			continue;
4959 
4960 		btf = btf__parse(path, NULL);
4961 		err = libbpf_get_error(btf);
4962 		pr_debug("loading kernel BTF '%s': %d\n", path, err);
4963 		if (err)
4964 			continue;
4965 
4966 		return btf;
4967 	}
4968 
4969 	pr_warn("failed to find valid kernel BTF\n");
4970 	return libbpf_err_ptr(-ESRCH);
4971 }
4972 
4973 struct btf *libbpf_find_kernel_btf(void) __attribute__((alias("btf__load_vmlinux_btf")));
4974 
/* Load split BTF of kernel module 'module_name' from sysfs, on top of the
 * provided vmlinux BTF.
 */
struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf)
{
	char sysfs_path[80];

	snprintf(sysfs_path, sizeof(sysfs_path), "/sys/kernel/btf/%s", module_name);
	return btf__parse_split(sysfs_path, vmlinux_btf);
}
4982 
/* Invoke 'visit' callback on each type ID that BTF type 't' references
 * (pointed-to type, member types, func proto return/param types, etc).
 * Stops early and returns the first non-zero result of 'visit'.
 */
int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
{
	int i, n, err;

	switch (btf_kind(t)) {
	/* these kinds reference no other types */
	case BTF_KIND_INT:
	case BTF_KIND_FLOAT:
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		return 0;

	/* these kinds reference exactly one type, via t->type */
	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_VAR:
	case BTF_KIND_DECL_TAG:
	case BTF_KIND_TYPE_TAG:
		return visit(&t->type, ctx);

	case BTF_KIND_ARRAY: {
		struct btf_array *a = btf_array(t);

		/* element type, then index type */
		err = visit(&a->type, ctx);
		err = err ?: visit(&a->index_type, ctx);
		return err;
	}

	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		struct btf_member *m = btf_members(t);

		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
			err = visit(&m->type, ctx);
			if (err)
				return err;
		}
		return 0;
	}

	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *m = btf_params(t);

		/* return type, then each parameter type */
		err = visit(&t->type, ctx);
		if (err)
			return err;
		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
			err = visit(&m->type, ctx);
			if (err)
				return err;
		}
		return 0;
	}

	case BTF_KIND_DATASEC: {
		struct btf_var_secinfo *m = btf_var_secinfos(t);

		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
			err = visit(&m->type, ctx);
			if (err)
				return err;
		}
		return 0;
	}

	default:
		return -EINVAL;
	}
}
5055 
/* Invoke 'visit' callback on each string offset within BTF type 't': the
 * type's own name, plus member/enumerator/parameter names, where applicable.
 * Stops early and returns the first non-zero result of 'visit'.
 */
int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx)
{
	int i, n, err;

	/* every kind has its own name string */
	err = visit(&t->name_off, ctx);
	if (err)
		return err;

	switch (btf_kind(t)) {
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		struct btf_member *m = btf_members(t);

		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
			err = visit(&m->name_off, ctx);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_ENUM: {
		struct btf_enum *m = btf_enum(t);

		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
			err = visit(&m->name_off, ctx);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_ENUM64: {
		struct btf_enum64 *m = btf_enum64(t);

		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
			err = visit(&m->name_off, ctx);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *m = btf_params(t);

		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
			err = visit(&m->name_off, ctx);
			if (err)
				return err;
		}
		break;
	}
	default:
		/* all other kinds carry no extra strings */
		break;
	}

	return 0;
}
5112 
/* Invoke 'visit' callback on each type ID recorded in .BTF.ext data:
 * func_info records and CO-RE relocation records. Stops early and returns
 * the first negative result of 'visit'.
 */
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
{
	const struct btf_ext_info *seg;
	struct btf_ext_info_sec *sec;
	int i, err;

	seg = &btf_ext->func_info;
	for_each_btf_ext_sec(seg, sec) {
		struct bpf_func_info_min *rec;

		for_each_btf_ext_rec(seg, sec, i, rec) {
			err = visit(&rec->type_id, ctx);
			if (err < 0)
				return err;
		}
	}

	seg = &btf_ext->core_relo_info;
	for_each_btf_ext_sec(seg, sec) {
		struct bpf_core_relo *rec;

		for_each_btf_ext_rec(seg, sec, i, rec) {
			err = visit(&rec->type_id, ctx);
			if (err < 0)
				return err;
		}
	}

	return 0;
}
5143 
/* Invoke 'visit' callback on each string offset recorded in .BTF.ext data:
 * per-section names for func/line/CO-RE info, line_info's file name and
 * source line strings, and CO-RE relocation access strings. Stops early
 * and returns the first non-zero result of 'visit'.
 */
int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx)
{
	const struct btf_ext_info *seg;
	struct btf_ext_info_sec *sec;
	int i, err;

	seg = &btf_ext->func_info;
	for_each_btf_ext_sec(seg, sec) {
		err = visit(&sec->sec_name_off, ctx);
		if (err)
			return err;
	}

	seg = &btf_ext->line_info;
	for_each_btf_ext_sec(seg, sec) {
		struct bpf_line_info_min *rec;

		err = visit(&sec->sec_name_off, ctx);
		if (err)
			return err;

		for_each_btf_ext_rec(seg, sec, i, rec) {
			err = visit(&rec->file_name_off, ctx);
			if (err)
				return err;
			err = visit(&rec->line_off, ctx);
			if (err)
				return err;
		}
	}

	seg = &btf_ext->core_relo_info;
	for_each_btf_ext_sec(seg, sec) {
		struct bpf_core_relo *rec;

		err = visit(&sec->sec_name_off, ctx);
		if (err)
			return err;

		for_each_btf_ext_rec(seg, sec, i, rec) {
			err = visit(&rec->access_str_off, ctx);
			if (err)
				return err;
		}
	}

	return 0;
}
5192