// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2018 Facebook */

#include <byteswap.h>
#include <endian.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <sys/utsname.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/btf.h>
#include <gelf.h>
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "strset.h"

#define BTF_MAX_NR_TYPES 0x7fffffffU
#define BTF_MAX_STR_OFFSET 0x7fffffffU

static struct btf_type btf_void;

struct btf {
	/* raw BTF data in native endianness */
	void *raw_data;
	/* raw BTF data in non-native endianness */
	void *raw_data_swapped;
	__u32 raw_size;
	/* whether target endianness differs from the native one */
	bool swapped_endian;

	/*
	 * When BTF is loaded from an ELF or raw memory image, it is stored
	 * in a contiguous memory block. The hdr, types_data, and strs_data
	 * pointers point inside that memory region to their respective parts
	 * of the BTF representation:
	 *
	 * +--------------------------------+
	 * |  Header  |  Types  |  Strings  |
	 * +--------------------------------+
	 * ^          ^         ^
	 * |          |         |
	 * hdr        |         |
	 * types_data-+         |
	 * strs_data------------+
	 *
	 * If BTF data is later modified, e.g., because types were added or
	 * removed, BTF deduplication was performed, etc., this contiguous
	 * representation is broken up into three independently allocated
	 * memory regions so that each can be modified independently.
	 * raw_data is nulled out at that point, but can be later allocated
	 * and cached again if user calls btf__raw_data(), at which point
	 * raw_data will contain a contiguous copy of header, types, and
	 * strings:
	 *
	 * +----------+  +---------+  +-----------+
	 * |  Header  |  |  Types  |  |  Strings  |
	 * +----------+  +---------+  +-----------+
	 * ^             ^            ^
	 * |             |            |
	 * hdr           |            |
	 * types_data----+            |
	 * strset__data(strs_set)-----+
	 *
	 *               +----------+---------+-----------+
	 *               |  Header  |  Types  |  Strings  |
	 * raw_data----->+----------+---------+-----------+
	 */
	struct btf_header *hdr;

	void *types_data;
	size_t types_data_cap; /* used size stored in hdr->type_len */

	/* type ID to `struct btf_type *` lookup index
	 * type_offs[0] corresponds to the first non-VOID type:
	 *   - for base BTF it's type [1];
	 *   - for split BTF it's the first non-base BTF type.
	 */
	__u32 *type_offs;
	size_t type_offs_cap;
	/* number of types in this BTF instance:
	 *   - doesn't include special [0] void type;
	 *   - for split BTF counts number of types added on top of base BTF.
	 */
	__u32 nr_types;
	/* if not NULL, points to the base BTF on top of which the current
	 * split BTF is based
	 */
	struct btf *base_btf;
	/* BTF type ID of the first type in this BTF instance:
	 *   - for base BTF it's equal to 1;
	 *   - for split BTF it's equal to biggest type ID of base BTF plus 1.
	 */
	int start_id;
	/* logical string offset of this BTF instance:
	 *   - for base BTF it's equal to 0;
	 *   - for split BTF it's equal to the total size of base BTF's string section.
	 */
	int start_str_off;

	/* only one of strs_data or strs_set can be non-NULL, depending on
	 * whether BTF is in a modifiable state (strs_set is used) or not
	 * (strs_data points inside raw_data)
	 */
	void *strs_data;
	/* a set of unique strings */
	struct strset *strs_set;
	/* whether strings are already deduplicated */
	bool strs_deduped;

	/* BTF object FD, if loaded into kernel */
	int fd;

	/* Pointer size (in bytes) for a target architecture of this BTF */
	int ptr_sz;
};

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

/* Ensure the dynamically allocated memory region pointed to by *data* with
 * capacity of *cap_cnt* elements, each taking *elem_sz* bytes, has enough
 * memory to accommodate *add_cnt* new elements, assuming *cur_cnt* elements
 * are already used. At most *max_cnt* elements can ever be allocated.
 * If necessary, memory is reallocated and all existing data is copied over;
 * the new pointer to the memory region is stored at *data* and the new
 * capacity (in number of elements) is stored in *cap_cnt*.
 * On success, a pointer to the beginning of the unused memory is returned.
 * On error, NULL is returned.
 */
void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
		     size_t cur_cnt, size_t max_cnt, size_t add_cnt)
{
	size_t new_cnt;
	void *new_data;

	if (cur_cnt + add_cnt <= *cap_cnt)
		return *data + cur_cnt * elem_sz;

	/* requested more than the set limit */
	if (cur_cnt + add_cnt > max_cnt)
		return NULL;

	new_cnt = *cap_cnt;
	new_cnt += new_cnt / 4;		  /* expand by 25% */
	if (new_cnt < 16)		  /* but at least 16 elements */
		new_cnt = 16;
	if (new_cnt > max_cnt)		  /* but not exceeding a set limit */
		new_cnt = max_cnt;
	if (new_cnt < cur_cnt + add_cnt)  /* also ensure we have enough memory */
		new_cnt = cur_cnt + add_cnt;

	new_data = libbpf_reallocarray(*data, new_cnt, elem_sz);
	if (!new_data)
		return NULL;

	/* zero out newly allocated portion of memory */
	memset(new_data + (*cap_cnt) * elem_sz, 0, (new_cnt - *cap_cnt) * elem_sz);

	*data = new_data;
	*cap_cnt = new_cnt;
	return new_data + cur_cnt * elem_sz;
}
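
/* Illustrative sketch (not part of libbpf): with the growth policy above, a
 * fresh array's first allocation reserves room for at least 16 elements, and
 * subsequent growth is by 25% at a time (16, 20, 25, 31, ...), capped at
 * max_cnt:
 *
 *	size_t cap = 0;
 *	__u32 *arr = NULL, *slot;
 *
 *	slot = libbpf_add_mem((void **)&arr, &cap, sizeof(__u32),
 *			      0, 1024, 1);
 *	if (slot)
 *		*slot = 42;
 *	// cap is now 16, one element is in use
 */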

/* Ensure given dynamically allocated memory region has enough allocated space
 * to accommodate *need_cnt* elements of size *elem_sz* bytes each
 */
int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
{
	void *p;

	if (need_cnt <= *cap_cnt)
		return 0;

	p = libbpf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt);
	if (!p)
		return -ENOMEM;

	return 0;
}

static void *btf_add_type_offs_mem(struct btf *btf, size_t add_cnt)
{
	return libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
			      btf->nr_types, BTF_MAX_NR_TYPES, add_cnt);
}

static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
{
	__u32 *p;

	p = btf_add_type_offs_mem(btf, 1);
	if (!p)
		return -ENOMEM;

	*p = type_off;
	return 0;
}

static void btf_bswap_hdr(struct btf_header *h)
{
	h->magic = bswap_16(h->magic);
	h->hdr_len = bswap_32(h->hdr_len);
	h->type_off = bswap_32(h->type_off);
	h->type_len = bswap_32(h->type_len);
	h->str_off = bswap_32(h->str_off);
	h->str_len = bswap_32(h->str_len);
}

static int btf_parse_hdr(struct btf *btf)
{
	struct btf_header *hdr = btf->hdr;
	__u32 meta_left;

	if (btf->raw_size < sizeof(struct btf_header)) {
		pr_debug("BTF header not found\n");
		return -EINVAL;
	}

	if (hdr->magic == bswap_16(BTF_MAGIC)) {
		btf->swapped_endian = true;
		if (bswap_32(hdr->hdr_len) != sizeof(struct btf_header)) {
			pr_warn("Can't load BTF with non-native endianness due to unsupported header length %u\n",
				bswap_32(hdr->hdr_len));
			return -ENOTSUP;
		}
		btf_bswap_hdr(hdr);
	} else if (hdr->magic != BTF_MAGIC) {
		pr_debug("Invalid BTF magic: %x\n", hdr->magic);
		return -EINVAL;
	}

	if (btf->raw_size < hdr->hdr_len) {
		pr_debug("BTF header len %u larger than data size %u\n",
			 hdr->hdr_len, btf->raw_size);
		return -EINVAL;
	}

	meta_left = btf->raw_size - hdr->hdr_len;
	if (meta_left < (long long)hdr->str_off + hdr->str_len) {
		pr_debug("Invalid BTF total size: %u\n", btf->raw_size);
		return -EINVAL;
	}

	if ((long long)hdr->type_off + hdr->type_len > hdr->str_off) {
		pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n",
			 hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len);
		return -EINVAL;
	}

	if (hdr->type_off % 4) {
		pr_debug("BTF type section is not aligned to 4 bytes\n");
		return -EINVAL;
	}

	return 0;
}

static int btf_parse_str_sec(struct btf *btf)
{
	const struct btf_header *hdr = btf->hdr;
	const char *start = btf->strs_data;
	const char *end = start + btf->hdr->str_len;

	if (btf->base_btf && hdr->str_len == 0)
		return 0;
	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || end[-1]) {
		pr_debug("Invalid BTF string section\n");
		return -EINVAL;
	}
	if (!btf->base_btf && start[0]) {
		pr_debug("Invalid BTF string section\n");
		return -EINVAL;
	}
	return 0;
}

static int btf_type_size(const struct btf_type *t)
{
	const int base_size = sizeof(struct btf_type);
	__u16 vlen = btf_vlen(t);

	switch (btf_kind(t)) {
	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_FLOAT:
	case BTF_KIND_TYPE_TAG:
		return base_size;
	case BTF_KIND_INT:
		return base_size + sizeof(__u32);
	case BTF_KIND_ENUM:
		return base_size + vlen * sizeof(struct btf_enum);
	case BTF_KIND_ENUM64:
		return base_size + vlen * sizeof(struct btf_enum64);
	case BTF_KIND_ARRAY:
		return base_size + sizeof(struct btf_array);
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		return base_size + vlen * sizeof(struct btf_member);
	case BTF_KIND_FUNC_PROTO:
		return base_size + vlen * sizeof(struct btf_param);
	case BTF_KIND_VAR:
		return base_size + sizeof(struct btf_var);
	case BTF_KIND_DATASEC:
		return base_size + vlen * sizeof(struct btf_var_secinfo);
	case BTF_KIND_DECL_TAG:
		return base_size + sizeof(struct btf_decl_tag);
	default:
		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
		return -EINVAL;
	}
}

static void btf_bswap_type_base(struct btf_type *t)
{
	t->name_off = bswap_32(t->name_off);
	t->info = bswap_32(t->info);
	t->type = bswap_32(t->type);
}

static int btf_bswap_type_rest(struct btf_type *t)
{
	struct btf_var_secinfo *v;
	struct btf_enum64 *e64;
	struct btf_member *m;
	struct btf_array *a;
	struct btf_param *p;
	struct btf_enum *e;
	__u16 vlen = btf_vlen(t);
	int i;

	switch (btf_kind(t)) {
	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_FLOAT:
	case BTF_KIND_TYPE_TAG:
		return 0;
	case BTF_KIND_INT:
		*(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1));
		return 0;
	case BTF_KIND_ENUM:
		for (i = 0, e = btf_enum(t); i < vlen; i++, e++) {
			e->name_off = bswap_32(e->name_off);
			e->val = bswap_32(e->val);
		}
		return 0;
	case BTF_KIND_ENUM64:
		for (i = 0, e64 = btf_enum64(t); i < vlen; i++, e64++) {
			e64->name_off = bswap_32(e64->name_off);
			e64->val_lo32 = bswap_32(e64->val_lo32);
			e64->val_hi32 = bswap_32(e64->val_hi32);
		}
		return 0;
	case BTF_KIND_ARRAY:
		a = btf_array(t);
		a->type = bswap_32(a->type);
		a->index_type = bswap_32(a->index_type);
		a->nelems = bswap_32(a->nelems);
		return 0;
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		for (i = 0, m = btf_members(t); i < vlen; i++, m++) {
			m->name_off = bswap_32(m->name_off);
			m->type = bswap_32(m->type);
			m->offset = bswap_32(m->offset);
		}
		return 0;
	case BTF_KIND_FUNC_PROTO:
		for (i = 0, p = btf_params(t); i < vlen; i++, p++) {
			p->name_off = bswap_32(p->name_off);
			p->type = bswap_32(p->type);
		}
		return 0;
	case BTF_KIND_VAR:
		btf_var(t)->linkage = bswap_32(btf_var(t)->linkage);
		return 0;
	case BTF_KIND_DATASEC:
		for (i = 0, v = btf_var_secinfos(t); i < vlen; i++, v++) {
			v->type = bswap_32(v->type);
			v->offset = bswap_32(v->offset);
			v->size = bswap_32(v->size);
		}
		return 0;
	case BTF_KIND_DECL_TAG:
		btf_decl_tag(t)->component_idx = bswap_32(btf_decl_tag(t)->component_idx);
		return 0;
	default:
		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
		return -EINVAL;
	}
}

static int btf_parse_type_sec(struct btf *btf)
{
	struct btf_header *hdr = btf->hdr;
	void *next_type = btf->types_data;
	void *end_type = next_type + hdr->type_len;
	int err, type_size;

	while (next_type + sizeof(struct btf_type) <= end_type) {
		if (btf->swapped_endian)
			btf_bswap_type_base(next_type);

		type_size = btf_type_size(next_type);
		if (type_size < 0)
			return type_size;
		if (next_type + type_size > end_type) {
			pr_warn("BTF type [%d] is malformed\n", btf->start_id + btf->nr_types);
			return -EINVAL;
		}

		if (btf->swapped_endian && btf_bswap_type_rest(next_type))
			return -EINVAL;

		err = btf_add_type_idx_entry(btf, next_type - btf->types_data);
		if (err)
			return err;

		next_type += type_size;
		btf->nr_types++;
	}

	if (next_type != end_type) {
		pr_warn("BTF types data is malformed\n");
		return -EINVAL;
	}

	return 0;
}

static int btf_validate_str(const struct btf *btf, __u32 str_off, const char *what, __u32 type_id)
{
	const char *s;

	s = btf__str_by_offset(btf, str_off);
	if (!s) {
		pr_warn("btf: type [%u]: invalid %s (string offset %u)\n", type_id, what, str_off);
		return -EINVAL;
	}

	return 0;
}

static int btf_validate_id(const struct btf *btf, __u32 id, __u32 ctx_id)
{
	const struct btf_type *t;

	t = btf__type_by_id(btf, id);
	if (!t) {
		pr_warn("btf: type [%u]: invalid referenced type ID %u\n", ctx_id, id);
		return -EINVAL;
	}

	return 0;
}

static int btf_validate_type(const struct btf *btf, const struct btf_type *t, __u32 id)
{
	__u32 kind = btf_kind(t);
	int err, i, n;

	err = btf_validate_str(btf, t->name_off, "type name", id);
	if (err)
		return err;

	switch (kind) {
	case BTF_KIND_UNKN:
	case BTF_KIND_INT:
	case BTF_KIND_FWD:
	case BTF_KIND_FLOAT:
		break;
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_VAR:
	case BTF_KIND_DECL_TAG:
	case BTF_KIND_TYPE_TAG:
		err = btf_validate_id(btf, t->type, id);
		if (err)
			return err;
		break;
	case BTF_KIND_ARRAY: {
		const struct btf_array *a = btf_array(t);

		err = btf_validate_id(btf, a->type, id);
		err = err ?: btf_validate_id(btf, a->index_type, id);
		if (err)
			return err;
		break;
	}
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *m = btf_members(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "field name", id);
			err = err ?: btf_validate_id(btf, m->type, id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_ENUM: {
		const struct btf_enum *m = btf_enum(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "enum name", id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_ENUM64: {
		const struct btf_enum64 *m = btf_enum64(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "enum name", id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_FUNC: {
		const struct btf_type *ft;

		err = btf_validate_id(btf, t->type, id);
		if (err)
			return err;
		ft = btf__type_by_id(btf, t->type);
		if (btf_kind(ft) != BTF_KIND_FUNC_PROTO) {
			pr_warn("btf: type [%u]: referenced type [%u] is not FUNC_PROTO\n", id, t->type);
			return -EINVAL;
		}
		break;
	}
	case BTF_KIND_FUNC_PROTO: {
		const struct btf_param *m = btf_params(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "param name", id);
			err = err ?: btf_validate_id(btf, m->type, id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_DATASEC: {
		const struct btf_var_secinfo *m = btf_var_secinfos(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_id(btf, m->type, id);
			if (err)
				return err;
		}
		break;
	}
	default:
		pr_warn("btf: type [%u]: unrecognized kind %u\n", id, kind);
		return -EINVAL;
	}
	return 0;
}

/* Validate basic sanity of BTF. It's intentionally less thorough than the
 * kernel's validation and only validates properties of BTF that libbpf relies
 * on to be correct (e.g., valid type IDs, valid string offsets, etc.)
 */
static int btf_sanity_check(const struct btf *btf)
{
	const struct btf_type *t;
	__u32 i, n = btf__type_cnt(btf);
	int err;

	for (i = 1; i < n; i++) {
		t = btf_type_by_id(btf, i);
		err = btf_validate_type(btf, t, i);
		if (err)
			return err;
	}
	return 0;
}

__u32 btf__type_cnt(const struct btf *btf)
{
	return btf->start_id + btf->nr_types;
}

const struct btf *btf__base_btf(const struct btf *btf)
{
	return btf->base_btf;
}

/* internal helper returning non-const pointer to a type */
struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id)
{
	if (type_id == 0)
		return &btf_void;
	if (type_id < btf->start_id)
		return btf_type_by_id(btf->base_btf, type_id);
	return btf->types_data + btf->type_offs[type_id - btf->start_id];
}

const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
{
	if (type_id >= btf->start_id + btf->nr_types)
		return errno = EINVAL, NULL;
	return btf_type_by_id((struct btf *)btf, type_id);
}

static int determine_ptr_size(const struct btf *btf)
{
	static const char * const long_aliases[] = {
		"long",
		"long int",
		"int long",
		"unsigned long",
		"long unsigned",
		"unsigned long int",
		"unsigned int long",
		"long unsigned int",
		"long int unsigned",
		"int unsigned long",
		"int long unsigned",
	};
	const struct btf_type *t;
	const char *name;
	int i, j, n;

	if (btf->base_btf && btf->base_btf->ptr_sz > 0)
		return btf->base_btf->ptr_sz;

	n = btf__type_cnt(btf);
	for (i = 1; i < n; i++) {
		t = btf__type_by_id(btf, i);
		if (!btf_is_int(t))
			continue;

		if (t->size != 4 && t->size != 8)
			continue;

		name = btf__name_by_offset(btf, t->name_off);
		if (!name)
			continue;

		for (j = 0; j < ARRAY_SIZE(long_aliases); j++) {
			if (strcmp(name, long_aliases[j]) == 0)
				return t->size;
		}
	}

	return -1;
}

static size_t btf_ptr_sz(const struct btf *btf)
{
	if (!btf->ptr_sz)
		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
	return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
}

/* Return the pointer size this BTF instance assumes. The size is
 * heuristically determined by looking for a 'long' or 'unsigned long' integer
 * type and recording its size in bytes. If BTF type information doesn't have
 * any such type, this function returns 0. In the latter case, the native
 * architecture's pointer size is assumed, so it will be either 4 or 8,
 * depending on the architecture that libbpf was compiled for. The guessed
 * value can be overridden using the btf__set_pointer_size() API.
 */
size_t btf__pointer_size(const struct btf *btf)
{
	if (!btf->ptr_sz)
		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);

	if (btf->ptr_sz < 0)
		/* not enough BTF type info to guess */
		return 0;

	return btf->ptr_sz;
}

/* Override or set pointer size in bytes. Only values of 4 and 8 are
 * supported.
 */
int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
{
	if (ptr_sz != 4 && ptr_sz != 8)
		return libbpf_err(-EINVAL);
	btf->ptr_sz = ptr_sz;
	return 0;
}
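
/* Illustrative usage sketch (not part of libbpf): forcing a 4-byte pointer
 * size when BTF was produced for a 32-bit target but contains no 'long'
 * type to guess from:
 *
 *	if (btf__pointer_size(btf) == 0) {
 *		err = btf__set_pointer_size(btf, 4);
 *		if (err)
 *			return err;
 *	}
 */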

static bool is_host_big_endian(void)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return false;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return true;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
}

enum btf_endianness btf__endianness(const struct btf *btf)
{
	if (is_host_big_endian())
		return btf->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
	else
		return btf->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
}

int btf__set_endianness(struct btf *btf, enum btf_endianness endian)
{
	if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
		return libbpf_err(-EINVAL);

	btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
	if (!btf->swapped_endian) {
		free(btf->raw_data_swapped);
		btf->raw_data_swapped = NULL;
	}
	return 0;
}

static bool btf_type_is_void(const struct btf_type *t)
{
	return t == &btf_void || btf_is_fwd(t);
}

static bool btf_type_is_void_or_null(const struct btf_type *t)
{
	return !t || btf_type_is_void(t);
}

#define MAX_RESOLVE_DEPTH 32

__s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
{
	const struct btf_array *array;
	const struct btf_type *t;
	__u32 nelems = 1;
	__s64 size = -1;
	int i;

	t = btf__type_by_id(btf, type_id);
	for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) {
		switch (btf_kind(t)) {
		case BTF_KIND_INT:
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
		case BTF_KIND_DATASEC:
		case BTF_KIND_FLOAT:
			size = t->size;
			goto done;
		case BTF_KIND_PTR:
			size = btf_ptr_sz(btf);
			goto done;
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_VAR:
		case BTF_KIND_DECL_TAG:
		case BTF_KIND_TYPE_TAG:
			type_id = t->type;
			break;
		case BTF_KIND_ARRAY:
			array = btf_array(t);
			if (nelems && array->nelems > UINT32_MAX / nelems)
				return libbpf_err(-E2BIG);
			nelems *= array->nelems;
			type_id = array->type;
			break;
		default:
			return libbpf_err(-EINVAL);
		}

		t = btf__type_by_id(btf, type_id);
	}

done:
	if (size < 0)
		return libbpf_err(-EINVAL);
	if (nelems && size > UINT32_MAX / nelems)
		return libbpf_err(-E2BIG);

	return nelems * size;
}
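
/* Illustrative sketch (not part of libbpf): btf__resolve_size() follows
 * modifiers, typedefs, and array nesting, multiplying out element counts.
 * Assuming arr_id identifies "const int[4][8]", it returns 4 * 8 * 4 = 128:
 *
 *	__s64 sz = btf__resolve_size(btf, arr_id);
 *	if (sz < 0)
 *		return sz;	// negative libbpf error code
 */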

int btf__align_of(const struct btf *btf, __u32 id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);
	__u16 kind = btf_kind(t);

	switch (kind) {
	case BTF_KIND_INT:
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
	case BTF_KIND_FLOAT:
		return min(btf_ptr_sz(btf), (size_t)t->size);
	case BTF_KIND_PTR:
		return btf_ptr_sz(btf);
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPE_TAG:
		return btf__align_of(btf, t->type);
	case BTF_KIND_ARRAY:
		return btf__align_of(btf, btf_array(t)->type);
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *m = btf_members(t);
		__u16 vlen = btf_vlen(t);
		int i, max_align = 1, align;

		for (i = 0; i < vlen; i++, m++) {
			align = btf__align_of(btf, m->type);
			if (align <= 0)
				return libbpf_err(align);
			max_align = max(max_align, align);

			/* if field offset isn't aligned according to field
			 * type's alignment, then struct must be packed
			 */
			if (btf_member_bitfield_size(t, i) == 0 &&
			    (m->offset % (8 * align)) != 0)
				return 1;
		}

		/* if struct/union size isn't a multiple of its alignment,
		 * then struct must be packed
		 */
		if ((t->size % max_align) != 0)
			return 1;

		return max_align;
	}
	default:
		pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
		return errno = EINVAL, 0;
	}
}

int btf__resolve_type(const struct btf *btf, __u32 type_id)
{
	const struct btf_type *t;
	int depth = 0;

	t = btf__type_by_id(btf, type_id);
	while (depth < MAX_RESOLVE_DEPTH &&
	       !btf_type_is_void_or_null(t) &&
	       (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
		type_id = t->type;
		t = btf__type_by_id(btf, type_id);
		depth++;
	}

	if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
		return libbpf_err(-EINVAL);

	return type_id;
}

__s32 btf__find_by_name(const struct btf *btf, const char *type_name)
{
	__u32 i, nr_types = btf__type_cnt(btf);

	if (!strcmp(type_name, "void"))
		return 0;

	for (i = 1; i < nr_types; i++) {
		const struct btf_type *t = btf__type_by_id(btf, i);
		const char *name = btf__name_by_offset(btf, t->name_off);

		if (name && !strcmp(type_name, name))
			return i;
	}

	return libbpf_err(-ENOENT);
}

static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
				   const char *type_name, __u32 kind)
{
	__u32 i, nr_types = btf__type_cnt(btf);

	if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
		return 0;

	for (i = start_id; i < nr_types; i++) {
		const struct btf_type *t = btf__type_by_id(btf, i);
		const char *name;

		if (btf_kind(t) != kind)
			continue;
		name = btf__name_by_offset(btf, t->name_off);
		if (name && !strcmp(type_name, name))
			return i;
	}

	return libbpf_err(-ENOENT);
}

__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
				 __u32 kind)
{
	return btf_find_by_name_kind(btf, btf->start_id, type_name, kind);
}

__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
			     __u32 kind)
{
	return btf_find_by_name_kind(btf, 1, type_name, kind);
}
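
/* Illustrative usage sketch (not part of libbpf): looking up a type by name
 * and kind, e.g. struct task_struct in previously parsed vmlinux BTF:
 *
 *	__s32 id = btf__find_by_name_kind(vmlinux_btf, "task_struct",
 *					  BTF_KIND_STRUCT);
 *	if (id < 0)
 *		return id;	// -ENOENT if no such struct
 */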

static bool btf_is_modifiable(const struct btf *btf)
{
	return (void *)btf->hdr != btf->raw_data;
}

void btf__free(struct btf *btf)
{
	if (IS_ERR_OR_NULL(btf))
		return;

	if (btf->fd >= 0)
		close(btf->fd);

	if (btf_is_modifiable(btf)) {
		/* if BTF was modified after loading, it will have a split
		 * in-memory representation for header, types, and strings
		 * sections, so we need to free all of them individually. It
		 * might still have cached contiguous raw data present, which
		 * will be unconditionally freed below.
		 */
		free(btf->hdr);
		free(btf->types_data);
		strset__free(btf->strs_set);
	}
	free(btf->raw_data);
	free(btf->raw_data_swapped);
	free(btf->type_offs);
	free(btf);
}

static struct btf *btf_new_empty(struct btf *base_btf)
{
	struct btf *btf;

	btf = calloc(1, sizeof(*btf));
	if (!btf)
		return ERR_PTR(-ENOMEM);

	btf->nr_types = 0;
	btf->start_id = 1;
	btf->start_str_off = 0;
	btf->fd = -1;
	btf->ptr_sz = sizeof(void *);
	btf->swapped_endian = false;

	if (base_btf) {
		btf->base_btf = base_btf;
		btf->start_id = btf__type_cnt(base_btf);
		btf->start_str_off = base_btf->hdr->str_len;
	}

	/* +1 for empty string at offset 0 */
	btf->raw_size = sizeof(struct btf_header) + (base_btf ? 0 : 1);
	btf->raw_data = calloc(1, btf->raw_size);
	if (!btf->raw_data) {
		free(btf);
		return ERR_PTR(-ENOMEM);
	}

	btf->hdr = btf->raw_data;
	btf->hdr->hdr_len = sizeof(struct btf_header);
	btf->hdr->magic = BTF_MAGIC;
	btf->hdr->version = BTF_VERSION;

	btf->types_data = btf->raw_data + btf->hdr->hdr_len;
	btf->strs_data = btf->raw_data + btf->hdr->hdr_len;
	btf->hdr->str_len = base_btf ? 0 : 1; /* empty string at offset 0 */

	return btf;
}

struct btf *btf__new_empty(void)
{
	return libbpf_ptr(btf_new_empty(NULL));
}

struct btf *btf__new_empty_split(struct btf *base_btf)
{
	return libbpf_ptr(btf_new_empty(base_btf));
}

static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
{
	struct btf *btf;
	int err;

	btf = calloc(1, sizeof(struct btf));
	if (!btf)
		return ERR_PTR(-ENOMEM);

	btf->nr_types = 0;
	btf->start_id = 1;
	btf->start_str_off = 0;
	btf->fd = -1;

	if (base_btf) {
		btf->base_btf = base_btf;
		btf->start_id = btf__type_cnt(base_btf);
		btf->start_str_off = base_btf->hdr->str_len;
	}

	btf->raw_data = malloc(size);
	if (!btf->raw_data) {
		err = -ENOMEM;
		goto done;
	}
	memcpy(btf->raw_data, data, size);
	btf->raw_size = size;

	btf->hdr = btf->raw_data;
	err = btf_parse_hdr(btf);
	if (err)
		goto done;

	btf->strs_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->str_off;
	btf->types_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->type_off;

	err = btf_parse_str_sec(btf);
	err = err ?: btf_parse_type_sec(btf);
	err = err ?: btf_sanity_check(btf);
	if (err)
		goto done;

done:
	if (err) {
		btf__free(btf);
		return ERR_PTR(err);
	}

	return btf;
}

struct btf *btf__new(const void *data, __u32 size)
{
	return libbpf_ptr(btf_new(data, size, NULL));
}

struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf)
{
	return libbpf_ptr(btf_new(data, size, base_btf));
}

static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
				 struct btf_ext **btf_ext)
{
	Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
	int err = 0, fd = -1, idx = 0;
	struct btf *btf = NULL;
	Elf_Scn *scn = NULL;
	Elf *elf = NULL;
	GElf_Ehdr ehdr;
	size_t shstrndx;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warn("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	fd = open(path, O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		err = -errno;
		pr_warn("failed to open %s: %s\n", path, strerror(errno));
		return ERR_PTR(err);
	}

	err = -LIBBPF_ERRNO__FORMAT;

	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf) {
		pr_warn("failed to open %s as ELF file\n", path);
		goto done;
	}
	if (!gelf_getehdr(elf, &ehdr)) {
		pr_warn("failed to get EHDR from %s\n", path);
		goto done;
	}

	if (elf_getshdrstrndx(elf, &shstrndx)) {
		pr_warn("failed to get section names section index for %s\n",
			path);
		goto done;
	}

	if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
		pr_warn("failed to get e_shstrndx from %s\n", path);
		goto done;
	}

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		GElf_Shdr sh;
		char *name;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warn("failed to get section(%d) header from %s\n",
				idx, path);
			goto done;
		}
		name = elf_strptr(elf, shstrndx, sh.sh_name);
		if (!name) {
			pr_warn("failed to get section(%d) name from %s\n",
				idx, path);
			goto done;
		}
		if (strcmp(name, BTF_ELF_SEC) == 0) {
			btf_data = elf_getdata(scn, 0);
			if (!btf_data) {
				pr_warn("failed to get section(%d, %s) data from %s\n",
					idx, name, path);
				goto done;
			}
			continue;
		} else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
			btf_ext_data = elf_getdata(scn, 0);
			if (!btf_ext_data) {
				pr_warn("failed to get section(%d, %s) data from %s\n",
					idx, name, path);
				goto done;
			}
			continue;
		}
	}

	if (!btf_data) {
		pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path);
		err = -ENODATA;
		goto done;
	}
	btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf);
	err = libbpf_get_error(btf);
	if (err)
		goto done;

	switch (gelf_getclass(elf)) {
	case ELFCLASS32:
		btf__set_pointer_size(btf, 4);
		break;
	case ELFCLASS64:
		btf__set_pointer_size(btf, 8);
		break;
	default:
		pr_warn("failed to get ELF class (bitness) for %s\n", path);
		break;
	}

	if (btf_ext && btf_ext_data) {
		*btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
		err = libbpf_get_error(*btf_ext);
		if (err)
			goto done;
	} else if (btf_ext) {
		*btf_ext = NULL;
	}
done:
	if (elf)
		elf_end(elf);
	close(fd);

	if (!err)
		return btf;

	if (btf_ext)
		btf_ext__free(*btf_ext);
	btf__free(btf);

	return ERR_PTR(err);
}

struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
{
	return libbpf_ptr(btf_parse_elf(path, NULL, btf_ext));
}

struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf)
{
	return libbpf_ptr(btf_parse_elf(path, base_btf, NULL));
}

static struct btf *btf_parse_raw(const char *path, struct btf *base_btf)
{
	struct btf *btf = NULL;
	void *data = NULL;
	FILE *f = NULL;
	__u16 magic;
	int err = 0;
	long sz;

	f = fopen(path, "rbe");
	if (!f) {
		err = -errno;
		goto err_out;
	}

	/* check BTF magic */
	if (fread(&magic, 1, sizeof(magic), f) < sizeof(magic)) {
		err = -EIO;
		goto err_out;
	}
	if (magic != BTF_MAGIC && magic != bswap_16(BTF_MAGIC)) {
		/* definitely not a raw BTF */
		err = -EPROTO;
		goto err_out;
	}

	/* get file size */
	if (fseek(f, 0, SEEK_END)) {
		err = -errno;
		goto err_out;
	}
	sz = ftell(f);
	if (sz < 0) {
		err = -errno;
		goto err_out;
	}
	/* rewind to the start */
	if (fseek(f, 0, SEEK_SET)) {
		err = -errno;
		goto err_out;
	}

	/* pre-alloc memory and read all of BTF data */
	data = malloc(sz);
	if (!data) {
		err = -ENOMEM;
		goto err_out;
	}
	if (fread(data, 1, sz, f) < sz) {
		err = -EIO;
		goto err_out;
	}

	/* finally parse BTF data */
	btf = btf_new(data, sz, base_btf);

err_out:
	free(data);
	if (f)
		fclose(f);
	return err ? ERR_PTR(err) : btf;
}

struct btf *btf__parse_raw(const char *path)
{
	return libbpf_ptr(btf_parse_raw(path, NULL));
}

struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf)
{
	return libbpf_ptr(btf_parse_raw(path, base_btf));
}

static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext)
{
	struct btf *btf;
	int err;

	if (btf_ext)
		*btf_ext = NULL;

	btf = btf_parse_raw(path, base_btf);
	err = libbpf_get_error(btf);
	if (!err)
		return btf;
	if (err != -EPROTO)
		return ERR_PTR(err);
	return btf_parse_elf(path, base_btf, btf_ext);
}

struct btf *btf__parse(const char *path, struct btf_ext **btf_ext)
{
	return libbpf_ptr(btf_parse(path, NULL, btf_ext));
}

struct btf *btf__parse_split(const char *path, struct btf *base_btf)
{
	return libbpf_ptr(btf_parse(path, base_btf, NULL));
}
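
/* Illustrative usage sketch (not part of libbpf): btf__parse() handles both
 * raw BTF and ELF inputs, so it works for kernel and module BTF exposed via
 * sysfs as well as for compiled BPF object files:
 *
 *	struct btf *vmlinux_btf, *mod_btf;
 *
 *	vmlinux_btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
 *	if (!vmlinux_btf)
 *		return -errno;
 *	mod_btf = btf__parse_split("/sys/kernel/btf/nf_tables", vmlinux_btf);
 */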

static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);

int btf_load_into_kernel(struct btf *btf,
			 char *log_buf, size_t log_sz, __u32 log_level,
			 int token_fd)
{
	LIBBPF_OPTS(bpf_btf_load_opts, opts);
	__u32 buf_sz = 0, raw_size;
	char *buf = NULL, *tmp;
	void *raw_data;
	int err = 0;

	if (btf->fd >= 0)
		return libbpf_err(-EEXIST);
	if (log_sz && !log_buf)
		return libbpf_err(-EINVAL);

	/* cache native raw data representation */
	raw_data = btf_get_raw_data(btf, &raw_size, false);
	if (!raw_data) {
		err = -ENOMEM;
		goto done;
	}
	btf->raw_size = raw_size;
	btf->raw_data = raw_data;

retry_load:
	/* if log_level is 0, we won't provide log_buf/log_size to the kernel
	 * initially. Only if BTF loading fails do we bump log_level to 1 and
	 * retry, using either an auto-allocated or the custom log_buf. This
	 * way a non-NULL custom log_buf provides a buffer just in case, while
	 * still hoping for a successful load with no need for a log.
	 */
	if (log_level) {
		/* if caller didn't provide custom log_buf, we'll keep
		 * allocating our own progressively bigger buffers for BTF
		 * verification log
		 */
		if (!log_buf) {
			buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2);
			tmp = realloc(buf, buf_sz);
			if (!tmp) {
				err = -ENOMEM;
				goto done;
			}
			buf = tmp;
			buf[0] = '\0';
		}

		opts.log_buf = log_buf ? log_buf : buf;
		opts.log_size = log_buf ? log_sz : buf_sz;
		opts.log_level = log_level;
	}

	opts.token_fd = token_fd;
	if (token_fd)
		opts.btf_flags |= BPF_F_TOKEN_FD;

	btf->fd = bpf_btf_load(raw_data, raw_size, &opts);
	if (btf->fd < 0) {
		/* time to turn on verbose mode and try again */
		if (log_level == 0) {
			log_level = 1;
			goto retry_load;
		}
		/* only retry if caller didn't provide custom log_buf, but
		 * make sure we can never overflow buf_sz
		 */
		if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2)
			goto retry_load;

		err = -errno;
		pr_warn("BTF loading error: %d\n", err);
		/* don't print out contents of custom log_buf */
		if (!log_buf && buf[0])
			pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf);
	}

done:
	free(buf);
	return libbpf_err(err);
}

int btf__load_into_kernel(struct btf *btf)
{
	return btf_load_into_kernel(btf, NULL, 0, 0, 0);
}
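
/* Illustrative usage sketch (not part of libbpf): loading BTF into the
 * kernel and fetching the resulting FD, e.g. to reference BTF-described
 * key/value types when creating maps:
 *
 *	err = btf__load_into_kernel(btf);
 *	if (err)
 *		return err;
 *	btf_fd = btf__fd(btf);
 */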

int btf__fd(const struct btf *btf)
{
	return btf->fd;
}

void btf__set_fd(struct btf *btf, int fd)
{
	btf->fd = fd;
}

static const void *btf_strs_data(const struct btf *btf)
{
	return btf->strs_data ? btf->strs_data : strset__data(btf->strs_set);
}

static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian)
{
	struct btf_header *hdr = btf->hdr;
	struct btf_type *t;
	void *data, *p;
	__u32 data_sz;
	int i;

	data = swap_endian ? btf->raw_data_swapped : btf->raw_data;
	if (data) {
		*size = btf->raw_size;
		return data;
	}

	data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len;
	data = calloc(1, data_sz);
	if (!data)
		return NULL;
	p = data;

	memcpy(p, hdr, hdr->hdr_len);
	if (swap_endian)
		btf_bswap_hdr(p);
	p += hdr->hdr_len;

	memcpy(p, btf->types_data, hdr->type_len);
	if (swap_endian) {
		for (i = 0; i < btf->nr_types; i++) {
			t = p + btf->type_offs[i];
			/* btf_bswap_type_rest() relies on native t->info, so
			 * we swap base type info after we swapped all the
			 * additional information
			 */
			if (btf_bswap_type_rest(t))
				goto err_out;
			btf_bswap_type_base(t);
		}
	}
	p += hdr->type_len;

	memcpy(p, btf_strs_data(btf), hdr->str_len);
	p += hdr->str_len;

	*size = data_sz;
	return data;
err_out:
	free(data);
	return NULL;
}

const void *btf__raw_data(const struct btf *btf_ro, __u32 *size)
{
	struct btf *btf = (struct btf *)btf_ro;
	__u32 data_sz;
	void *data;

	data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian);
	if (!data)
		return errno = ENOMEM, NULL;

	btf->raw_size = data_sz;
	if (btf->swapped_endian)
		btf->raw_data_swapped = data;
	else
		btf->raw_data = data;
	*size = data_sz;
	return data;
}

__attribute__((alias("btf__raw_data")))
const void *btf__get_raw_data(const struct btf *btf, __u32 *size);

const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
{
	if (offset < btf->start_str_off)
		return btf__str_by_offset(btf->base_btf, offset);
	else if (offset - btf->start_str_off < btf->hdr->str_len)
		return btf_strs_data(btf) + (offset - btf->start_str_off);
	else
		return errno = EINVAL, NULL;
}

const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
{
	return btf__str_by_offset(btf, offset);
}

struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
{
	struct bpf_btf_info btf_info;
	__u32 len = sizeof(btf_info);
	__u32 last_size;
	struct btf *btf;
	void *ptr;
	int err;

	/* we won't know btf_size until we call bpf_btf_get_info_by_fd(), so
	 * let's start with a sane default (4KiB) and resize it only if
	 * bpf_btf_get_info_by_fd() needs a bigger buffer.
	 */
	last_size = 4096;
	ptr = malloc(last_size);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	memset(&btf_info, 0, sizeof(btf_info));
	btf_info.btf = ptr_to_u64(ptr);
	btf_info.btf_size = last_size;
	err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);

	if (!err && btf_info.btf_size > last_size) {
		void *temp_ptr;

		last_size = btf_info.btf_size;
		temp_ptr = realloc(ptr, last_size);
		if (!temp_ptr) {
			btf = ERR_PTR(-ENOMEM);
			goto exit_free;
		}
		ptr = temp_ptr;

		len = sizeof(btf_info);
		memset(&btf_info, 0, sizeof(btf_info));
		btf_info.btf = ptr_to_u64(ptr);
		btf_info.btf_size = last_size;

		err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
	}

	if (err || btf_info.btf_size > last_size) {
		btf = err ? ERR_PTR(-errno) : ERR_PTR(-E2BIG);
		goto exit_free;
	}

	btf = btf_new(ptr, btf_info.btf_size, base_btf);

exit_free:
	free(ptr);
	return btf;
}

struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
{
	struct btf *btf;
	int btf_fd;

	btf_fd = bpf_btf_get_fd_by_id(id);
	if (btf_fd < 0)
		return libbpf_err_ptr(-errno);

	btf = btf_get_from_fd(btf_fd, base_btf);
	close(btf_fd);

	return libbpf_ptr(btf);
}

struct btf *btf__load_from_kernel_by_id(__u32 id)
{
	return btf__load_from_kernel_by_id_split(id, NULL);
}
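
/* Illustrative usage sketch (not part of libbpf): iterating over all BTF
 * objects currently loaded in the kernel by walking kernel-assigned IDs:
 *
 *	__u32 id = 0;
 *
 *	while (!bpf_btf_get_next_id(id, &id)) {
 *		struct btf *btf = btf__load_from_kernel_by_id(id);
 *
 *		if (!btf)
 *			continue;
 *		// ... inspect types ...
 *		btf__free(btf);
 *	}
 */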

static void btf_invalidate_raw_data(struct btf *btf)
{
	if (btf->raw_data) {
		free(btf->raw_data);
		btf->raw_data = NULL;
	}
	if (btf->raw_data_swapped) {
		free(btf->raw_data_swapped);
		btf->raw_data_swapped = NULL;
	}
}

/* Ensure BTF is ready to be modified (by splitting it into three memory
 * regions for header, types, and strings). Also invalidate cached
 * raw_data, if any.
 */
static int btf_ensure_modifiable(struct btf *btf)
{
	void *hdr, *types;
	struct strset *set = NULL;
	int err = -ENOMEM;

	if (btf_is_modifiable(btf)) {
		/* any BTF modification invalidates raw_data */
		btf_invalidate_raw_data(btf);
		return 0;
	}

	/* split raw data into three memory regions */
	hdr = malloc(btf->hdr->hdr_len);
	types = malloc(btf->hdr->type_len);
	if (!hdr || !types)
		goto err_out;

	memcpy(hdr, btf->hdr, btf->hdr->hdr_len);
	memcpy(types, btf->types_data, btf->hdr->type_len);

	/* build lookup index for all strings */
	set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr->str_len);
	if (IS_ERR(set)) {
		err = PTR_ERR(set);
		goto err_out;
	}

	/* only when everything was successful, update internal state */
	btf->hdr = hdr;
	btf->types_data = types;
	btf->types_data_cap = btf->hdr->type_len;
	btf->strs_data = NULL;
	btf->strs_set = set;
	/* if BTF was created from scratch, all strings are guaranteed to be
	 * unique and deduplicated
	 */
	if (btf->hdr->str_len == 0)
		btf->strs_deduped = true;
	if (!btf->base_btf && btf->hdr->str_len == 1)
		btf->strs_deduped = true;

	/* invalidate raw_data representation */
	btf_invalidate_raw_data(btf);

	return 0;

err_out:
	strset__free(set);
	free(hdr);
	free(types);
	return err;
}

/* Find an offset in BTF string section that corresponds to a given string *s*.
 * Returns:
 *   - >0 offset into string section, if string is found;
 *   - -ENOENT, if string is not in the string section;
 *   - <0, on any other error.
 */
int btf__find_str(struct btf *btf, const char *s)
{
	int off;

	if (btf->base_btf) {
		off = btf__find_str(btf->base_btf, s);
		if (off != -ENOENT)
			return off;
	}

	/* BTF needs to be in a modifiable state to build string lookup index */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	off = strset__find_str(btf->strs_set, s);
	if (off < 0)
		return libbpf_err(off);

	return btf->start_str_off + off;
}

/* Add a string s to the BTF string section.
 * Returns:
 *   - > 0 offset into string section, on success;
 *   - < 0, on error.
 */
int btf__add_str(struct btf *btf, const char *s)
{
	int off;

	if (btf->base_btf) {
		off = btf__find_str(btf->base_btf, s);
		if (off != -ENOENT)
			return off;
	}

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	off = strset__add_str(btf->strs_set, s);
	if (off < 0)
		return libbpf_err(off);

	btf->hdr->str_len = strset__data_size(btf->strs_set);

	return btf->start_str_off + off;
}
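
/* Illustrative sketch (not part of libbpf): strings are interned, so adding
 * the same string twice returns the same offset:
 *
 *	int off1 = btf__add_str(btf, "task_struct");
 *	int off2 = btf__add_str(btf, "task_struct");
 *	// on success, off1 == off2 and both are > 0
 */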

static void *btf_add_type_mem(struct btf *btf, size_t add_sz)
{
	return libbpf_add_mem(&btf->types_data, &btf->types_data_cap, 1,
			      btf->hdr->type_len, UINT_MAX, add_sz);
}

static void btf_type_inc_vlen(struct btf_type *t)
{
	t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t));
}

static int btf_commit_type(struct btf *btf, int data_sz)
{
	int err;

	err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
	if (err)
		return libbpf_err(err);

	btf->hdr->type_len += data_sz;
	btf->hdr->str_off += data_sz;
	btf->nr_types++;
	return btf->start_id + btf->nr_types - 1;
}

struct btf_pipe {
	const struct btf *src;
	struct btf *dst;
	struct hashmap *str_off_map; /* map string offsets from src to dst */
};

static int btf_rewrite_str(__u32 *str_off, void *ctx)
{
	struct btf_pipe *p = ctx;
	long mapped_off;
	int off, err;

	if (!*str_off) /* nothing to do for empty strings */
		return 0;

	if (p->str_off_map &&
	    hashmap__find(p->str_off_map, *str_off, &mapped_off)) {
		*str_off = mapped_off;
		return 0;
	}

	off = btf__add_str(p->dst, btf__str_by_offset(p->src, *str_off));
	if (off < 0)
		return off;

	/* Remember the string mapping from src to dst; it avoids
	 * performing expensive string comparisons on subsequent lookups.
	 */
	if (p->str_off_map) {
		err = hashmap__append(p->str_off_map, *str_off, off);
		if (err)
			return err;
	}

	*str_off = off;
	return 0;
}

int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
{
	struct btf_pipe p = { .src = src_btf, .dst = btf };
	struct btf_type *t;
	int sz, err;

	sz = btf_type_size(src_type);
	if (sz < 0)
		return libbpf_err(sz);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	memcpy(t, src_type, sz);

	err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
	if (err)
		return libbpf_err(err);

	return btf_commit_type(btf, sz);
}

static int btf_rewrite_type_ids(__u32 *type_id, void *ctx)
{
	struct btf *btf = ctx;

	if (!*type_id) /* nothing to do for VOID references */
		return 0;

	/* we haven't updated btf's type count yet, so
	 * btf->start_id + btf->nr_types - 1 is the type ID offset we should
	 * add to all newly added BTF types
	 */
	*type_id += btf->start_id + btf->nr_types - 1;
	return 0;
}

static size_t btf_dedup_identity_hash_fn(long key, void *ctx);
static bool btf_dedup_equal_fn(long k1, long k2, void *ctx);

int btf__add_btf(struct btf *btf, const struct btf *src_btf)
{
	struct btf_pipe p = { .src = src_btf, .dst = btf };
	int data_sz, sz, cnt, i, err, old_strs_len;
	__u32 *off;
	void *t;

	/* appending split BTF isn't supported yet */
	if (src_btf->base_btf)
		return libbpf_err(-ENOTSUP);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	/* remember original strings section size if we have to roll back
	 * partial strings section changes
	 */
	old_strs_len = btf->hdr->str_len;

	data_sz = src_btf->hdr->type_len;
	cnt = btf__type_cnt(src_btf) - 1;

	/* pre-allocate enough memory for new types */
	t = btf_add_type_mem(btf, data_sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* pre-allocate enough memory for type offset index for new types */
	off = btf_add_type_offs_mem(btf, cnt);
	if (!off)
		return libbpf_err(-ENOMEM);

	/* Map the string offsets from src_btf to the offsets from btf to improve performance */
	p.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
	if (IS_ERR(p.str_off_map))
		return libbpf_err(-ENOMEM);

	/* bulk copy types data for all types from src_btf */
	memcpy(t, src_btf->types_data, data_sz);

	for (i = 0; i < cnt; i++) {
		sz = btf_type_size(t);
		if (sz < 0) {
			/* unlikely, has to be corrupted src_btf */
			err = sz;
			goto err_out;
		}

		/* fill out type ID to type offset mapping for lookups by type ID */
		*off = t - btf->types_data;

		/* add, dedup, and remap strings referenced by this BTF type */
		err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
		if (err)
			goto err_out;

		/* remap all type IDs referenced from this BTF type */
		err = btf_type_visit_type_ids(t, btf_rewrite_type_ids, btf);
		if (err)
			goto err_out;

		/* go to next type data and type offset index entry */
		t += sz;
		off++;
	}

	/* Up until now any of the copied type data was effectively invisible,
	 * so if we exited early before this point due to error, BTF would be
	 * effectively unmodified. There would be extra internal memory
	 * pre-allocated, but it would not be available for querying. But now
	 * that we've copied and rewritten all the data successfully, we can
	 * update type count and various internal offsets and sizes to
	 * "commit" the changes and make them visible to the outside world.
	 */
	btf->hdr->type_len += data_sz;
	btf->hdr->str_off += data_sz;
	btf->nr_types += cnt;

	hashmap__free(p.str_off_map);

	/* return type ID of the first added BTF type */
	return btf->start_id + btf->nr_types - cnt;
err_out:
	/* zero out preallocated memory as if it was just allocated with
	 * libbpf_add_mem()
	 */
	memset(btf->types_data + btf->hdr->type_len, 0, data_sz);
	memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len);

	/* and now restore original strings section size; types data size
	 * wasn't modified, so doesn't need restoring, see big comment above
	 */
	btf->hdr->str_len = old_strs_len;

	hashmap__free(p.str_off_map);

	return libbpf_err(err);
}

/*
 * Append new BTF_KIND_INT type with:
 *   - *name* - non-empty, non-NULL type name;
 *   - *byte_sz* - power-of-2 (1, 2, 4, ..) size of the type, in bytes;
 *   - *encoding* - a combination of BTF_INT_SIGNED, BTF_INT_CHAR, BTF_INT_BOOL.
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding)
{
	struct btf_type *t;
	int sz, name_off;

	/* non-empty name */
	if (!name || !name[0])
		return libbpf_err(-EINVAL);
	/* byte_sz must be power of 2 */
	if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16)
		return libbpf_err(-EINVAL);
	if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL))
		return libbpf_err(-EINVAL);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type) + sizeof(int);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* if something goes wrong later, we might end up with an extra string,
	 * but that shouldn't be a problem, because the BTF can't be completely
	 * constructed anyway and will most probably just be discarded
	 */
1955 	name_off = btf__add_str(btf, name);
1956 	if (name_off < 0)
1957 		return name_off;
1958 
1959 	t->name_off = name_off;
1960 	t->info = btf_type_info(BTF_KIND_INT, 0, 0);
1961 	t->size = byte_sz;
1962 	/* set INT info, we don't allow setting legacy bit offset/size */
1963 	*(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8);
1964 
1965 	return btf_commit_type(btf, sz);
1966 }
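
/* Usage sketch (illustrative): appending a signed 4-byte "int" type to a
 * freshly created, empty BTF object, in which case the returned type ID
 * would be 1:
 *
 *	struct btf *btf = btf__new_empty();
 *	int int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
 */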
1967 
1968 /*
1969  * Append new BTF_KIND_FLOAT type with:
1970  *   - *name* - non-empty, non-NULL type name;
1971  *   - *sz* - size of the type, in bytes;
1972  * Returns:
1973  *   - >0, type ID of newly added BTF type;
1974  *   - <0, on error.
1975  */
1976 int btf__add_float(struct btf *btf, const char *name, size_t byte_sz)
1977 {
1978 	struct btf_type *t;
1979 	int sz, name_off;
1980 
1981 	/* non-empty name */
1982 	if (!name || !name[0])
1983 		return libbpf_err(-EINVAL);
1984 
1985 	/* byte_sz must be one of the explicitly allowed values */
1986 	if (byte_sz != 2 && byte_sz != 4 && byte_sz != 8 && byte_sz != 12 &&
1987 	    byte_sz != 16)
1988 		return libbpf_err(-EINVAL);
1989 
1990 	if (btf_ensure_modifiable(btf))
1991 		return libbpf_err(-ENOMEM);
1992 
1993 	sz = sizeof(struct btf_type);
1994 	t = btf_add_type_mem(btf, sz);
1995 	if (!t)
1996 		return libbpf_err(-ENOMEM);
1997 
1998 	name_off = btf__add_str(btf, name);
1999 	if (name_off < 0)
2000 		return name_off;
2001 
2002 	t->name_off = name_off;
2003 	t->info = btf_type_info(BTF_KIND_FLOAT, 0, 0);
2004 	t->size = byte_sz;
2005 
2006 	return btf_commit_type(btf, sz);
2007 }
2008 
2009 /* it's completely legal to append BTF types with type IDs pointing forward to
2010  * types that haven't been appended yet, so we only make sure that the ID looks
2011  * sane; we can't guarantee that the ID will always be valid
2012  */
2013 static int validate_type_id(int id)
2014 {
2015 	if (id < 0 || id > BTF_MAX_NR_TYPES)
2016 		return -EINVAL;
2017 	return 0;
2018 }
2019 
2020 /* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT */
2021 static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id)
2022 {
2023 	struct btf_type *t;
2024 	int sz, name_off = 0;
2025 
2026 	if (validate_type_id(ref_type_id))
2027 		return libbpf_err(-EINVAL);
2028 
2029 	if (btf_ensure_modifiable(btf))
2030 		return libbpf_err(-ENOMEM);
2031 
2032 	sz = sizeof(struct btf_type);
2033 	t = btf_add_type_mem(btf, sz);
2034 	if (!t)
2035 		return libbpf_err(-ENOMEM);
2036 
2037 	if (name && name[0]) {
2038 		name_off = btf__add_str(btf, name);
2039 		if (name_off < 0)
2040 			return name_off;
2041 	}
2042 
2043 	t->name_off = name_off;
2044 	t->info = btf_type_info(kind, 0, 0);
2045 	t->type = ref_type_id;
2046 
2047 	return btf_commit_type(btf, sz);
2048 }
2049 
2050 /*
2051  * Append new BTF_KIND_PTR type with:
2052  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2053  * Returns:
2054  *   - >0, type ID of newly added BTF type;
2055  *   - <0, on error.
2056  */
2057 int btf__add_ptr(struct btf *btf, int ref_type_id)
2058 {
2059 	return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id);
2060 }
2061 
2062 /*
2063  * Append new BTF_KIND_ARRAY type with:
2064  *   - *index_type_id* - type ID of the type describing array index;
2065  *   - *elem_type_id* - type ID of the type describing array element;
2066  *   - *nr_elems* - the number of elements in the array;
2067  * Returns:
2068  *   - >0, type ID of newly added BTF type;
2069  *   - <0, on error.
2070  */
2071 int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems)
2072 {
2073 	struct btf_type *t;
2074 	struct btf_array *a;
2075 	int sz;
2076 
2077 	if (validate_type_id(index_type_id) || validate_type_id(elem_type_id))
2078 		return libbpf_err(-EINVAL);
2079 
2080 	if (btf_ensure_modifiable(btf))
2081 		return libbpf_err(-ENOMEM);
2082 
2083 	sz = sizeof(struct btf_type) + sizeof(struct btf_array);
2084 	t = btf_add_type_mem(btf, sz);
2085 	if (!t)
2086 		return libbpf_err(-ENOMEM);
2087 
2088 	t->name_off = 0;
2089 	t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0);
2090 	t->size = 0;
2091 
2092 	a = btf_array(t);
2093 	a->type = elem_type_id;
2094 	a->index_type = index_type_id;
2095 	a->nelems = nr_elems;
2096 
2097 	return btf_commit_type(btf, sz);
2098 }
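
/* Usage sketch (illustrative, assumes int_id from an earlier btf__add_int()
 * call): an "int[10]" array type, reusing int_id for both the index and the
 * element type:
 *
 *	int arr_id = btf__add_array(btf, int_id, int_id, 10);
 */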
2099 
2100 /* generic STRUCT/UNION append function */
2101 static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz)
2102 {
2103 	struct btf_type *t;
2104 	int sz, name_off = 0;
2105 
2106 	if (btf_ensure_modifiable(btf))
2107 		return libbpf_err(-ENOMEM);
2108 
2109 	sz = sizeof(struct btf_type);
2110 	t = btf_add_type_mem(btf, sz);
2111 	if (!t)
2112 		return libbpf_err(-ENOMEM);
2113 
2114 	if (name && name[0]) {
2115 		name_off = btf__add_str(btf, name);
2116 		if (name_off < 0)
2117 			return name_off;
2118 	}
2119 
2120 	/* start out with vlen=0 and no kflag; this will be adjusted when
2121 	 * adding each member
2122 	 */
2123 	t->name_off = name_off;
2124 	t->info = btf_type_info(kind, 0, 0);
2125 	t->size = bytes_sz;
2126 
2127 	return btf_commit_type(btf, sz);
2128 }
2129 
2130 /*
2131  * Append new BTF_KIND_STRUCT type with:
2132  *   - *name* - name of the struct, can be NULL or empty for anonymous structs;
2133  *   - *byte_sz* - size of the struct, in bytes;
2134  *
2135  * Struct initially has no fields in it. Fields can be added by
2136  * btf__add_field() right after btf__add_struct() succeeds.
2137  *
2138  * Returns:
2139  *   - >0, type ID of newly added BTF type;
2140  *   - <0, on error.
2141  */
2142 int btf__add_struct(struct btf *btf, const char *name, __u32 byte_sz)
2143 {
2144 	return btf_add_composite(btf, BTF_KIND_STRUCT, name, byte_sz);
2145 }
2146 
2147 /*
2148  * Append new BTF_KIND_UNION type with:
2149  *   - *name* - name of the union, can be NULL or empty for anonymous union;
2150  *   - *byte_sz* - size of the union, in bytes;
2151  *
2152  * Union initially has no fields in it. Fields can be added by
2153  * btf__add_field() right after btf__add_union() succeeds. All fields
2154  * should have *bit_offset* of 0.
2155  *
2156  * Returns:
2157  *   - >0, type ID of newly added BTF type;
2158  *   - <0, on error.
2159  */
2160 int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz)
2161 {
2162 	return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz);
2163 }
2164 
2165 static struct btf_type *btf_last_type(struct btf *btf)
2166 {
2167 	return btf_type_by_id(btf, btf__type_cnt(btf) - 1);
2168 }
2169 
2170 /*
2171  * Append new field for the current STRUCT/UNION type with:
2172  *   - *name* - name of the field, can be NULL or empty for anonymous field;
2173  *   - *type_id* - type ID for the type describing field type;
2174  *   - *bit_offset* - bit offset of the start of the field within struct/union;
2175  *   - *bit_size* - bit size of a bitfield, 0 for non-bitfield fields;
2176  * Returns:
2177  *   -  0, on success;
2178  *   - <0, on error.
2179  */
2180 int btf__add_field(struct btf *btf, const char *name, int type_id,
2181 		   __u32 bit_offset, __u32 bit_size)
2182 {
2183 	struct btf_type *t;
2184 	struct btf_member *m;
2185 	bool is_bitfield;
2186 	int sz, name_off = 0;
2187 
2188 	/* last type should be union/struct */
2189 	if (btf->nr_types == 0)
2190 		return libbpf_err(-EINVAL);
2191 	t = btf_last_type(btf);
2192 	if (!btf_is_composite(t))
2193 		return libbpf_err(-EINVAL);
2194 
2195 	if (validate_type_id(type_id))
2196 		return libbpf_err(-EINVAL);
2197 	/* best-effort bit field offset/size enforcement */
2198 	is_bitfield = bit_size || (bit_offset % 8 != 0);
2199 	if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff))
2200 		return libbpf_err(-EINVAL);
2201 
2202 	/* only offset 0 is allowed for unions */
2203 	if (btf_is_union(t) && bit_offset)
2204 		return libbpf_err(-EINVAL);
2205 
2206 	/* decompose and invalidate raw data */
2207 	if (btf_ensure_modifiable(btf))
2208 		return libbpf_err(-ENOMEM);
2209 
2210 	sz = sizeof(struct btf_member);
2211 	m = btf_add_type_mem(btf, sz);
2212 	if (!m)
2213 		return libbpf_err(-ENOMEM);
2214 
2215 	if (name && name[0]) {
2216 		name_off = btf__add_str(btf, name);
2217 		if (name_off < 0)
2218 			return name_off;
2219 	}
2220 
2221 	m->name_off = name_off;
2222 	m->type = type_id;
2223 	m->offset = bit_offset | (bit_size << 24);
2224 
2225 	/* btf_add_type_mem can invalidate t pointer */
2226 	t = btf_last_type(btf);
2227 	/* update parent type's vlen and kflag */
2228 	t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t));
2229 
2230 	btf->hdr->type_len += sz;
2231 	btf->hdr->str_off += sz;
2232 	return 0;
2233 }
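
/* Usage sketch (illustrative, assumes int_id from an earlier btf__add_int()
 * call): building "struct pair { int x; int y; };", with offsets specified
 * in bits and bit_size of 0 for non-bitfield members:
 *
 *	int s_id = btf__add_struct(btf, "pair", 8);
 *	btf__add_field(btf, "x", int_id, 0, 0);
 *	btf__add_field(btf, "y", int_id, 32, 0);
 */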
2234 
2235 static int btf_add_enum_common(struct btf *btf, const char *name, __u32 byte_sz,
2236 			       bool is_signed, __u8 kind)
2237 {
2238 	struct btf_type *t;
2239 	int sz, name_off = 0;
2240 
2241 	/* byte_sz must be power of 2 */
2242 	if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8)
2243 		return libbpf_err(-EINVAL);
2244 
2245 	if (btf_ensure_modifiable(btf))
2246 		return libbpf_err(-ENOMEM);
2247 
2248 	sz = sizeof(struct btf_type);
2249 	t = btf_add_type_mem(btf, sz);
2250 	if (!t)
2251 		return libbpf_err(-ENOMEM);
2252 
2253 	if (name && name[0]) {
2254 		name_off = btf__add_str(btf, name);
2255 		if (name_off < 0)
2256 			return name_off;
2257 	}
2258 
2259 	/* start out with vlen=0; it will be adjusted when adding enum values */
2260 	t->name_off = name_off;
2261 	t->info = btf_type_info(kind, 0, is_signed);
2262 	t->size = byte_sz;
2263 
2264 	return btf_commit_type(btf, sz);
2265 }
2266 
2267 /*
2268  * Append new BTF_KIND_ENUM type with:
2269  *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
2270  *   - *byte_sz* - size of the enum, in bytes.
2271  *
2272  * Enum initially has no enum values in it (and corresponds to enum forward
2273  * declaration). Enumerator values can be added by btf__add_enum_value()
2274  * immediately after btf__add_enum() succeeds.
2275  *
2276  * Returns:
2277  *   - >0, type ID of newly added BTF type;
2278  *   - <0, on error.
2279  */
2280 int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
2281 {
2282 	/*
2283 	 * set the signedness to be unsigned; it will change to signed
2284 	 * if any later enumerator is negative.
2285 	 */
2286 	return btf_add_enum_common(btf, name, byte_sz, false, BTF_KIND_ENUM);
2287 }
2288 
2289 /*
2290  * Append new enum value for the current ENUM type with:
2291  *   - *name* - name of the enumerator value, can't be NULL or empty;
2292  *   - *value* - integer value corresponding to enum value *name*;
2293  * Returns:
2294  *   -  0, on success;
2295  *   - <0, on error.
2296  */
2297 int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
2298 {
2299 	struct btf_type *t;
2300 	struct btf_enum *v;
2301 	int sz, name_off;
2302 
2303 	/* last type should be BTF_KIND_ENUM */
2304 	if (btf->nr_types == 0)
2305 		return libbpf_err(-EINVAL);
2306 	t = btf_last_type(btf);
2307 	if (!btf_is_enum(t))
2308 		return libbpf_err(-EINVAL);
2309 
2310 	/* non-empty name */
2311 	if (!name || !name[0])
2312 		return libbpf_err(-EINVAL);
2313 	if (value < INT_MIN || value > UINT_MAX)
2314 		return libbpf_err(-E2BIG);
2315 
2316 	/* decompose and invalidate raw data */
2317 	if (btf_ensure_modifiable(btf))
2318 		return libbpf_err(-ENOMEM);
2319 
2320 	sz = sizeof(struct btf_enum);
2321 	v = btf_add_type_mem(btf, sz);
2322 	if (!v)
2323 		return libbpf_err(-ENOMEM);
2324 
2325 	name_off = btf__add_str(btf, name);
2326 	if (name_off < 0)
2327 		return name_off;
2328 
2329 	v->name_off = name_off;
2330 	v->val = value;
2331 
2332 	/* update parent type's vlen */
2333 	t = btf_last_type(btf);
2334 	btf_type_inc_vlen(t);
2335 
2336 	/* if negative value, set signedness to signed */
2337 	if (value < 0)
2338 		t->info = btf_type_info(btf_kind(t), btf_vlen(t), true);
2339 
2340 	btf->hdr->type_len += sz;
2341 	btf->hdr->str_off += sz;
2342 	return 0;
2343 }
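
/* Usage sketch (illustrative): a 4-byte "enum state { RUNNING, STOPPED };"
 * is built as an empty enum followed by one call per enumerator:
 *
 *	int e_id = btf__add_enum(btf, "state", 4);
 *	btf__add_enum_value(btf, "RUNNING", 0);
 *	btf__add_enum_value(btf, "STOPPED", 1);
 */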
2344 
2345 /*
2346  * Append new BTF_KIND_ENUM64 type with:
2347  *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
2348  *   - *byte_sz* - size of the enum, in bytes.
2349  *   - *is_signed* - whether the enum values are signed or not;
2350  *
2351  * Enum initially has no enum values in it (and corresponds to enum forward
2352  * declaration). Enumerator values can be added by btf__add_enum64_value()
2353  * immediately after btf__add_enum64() succeeds.
2354  *
2355  * Returns:
2356  *   - >0, type ID of newly added BTF type;
2357  *   - <0, on error.
2358  */
2359 int btf__add_enum64(struct btf *btf, const char *name, __u32 byte_sz,
2360 		    bool is_signed)
2361 {
2362 	return btf_add_enum_common(btf, name, byte_sz, is_signed,
2363 				   BTF_KIND_ENUM64);
2364 }
2365 
2366 /*
2367  * Append new enum value for the current ENUM64 type with:
2368  *   - *name* - name of the enumerator value, can't be NULL or empty;
2369  *   - *value* - integer value corresponding to enum value *name*;
2370  * Returns:
2371  *   -  0, on success;
2372  *   - <0, on error.
2373  */
2374 int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value)
2375 {
2376 	struct btf_enum64 *v;
2377 	struct btf_type *t;
2378 	int sz, name_off;
2379 
2380 	/* last type should be BTF_KIND_ENUM64 */
2381 	if (btf->nr_types == 0)
2382 		return libbpf_err(-EINVAL);
2383 	t = btf_last_type(btf);
2384 	if (!btf_is_enum64(t))
2385 		return libbpf_err(-EINVAL);
2386 
2387 	/* non-empty name */
2388 	if (!name || !name[0])
2389 		return libbpf_err(-EINVAL);
2390 
2391 	/* decompose and invalidate raw data */
2392 	if (btf_ensure_modifiable(btf))
2393 		return libbpf_err(-ENOMEM);
2394 
2395 	sz = sizeof(struct btf_enum64);
2396 	v = btf_add_type_mem(btf, sz);
2397 	if (!v)
2398 		return libbpf_err(-ENOMEM);
2399 
2400 	name_off = btf__add_str(btf, name);
2401 	if (name_off < 0)
2402 		return name_off;
2403 
2404 	v->name_off = name_off;
2405 	v->val_lo32 = (__u32)value;
2406 	v->val_hi32 = value >> 32;
2407 
2408 	/* update parent type's vlen */
2409 	t = btf_last_type(btf);
2410 	btf_type_inc_vlen(t);
2411 
2412 	btf->hdr->type_len += sz;
2413 	btf->hdr->str_off += sz;
2414 	return 0;
2415 }
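
/* Usage sketch (illustrative): an 8-byte unsigned enum64 holding a value
 * that doesn't fit into 32 bits:
 *
 *	int e64_id = btf__add_enum64(btf, "big_flags", 8, false);
 *	btf__add_enum64_value(btf, "FLAG_HI", 1ULL << 40);
 */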
2416 
2417 /*
2418  * Append new BTF_KIND_FWD type with:
2419  *   - *name*, non-empty/non-NULL name;
2420  *   - *fwd_kind*, kind of forward declaration, one of BTF_FWD_STRUCT,
2421  *     BTF_FWD_UNION, or BTF_FWD_ENUM;
2422  * Returns:
2423  *   - >0, type ID of newly added BTF type;
2424  *   - <0, on error.
2425  */
2426 int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind)
2427 {
2428 	if (!name || !name[0])
2429 		return libbpf_err(-EINVAL);
2430 
2431 	switch (fwd_kind) {
2432 	case BTF_FWD_STRUCT:
2433 	case BTF_FWD_UNION: {
2434 		struct btf_type *t;
2435 		int id;
2436 
2437 		id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0);
2438 		if (id <= 0)
2439 			return id;
2440 		t = btf_type_by_id(btf, id);
2441 		t->info = btf_type_info(BTF_KIND_FWD, 0, fwd_kind == BTF_FWD_UNION);
2442 		return id;
2443 	}
2444 	case BTF_FWD_ENUM:
2445 		/* enum forward in BTF currently is just an enum with no enum
2446 		 * values; we also assume a standard 4-byte size for it
2447 		 */
2448 		return btf__add_enum(btf, name, sizeof(int));
2449 	default:
2450 		return libbpf_err(-EINVAL);
2451 	}
2452 }
2453 
2454 /*
2455  * Append new BTF_KIND_TYPEDEF type with:
2456  *   - *name*, non-empty/non-NULL name;
2457  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2458  * Returns:
2459  *   - >0, type ID of newly added BTF type;
2460  *   - <0, on error.
2461  */
2462 int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id)
2463 {
2464 	if (!name || !name[0])
2465 		return libbpf_err(-EINVAL);
2466 
2467 	return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id);
2468 }
2469 
2470 /*
2471  * Append new BTF_KIND_VOLATILE type with:
2472  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2473  * Returns:
2474  *   - >0, type ID of newly added BTF type;
2475  *   - <0, on error.
2476  */
2477 int btf__add_volatile(struct btf *btf, int ref_type_id)
2478 {
2479 	return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id);
2480 }
2481 
2482 /*
2483  * Append new BTF_KIND_CONST type with:
2484  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2485  * Returns:
2486  *   - >0, type ID of newly added BTF type;
2487  *   - <0, on error.
2488  */
2489 int btf__add_const(struct btf *btf, int ref_type_id)
2490 {
2491 	return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id);
2492 }
2493 
2494 /*
2495  * Append new BTF_KIND_RESTRICT type with:
2496  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2497  * Returns:
2498  *   - >0, type ID of newly added BTF type;
2499  *   - <0, on error.
2500  */
2501 int btf__add_restrict(struct btf *btf, int ref_type_id)
2502 {
2503 	return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id);
2504 }
2505 
2506 /*
2507  * Append new BTF_KIND_TYPE_TAG type with:
2508  *   - *value*, non-empty/non-NULL tag value;
2509  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2510  * Returns:
2511  *   - >0, type ID of newly added BTF type;
2512  *   - <0, on error.
2513  */
2514 int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
2515 {
2516 	if (!value || !value[0])
2517 		return libbpf_err(-EINVAL);
2518 
2519 	return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id);
2520 }
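
/* Usage sketch (illustrative, assumes int_id from an earlier btf__add_int()
 * call): a type-tagged pointer to int is modeled as a PTR pointing at a
 * TYPE_TAG that wraps the int:
 *
 *	int tag_id = btf__add_type_tag(btf, "user", int_id);
 *	int ptr_id = btf__add_ptr(btf, tag_id);
 */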
2521 
2522 /*
2523  * Append new BTF_KIND_FUNC type with:
2524  *   - *name*, non-empty/non-NULL name;
2525  *   - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet;
2526  * Returns:
2527  *   - >0, type ID of newly added BTF type;
2528  *   - <0, on error.
2529  */
2530 int btf__add_func(struct btf *btf, const char *name,
2531 		  enum btf_func_linkage linkage, int proto_type_id)
2532 {
2533 	int id;
2534 
2535 	if (!name || !name[0])
2536 		return libbpf_err(-EINVAL);
2537 	if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL &&
2538 	    linkage != BTF_FUNC_EXTERN)
2539 		return libbpf_err(-EINVAL);
2540 
2541 	id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id);
2542 	if (id > 0) {
2543 		struct btf_type *t = btf_type_by_id(btf, id);
2544 
2545 		t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0);
2546 	}
2547 	return libbpf_err(id);
2548 }
2549 
2550 /*
2551  * Append new BTF_KIND_FUNC_PROTO with:
2552  *   - *ret_type_id* - type ID for the return result of a function.
2553  *
2554  * Function prototype initially has no arguments, but they can be added by
2555  * btf__add_func_param() one by one, immediately after
2556  * btf__add_func_proto() succeeds.
2557  *
2558  * Returns:
2559  *   - >0, type ID of newly added BTF type;
2560  *   - <0, on error.
2561  */
2562 int btf__add_func_proto(struct btf *btf, int ret_type_id)
2563 {
2564 	struct btf_type *t;
2565 	int sz;
2566 
2567 	if (validate_type_id(ret_type_id))
2568 		return libbpf_err(-EINVAL);
2569 
2570 	if (btf_ensure_modifiable(btf))
2571 		return libbpf_err(-ENOMEM);
2572 
2573 	sz = sizeof(struct btf_type);
2574 	t = btf_add_type_mem(btf, sz);
2575 	if (!t)
2576 		return libbpf_err(-ENOMEM);
2577 
2578 	/* start out with vlen=0; this will be adjusted when adding function
2579 	 * parameters, if necessary
2580 	 */
2581 	t->name_off = 0;
2582 	t->info = btf_type_info(BTF_KIND_FUNC_PROTO, 0, 0);
2583 	t->type = ret_type_id;
2584 
2585 	return btf_commit_type(btf, sz);
2586 }
2587 
2588 /*
2589  * Append new function parameter for current FUNC_PROTO type with:
2590  *   - *name* - parameter name, can be NULL or empty;
2591  *   - *type_id* - type ID describing the type of the parameter.
2592  * Returns:
2593  *   -  0, on success;
2594  *   - <0, on error.
2595  */
2596 int btf__add_func_param(struct btf *btf, const char *name, int type_id)
2597 {
2598 	struct btf_type *t;
2599 	struct btf_param *p;
2600 	int sz, name_off = 0;
2601 
2602 	if (validate_type_id(type_id))
2603 		return libbpf_err(-EINVAL);
2604 
2605 	/* last type should be BTF_KIND_FUNC_PROTO */
2606 	if (btf->nr_types == 0)
2607 		return libbpf_err(-EINVAL);
2608 	t = btf_last_type(btf);
2609 	if (!btf_is_func_proto(t))
2610 		return libbpf_err(-EINVAL);
2611 
2612 	/* decompose and invalidate raw data */
2613 	if (btf_ensure_modifiable(btf))
2614 		return libbpf_err(-ENOMEM);
2615 
2616 	sz = sizeof(struct btf_param);
2617 	p = btf_add_type_mem(btf, sz);
2618 	if (!p)
2619 		return libbpf_err(-ENOMEM);
2620 
2621 	if (name && name[0]) {
2622 		name_off = btf__add_str(btf, name);
2623 		if (name_off < 0)
2624 			return name_off;
2625 	}
2626 
2627 	p->name_off = name_off;
2628 	p->type = type_id;
2629 
2630 	/* update parent type's vlen */
2631 	t = btf_last_type(btf);
2632 	btf_type_inc_vlen(t);
2633 
2634 	btf->hdr->type_len += sz;
2635 	btf->hdr->str_off += sz;
2636 	return 0;
2637 }
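
/* Usage sketch (illustrative, assumes int_id from an earlier btf__add_int()
 * call): describing "int sum(int a, int b);" as a FUNC_PROTO plus a FUNC
 * referencing it:
 *
 *	int proto_id = btf__add_func_proto(btf, int_id);
 *	btf__add_func_param(btf, "a", int_id);
 *	btf__add_func_param(btf, "b", int_id);
 *	int func_id = btf__add_func(btf, "sum", BTF_FUNC_GLOBAL, proto_id);
 */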
2638 
2639 /*
2640  * Append new BTF_KIND_VAR type with:
2641  *   - *name* - non-empty/non-NULL name;
2642  *   - *linkage* - variable linkage, one of BTF_VAR_STATIC,
2643  *     BTF_VAR_GLOBAL_ALLOCATED, or BTF_VAR_GLOBAL_EXTERN;
2644  *   - *type_id* - type ID of the type describing the type of the variable.
2645  * Returns:
2646  *   - >0, type ID of newly added BTF type;
2647  *   - <0, on error.
2648  */
2649 int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id)
2650 {
2651 	struct btf_type *t;
2652 	struct btf_var *v;
2653 	int sz, name_off;
2654 
2655 	/* non-empty name */
2656 	if (!name || !name[0])
2657 		return libbpf_err(-EINVAL);
2658 	if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED &&
2659 	    linkage != BTF_VAR_GLOBAL_EXTERN)
2660 		return libbpf_err(-EINVAL);
2661 	if (validate_type_id(type_id))
2662 		return libbpf_err(-EINVAL);
2663 
2664 	/* deconstruct BTF, if necessary, and invalidate raw_data */
2665 	if (btf_ensure_modifiable(btf))
2666 		return libbpf_err(-ENOMEM);
2667 
2668 	sz = sizeof(struct btf_type) + sizeof(struct btf_var);
2669 	t = btf_add_type_mem(btf, sz);
2670 	if (!t)
2671 		return libbpf_err(-ENOMEM);
2672 
2673 	name_off = btf__add_str(btf, name);
2674 	if (name_off < 0)
2675 		return name_off;
2676 
2677 	t->name_off = name_off;
2678 	t->info = btf_type_info(BTF_KIND_VAR, 0, 0);
2679 	t->type = type_id;
2680 
2681 	v = btf_var(t);
2682 	v->linkage = linkage;
2683 
2684 	return btf_commit_type(btf, sz);
2685 }
2686 
2687 /*
2688  * Append new BTF_KIND_DATASEC type with:
2689  *   - *name* - non-empty/non-NULL name;
2690  *   - *byte_sz* - data section size, in bytes.
2691  *
2692  * Data section is initially empty. Variable info entries can be added with
2693  * btf__add_datasec_var_info() calls, after btf__add_datasec() succeeds.
2694  *
2695  * Returns:
2696  *   - >0, type ID of newly added BTF type;
2697  *   - <0, on error.
2698  */
2699 int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz)
2700 {
2701 	struct btf_type *t;
2702 	int sz, name_off;
2703 
2704 	/* non-empty name */
2705 	if (!name || !name[0])
2706 		return libbpf_err(-EINVAL);
2707 
2708 	if (btf_ensure_modifiable(btf))
2709 		return libbpf_err(-ENOMEM);
2710 
2711 	sz = sizeof(struct btf_type);
2712 	t = btf_add_type_mem(btf, sz);
2713 	if (!t)
2714 		return libbpf_err(-ENOMEM);
2715 
2716 	name_off = btf__add_str(btf, name);
2717 	if (name_off < 0)
2718 		return name_off;
2719 
2720 	/* start with vlen=0, which will be updated as var_secinfos are added */
2721 	t->name_off = name_off;
2722 	t->info = btf_type_info(BTF_KIND_DATASEC, 0, 0);
2723 	t->size = byte_sz;
2724 
2725 	return btf_commit_type(btf, sz);
2726 }
2727 
2728 /*
2729  * Append new data section variable information entry for current DATASEC type:
2730  *   - *var_type_id* - type ID, describing type of the variable;
2731  *   - *offset* - variable offset within data section, in bytes;
2732  *   - *byte_sz* - variable size, in bytes.
2733  *
2734  * Returns:
2735  *   -  0, on success;
2736  *   - <0, on error.
2737  */
2738 int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz)
2739 {
2740 	struct btf_type *t;
2741 	struct btf_var_secinfo *v;
2742 	int sz;
2743 
2744 	/* last type should be BTF_KIND_DATASEC */
2745 	if (btf->nr_types == 0)
2746 		return libbpf_err(-EINVAL);
2747 	t = btf_last_type(btf);
2748 	if (!btf_is_datasec(t))
2749 		return libbpf_err(-EINVAL);
2750 
2751 	if (validate_type_id(var_type_id))
2752 		return libbpf_err(-EINVAL);
2753 
2754 	/* decompose and invalidate raw data */
2755 	if (btf_ensure_modifiable(btf))
2756 		return libbpf_err(-ENOMEM);
2757 
2758 	sz = sizeof(struct btf_var_secinfo);
2759 	v = btf_add_type_mem(btf, sz);
2760 	if (!v)
2761 		return libbpf_err(-ENOMEM);
2762 
2763 	v->type = var_type_id;
2764 	v->offset = offset;
2765 	v->size = byte_sz;
2766 
2767 	/* update parent type's vlen */
2768 	t = btf_last_type(btf);
2769 	btf_type_inc_vlen(t);
2770 
2771 	btf->hdr->type_len += sz;
2772 	btf->hdr->str_off += sz;
2773 	return 0;
2774 }
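
/* Usage sketch (illustrative, assumes int_id from an earlier btf__add_int()
 * call): a ".data" section containing a single 4-byte global variable at
 * offset 0:
 *
 *	int var_id = btf__add_var(btf, "cnt", BTF_VAR_GLOBAL_ALLOCATED, int_id);
 *	int sec_id = btf__add_datasec(btf, ".data", 4);
 *	btf__add_datasec_var_info(btf, var_id, 0, 4);
 */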
2775 
2776 /*
2777  * Append new BTF_KIND_DECL_TAG type with:
2778  *   - *value* - non-empty/non-NULL string;
2779  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2780  *   - *component_idx* - -1 for tagging the referenced type, otherwise struct/union
2781  *     member or function argument index;
2782  * Returns:
2783  *   - >0, type ID of newly added BTF type;
2784  *   - <0, on error.
2785  */
2786 int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
2787 		 int component_idx)
2788 {
2789 	struct btf_type *t;
2790 	int sz, value_off;
2791 
2792 	if (!value || !value[0] || component_idx < -1)
2793 		return libbpf_err(-EINVAL);
2794 
2795 	if (validate_type_id(ref_type_id))
2796 		return libbpf_err(-EINVAL);
2797 
2798 	if (btf_ensure_modifiable(btf))
2799 		return libbpf_err(-ENOMEM);
2800 
2801 	sz = sizeof(struct btf_type) + sizeof(struct btf_decl_tag);
2802 	t = btf_add_type_mem(btf, sz);
2803 	if (!t)
2804 		return libbpf_err(-ENOMEM);
2805 
2806 	value_off = btf__add_str(btf, value);
2807 	if (value_off < 0)
2808 		return value_off;
2809 
2810 	t->name_off = value_off;
2811 	t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, false);
2812 	t->type = ref_type_id;
2813 	btf_decl_tag(t)->component_idx = component_idx;
2814 
2815 	return btf_commit_type(btf, sz);
2816 }
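
/* Usage sketch (illustrative, assumes s_id refers to a previously added
 * struct): tagging the struct itself vs. its first member:
 *
 *	btf__add_decl_tag(btf, "some_tag", s_id, -1);
 *	btf__add_decl_tag(btf, "member_tag", s_id, 0);
 */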
2817 
2818 struct btf_ext_sec_setup_param {
2819 	__u32 off;
2820 	__u32 len;
2821 	__u32 min_rec_size;
2822 	struct btf_ext_info *ext_info;
2823 	const char *desc;
2824 };
2825 
2826 static int btf_ext_setup_info(struct btf_ext *btf_ext,
2827 			      struct btf_ext_sec_setup_param *ext_sec)
2828 {
2829 	const struct btf_ext_info_sec *sinfo;
2830 	struct btf_ext_info *ext_info;
2831 	__u32 info_left, record_size;
2832 	size_t sec_cnt = 0;
2833 	/* The start of the info sec (including the __u32 record_size). */
2834 	void *info;
2835 
2836 	if (ext_sec->len == 0)
2837 		return 0;
2838 
2839 	if (ext_sec->off & 0x03) {
2840 		pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
2841 		     ext_sec->desc);
2842 		return -EINVAL;
2843 	}
2844 
2845 	info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
2846 	info_left = ext_sec->len;
2847 
2848 	if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
2849 		pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
2850 			 ext_sec->desc, ext_sec->off, ext_sec->len);
2851 		return -EINVAL;
2852 	}
2853 
2854 	/* At least a record size */
2855 	if (info_left < sizeof(__u32)) {
2856 		pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
2857 		return -EINVAL;
2858 	}
2859 
2860 	/* The record size needs to meet the minimum standard */
2861 	record_size = *(__u32 *)info;
2862 	if (record_size < ext_sec->min_rec_size ||
2863 	    record_size & 0x03) {
2864 		pr_debug("%s section in .BTF.ext has invalid record size %u\n",
2865 			 ext_sec->desc, record_size);
2866 		return -EINVAL;
2867 	}
2868 
2869 	sinfo = info + sizeof(__u32);
2870 	info_left -= sizeof(__u32);
2871 
2872 	/* If no records, return failure now so .BTF.ext won't be used. */
2873 	if (!info_left) {
2874 		pr_debug("%s section in .BTF.ext has no records", ext_sec->desc);
2875 		return -EINVAL;
2876 	}
2877 
2878 	while (info_left) {
2879 		unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
2880 		__u64 total_record_size;
2881 		__u32 num_records;
2882 
2883 		if (info_left < sec_hdrlen) {
2884 			pr_debug("%s section header is not found in .BTF.ext\n",
2885 			     ext_sec->desc);
2886 			return -EINVAL;
2887 		}
2888 
2889 		num_records = sinfo->num_info;
2890 		if (num_records == 0) {
2891 			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
2892 			     ext_sec->desc);
2893 			return -EINVAL;
2894 		}
2895 
2896 		total_record_size = sec_hdrlen + (__u64)num_records * record_size;
2897 		if (info_left < total_record_size) {
2898 			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
2899 			     ext_sec->desc);
2900 			return -EINVAL;
2901 		}
2902 
2903 		info_left -= total_record_size;
2904 		sinfo = (void *)sinfo + total_record_size;
2905 		sec_cnt++;
2906 	}
2907 
2908 	ext_info = ext_sec->ext_info;
2909 	ext_info->len = ext_sec->len - sizeof(__u32);
2910 	ext_info->rec_size = record_size;
2911 	ext_info->info = info + sizeof(__u32);
2912 	ext_info->sec_cnt = sec_cnt;
2913 
2914 	return 0;
2915 }
2916 
2917 static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
2918 {
2919 	struct btf_ext_sec_setup_param param = {
2920 		.off = btf_ext->hdr->func_info_off,
2921 		.len = btf_ext->hdr->func_info_len,
2922 		.min_rec_size = sizeof(struct bpf_func_info_min),
2923 		.ext_info = &btf_ext->func_info,
2924 		.desc = "func_info"
2925 	};
2926 
2927 	return btf_ext_setup_info(btf_ext, &param);
2928 }
2929 
2930 static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
2931 {
2932 	struct btf_ext_sec_setup_param param = {
2933 		.off = btf_ext->hdr->line_info_off,
2934 		.len = btf_ext->hdr->line_info_len,
2935 		.min_rec_size = sizeof(struct bpf_line_info_min),
2936 		.ext_info = &btf_ext->line_info,
2937 		.desc = "line_info",
2938 	};
2939 
2940 	return btf_ext_setup_info(btf_ext, &param);
2941 }
2942 
2943 static int btf_ext_setup_core_relos(struct btf_ext *btf_ext)
2944 {
2945 	struct btf_ext_sec_setup_param param = {
2946 		.off = btf_ext->hdr->core_relo_off,
2947 		.len = btf_ext->hdr->core_relo_len,
2948 		.min_rec_size = sizeof(struct bpf_core_relo),
2949 		.ext_info = &btf_ext->core_relo_info,
2950 		.desc = "core_relo",
2951 	};
2952 
2953 	return btf_ext_setup_info(btf_ext, &param);
2954 }
2955 
2956 static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
2957 {
2958 	const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
2959 
2960 	if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
2961 	    data_size < hdr->hdr_len) {
2962 		pr_debug("BTF.ext header not found");
2963 		return -EINVAL;
2964 	}
2965 
2966 	if (hdr->magic == bswap_16(BTF_MAGIC)) {
2967 		pr_warn("BTF.ext in non-native endianness is not supported\n");
2968 		return -ENOTSUP;
2969 	} else if (hdr->magic != BTF_MAGIC) {
2970 		pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
2971 		return -EINVAL;
2972 	}
2973 
2974 	if (hdr->version != BTF_VERSION) {
2975 		pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
2976 		return -ENOTSUP;
2977 	}
2978 
2979 	if (hdr->flags) {
2980 		pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
2981 		return -ENOTSUP;
2982 	}
2983 
2984 	if (data_size == hdr->hdr_len) {
2985 		pr_debug("BTF.ext has no data\n");
2986 		return -EINVAL;
2987 	}
2988 
2989 	return 0;
2990 }
2991 
2992 void btf_ext__free(struct btf_ext *btf_ext)
2993 {
2994 	if (IS_ERR_OR_NULL(btf_ext))
2995 		return;
2996 	free(btf_ext->func_info.sec_idxs);
2997 	free(btf_ext->line_info.sec_idxs);
2998 	free(btf_ext->core_relo_info.sec_idxs);
2999 	free(btf_ext->data);
3000 	free(btf_ext);
3001 }
3002 
3003 struct btf_ext *btf_ext__new(const __u8 *data, __u32 size)
3004 {
3005 	struct btf_ext *btf_ext;
3006 	int err;
3007 
3008 	btf_ext = calloc(1, sizeof(struct btf_ext));
3009 	if (!btf_ext)
3010 		return libbpf_err_ptr(-ENOMEM);
3011 
3012 	btf_ext->data_size = size;
3013 	btf_ext->data = malloc(size);
3014 	if (!btf_ext->data) {
3015 		err = -ENOMEM;
3016 		goto done;
3017 	}
3018 	memcpy(btf_ext->data, data, size);
3019 
3020 	err = btf_ext_parse_hdr(btf_ext->data, size);
3021 	if (err)
3022 		goto done;
3023 
3024 	if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
3025 		err = -EINVAL;
3026 		goto done;
3027 	}
3028 
3029 	err = btf_ext_setup_func_info(btf_ext);
3030 	if (err)
3031 		goto done;
3032 
3033 	err = btf_ext_setup_line_info(btf_ext);
3034 	if (err)
3035 		goto done;
3036 
3037 	if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
3038 		goto done; /* skip core relos parsing */
3039 
3040 	err = btf_ext_setup_core_relos(btf_ext);
3041 	if (err)
3042 		goto done;
3043 
3044 done:
3045 	if (err) {
3046 		btf_ext__free(btf_ext);
3047 		return libbpf_err_ptr(err);
3048 	}
3049 
3050 	return btf_ext;
3051 }
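
/* Usage sketch (illustrative, assumes data and size hold raw .BTF.ext bytes,
 * e.g. read from the corresponding ELF section):
 *
 *	struct btf_ext *ext = btf_ext__new(data, size);
 *	if (!ext)
 *		return -errno;
 *
 * On failure, libbpf returns NULL here and communicates the error via errno.
 */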
3052 
3053 const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size)
3054 {
3055 	*size = btf_ext->data_size;
3056 	return btf_ext->data;
3057 }
3058 
3059 __attribute__((alias("btf_ext__raw_data")))
3060 const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size);
3061 
3062 
3063 struct btf_dedup;
3064 
3065 static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
3066 static void btf_dedup_free(struct btf_dedup *d);
3067 static int btf_dedup_prep(struct btf_dedup *d);
3068 static int btf_dedup_strings(struct btf_dedup *d);
3069 static int btf_dedup_prim_types(struct btf_dedup *d);
3070 static int btf_dedup_struct_types(struct btf_dedup *d);
3071 static int btf_dedup_ref_types(struct btf_dedup *d);
3072 static int btf_dedup_resolve_fwds(struct btf_dedup *d);
3073 static int btf_dedup_compact_types(struct btf_dedup *d);
3074 static int btf_dedup_remap_types(struct btf_dedup *d);
3075 
3076 /*
3077  * Deduplicate BTF types and strings.
3078  *
3079  * The BTF dedup algorithm takes as input a `struct btf` representing the `.BTF`
3080  * ELF section with all BTF type descriptors and string data. It overwrites that
3081  * memory in-place with deduplicated types and strings without any loss of
3082  * information. If an optional `struct btf_ext` representing the '.BTF.ext' ELF
3083  * section is provided, all the strings referenced from the .BTF.ext section are
3084  * honored and updated to point to the right offsets after deduplication.
3085  *
3086  * If the function returns an error, type/string data might be garbled and
3087  * should be discarded.
3088  *
3089  * A more verbose and detailed description of both the problem btf_dedup is
3090  * solving and the solution can be found at:
3091  * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
3092  *
3093  * Problem description and justification
3094  * =====================================
3095  *
3096  * BTF type information is typically emitted either as a result of conversion
3097  * from DWARF to BTF or directly by the compiler. In both cases, each compilation
3098  * unit contains information about a subset of all the types that are used
3099  * in an application. These subsets are frequently overlapping and contain a lot
3100  * of duplicated information when later concatenated together into a single
3101  * binary. This algorithm ensures that each unique type is represented by a
3102  * single BTF type descriptor, greatly reducing the resulting size of BTF data.
3103  *
3104  * Compilation unit isolation and subsequent duplication of data is not the only
3105  * problem. The same type hierarchy (e.g., a struct and all the types that it
3106  * references) in different compilation units can be represented in BTF to
3107  * various degrees of completeness (or, rather, incompleteness) due to
3108  * struct/union forward declarations.
3109  *
3110  * Let's take a look at an example that we'll use to better understand the
3111  * problem (and solution). Suppose we have two compilation units, each using
3112  * the same `struct S`, but each of them having incomplete type information
3113  * about the struct's fields:
3114  *
3115  * // CU #1:
3116  * struct S;
3117  * struct A {
3118  *	int a;
3119  *	struct A* self;
3120  *	struct S* parent;
3121  * };
3122  * struct B;
3123  * struct S {
3124  *	struct A* a_ptr;
3125  *	struct B* b_ptr;
3126  * };
3127  *
3128  * // CU #2:
3129  * struct S;
3130  * struct A;
3131  * struct B {
3132  *	int b;
3133  *	struct B* self;
3134  *	struct S* parent;
3135  * };
3136  * struct S {
3137  *	struct A* a_ptr;
3138  *	struct B* b_ptr;
3139  * };
3140  *
3141  * In case of CU #1, BTF data will know only that `struct B` exists (but no
3142  * more), but will know the complete type information about `struct A`. While
3143  * for CU #2, it will know full type information about `struct B`, but will
3144  * only know about the forward declaration of `struct A` (in BTF terms, it will
3145  * have a `BTF_KIND_FWD` type descriptor with name `A`).
3146  *
3147  * This compilation unit isolation means that it's possible that there is no
3148  * single CU with complete type information describing structs `S`, `A`, and
3149  * `B`. Also, we might get tons of duplicated and redundant type information.
3150  *
3151  * An additional complication we need to keep in mind comes from the fact that
3152  * types, in general, can form graphs containing cycles, not just DAGs.
3153  *
3154  * While the algorithm performs deduplication, it also merges and resolves type
3155  * information (unless disabled through `struct btf_dedup_opts`), whenever
3156  * possible. E.g., in the example above with two compilation units having
3157  * partial type information for structs `A` and `B`, the algorithm will emit
3158  * a single copy of each BTF type that describes structs `A`, `B`, and `S`
3159  * (as well as type information for `int` and pointers), as if they were defined
3160  * in a single compilation unit as:
3161  *
3162  * struct A {
3163  *	int a;
3164  *	struct A* self;
3165  *	struct S* parent;
3166  * };
3167  * struct B {
3168  *	int b;
3169  *	struct B* self;
3170  *	struct S* parent;
3171  * };
3172  * struct S {
3173  *	struct A* a_ptr;
3174  *	struct B* b_ptr;
3175  * };
3176  *
3177  * Algorithm summary
3178  * =================
3179  *
3180  * The algorithm completes its work in 7 separate passes:
3181  *
3182  * 1. Strings deduplication.
3183  * 2. Primitive types deduplication (int, enum, fwd).
3184  * 3. Struct/union types deduplication.
3185  * 4. Resolve unambiguous forward declarations.
3186  * 5. Reference types deduplication (pointers, typedefs, arrays, funcs, func
3187  *    protos, and const/volatile/restrict modifiers).
3188  * 6. Types compaction.
3189  * 7. Types remapping.
3190  *
3191  * The algorithm determines a canonical type descriptor, which is a single
3192  * representative type for each truly unique type. This canonical type is the
3193  * one that will go into the final deduplicated BTF type information. For
3194  * structs/unions, it is also the type that the algorithm will merge additional
3195  * type information into (while resolving FWDs), as it discovers it from data in
3196  * other CUs. Each input BTF type eventually gets either mapped to itself, if
3197  * that type is canonical, or to some other type, if that type is equivalent
3198  * and was chosen as the canonical representative. This mapping is stored in
3199  * the `btf_dedup->map` array. This map is also used to record the STRUCT/UNION
3200  * that a FWD type got resolved to.
3201  *
3202  * To facilitate fast discovery of canonical types, we also maintain a canonical
3203  * index (`btf_dedup->dedup_table`), which maps a type descriptor's signature hash
3204  * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types
3205  * that match that signature. With a sufficiently good choice of type signature
3206  * hashing function, we can limit the number of canonical types for each unique
3207  * type signature to a very small number, allowing us to find the canonical type
3208  * for any duplicated type very quickly.
3209  *
3210  * Struct/union deduplication is the most critical part, and the algorithm for
3211  * deduplicating structs/unions is described in greater detail in comments for
3212  * the `btf_dedup_is_equiv` function.
3213  */
3214 int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
3215 {
3216 	struct btf_dedup *d;
3217 	int err;
3218 
3219 	if (!OPTS_VALID(opts, btf_dedup_opts))
3220 		return libbpf_err(-EINVAL);
3221 
3222 	d = btf_dedup_new(btf, opts);
3223 	if (IS_ERR(d)) {
3224 		pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
3225 		return libbpf_err(-EINVAL);
3226 	}
3227 
3228 	if (btf_ensure_modifiable(btf)) {
3229 		err = -ENOMEM;
3230 		goto done;
3231 	}
3232 
3233 	err = btf_dedup_prep(d);
3234 	if (err) {
3235 		pr_debug("btf_dedup_prep failed:%d\n", err);
3236 		goto done;
3237 	}
3238 	err = btf_dedup_strings(d);
3239 	if (err < 0) {
3240 		pr_debug("btf_dedup_strings failed:%d\n", err);
3241 		goto done;
3242 	}
3243 	err = btf_dedup_prim_types(d);
3244 	if (err < 0) {
3245 		pr_debug("btf_dedup_prim_types failed:%d\n", err);
3246 		goto done;
3247 	}
3248 	err = btf_dedup_struct_types(d);
3249 	if (err < 0) {
3250 		pr_debug("btf_dedup_struct_types failed:%d\n", err);
3251 		goto done;
3252 	}
3253 	err = btf_dedup_resolve_fwds(d);
3254 	if (err < 0) {
3255 		pr_debug("btf_dedup_resolve_fwds failed:%d\n", err);
3256 		goto done;
3257 	}
3258 	err = btf_dedup_ref_types(d);
3259 	if (err < 0) {
3260 		pr_debug("btf_dedup_ref_types failed:%d\n", err);
3261 		goto done;
3262 	}
3263 	err = btf_dedup_compact_types(d);
3264 	if (err < 0) {
3265 		pr_debug("btf_dedup_compact_types failed:%d\n", err);
3266 		goto done;
3267 	}
3268 	err = btf_dedup_remap_types(d);
3269 	if (err < 0) {
3270 		pr_debug("btf_dedup_remap_types failed:%d\n", err);
3271 		goto done;
3272 	}
3273 
3274 done:
3275 	btf_dedup_free(d);
3276 	return libbpf_err(err);
3277 }
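
/* Usage sketch (illustrative): deduplicating a BTF object together with its
 * .BTF.ext data via the opts struct:
 *
 *	LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = ext);
 *	int err = btf__dedup(btf, &opts);
 *
 * On error, per the comment above, type/string data may be garbled and the
 * BTF object should be discarded.
 */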
3278 
3279 #define BTF_UNPROCESSED_ID ((__u32)-1)
3280 #define BTF_IN_PROGRESS_ID ((__u32)-2)
3281 
3282 struct btf_dedup {
3283 	/* .BTF section to be deduped in-place */
3284 	struct btf *btf;
3285 	/*
3286 	 * Optional .BTF.ext section. When provided, any strings referenced
3287 	 * from it will be taken into account when deduping strings
3288 	 */
3289 	struct btf_ext *btf_ext;
3290 	/*
3291 	 * This is a map from any type's signature hash to a list of possible
3292 	 * canonical representative type candidates. Hash collisions are
3293 	 * ignored, so even types of various kinds can share the same list of
3294 	 * candidates, which is fine because we rely on subsequent
3295 	 * btf_xxx_equal() checks to authoritatively verify type equality.
3296 	 */
3297 	struct hashmap *dedup_table;
3298 	/* Canonical types map */
3299 	__u32 *map;
3300 	/* Hypothetical mapping, used during type graph equivalence checks */
3301 	__u32 *hypot_map;
3302 	__u32 *hypot_list;
3303 	size_t hypot_cnt;
3304 	size_t hypot_cap;
3305 	/* Whether the hypothetical mapping, if successful, would need to adjust
3306 	 * already canonicalized types (due to a new forward declaration being
3307 	 * resolved to a concrete type). In that case, during split BTF dedup the
3308 	 * candidate type would still be considered different, because base
3309 	 * BTF is considered to be immutable.
3310 	 */
3311 	bool hypot_adjust_canon;
3312 	/* Various options modifying the behavior of the algorithm */
3313 	struct btf_dedup_opts opts;
3314 	/* temporary strings deduplication state */
3315 	struct strset *strs_set;
3316 };
3317 
3318 static long hash_combine(long h, long value)
3319 {
3320 	return h * 31 + value;
3321 }
3322 
3323 #define for_each_dedup_cand(d, node, hash) \
3324 	hashmap__for_each_key_entry(d->dedup_table, node, hash)
3325 
3326 static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
3327 {
3328 	return hashmap__append(d->dedup_table, hash, type_id);
3329 }
3330 
3331 static int btf_dedup_hypot_map_add(struct btf_dedup *d,
3332 				   __u32 from_id, __u32 to_id)
3333 {
3334 	if (d->hypot_cnt == d->hypot_cap) {
3335 		__u32 *new_list;
3336 
3337 		d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
3338 		new_list = libbpf_reallocarray(d->hypot_list, d->hypot_cap, sizeof(__u32));
3339 		if (!new_list)
3340 			return -ENOMEM;
3341 		d->hypot_list = new_list;
3342 	}
3343 	d->hypot_list[d->hypot_cnt++] = from_id;
3344 	d->hypot_map[from_id] = to_id;
3345 	return 0;
3346 }
3347 
3348 static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
3349 {
3350 	int i;
3351 
3352 	for (i = 0; i < d->hypot_cnt; i++)
3353 		d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
3354 	d->hypot_cnt = 0;
3355 	d->hypot_adjust_canon = false;
3356 }
3357 
3358 static void btf_dedup_free(struct btf_dedup *d)
3359 {
3360 	hashmap__free(d->dedup_table);
3361 	d->dedup_table = NULL;
3362 
3363 	free(d->map);
3364 	d->map = NULL;
3365 
3366 	free(d->hypot_map);
3367 	d->hypot_map = NULL;
3368 
3369 	free(d->hypot_list);
3370 	d->hypot_list = NULL;
3371 
3372 	free(d);
3373 }
3374 
3375 static size_t btf_dedup_identity_hash_fn(long key, void *ctx)
3376 {
3377 	return key;
3378 }
3379 
3380 static size_t btf_dedup_collision_hash_fn(long key, void *ctx)
3381 {
3382 	return 0;
3383 }
3384 
3385 static bool btf_dedup_equal_fn(long k1, long k2, void *ctx)
3386 {
3387 	return k1 == k2;
3388 }
3389 
3390 static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts)
3391 {
3392 	struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
3393 	hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
3394 	int i, err = 0, type_cnt;
3395 
3396 	if (!d)
3397 		return ERR_PTR(-ENOMEM);
3398 
3399 	if (OPTS_GET(opts, force_collisions, false))
3400 		hash_fn = btf_dedup_collision_hash_fn;
3401 
3402 	d->btf = btf;
3403 	d->btf_ext = OPTS_GET(opts, btf_ext, NULL);
3404 
3405 	d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
3406 	if (IS_ERR(d->dedup_table)) {
3407 		err = PTR_ERR(d->dedup_table);
3408 		d->dedup_table = NULL;
3409 		goto done;
3410 	}
3411 
3412 	type_cnt = btf__type_cnt(btf);
3413 	d->map = malloc(sizeof(__u32) * type_cnt);
3414 	if (!d->map) {
3415 		err = -ENOMEM;
3416 		goto done;
3417 	}
3418 	/* special BTF "void" type is made canonical immediately */
3419 	d->map[0] = 0;
3420 	for (i = 1; i < type_cnt; i++) {
3421 		struct btf_type *t = btf_type_by_id(d->btf, i);
3422 
3423 		/* VAR and DATASEC are never deduped and are self-canonical */
3424 		if (btf_is_var(t) || btf_is_datasec(t))
3425 			d->map[i] = i;
3426 		else
3427 			d->map[i] = BTF_UNPROCESSED_ID;
3428 	}
3429 
3430 	d->hypot_map = malloc(sizeof(__u32) * type_cnt);
3431 	if (!d->hypot_map) {
3432 		err = -ENOMEM;
3433 		goto done;
3434 	}
3435 	for (i = 0; i < type_cnt; i++)
3436 		d->hypot_map[i] = BTF_UNPROCESSED_ID;
3437 
3438 done:
3439 	if (err) {
3440 		btf_dedup_free(d);
3441 		return ERR_PTR(err);
3442 	}
3443 
3444 	return d;
3445 }
3446 
3447 /*
3448  * Iterate over all possible places in .BTF and .BTF.ext that can reference a
3449  * string, and pass a pointer to each such string offset to the provided callback `fn`.
3450  */
3451 static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *ctx)
3452 {
3453 	int i, r;
3454 
3455 	for (i = 0; i < d->btf->nr_types; i++) {
3456 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
3457 
3458 		r = btf_type_visit_str_offs(t, fn, ctx);
3459 		if (r)
3460 			return r;
3461 	}
3462 
3463 	if (!d->btf_ext)
3464 		return 0;
3465 
3466 	r = btf_ext_visit_str_offs(d->btf_ext, fn, ctx);
3467 	if (r)
3468 		return r;
3469 
3470 	return 0;
3471 }
3472 
3473 static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx)
3474 {
3475 	struct btf_dedup *d = ctx;
3476 	__u32 str_off = *str_off_ptr;
3477 	const char *s;
3478 	int off, err;
3479 
3480 	/* don't touch empty string or string in main BTF */
3481 	if (str_off == 0 || str_off < d->btf->start_str_off)
3482 		return 0;
3483 
3484 	s = btf__str_by_offset(d->btf, str_off);
3485 	if (d->btf->base_btf) {
3486 		err = btf__find_str(d->btf->base_btf, s);
3487 		if (err >= 0) {
3488 			*str_off_ptr = err;
3489 			return 0;
3490 		}
3491 		if (err != -ENOENT)
3492 			return err;
3493 	}
3494 
3495 	off = strset__add_str(d->strs_set, s);
3496 	if (off < 0)
3497 		return off;
3498 
3499 	*str_off_ptr = d->btf->start_str_off + off;
3500 	return 0;
3501 }
3502 
3503 /*
3504  * Dedup strings and filter out those that are not referenced from either .BTF
3505  * or .BTF.ext (if provided) sections.
3506  *
3507  * This is done by building an index of all strings in BTF's string section,
3508  * then iterating over all entities that can reference strings (e.g., type
3509  * names, struct field names, .BTF.ext line info, etc) and marking corresponding
3510  * strings as used. After that, all used strings are deduped and compacted into
3511  * a sequential blob of memory and new offsets are calculated. Then all the
3512  * string references are iterated again and rewritten using the new offsets.
3513  */
3514 static int btf_dedup_strings(struct btf_dedup *d)
3515 {
3516 	int err;
3517 
3518 	if (d->btf->strs_deduped)
3519 		return 0;
3520 
3521 	d->strs_set = strset__new(BTF_MAX_STR_OFFSET, NULL, 0);
3522 	if (IS_ERR(d->strs_set)) {
3523 		err = PTR_ERR(d->strs_set);
3524 		goto err_out;
3525 	}
3526 
3527 	if (!d->btf->base_btf) {
3528 		/* insert empty string; we won't be looking it up during strings
3529 		 * dedup, but it's good to have it for generic BTF string lookups
3530 		 */
3531 		err = strset__add_str(d->strs_set, "");
3532 		if (err < 0)
3533 			goto err_out;
3534 	}
3535 
3536 	/* remap string offsets */
3537 	err = btf_for_each_str_off(d, strs_dedup_remap_str_off, d);
3538 	if (err)
3539 		goto err_out;
3540 
3541 	/* replace BTF string data and hash with deduped ones */
3542 	strset__free(d->btf->strs_set);
3543 	d->btf->hdr->str_len = strset__data_size(d->strs_set);
3544 	d->btf->strs_set = d->strs_set;
3545 	d->strs_set = NULL;
3546 	d->btf->strs_deduped = true;
3547 	return 0;
3548 
3549 err_out:
3550 	strset__free(d->strs_set);
3551 	d->strs_set = NULL;
3552 
3553 	return err;
3554 }
3555 
3556 static long btf_hash_common(struct btf_type *t)
3557 {
3558 	long h;
3559 
3560 	h = hash_combine(0, t->name_off);
3561 	h = hash_combine(h, t->info);
3562 	h = hash_combine(h, t->size);
3563 	return h;
3564 }
3565 
3566 static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
3567 {
3568 	return t1->name_off == t2->name_off &&
3569 	       t1->info == t2->info &&
3570 	       t1->size == t2->size;
3571 }
3572 
3573 /* Calculate type signature hash of INT or TAG. */
3574 static long btf_hash_int_decl_tag(struct btf_type *t)
3575 {
3576 	__u32 info = *(__u32 *)(t + 1);
3577 	long h;
3578 
3579 	h = btf_hash_common(t);
3580 	h = hash_combine(h, info);
3581 	return h;
3582 }
3583 
3584 /* Check structural equality of two INTs or TAGs. */
3585 static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2)
3586 {
3587 	__u32 info1, info2;
3588 
3589 	if (!btf_equal_common(t1, t2))
3590 		return false;
3591 	info1 = *(__u32 *)(t1 + 1);
3592 	info2 = *(__u32 *)(t2 + 1);
3593 	return info1 == info2;
3594 }
3595 
3596 /* Calculate type signature hash of ENUM/ENUM64. */
3597 static long btf_hash_enum(struct btf_type *t)
3598 {
3599 	long h;
3600 
3601 	/* don't hash vlen, enum members and size to support enum fwd resolving */
3602 	h = hash_combine(0, t->name_off);
3603 	return h;
3604 }
3605 
3606 static bool btf_equal_enum_members(struct btf_type *t1, struct btf_type *t2)
3607 {
3608 	const struct btf_enum *m1, *m2;
3609 	__u16 vlen;
3610 	int i;
3611 
3612 	vlen = btf_vlen(t1);
3613 	m1 = btf_enum(t1);
3614 	m2 = btf_enum(t2);
3615 	for (i = 0; i < vlen; i++) {
3616 		if (m1->name_off != m2->name_off || m1->val != m2->val)
3617 			return false;
3618 		m1++;
3619 		m2++;
3620 	}
3621 	return true;
3622 }
3623 
3624 static bool btf_equal_enum64_members(struct btf_type *t1, struct btf_type *t2)
3625 {
3626 	const struct btf_enum64 *m1, *m2;
3627 	__u16 vlen;
3628 	int i;
3629 
3630 	vlen = btf_vlen(t1);
3631 	m1 = btf_enum64(t1);
3632 	m2 = btf_enum64(t2);
3633 	for (i = 0; i < vlen; i++) {
3634 		if (m1->name_off != m2->name_off || m1->val_lo32 != m2->val_lo32 ||
3635 		    m1->val_hi32 != m2->val_hi32)
3636 			return false;
3637 		m1++;
3638 		m2++;
3639 	}
3640 	return true;
3641 }
3642 
3643 /* Check structural equality of two ENUMs or ENUM64s. */
3644 static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
3645 {
3646 	if (!btf_equal_common(t1, t2))
3647 		return false;
3648 
3649 	/* t1 & t2 kinds are identical because of btf_equal_common */
3650 	if (btf_kind(t1) == BTF_KIND_ENUM)
3651 		return btf_equal_enum_members(t1, t2);
3652 	else
3653 		return btf_equal_enum64_members(t1, t2);
3654 }
3655 
3656 static inline bool btf_is_enum_fwd(struct btf_type *t)
3657 {
3658 	return btf_is_any_enum(t) && btf_vlen(t) == 0;
3659 }
3660 
3661 static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
3662 {
3663 	if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
3664 		return btf_equal_enum(t1, t2);
3665 	/* At this point either t1 or t2 or both are forward declarations, thus:
3666 	 * - skip comparing vlen because it is zero for forward declarations;
3667 	 * - skip comparing size to allow enum forward declarations
3668 	 *   to be compatible with enum64 full declarations;
3669 	 * - skip comparing kind for the same reason.
3670 	 */
3671 	return t1->name_off == t2->name_off &&
3672 	       btf_is_any_enum(t1) && btf_is_any_enum(t2);
3673 }
3674 
3675 /*
3676  * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
3677  * as referenced type ID equivalence is established separately during the type
3678  * graph equivalence check.
3679  */
3680 static long btf_hash_struct(struct btf_type *t)
3681 {
3682 	const struct btf_member *member = btf_members(t);
3683 	__u32 vlen = btf_vlen(t);
3684 	long h = btf_hash_common(t);
3685 	int i;
3686 
3687 	for (i = 0; i < vlen; i++) {
3688 		h = hash_combine(h, member->name_off);
3689 		h = hash_combine(h, member->offset);
3690 		/* no hashing of referenced type ID, it can be unresolved yet */
3691 		member++;
3692 	}
3693 	return h;
3694 }
3695 
3696 /*
3697  * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced
3698  * type IDs. This check is performed during the type graph equivalence check,
3699  * and referenced type equivalence is checked separately.
3700  */
3701 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
3702 {
3703 	const struct btf_member *m1, *m2;
3704 	__u16 vlen;
3705 	int i;
3706 
3707 	if (!btf_equal_common(t1, t2))
3708 		return false;
3709 
3710 	vlen = btf_vlen(t1);
3711 	m1 = btf_members(t1);
3712 	m2 = btf_members(t2);
3713 	for (i = 0; i < vlen; i++) {
3714 		if (m1->name_off != m2->name_off || m1->offset != m2->offset)
3715 			return false;
3716 		m1++;
3717 		m2++;
3718 	}
3719 	return true;
3720 }
3721 
3722 /*
3723  * Calculate type signature hash of ARRAY, including referenced type IDs,
3724  * under assumption that they were already resolved to canonical type IDs and
3725  * are not going to change.
3726  */
3727 static long btf_hash_array(struct btf_type *t)
3728 {
3729 	const struct btf_array *info = btf_array(t);
3730 	long h = btf_hash_common(t);
3731 
3732 	h = hash_combine(h, info->type);
3733 	h = hash_combine(h, info->index_type);
3734 	h = hash_combine(h, info->nelems);
3735 	return h;
3736 }
3737 
3738 /*
3739  * Check exact equality of two ARRAYs, taking into account referenced
3740  * type IDs, under assumption that they were already resolved to canonical
3741  * type IDs and are not going to change.
3742  * This function is called during reference types deduplication to compare
3743  * ARRAY to potential canonical representative.
3744  */
3745 static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
3746 {
3747 	const struct btf_array *info1, *info2;
3748 
3749 	if (!btf_equal_common(t1, t2))
3750 		return false;
3751 
3752 	info1 = btf_array(t1);
3753 	info2 = btf_array(t2);
3754 	return info1->type == info2->type &&
3755 	       info1->index_type == info2->index_type &&
3756 	       info1->nelems == info2->nelems;
3757 }
3758 
3759 /*
3760  * Check structural compatibility of two ARRAYs, ignoring referenced type
3761  * IDs. This check is performed during the type graph equivalence check,
3762  * and referenced type equivalence is checked separately.
3763  */
3764 static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
3765 {
3766 	if (!btf_equal_common(t1, t2))
3767 		return false;
3768 
3769 	return btf_array(t1)->nelems == btf_array(t2)->nelems;
3770 }
3771 
3772 /*
3773  * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
3774  * under assumption that they were already resolved to canonical type IDs and
3775  * are not going to change.
3776  */
3777 static long btf_hash_fnproto(struct btf_type *t)
3778 {
3779 	const struct btf_param *member = btf_params(t);
3780 	__u16 vlen = btf_vlen(t);
3781 	long h = btf_hash_common(t);
3782 	int i;
3783 
3784 	for (i = 0; i < vlen; i++) {
3785 		h = hash_combine(h, member->name_off);
3786 		h = hash_combine(h, member->type);
3787 		member++;
3788 	}
3789 	return h;
3790 }
3791 
3792 /*
3793  * Check exact equality of two FUNC_PROTOs, taking into account referenced
3794  * type IDs, under the assumption that they were already resolved to canonical
3795  * type IDs and are not going to change.
3796  * This function is called during reference types deduplication to compare
3797  * FUNC_PROTO to potential canonical representative.
3798  */
3799 static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
3800 {
3801 	const struct btf_param *m1, *m2;
3802 	__u16 vlen;
3803 	int i;
3804 
3805 	if (!btf_equal_common(t1, t2))
3806 		return false;
3807 
3808 	vlen = btf_vlen(t1);
3809 	m1 = btf_params(t1);
3810 	m2 = btf_params(t2);
3811 	for (i = 0; i < vlen; i++) {
3812 		if (m1->name_off != m2->name_off || m1->type != m2->type)
3813 			return false;
3814 		m1++;
3815 		m2++;
3816 	}
3817 	return true;
3818 }
3819 
3820 /*
3821  * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
3822  * IDs. This check is performed during the type graph equivalence check;
3823  * the equivalence of referenced types is checked separately.
3824  */
3825 static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
3826 {
3827 	const struct btf_param *m1, *m2;
3828 	__u16 vlen;
3829 	int i;
3830 
3831 	/* skip return type ID */
3832 	if (t1->name_off != t2->name_off || t1->info != t2->info)
3833 		return false;
3834 
3835 	vlen = btf_vlen(t1);
3836 	m1 = btf_params(t1);
3837 	m2 = btf_params(t2);
3838 	for (i = 0; i < vlen; i++) {
3839 		if (m1->name_off != m2->name_off)
3840 			return false;
3841 		m1++;
3842 		m2++;
3843 	}
3844 	return true;
3845 }
3846 
3847 /* Prepare split BTF for deduplication by calculating hashes of base BTF's
3848  * types and initializing the rest of the state (canonical type mapping) for
3849  * the fixed base BTF part.
3850  */
3851 static int btf_dedup_prep(struct btf_dedup *d)
3852 {
3853 	struct btf_type *t;
3854 	int type_id;
3855 	long h;
3856 
3857 	if (!d->btf->base_btf)
3858 		return 0;
3859 
3860 	for (type_id = 1; type_id < d->btf->start_id; type_id++) {
3861 		t = btf_type_by_id(d->btf, type_id);
3862 
3863 		/* all base BTF types are self-canonical by definition */
3864 		d->map[type_id] = type_id;
3865 
3866 		switch (btf_kind(t)) {
3867 		case BTF_KIND_VAR:
3868 		case BTF_KIND_DATASEC:
3869 			/* VAR and DATASEC are never hashed or deduplicated */
3870 			continue;
3871 		case BTF_KIND_CONST:
3872 		case BTF_KIND_VOLATILE:
3873 		case BTF_KIND_RESTRICT:
3874 		case BTF_KIND_PTR:
3875 		case BTF_KIND_FWD:
3876 		case BTF_KIND_TYPEDEF:
3877 		case BTF_KIND_FUNC:
3878 		case BTF_KIND_FLOAT:
3879 		case BTF_KIND_TYPE_TAG:
3880 			h = btf_hash_common(t);
3881 			break;
3882 		case BTF_KIND_INT:
3883 		case BTF_KIND_DECL_TAG:
3884 			h = btf_hash_int_decl_tag(t);
3885 			break;
3886 		case BTF_KIND_ENUM:
3887 		case BTF_KIND_ENUM64:
3888 			h = btf_hash_enum(t);
3889 			break;
3890 		case BTF_KIND_STRUCT:
3891 		case BTF_KIND_UNION:
3892 			h = btf_hash_struct(t);
3893 			break;
3894 		case BTF_KIND_ARRAY:
3895 			h = btf_hash_array(t);
3896 			break;
3897 		case BTF_KIND_FUNC_PROTO:
3898 			h = btf_hash_fnproto(t);
3899 			break;
3900 		default:
3901 			pr_debug("unknown kind %d for type [%d]\n", btf_kind(t), type_id);
3902 			return -EINVAL;
3903 		}
3904 		if (btf_dedup_table_add(d, h, type_id))
3905 			return -ENOMEM;
3906 	}
3907 
3908 	return 0;
3909 }
3910 
3911 /*
3912  * Deduplicate primitive types, which can't reference other types, by calculating
3913  * their type signature hash and comparing it with every possible canonical
3914  * candidate. If no canonical candidate matches, the type itself is marked as
3915  * canonical and is added into `btf_dedup->dedup_table` as another candidate.
3916  */
3917 static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
3918 {
3919 	struct btf_type *t = btf_type_by_id(d->btf, type_id);
3920 	struct hashmap_entry *hash_entry;
3921 	struct btf_type *cand;
3922 	/* if we don't find equivalent type, then we are canonical */
3923 	__u32 new_id = type_id;
3924 	__u32 cand_id;
3925 	long h;
3926 
3927 	switch (btf_kind(t)) {
3928 	case BTF_KIND_CONST:
3929 	case BTF_KIND_VOLATILE:
3930 	case BTF_KIND_RESTRICT:
3931 	case BTF_KIND_PTR:
3932 	case BTF_KIND_TYPEDEF:
3933 	case BTF_KIND_ARRAY:
3934 	case BTF_KIND_STRUCT:
3935 	case BTF_KIND_UNION:
3936 	case BTF_KIND_FUNC:
3937 	case BTF_KIND_FUNC_PROTO:
3938 	case BTF_KIND_VAR:
3939 	case BTF_KIND_DATASEC:
3940 	case BTF_KIND_DECL_TAG:
3941 	case BTF_KIND_TYPE_TAG:
3942 		return 0;
3943 
3944 	case BTF_KIND_INT:
3945 		h = btf_hash_int_decl_tag(t);
3946 		for_each_dedup_cand(d, hash_entry, h) {
3947 			cand_id = hash_entry->value;
3948 			cand = btf_type_by_id(d->btf, cand_id);
3949 			if (btf_equal_int_tag(t, cand)) {
3950 				new_id = cand_id;
3951 				break;
3952 			}
3953 		}
3954 		break;
3955 
3956 	case BTF_KIND_ENUM:
3957 	case BTF_KIND_ENUM64:
3958 		h = btf_hash_enum(t);
3959 		for_each_dedup_cand(d, hash_entry, h) {
3960 			cand_id = hash_entry->value;
3961 			cand = btf_type_by_id(d->btf, cand_id);
3962 			if (btf_equal_enum(t, cand)) {
3963 				new_id = cand_id;
3964 				break;
3965 			}
3966 			if (btf_compat_enum(t, cand)) {
3967 				if (btf_is_enum_fwd(t)) {
3968 					/* resolve fwd to full enum */
3969 					new_id = cand_id;
3970 					break;
3971 				}
3972 				/* resolve canonical enum fwd to full enum */
3973 				d->map[cand_id] = type_id;
3974 			}
3975 		}
3976 		break;
3977 
3978 	case BTF_KIND_FWD:
3979 	case BTF_KIND_FLOAT:
3980 		h = btf_hash_common(t);
3981 		for_each_dedup_cand(d, hash_entry, h) {
3982 			cand_id = hash_entry->value;
3983 			cand = btf_type_by_id(d->btf, cand_id);
3984 			if (btf_equal_common(t, cand)) {
3985 				new_id = cand_id;
3986 				break;
3987 			}
3988 		}
3989 		break;
3990 
3991 	default:
3992 		return -EINVAL;
3993 	}
3994 
3995 	d->map[type_id] = new_id;
3996 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
3997 		return -ENOMEM;
3998 
3999 	return 0;
4000 }
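
/*
 * Illustrative example (hypothetical type IDs): compilers emit 'enum E;'
 * style forward declarations as ENUM with vlen=0. Given
 *
 * [4] ENUM 'E' size=4 vlen=2: 'A' val=0, 'B' val=1
 * [9] ENUM 'E' size=4 vlen=0
 *
 * btf_compat_enum() matches [9] against [4], so the vlen=0 variant [9] is
 * resolved to the full definition [4]. If the vlen=0 variant happened to be
 * registered as canonical first, it would instead be re-pointed at the full
 * definition (the d->map[cand_id] = type_id assignment above).
 */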
4001 
4002 static int btf_dedup_prim_types(struct btf_dedup *d)
4003 {
4004 	int i, err;
4005 
4006 	for (i = 0; i < d->btf->nr_types; i++) {
4007 		err = btf_dedup_prim_type(d, d->btf->start_id + i);
4008 		if (err)
4009 			return err;
4010 	}
4011 	return 0;
4012 }
4013 
4014 /*
4015  * Check whether type is already mapped into canonical one (could be to itself).
4016  */
4017 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
4018 {
4019 	return d->map[type_id] <= BTF_MAX_NR_TYPES;
4020 }
4021 
4022 /*
4023  * Resolve type ID into its canonical type ID, if any; otherwise return original
4024  * type ID. If the type is a FWD already resolved into a STRUCT/UNION, follow
4025  * the STRUCT/UNION link and resolve it into a canonical type ID as well.
4026  */
4027 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
4028 {
4029 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
4030 		type_id = d->map[type_id];
4031 	return type_id;
4032 }
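
/*
 * Illustrative example: with d->map = { [5] = 3, [3] = 3 }, resolve_type_id(d, 5)
 * follows 5 -> 3, sees that 3 maps to itself, and returns 3. A not-yet-mapped
 * ID (its map entry is above BTF_MAX_NR_TYPES) is returned unchanged.
 */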
4033 
4034 /*
4035  * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
4036  * type ID.
4037  */
4038 static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
4039 {
4040 	__u32 orig_type_id = type_id;
4041 
4042 	if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
4043 		return type_id;
4044 
4045 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
4046 		type_id = d->map[type_id];
4047 
4048 	if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
4049 		return type_id;
4050 
4051 	return orig_type_id;
4052 }
4053 
4054 
4055 static inline __u16 btf_fwd_kind(struct btf_type *t)
4056 {
4057 	return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
4058 }
4059 
4060 /* Check if given two types are identical ARRAY definitions */
4061 static bool btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
4062 {
4063 	struct btf_type *t1, *t2;
4064 
4065 	t1 = btf_type_by_id(d->btf, id1);
4066 	t2 = btf_type_by_id(d->btf, id2);
4067 	if (!btf_is_array(t1) || !btf_is_array(t2))
4068 		return false;
4069 
4070 	return btf_equal_array(t1, t2);
4071 }
4072 
4073 /* Check if given two types are identical STRUCT/UNION definitions */
4074 static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id2)
4075 {
4076 	const struct btf_member *m1, *m2;
4077 	struct btf_type *t1, *t2;
4078 	int n, i;
4079 
4080 	t1 = btf_type_by_id(d->btf, id1);
4081 	t2 = btf_type_by_id(d->btf, id2);
4082 
4083 	if (!btf_is_composite(t1) || btf_kind(t1) != btf_kind(t2))
4084 		return false;
4085 
4086 	if (!btf_shallow_equal_struct(t1, t2))
4087 		return false;
4088 
4089 	m1 = btf_members(t1);
4090 	m2 = btf_members(t2);
4091 	for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
4092 		if (m1->type != m2->type &&
4093 		    !btf_dedup_identical_arrays(d, m1->type, m2->type) &&
4094 		    !btf_dedup_identical_structs(d, m1->type, m2->type))
4095 			return false;
4096 	}
4097 	return true;
4098 }
4099 
4100 /*
4101  * Check equivalence of BTF type graph formed by candidate struct/union (we'll
4102  * call it "candidate graph" in this description for brevity) to a type graph
4103  * formed by (potential) canonical struct/union ("canonical graph" for brevity
4104  * here, though keep in mind that not all types in canonical graph are
4105  * necessarily canonical representatives themselves, some of them might be
4106  * duplicates or their uniqueness might not have been established yet).
4107  * Returns:
4108  *  - >0, if type graphs are equivalent;
4109  *  -  0, if not equivalent;
4110  *  - <0, on error.
4111  *
4112  * The algorithm performs a side-by-side DFS traversal of both type graphs and
4113  * checks equivalence of BTF types at each step. If at any point BTF types in the
4114  * candidate and canonical graphs are structurally incompatible, the whole graphs
4115  * are incompatible. If types are structurally equivalent (i.e., all information
4116  * except referenced type IDs is exactly the same), a mapping from `canon_id` to
4117  * a `cand_id` is recorded in the hypothetical mapping (`btf_dedup->hypot_map`).
4118  * If a type references other types, then those referenced types are checked
4119  * for equivalence recursively.
4120  *
4121  * During DFS traversal, if we find that for current `canon_id` type we
4122  * already have some mapping in hypothetical map, we check for two possible
4123  * situations:
4124  *   - `canon_id` is mapped to exactly the same type as `cand_id`. This will
4125  *     happen when type graphs have cycles. In this case we assume those two
4126  *     types are equivalent.
4127  *   - `canon_id` is mapped to a different type. This is a contradiction in our
4128  *     hypothetical mapping, because the same type in the canonical graph would
4129  *     correspond to two different types in the candidate graph, which shouldn't
4130  *     happen for equivalent type graphs. This condition terminates the
4131  *     equivalence check with a negative result.
4132  *
4133  * If the type graph traversal exhausts all types to check and finds no
4134  * contradiction, then the type graphs are equivalent.
4135  *
4136  * When checking types for equivalence, there is one special case: FWD types.
4137  * If FWD type resolution is allowed and one of the types (either from canonical
4138  * or candidate graph) is a FWD and the other is a STRUCT/UNION (per the FWD's
4139  * kind flag) and their names match, the hypothetical mapping is updated to
4140  * point from the FWD to the STRUCT/UNION. If the graphs are later determined
4141  * equivalent, this mapping permanently records the FWD -> STRUCT/UNION link.
4142  *
4143  * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
4144  * if there are two identically named (or anonymous) structs/unions that are
4145  * structurally compatible, one of which has a FWD field, while the other is a
4146  * concrete STRUCT/UNION, but according to the C sources they are different
4147  * structs/unions referencing different types with the same name. This is
4148  * extremely unlikely to happen, but the btf_dedup API allows disabling FWD
4149  * resolution if this logic is causing problems.
4150  *
4151  * Doing FWD resolution means that both candidate and/or canonical graphs can
4152  * consist of portions of the graph that come from multiple compilation units.
4153  * This is due to the fact that types within single compilation unit are always
4154  * deduplicated and FWDs are already resolved, if referenced struct/union
4155  * definition is available. So, if we had unresolved FWD and found corresponding
4156  * STRUCT/UNION, they will be from different compilation units. This
4157  * consequently means that when we "link" FWD to corresponding STRUCT/UNION,
4158  * type graph will likely have at least two different BTF types that describe
4159  * same type (e.g., most probably there will be two different BTF types for the
4160  * same 'int' primitive type) and could even have "overlapping" parts of type
4161  * graph that describe same subset of types.
4162  *
4163  * This in turn means that our assumption that each type in the canonical graph
4164  * must correspond to exactly one type in the candidate graph might not hold
4165  * anymore and will make it harder to detect contradictions using the
4166  * hypothetical map. To handle this problem, we follow FWD -> STRUCT/UNION
4167  * resolution only in the canonical graph; FWDs in candidate graphs are never
4168  * resolved. To see why that's OK, let's check all possible situations w.r.t.
4169  * FWDs that can occur:
4170  *   - Both types in canonical and candidate graphs are FWDs. If they are
4171  *     structurally equivalent, then they can either be both resolved to the
4172  *     same STRUCT/UNION or not resolved at all. In both cases they are
4173  *     equivalent and there is no need to resolve FWD on candidate side.
4174  *   - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
4175  *     so there is nothing to resolve; equivalence will be checked anyway.
4176  *   - Type in canonical graph is FWD, while type in candidate is concrete
4177  *     STRUCT/UNION. In this case the candidate graph comes from a single
4178  *     compilation unit, so there is exactly one BTF type for each unique C
4179  *     type. After resolving FWD into STRUCT/UNION, there might be more than
4180  *     one BTF type in the canonical graph mapping to a single BTF type in the
4181  *     candidate graph, but because the hypothetical mapping maps from canonical
4182  *     to candidate types, it's alright, and we still maintain the property of
4183  *     having a single `canon_id` map to a single `cand_id` (two different
4184  *     `canon_id`s may map to the same `cand_id`; that's not contradictory).
4185  *   - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
4186  *     graph is FWD. In this case we are just going to check compatibility of
4187  *     STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
4188  *     assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
4189  *     a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
4190  *     turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
4191  *     canonical graph.
4192  */
4193 static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
4194 			      __u32 canon_id)
4195 {
4196 	struct btf_type *cand_type;
4197 	struct btf_type *canon_type;
4198 	__u32 hypot_type_id;
4199 	__u16 cand_kind;
4200 	__u16 canon_kind;
4201 	int i, eq;
4202 
4203 	/* if both resolve to the same canonical, they must be equivalent */
4204 	if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
4205 		return 1;
4206 
4207 	canon_id = resolve_fwd_id(d, canon_id);
4208 
4209 	hypot_type_id = d->hypot_map[canon_id];
4210 	if (hypot_type_id <= BTF_MAX_NR_TYPES) {
4211 		if (hypot_type_id == cand_id)
4212 			return 1;
4213 		/* In some cases compiler will generate different DWARF types
4214 		 * for *identical* array type definitions and use them for
4215 		 * different fields within the *same* struct. This breaks type
4216 		 * equivalence check, which makes an assumption that candidate
4217 		 * types sub-graph has consistent, compiler-deduplicated types
4218 		 * within a single CU. So work around that by explicitly
4219 		 * allowing identical array types here.
4220 		 */
4221 		if (btf_dedup_identical_arrays(d, hypot_type_id, cand_id))
4222 			return 1;
4223 		/* It turns out that a similar situation can sometimes happen
4224 		 * with structs/unions, sigh... Handle the case where
4225 		 * structs/unions are exactly the same, down to the referenced
4226 		 * type IDs. Anything more complicated (e.g., if referenced
4227 		 * types are different, but equivalent) is *way more*
4228 		 * complicated and requires a many-to-many equivalence mapping.
4229 		 */
4230 		if (btf_dedup_identical_structs(d, hypot_type_id, cand_id))
4231 			return 1;
4232 		return 0;
4233 	}
4234 
4235 	if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
4236 		return -ENOMEM;
4237 
4238 	cand_type = btf_type_by_id(d->btf, cand_id);
4239 	canon_type = btf_type_by_id(d->btf, canon_id);
4240 	cand_kind = btf_kind(cand_type);
4241 	canon_kind = btf_kind(canon_type);
4242 
4243 	if (cand_type->name_off != canon_type->name_off)
4244 		return 0;
4245 
4246 	/* FWD <--> STRUCT/UNION equivalence check, if enabled */
4247 	if ((cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
4248 	    && cand_kind != canon_kind) {
4249 		__u16 real_kind;
4250 		__u16 fwd_kind;
4251 
4252 		if (cand_kind == BTF_KIND_FWD) {
4253 			real_kind = canon_kind;
4254 			fwd_kind = btf_fwd_kind(cand_type);
4255 		} else {
4256 			real_kind = cand_kind;
4257 			fwd_kind = btf_fwd_kind(canon_type);
4258 			/* we'd need to resolve base FWD to STRUCT/UNION */
4259 			if (fwd_kind == real_kind && canon_id < d->btf->start_id)
4260 				d->hypot_adjust_canon = true;
4261 		}
4262 		return fwd_kind == real_kind;
4263 	}
4264 
4265 	if (cand_kind != canon_kind)
4266 		return 0;
4267 
4268 	switch (cand_kind) {
4269 	case BTF_KIND_INT:
4270 		return btf_equal_int_tag(cand_type, canon_type);
4271 
4272 	case BTF_KIND_ENUM:
4273 	case BTF_KIND_ENUM64:
4274 		return btf_compat_enum(cand_type, canon_type);
4275 
4276 	case BTF_KIND_FWD:
4277 	case BTF_KIND_FLOAT:
4278 		return btf_equal_common(cand_type, canon_type);
4279 
4280 	case BTF_KIND_CONST:
4281 	case BTF_KIND_VOLATILE:
4282 	case BTF_KIND_RESTRICT:
4283 	case BTF_KIND_PTR:
4284 	case BTF_KIND_TYPEDEF:
4285 	case BTF_KIND_FUNC:
4286 	case BTF_KIND_TYPE_TAG:
4287 		if (cand_type->info != canon_type->info)
4288 			return 0;
4289 		return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
4290 
4291 	case BTF_KIND_ARRAY: {
4292 		const struct btf_array *cand_arr, *canon_arr;
4293 
4294 		if (!btf_compat_array(cand_type, canon_type))
4295 			return 0;
4296 		cand_arr = btf_array(cand_type);
4297 		canon_arr = btf_array(canon_type);
4298 		eq = btf_dedup_is_equiv(d, cand_arr->index_type, canon_arr->index_type);
4299 		if (eq <= 0)
4300 			return eq;
4301 		return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
4302 	}
4303 
4304 	case BTF_KIND_STRUCT:
4305 	case BTF_KIND_UNION: {
4306 		const struct btf_member *cand_m, *canon_m;
4307 		__u16 vlen;
4308 
4309 		if (!btf_shallow_equal_struct(cand_type, canon_type))
4310 			return 0;
4311 		vlen = btf_vlen(cand_type);
4312 		cand_m = btf_members(cand_type);
4313 		canon_m = btf_members(canon_type);
4314 		for (i = 0; i < vlen; i++) {
4315 			eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
4316 			if (eq <= 0)
4317 				return eq;
4318 			cand_m++;
4319 			canon_m++;
4320 		}
4321 
4322 		return 1;
4323 	}
4324 
4325 	case BTF_KIND_FUNC_PROTO: {
4326 		const struct btf_param *cand_p, *canon_p;
4327 		__u16 vlen;
4328 
4329 		if (!btf_compat_fnproto(cand_type, canon_type))
4330 			return 0;
4331 		eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
4332 		if (eq <= 0)
4333 			return eq;
4334 		vlen = btf_vlen(cand_type);
4335 		cand_p = btf_params(cand_type);
4336 		canon_p = btf_params(canon_type);
4337 		for (i = 0; i < vlen; i++) {
4338 			eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
4339 			if (eq <= 0)
4340 				return eq;
4341 			cand_p++;
4342 			canon_p++;
4343 		}
4344 		return 1;
4345 	}
4346 
4347 	default:
4348 		return -EINVAL;
4349 	}
4350 	return 0;
4351 }
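
/*
 * Illustrative worked example (hypothetical type IDs): comparing candidate
 * [12] against canonical [3]:
 *
 * [3]  STRUCT 'list' size=8 vlen=1: 'next' type_id=4
 * [4]  PTR type_id=5
 * [5]  FWD 'list' fwd_kind=struct
 * [12] STRUCT 'list' size=8 vlen=1: 'next' type_id=13
 * [13] PTR type_id=12
 *
 * The walk records hypot_map[3] = 12 and hypot_map[4] = 13, then compares
 * canonical FWD [5] against candidate STRUCT [12]: the names and the FWD's
 * kind flag match, so they are deemed equivalent and hypot_map[5] = 12 is
 * recorded. If the overall check succeeds, btf_dedup_merge_hypot_map() below
 * uses that entry to resolve FWD [5] to STRUCT [12].
 */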
4352 
4353 /*
4354  * Use hypothetical mapping, produced by successful type graph equivalence
4355  * check, to augment existing struct/union canonical mapping, where possible.
4356  *
4357  * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
4358  * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional:
4359  * it doesn't matter if FWD type was part of canonical graph or candidate one,
4360  * we are recording the mapping anyway. Unlike the care required for
4361  * struct/union correspondence mapping (described below), for FWD resolution
4362  * this is not a problem, as by the time that FWD type (a reference type) is
4363  * deduplicated, all structs/unions will already be deduped anyway.
4364  *
4365  * Recording STRUCT/UNION mapping is purely a performance optimization and is
4366  * not required for correctness. It needs to be done carefully to ensure that
4367  * struct/union from candidate's type graph is not mapped into corresponding
4368  * struct/union from canonical type graph that itself hasn't been resolved into
4369  * canonical representative. The only guarantee we have is that canonical
4370  * struct/union was determined as canonical and that won't change. But any
4371  * types referenced through that struct/union's fields might not be resolved
4372  * yet, so in a case like that it's too early to establish any kind of
4373  * correspondence between structs/unions.
4374  *
4375  * No canonical correspondence is derived for primitive types (they are already
4376  * deduplicated completely already anyway) or reference types (they rely on
4377  * stability of struct/union canonical relationship for equivalence checks).
4378  */
4379 static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
4380 {
4381 	__u32 canon_type_id, targ_type_id;
4382 	__u16 t_kind, c_kind;
4383 	__u32 t_id, c_id;
4384 	int i;
4385 
4386 	for (i = 0; i < d->hypot_cnt; i++) {
4387 		canon_type_id = d->hypot_list[i];
4388 		targ_type_id = d->hypot_map[canon_type_id];
4389 		t_id = resolve_type_id(d, targ_type_id);
4390 		c_id = resolve_type_id(d, canon_type_id);
4391 		t_kind = btf_kind(btf__type_by_id(d->btf, t_id));
4392 		c_kind = btf_kind(btf__type_by_id(d->btf, c_id));
4393 		/*
4394 		 * Resolve FWD into STRUCT/UNION.
4395 		 * It's ok to resolve FWD into STRUCT/UNION that's not yet
4396 		 * mapped to canonical representative (as opposed to
4397 		 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
4398 		 * eventually that struct is going to be mapped and all resolved
4399 		 * FWDs will automatically resolve to correct canonical
4400 		 * representative. This will happen before ref type deduping,
4401 		 * which critically depends on the stability of these mappings. This
4402 		 * stability is not a requirement for STRUCT/UNION equivalence
4403 		 * checks, though.
4404 		 */
4405 
4406 		/* if it's the split BTF case, we still need to point base FWD
4407 		 * to STRUCT/UNION in a split BTF, because FWDs from split BTF
4408 		 * will be resolved against base FWD. If we don't point base
4409 		 * canonical FWD to the resolved STRUCT/UNION, then all the
4410 		 * FWDs in split BTF won't be correctly resolved to a proper
4411 		 * STRUCT/UNION.
4412 		 */
4413 		if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
4414 			d->map[c_id] = t_id;
4415 
4416 		/* if graph equivalence determined that we'd need to adjust
4417 		 * base canonical types, then we only need to point base FWDs
4418 		 * to STRUCTs/UNIONs and do no more modifications. For all
4419 		 * other purposes the type graphs were not equivalent.
4420 		 */
4421 		if (d->hypot_adjust_canon)
4422 			continue;
4423 
4424 		if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
4425 			d->map[t_id] = c_id;
4426 
4427 		if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
4428 		    c_kind != BTF_KIND_FWD &&
4429 		    is_type_mapped(d, c_id) &&
4430 		    !is_type_mapped(d, t_id)) {
4431 			/*
4432 			 * as a perf optimization, we can map struct/union
4433 			 * that's part of type graph we just verified for
4434 			 * equivalence. We can do that only for a struct/union
4435 			 * that already has a canonical representative, though.
4436 			 */
4437 			d->map[t_id] = c_id;
4438 		}
4439 	}
4440 }
4441 
4442 /*
4443  * Deduplicate struct/union types.
4444  *
4445  * For each struct/union type its type signature hash is calculated, taking
4446  * into account the type's name and size plus the number, order, and names of
4447  * fields, but ignoring type IDs referenced from fields, because they might not
4448  * be fully deduped until after the reference types deduplication phase. This
4449  * type hash is used to iterate over all potential canonical types that share it.
4450  * For each canonical candidate we check whether type graphs that they form
4451  * (through referenced types in fields and so on) are equivalent using algorithm
4452  * implemented in `btf_dedup_is_equiv`. If such equivalence is found and
4453  * BTF_KIND_FWD resolution is allowed, then hypothetical mapping
4454  * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence
4455  * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to
4456  * potentially map other structs/unions to their canonical representatives,
4457  * if such relationship hasn't yet been established. This speeds up algorithm
4458  * by eliminating some of the duplicate work.
4459  *
4460  * If no matching canonical representative is found, the struct/union is marked
4461  * as canonical for itself and is added into the btf_dedup->dedup_table hash map
4462  * for further lookups.
4463  */
4464 static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
4465 {
4466 	struct btf_type *cand_type, *t;
4467 	struct hashmap_entry *hash_entry;
4468 	/* if we don't find equivalent type, then we are canonical */
4469 	__u32 new_id = type_id;
4470 	__u16 kind;
4471 	long h;
4472 
4473 	/* already deduped or is in process of deduping (loop detected) */
4474 	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
4475 		return 0;
4476 
4477 	t = btf_type_by_id(d->btf, type_id);
4478 	kind = btf_kind(t);
4479 
4480 	if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
4481 		return 0;
4482 
4483 	h = btf_hash_struct(t);
4484 	for_each_dedup_cand(d, hash_entry, h) {
4485 		__u32 cand_id = hash_entry->value;
4486 		int eq;
4487 
4488 		/*
4489 		 * Even though btf_dedup_is_equiv() checks for
4490 		 * btf_shallow_equal_struct() internally when checking two
4491 		 * structs (unions) for equivalence, we need to guard here
4492 		 * against picking a matching FWD type as a dedup candidate.
4493 		 * This can happen due to a hash collision. In such a case, just
4494 		 * relying on btf_dedup_is_equiv() would lead to potentially
4495 		 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
4496 		 * FWD and compatible STRUCT/UNION are considered equivalent.
4497 		 */
4498 		cand_type = btf_type_by_id(d->btf, cand_id);
4499 		if (!btf_shallow_equal_struct(t, cand_type))
4500 			continue;
4501 
4502 		btf_dedup_clear_hypot_map(d);
4503 		eq = btf_dedup_is_equiv(d, type_id, cand_id);
4504 		if (eq < 0)
4505 			return eq;
4506 		if (!eq)
4507 			continue;
4508 		btf_dedup_merge_hypot_map(d);
4509 		if (d->hypot_adjust_canon) /* not really equivalent */
4510 			continue;
4511 		new_id = cand_id;
4512 		break;
4513 	}
4514 
4515 	d->map[type_id] = new_id;
4516 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
4517 		return -ENOMEM;
4518 
4519 	return 0;
4520 }
4521 
4522 static int btf_dedup_struct_types(struct btf_dedup *d)
4523 {
4524 	int i, err;
4525 
4526 	for (i = 0; i < d->btf->nr_types; i++) {
4527 		err = btf_dedup_struct_type(d, d->btf->start_id + i);
4528 		if (err)
4529 			return err;
4530 	}
4531 	return 0;
4532 }
4533 
4534 /*
4535  * Deduplicate reference type.
4536  *
4537  * Once all primitive and struct/union types are deduplicated, we can easily
4538  * deduplicate all other (reference) BTF types. This is done in two steps:
4539  *
4540  * 1. Resolve all referenced type IDs into their canonical type IDs. This
4541  * resolution can be done either immediately for primitive or struct/union types
4542  * (because they were deduped in previous two phases) or recursively for
4543  * reference types. Recursion will always terminate at either primitive or
4544  * struct/union type, at which point we can "unwind" chain of reference types
4545  * one by one. There is no danger of encountering cycles because in the C type
4546  * system the only way to form a type cycle is through a struct/union, so any
4547  * chain of reference types, even those taking part in a type cycle, will
4548  * inevitably reach a struct/union at some point.
4549  *
4550  * 2. Once all referenced type IDs are resolved into canonical ones, BTF type
4551  * becomes "stable", in the sense that no further deduplication will cause
4552  * any changes to it. With that, it's now possible to calculate type's signature
4553  * hash (this time taking into account referenced type IDs) and loop over all
4554  * potential canonical representatives. If no match is found, the current type
4555  * becomes the canonical representative of itself and is added into
4556  * btf_dedup->dedup_table as another possible canonical representative.
4557  */
4558 static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
4559 {
4560 	struct hashmap_entry *hash_entry;
4561 	__u32 new_id = type_id, cand_id;
4562 	/* if we don't find equivalent type, then we are representative type */
4563 	__u32 new_id = type_id, cand_id;
4564 	struct btf_type *t, *cand;
4565 	int ref_type_id;
4566 
4567 	if (d->map[type_id] == BTF_IN_PROGRESS_ID)
4568 		return -ELOOP;
4569 	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
4570 		return resolve_type_id(d, type_id);
4571 
4572 	t = btf_type_by_id(d->btf, type_id);
4573 	d->map[type_id] = BTF_IN_PROGRESS_ID;
4574 
4575 	switch (btf_kind(t)) {
4576 	case BTF_KIND_CONST:
4577 	case BTF_KIND_VOLATILE:
4578 	case BTF_KIND_RESTRICT:
4579 	case BTF_KIND_PTR:
4580 	case BTF_KIND_TYPEDEF:
4581 	case BTF_KIND_FUNC:
4582 	case BTF_KIND_TYPE_TAG:
4583 		ref_type_id = btf_dedup_ref_type(d, t->type);
4584 		if (ref_type_id < 0)
4585 			return ref_type_id;
4586 		t->type = ref_type_id;
4587 
4588 		h = btf_hash_common(t);
4589 		for_each_dedup_cand(d, hash_entry, h) {
4590 			cand_id = hash_entry->value;
4591 			cand = btf_type_by_id(d->btf, cand_id);
4592 			if (btf_equal_common(t, cand)) {
4593 				new_id = cand_id;
4594 				break;
4595 			}
4596 		}
4597 		break;
4598 
4599 	case BTF_KIND_DECL_TAG:
4600 		ref_type_id = btf_dedup_ref_type(d, t->type);
4601 		if (ref_type_id < 0)
4602 			return ref_type_id;
4603 		t->type = ref_type_id;
4604 
4605 		h = btf_hash_int_decl_tag(t);
4606 		for_each_dedup_cand(d, hash_entry, h) {
4607 			cand_id = hash_entry->value;
4608 			cand = btf_type_by_id(d->btf, cand_id);
4609 			if (btf_equal_int_tag(t, cand)) {
4610 				new_id = cand_id;
4611 				break;
4612 			}
4613 		}
4614 		break;
4615 
4616 	case BTF_KIND_ARRAY: {
4617 		struct btf_array *info = btf_array(t);
4618 
4619 		ref_type_id = btf_dedup_ref_type(d, info->type);
4620 		if (ref_type_id < 0)
4621 			return ref_type_id;
4622 		info->type = ref_type_id;
4623 
4624 		ref_type_id = btf_dedup_ref_type(d, info->index_type);
4625 		if (ref_type_id < 0)
4626 			return ref_type_id;
4627 		info->index_type = ref_type_id;
4628 
4629 		h = btf_hash_array(t);
4630 		for_each_dedup_cand(d, hash_entry, h) {
4631 			cand_id = hash_entry->value;
4632 			cand = btf_type_by_id(d->btf, cand_id);
4633 			if (btf_equal_array(t, cand)) {
4634 				new_id = cand_id;
4635 				break;
4636 			}
4637 		}
4638 		break;
4639 	}
4640 
4641 	case BTF_KIND_FUNC_PROTO: {
4642 		struct btf_param *param;
4643 		__u16 vlen;
4644 		int i;
4645 
4646 		ref_type_id = btf_dedup_ref_type(d, t->type);
4647 		if (ref_type_id < 0)
4648 			return ref_type_id;
4649 		t->type = ref_type_id;
4650 
4651 		vlen = btf_vlen(t);
4652 		param = btf_params(t);
4653 		for (i = 0; i < vlen; i++) {
4654 			ref_type_id = btf_dedup_ref_type(d, param->type);
4655 			if (ref_type_id < 0)
4656 				return ref_type_id;
4657 			param->type = ref_type_id;
4658 			param++;
4659 		}
4660 
4661 		h = btf_hash_fnproto(t);
4662 		for_each_dedup_cand(d, hash_entry, h) {
4663 			cand_id = hash_entry->value;
4664 			cand = btf_type_by_id(d->btf, cand_id);
4665 			if (btf_equal_fnproto(t, cand)) {
4666 				new_id = cand_id;
4667 				break;
4668 			}
4669 		}
4670 		break;
4671 	}
4672 
4673 	default:
4674 		return -EINVAL;
4675 	}
4676 
4677 	d->map[type_id] = new_id;
4678 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
4679 		return -ENOMEM;
4680 
4681 	return new_id;
4682 }
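
/*
 * Illustrative example (hypothetical type IDs): two CUs both define
 * 'const int *'. After primitive dedup, both 'int' types resolve to a single
 * canonical [1], and ref type dedup collapses each chain bottom-up:
 *
 * CU #1: [2] CONST type_id=1, [3] PTR type_id=2
 * CU #2: [7] CONST type_id=9, [8] PTR type_id=7   (with d->map[9] = 1)
 *
 * Processing [8] first recursively processes [7], rewriting its target from
 * [9] to canonical [1]; [7] then hashes and compares equal to [2] and is
 * mapped to it, which in turn makes [8] equal to [3]. The whole duplicate
 * chain folds into [1] <- [2] <- [3].
 */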
4683 
4684 static int btf_dedup_ref_types(struct btf_dedup *d)
4685 {
4686 	int i, err;
4687 
4688 	for (i = 0; i < d->btf->nr_types; i++) {
4689 		err = btf_dedup_ref_type(d, d->btf->start_id + i);
4690 		if (err < 0)
4691 			return err;
4692 	}
4693 	/* we won't need d->dedup_table anymore */
4694 	hashmap__free(d->dedup_table);
4695 	d->dedup_table = NULL;
4696 	return 0;
4697 }
4698 
4699 /*
4700  * Collect a map from type names to type ids for all canonical structs
4701  * and unions. If the same name is shared by several canonical types,
4702  * use the special value 0 to indicate this fact.
4703  */
4704 static int btf_dedup_fill_unique_names_map(struct btf_dedup *d, struct hashmap *names_map)
4705 {
4706 	__u32 nr_types = btf__type_cnt(d->btf);
4707 	struct btf_type *t;
4708 	__u32 type_id;
4709 	__u16 kind;
4710 	int err;
4711 
4712 	/*
4713 	 * Iterate over base and split module IDs in order to get all
4714 	 * available structs in the map.
4715 	 */
4716 	for (type_id = 1; type_id < nr_types; ++type_id) {
4717 		t = btf_type_by_id(d->btf, type_id);
4718 		kind = btf_kind(t);
4719 
4720 		if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
4721 			continue;
4722 
4723 		/* Skip non-canonical types */
4724 		if (type_id != d->map[type_id])
4725 			continue;
4726 
4727 		err = hashmap__add(names_map, t->name_off, type_id);
4728 		if (err == -EEXIST)
4729 			err = hashmap__set(names_map, t->name_off, 0, NULL, NULL);
4730 
4731 		if (err)
4732 			return err;
4733 	}
4734 
4735 	return 0;
4736 }
4737 
4738 static int btf_dedup_resolve_fwd(struct btf_dedup *d, struct hashmap *names_map, __u32 type_id)
4739 {
4740 	struct btf_type *t = btf_type_by_id(d->btf, type_id);
4741 	enum btf_fwd_kind fwd_kind = btf_kflag(t);
4742 	__u16 cand_kind, kind = btf_kind(t);
4743 	struct btf_type *cand_t;
4744 	uintptr_t cand_id;
4745 
4746 	if (kind != BTF_KIND_FWD)
4747 		return 0;
4748 
4749 	/* Skip if this FWD already has a mapping */
4750 	if (type_id != d->map[type_id])
4751 		return 0;
4752 
4753 	if (!hashmap__find(names_map, t->name_off, &cand_id))
4754 		return 0;
4755 
4756 	/* Zero is a special value indicating that name is not unique */
4757 	if (!cand_id)
4758 		return 0;
4759 
4760 	cand_t = btf_type_by_id(d->btf, cand_id);
4761 	cand_kind = btf_kind(cand_t);
4762 	if ((cand_kind == BTF_KIND_STRUCT && fwd_kind != BTF_FWD_STRUCT) ||
4763 	    (cand_kind == BTF_KIND_UNION && fwd_kind != BTF_FWD_UNION))
4764 		return 0;
4765 
4766 	d->map[type_id] = cand_id;
4767 
4768 	return 0;
4769 }
4770 
4771 /*
4772  * Resolve unambiguous forward declarations.
4773  *
4774  * The lion's share of all FWD declarations is resolved during
4775  * `btf_dedup_struct_types` phase when different type graphs are
4776  * compared against each other. However, if in some compilation unit a
4777  * FWD declaration is not a part of a type graph compared against
4778  * another type graph, that declaration's canonical type would not be
4779  * changed. Example:
4780  *
4781  * CU #1:
4782  *
4783  * struct foo;
4784  * struct foo *some_global;
4785  *
4786  * CU #2:
4787  *
4788  * struct foo { int u; };
4789  * struct foo *another_global;
4790  *
4791  * After `btf_dedup_struct_types` the BTF looks as follows:
4792  *
4793  * [1] STRUCT 'foo' size=4 vlen=1 ...
4794  * [2] INT 'int' size=4 ...
4795  * [3] PTR '(anon)' type_id=1
4796  * [4] FWD 'foo' fwd_kind=struct
4797  * [5] PTR '(anon)' type_id=4
4798  *
4799  * This pass assumes that such FWD declarations should be mapped to
4800  * structs or unions with an identical name, provided that the name is
4801  * not ambiguous.
4802  */
4803 static int btf_dedup_resolve_fwds(struct btf_dedup *d)
4804 {
4805 	int i, err;
4806 	struct hashmap *names_map;
4807 
4808 	names_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
4809 	if (IS_ERR(names_map))
4810 		return PTR_ERR(names_map);
4811 
4812 	err = btf_dedup_fill_unique_names_map(d, names_map);
4813 	if (err < 0)
4814 		goto exit;
4815 
4816 	for (i = 0; i < d->btf->nr_types; i++) {
4817 		err = btf_dedup_resolve_fwd(d, names_map, d->btf->start_id + i);
4818 		if (err < 0)
4819 			break;
4820 	}
4821 
4822 exit:
4823 	hashmap__free(names_map);
4824 	return err;
4825 }
4826 
4827 /*
4828  * Compact types.
4829  *
4830  * After we have established for each type its corresponding canonical
4831  * representative type, we can now eliminate types that are not canonical and
4832  * leave only canonical ones laid out sequentially in memory by copying them over
4833  * duplicates. During compaction btf_dedup->hypot_map array is reused to store
4834  * a map from original type ID to a new compacted type ID, which will be used
4835  * during the next phase to "fix up" type IDs referenced from struct/union and
4836  * reference types.
4837  */
4838 static int btf_dedup_compact_types(struct btf_dedup *d)
4839 {
4840 	__u32 *new_offs;
4841 	__u32 next_type_id = d->btf->start_id;
4842 	const struct btf_type *t;
4843 	void *p;
4844 	int i, id, len;
4845 
4846 	/* we are going to reuse hypot_map to store compaction remapping */
4847 	d->hypot_map[0] = 0;
4848 	/* base BTF types are not renumbered */
4849 	for (id = 1; id < d->btf->start_id; id++)
4850 		d->hypot_map[id] = id;
4851 	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++)
4852 		d->hypot_map[id] = BTF_UNPROCESSED_ID;
4853 
4854 	p = d->btf->types_data;
4855 
4856 	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) {
4857 		if (d->map[id] != id)
4858 			continue;
4859 
4860 		t = btf__type_by_id(d->btf, id);
4861 		len = btf_type_size(t);
4862 		if (len < 0)
4863 			return len;
4864 
4865 		memmove(p, t, len);
4866 		d->hypot_map[id] = next_type_id;
4867 		d->btf->type_offs[next_type_id - d->btf->start_id] = p - d->btf->types_data;
4868 		p += len;
4869 		next_type_id++;
4870 	}
4871 
4872 	/* shrink struct btf's internal types index and update btf_header */
4873 	d->btf->nr_types = next_type_id - d->btf->start_id;
4874 	d->btf->type_offs_cap = d->btf->nr_types;
4875 	d->btf->hdr->type_len = p - d->btf->types_data;
4876 	new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap,
4877 				       sizeof(*new_offs));
4878 	if (d->btf->type_offs_cap && !new_offs)
4879 		return -ENOMEM;
4880 	d->btf->type_offs = new_offs;
4881 	d->btf->hdr->str_off = d->btf->hdr->type_len;
4882 	d->btf->raw_size = d->btf->hdr->hdr_len + d->btf->hdr->type_len + d->btf->hdr->str_len;
4883 	return 0;
4884 }
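
/*
 * Illustrative example: with start_id = 1 and d->map = { [1] = 1, [2] = 1,
 * [3] = 3 }, only [1] and [3] survive compaction. They are copied to the
 * front of types_data and renumbered sequentially, producing
 * hypot_map = { [1] = 1, [3] = 2 } ([2] stays unprocessed). During the remap
 * phase below, a reference to [2] first resolves to its canonical [1] and is
 * then rewritten to new ID 1, while references to [3] are rewritten to new
 * ID 2.
 */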
4885 
4886 /*
4887  * Figure out final (deduplicated and compacted) type ID for provided original
4888  * `type_id` by first resolving it into corresponding canonical type ID and
4889  * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
4890  * which is populated during compaction phase.
4891  */
4892 static int btf_dedup_remap_type_id(__u32 *type_id, void *ctx)
4893 {
4894 	struct btf_dedup *d = ctx;
4895 	__u32 resolved_type_id, new_type_id;
4896 
4897 	resolved_type_id = resolve_type_id(d, *type_id);
4898 	new_type_id = d->hypot_map[resolved_type_id];
4899 	if (new_type_id > BTF_MAX_NR_TYPES)
4900 		return -EINVAL;
4901 
4902 	*type_id = new_type_id;
4903 	return 0;
4904 }
4905 
4906 /*
4907  * Remap referenced type IDs into deduped type IDs.
4908  *
4909  * After BTF types are deduplicated and compacted, their final type IDs may
4910  * differ from original ones. The map from original to a corresponding
4911  * deduped type ID is stored in btf_dedup->hypot_map and is populated during
4912  * the compaction phase. During the remapping phase we rewrite all type IDs
4913  * referenced from any BTF type (e.g., struct fields, func proto args, etc) to
4914  * their final deduped type IDs.
4915  */
4916 static int btf_dedup_remap_types(struct btf_dedup *d)
4917 {
4918 	int i, r;
4919 
4920 	for (i = 0; i < d->btf->nr_types; i++) {
4921 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
4922 
4923 		r = btf_type_visit_type_ids(t, btf_dedup_remap_type_id, d);
4924 		if (r)
4925 			return r;
4926 	}
4927 
4928 	if (!d->btf_ext)
4929 		return 0;
4930 
4931 	r = btf_ext_visit_type_ids(d->btf_ext, btf_dedup_remap_type_id, d);
4932 	if (r)
4933 		return r;
4934 
4935 	return 0;
4936 }
4937 
4938 /*
4939  * Probe a few well-known locations for the vmlinux kernel image and try to load
4940  * BTF data out of it to use for the target BTF.
4941  */
4942 struct btf *btf__load_vmlinux_btf(void)
4943 {
4944 	const char *sysfs_btf_path = "/sys/kernel/btf/vmlinux";
4945 	/* fallback locations, trying to find vmlinux on disk */
4946 	const char *locations[] = {
4947 		"/boot/vmlinux-%1$s",
4948 		"/lib/modules/%1$s/vmlinux-%1$s",
4949 		"/lib/modules/%1$s/build/vmlinux",
4950 		"/usr/lib/modules/%1$s/kernel/vmlinux",
4951 		"/usr/lib/debug/boot/vmlinux-%1$s",
4952 		"/usr/lib/debug/boot/vmlinux-%1$s.debug",
4953 		"/usr/lib/debug/lib/modules/%1$s/vmlinux",
4954 	};
4955 	char path[PATH_MAX + 1];
4956 	struct utsname buf;
4957 	struct btf *btf;
4958 	int i, err;
4959 
4960 	/* is canonical sysfs location accessible? */
4961 	if (faccessat(AT_FDCWD, sysfs_btf_path, F_OK, AT_EACCESS) < 0) {
4962 		pr_warn("kernel BTF is missing at '%s', was CONFIG_DEBUG_INFO_BTF enabled?\n",
4963 			sysfs_btf_path);
4964 	} else {
4965 		btf = btf__parse(sysfs_btf_path, NULL);
4966 		if (!btf) {
4967 			err = -errno;
4968 			pr_warn("failed to read kernel BTF from '%s': %d\n", sysfs_btf_path, err);
4969 			return libbpf_err_ptr(err);
4970 		}
4971 		pr_debug("loaded kernel BTF from '%s'\n", sysfs_btf_path);
4972 		return btf;
4973 	}
4974 
4975 	/* try fallback locations */
4976 	uname(&buf);
4977 	for (i = 0; i < ARRAY_SIZE(locations); i++) {
4978 		snprintf(path, PATH_MAX, locations[i], buf.release);
4979 
4980 		if (faccessat(AT_FDCWD, path, R_OK, AT_EACCESS))
4981 			continue;
4982 
4983 		btf = btf__parse(path, NULL);
4984 		err = libbpf_get_error(btf);
4985 		pr_debug("loading kernel BTF '%s': %d\n", path, err);
4986 		if (err)
4987 			continue;
4988 
4989 		return btf;
4990 	}
4991 
4992 	pr_warn("failed to find valid kernel BTF\n");
4993 	return libbpf_err_ptr(-ESRCH);
4994 }
4995 
4996 struct btf *libbpf_find_kernel_btf(void) __attribute__((alias("btf__load_vmlinux_btf")));
4997 
4998 struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf)
4999 {
5000 	char path[80];
5001 
5002 	snprintf(path, sizeof(path), "/sys/kernel/btf/%s", module_name);
5003 	return btf__parse_split(path, vmlinux_btf);
5004 }
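
/*
 * Usage sketch (illustrative only; the module name is hypothetical and error
 * handling is abbreviated): load vmlinux BTF first, then parse a kernel
 * module's split BTF on top of it as the base:
 *
 *	struct btf *vmlinux_btf, *module_btf;
 *
 *	vmlinux_btf = btf__load_vmlinux_btf();
 *	if (libbpf_get_error(vmlinux_btf))
 *		return -EINVAL;
 *	module_btf = btf__load_module_btf("nf_conntrack", vmlinux_btf);
 *	if (!libbpf_get_error(module_btf)) {
 *		... look up types via btf__find_by_name_kind(module_btf, ...) ...
 *		btf__free(module_btf);
 *	}
 *	btf__free(vmlinux_btf);
 */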
5005 
5006 int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
5007 {
5008 	int i, n, err;
5009 
5010 	switch (btf_kind(t)) {
5011 	case BTF_KIND_INT:
5012 	case BTF_KIND_FLOAT:
5013 	case BTF_KIND_ENUM:
5014 	case BTF_KIND_ENUM64:
5015 		return 0;
5016 
5017 	case BTF_KIND_FWD:
5018 	case BTF_KIND_CONST:
5019 	case BTF_KIND_VOLATILE:
5020 	case BTF_KIND_RESTRICT:
5021 	case BTF_KIND_PTR:
5022 	case BTF_KIND_TYPEDEF:
5023 	case BTF_KIND_FUNC:
5024 	case BTF_KIND_VAR:
5025 	case BTF_KIND_DECL_TAG:
5026 	case BTF_KIND_TYPE_TAG:
5027 		return visit(&t->type, ctx);
5028 
5029 	case BTF_KIND_ARRAY: {
5030 		struct btf_array *a = btf_array(t);
5031 
5032 		err = visit(&a->type, ctx);
5033 		err = err ?: visit(&a->index_type, ctx);
5034 		return err;
5035 	}
5036 
5037 	case BTF_KIND_STRUCT:
5038 	case BTF_KIND_UNION: {
5039 		struct btf_member *m = btf_members(t);
5040 
5041 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
5042 			err = visit(&m->type, ctx);
5043 			if (err)
5044 				return err;
5045 		}
5046 		return 0;
5047 	}
5048 
5049 	case BTF_KIND_FUNC_PROTO: {
5050 		struct btf_param *m = btf_params(t);
5051 
5052 		err = visit(&t->type, ctx);
5053 		if (err)
5054 			return err;
5055 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
5056 			err = visit(&m->type, ctx);
5057 			if (err)
5058 				return err;
5059 		}
5060 		return 0;
5061 	}
5062 
5063 	case BTF_KIND_DATASEC: {
5064 		struct btf_var_secinfo *m = btf_var_secinfos(t);
5065 
5066 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
5067 			err = visit(&m->type, ctx);
5068 			if (err)
5069 				return err;
5070 		}
5071 		return 0;
5072 	}
5073 
5074 	default:
5075 		return -EINVAL;
5076 	}
5077 }
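
/*
 * Usage sketch (illustrative): a type_id_visit_fn receives a pointer to each
 * type ID referenced by the visited type and may rewrite it in place; this is
 * how btf_dedup_remap_types() applies its remapping table. A hypothetical
 * visitor that shifts all referenced IDs by a fixed delta could look like:
 *
 *	static int bump_type_ids(__u32 *type_id, void *ctx)
 *	{
 *		int delta = *(int *)ctx;
 *
 *		if (*type_id)
 *			*type_id += delta;
 *		return 0;
 *	}
 *
 * (the *type_id check leaves VOID ([0]) references alone), invoked as:
 *
 *	err = btf_type_visit_type_ids(t, bump_type_ids, &delta);
 */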
5078 
5079 int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx)
5080 {
5081 	int i, n, err;
5082 
5083 	err = visit(&t->name_off, ctx);
5084 	if (err)
5085 		return err;
5086 
5087 	switch (btf_kind(t)) {
5088 	case BTF_KIND_STRUCT:
5089 	case BTF_KIND_UNION: {
5090 		struct btf_member *m = btf_members(t);
5091 
5092 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
5093 			err = visit(&m->name_off, ctx);
5094 			if (err)
5095 				return err;
5096 		}
5097 		break;
5098 	}
5099 	case BTF_KIND_ENUM: {
5100 		struct btf_enum *m = btf_enum(t);
5101 
5102 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
5103 			err = visit(&m->name_off, ctx);
5104 			if (err)
5105 				return err;
5106 		}
5107 		break;
5108 	}
5109 	case BTF_KIND_ENUM64: {
5110 		struct btf_enum64 *m = btf_enum64(t);
5111 
5112 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
5113 			err = visit(&m->name_off, ctx);
5114 			if (err)
5115 				return err;
5116 		}
5117 		break;
5118 	}
5119 	case BTF_KIND_FUNC_PROTO: {
5120 		struct btf_param *m = btf_params(t);
5121 
5122 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
5123 			err = visit(&m->name_off, ctx);
5124 			if (err)
5125 				return err;
5126 		}
5127 		break;
5128 	}
5129 	default:
5130 		break;
5131 	}
5132 
5133 	return 0;
5134 }
5135 
5136 int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
5137 {
5138 	const struct btf_ext_info *seg;
5139 	struct btf_ext_info_sec *sec;
5140 	int i, err;
5141 
5142 	seg = &btf_ext->func_info;
5143 	for_each_btf_ext_sec(seg, sec) {
5144 		struct bpf_func_info_min *rec;
5145 
5146 		for_each_btf_ext_rec(seg, sec, i, rec) {
5147 			err = visit(&rec->type_id, ctx);
5148 			if (err < 0)
5149 				return err;
5150 		}
5151 	}
5152 
5153 	seg = &btf_ext->core_relo_info;
5154 	for_each_btf_ext_sec(seg, sec) {
5155 		struct bpf_core_relo *rec;
5156 
5157 		for_each_btf_ext_rec(seg, sec, i, rec) {
5158 			err = visit(&rec->type_id, ctx);
5159 			if (err < 0)
5160 				return err;
5161 		}
5162 	}
5163 
5164 	return 0;
5165 }
5166 
5167 int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx)
5168 {
5169 	const struct btf_ext_info *seg;
5170 	struct btf_ext_info_sec *sec;
5171 	int i, err;
5172 
5173 	seg = &btf_ext->func_info;
5174 	for_each_btf_ext_sec(seg, sec) {
5175 		err = visit(&sec->sec_name_off, ctx);
5176 		if (err)
5177 			return err;
5178 	}
5179 
5180 	seg = &btf_ext->line_info;
5181 	for_each_btf_ext_sec(seg, sec) {
5182 		struct bpf_line_info_min *rec;
5183 
5184 		err = visit(&sec->sec_name_off, ctx);
5185 		if (err)
5186 			return err;
5187 
5188 		for_each_btf_ext_rec(seg, sec, i, rec) {
5189 			err = visit(&rec->file_name_off, ctx);
5190 			if (err)
5191 				return err;
5192 			err = visit(&rec->line_off, ctx);
5193 			if (err)
5194 				return err;
5195 		}
5196 	}
5197 
5198 	seg = &btf_ext->core_relo_info;
5199 	for_each_btf_ext_sec(seg, sec) {
5200 		struct bpf_core_relo *rec;
5201 
5202 		err = visit(&sec->sec_name_off, ctx);
5203 		if (err)
5204 			return err;
5205 
5206 		for_each_btf_ext_rec(seg, sec, i, rec) {
5207 			err = visit(&rec->access_str_off, ctx);
5208 			if (err)
5209 				return err;
5210 		}
5211 	}
5212 
5213 	return 0;
5214 }
5215