xref: /linux/tools/lib/bpf/btf.c (revision 118ae46b794271ebcfcc9bab95e1c766198c8209)
1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 /* Copyright (c) 2018 Facebook */
3 
4 #include <byteswap.h>
5 #include <endian.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <fcntl.h>
10 #include <unistd.h>
11 #include <errno.h>
12 #include <sys/utsname.h>
13 #include <sys/param.h>
14 #include <sys/stat.h>
15 #include <linux/kernel.h>
16 #include <linux/err.h>
17 #include <linux/btf.h>
18 #include <gelf.h>
19 #include "btf.h"
20 #include "bpf.h"
21 #include "libbpf.h"
22 #include "libbpf_internal.h"
23 #include "hashmap.h"
24 #include "strset.h"
25 #include "str_error.h"
26 
/* Upper bounds libbpf enforces on number of types and string offsets */
#define BTF_MAX_NR_TYPES 0x7fffffffU
#define BTF_MAX_STR_OFFSET 0x7fffffffU

/* Singleton representing the implicit VOID type with ID 0 */
static struct btf_type btf_void;
31 
struct btf {
	/* raw BTF data in native endianness */
	void *raw_data;
	/* raw BTF data in non-native endianness */
	void *raw_data_swapped;
	/* total size of raw_data (and raw_data_swapped, if present), in bytes */
	__u32 raw_size;
	/* whether target endianness differs from the native one */
	bool swapped_endian;

	/*
	 * When BTF is loaded from an ELF or raw memory it is stored
	 * in a contiguous memory block. The hdr, types_data, and strs_data
	 * pointers point inside that memory region to their respective parts
	 * of BTF representation:
	 *
	 * +--------------------------------+
	 * |  Header  |  Types  |  Strings  |
	 * +--------------------------------+
	 * ^          ^         ^
	 * |          |         |
	 * hdr        |         |
	 * types_data-+         |
	 * strs_data------------+
	 *
	 * If BTF data is later modified, e.g., due to types added or
	 * removed, BTF deduplication performed, etc, this contiguous
	 * representation is broken up into three independently allocated
	 * memory regions to be able to modify them independently.
	 * raw_data is nulled out at that point, but can be later allocated
	 * and cached again if user calls btf__raw_data(), at which point
	 * raw_data will contain a contiguous copy of header, types, and
	 * strings:
	 *
	 * +----------+  +---------+  +-----------+
	 * |  Header  |  |  Types  |  |  Strings  |
	 * +----------+  +---------+  +-----------+
	 * ^             ^            ^
	 * |             |            |
	 * hdr           |            |
	 * types_data----+            |
	 * strset__data(strs_set)-----+
	 *
	 *               +----------+---------+-----------+
	 *               |  Header  |  Types  |  Strings  |
	 * raw_data----->+----------+---------+-----------+
	 */
	struct btf_header *hdr;

	void *types_data;
	size_t types_data_cap; /* used size stored in hdr->type_len */

	/* type ID to `struct btf_type *` lookup index
	 * type_offs[0] corresponds to the first non-VOID type:
	 *   - for base BTF it's type [1];
	 *   - for split BTF it's the first non-base BTF type.
	 */
	__u32 *type_offs;
	size_t type_offs_cap;
	/* number of types in this BTF instance:
	 *   - doesn't include special [0] void type;
	 *   - for split BTF counts number of types added on top of base BTF.
	 */
	__u32 nr_types;
	/* if not NULL, points to the base BTF on top of which the current
	 * split BTF is based
	 */
	struct btf *base_btf;
	/* BTF type ID of the first type in this BTF instance:
	 *   - for base BTF it's equal to 1;
	 *   - for split BTF it's equal to biggest type ID of base BTF plus 1.
	 */
	int start_id;
	/* logical string offset of this BTF instance:
	 *   - for base BTF it's equal to 0;
	 *   - for split BTF it's equal to total size of base BTF's string section size.
	 */
	int start_str_off;

	/* only one of strs_data or strs_set can be non-NULL, depending on
	 * whether BTF is in a modifiable state (strs_set is used) or not
	 * (strs_data points inside raw_data)
	 */
	void *strs_data;
	/* a set of unique strings */
	struct strset *strs_set;
	/* whether strings are already deduplicated */
	bool strs_deduped;

	/* whether base_btf should be freed in btf_free for this instance */
	bool owns_base;

	/* BTF object FD, if loaded into kernel */
	int fd;

	/* Pointer size (in bytes) for a target architecture of this BTF */
	int ptr_sz;
};
129 
/* Convert a pointer into a 64-bit integer, as expected by kernel UAPI */
static inline __u64 ptr_to_u64(const void *ptr)
{
	unsigned long addr = (unsigned long)ptr;

	return (__u64)addr;
}
134 
/* Grow dynamically allocated memory region *data (capacity *cap_cnt
 * elements of elem_sz bytes each) so that add_cnt more elements fit after
 * the cur_cnt already-used ones. At most max_cnt elements may ever be
 * allocated. On (re)allocation, existing data is preserved, newly added
 * capacity is zeroed, and *data/*cap_cnt are updated.
 * Returns pointer to the first unused element, or NULL on failure.
 */
void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
		     size_t cur_cnt, size_t max_cnt, size_t add_cnt)
{
	size_t want_cnt = cur_cnt + add_cnt;
	size_t grown_cnt;
	void *mem;

	/* fast path: current capacity already suffices */
	if (want_cnt <= *cap_cnt)
		return *data + cur_cnt * elem_sz;

	/* requested more than the set limit */
	if (want_cnt > max_cnt)
		return NULL;

	/* grow by 25%, but at least to 16 elements, capped at max_cnt,
	 * and never below what's actually needed
	 */
	grown_cnt = *cap_cnt + *cap_cnt / 4;
	if (grown_cnt < 16)
		grown_cnt = 16;
	if (grown_cnt > max_cnt)
		grown_cnt = max_cnt;
	if (grown_cnt < want_cnt)
		grown_cnt = want_cnt;

	mem = libbpf_reallocarray(*data, grown_cnt, elem_sz);
	if (!mem)
		return NULL;

	/* zero out newly allocated portion of memory */
	memset(mem + *cap_cnt * elem_sz, 0, (grown_cnt - *cap_cnt) * elem_sz);

	*data = mem;
	*cap_cnt = grown_cnt;
	return mem + cur_cnt * elem_sz;
}
178 
179 /* Ensure given dynamically allocated memory region has enough allocated space
180  * to accommodate *need_cnt* elements of size *elem_sz* bytes each
181  */
182 int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
183 {
184 	void *p;
185 
186 	if (need_cnt <= *cap_cnt)
187 		return 0;
188 
189 	p = libbpf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt);
190 	if (!p)
191 		return -ENOMEM;
192 
193 	return 0;
194 }
195 
196 static void *btf_add_type_offs_mem(struct btf *btf, size_t add_cnt)
197 {
198 	return libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
199 			      btf->nr_types, BTF_MAX_NR_TYPES, add_cnt);
200 }
201 
202 static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
203 {
204 	__u32 *p;
205 
206 	p = btf_add_type_offs_mem(btf, 1);
207 	if (!p)
208 		return -ENOMEM;
209 
210 	*p = type_off;
211 	return 0;
212 }
213 
/* Byte-swap all multi-byte fields of BTF header in place; single-byte
 * fields (version, flags) need no swapping and are left untouched.
 */
static void btf_bswap_hdr(struct btf_header *h)
{
	h->magic = bswap_16(h->magic);
	h->hdr_len = bswap_32(h->hdr_len);
	h->type_off = bswap_32(h->type_off);
	h->type_len = bswap_32(h->type_len);
	h->str_off = bswap_32(h->str_off);
	h->str_len = bswap_32(h->str_len);
}
223 
/* Validate BTF header and, if data is in non-native endianness, byte-swap
 * the header in place. Returns 0 on success, -EINVAL on malformed data,
 * -ENOTSUP for swapped-endian data with unexpected header length.
 */
static int btf_parse_hdr(struct btf *btf)
{
	struct btf_header *hdr = btf->hdr;
	__u32 meta_left;

	if (btf->raw_size < sizeof(struct btf_header)) {
		pr_debug("BTF header not found\n");
		return -EINVAL;
	}

	if (hdr->magic == bswap_16(BTF_MAGIC)) {
		/* data is non-native endian; swapping the header blindly is
		 * only safe when its length matches the layout we know
		 */
		btf->swapped_endian = true;
		if (bswap_32(hdr->hdr_len) != sizeof(struct btf_header)) {
			pr_warn("Can't load BTF with non-native endianness due to unsupported header length %u\n",
				bswap_32(hdr->hdr_len));
			return -ENOTSUP;
		}
		btf_bswap_hdr(hdr);
	} else if (hdr->magic != BTF_MAGIC) {
		pr_debug("Invalid BTF magic: %x\n", hdr->magic);
		return -EINVAL;
	}

	if (btf->raw_size < hdr->hdr_len) {
		pr_debug("BTF header len %u larger than data size %u\n",
			 hdr->hdr_len, btf->raw_size);
		return -EINVAL;
	}

	/* everything past the header must fit the string section;
	 * 64-bit arithmetic guards against off + len overflowing u32
	 */
	meta_left = btf->raw_size - hdr->hdr_len;
	if (meta_left < (long long)hdr->str_off + hdr->str_len) {
		pr_debug("Invalid BTF total size: %u\n", btf->raw_size);
		return -EINVAL;
	}

	/* type section must precede and not overlap the string section */
	if ((long long)hdr->type_off + hdr->type_len > hdr->str_off) {
		pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n",
			 hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len);
		return -EINVAL;
	}

	if (hdr->type_off % 4) {
		pr_debug("BTF type section is not aligned to 4 bytes\n");
		return -EINVAL;
	}

	return 0;
}
272 
/* Sanity-check BTF string section: it must be NUL-terminated and, for base
 * BTF, start with the mandatory empty string at offset 0. Split BTF is
 * allowed to have an empty string section of its own.
 */
static int btf_parse_str_sec(struct btf *btf)
{
	const struct btf_header *hdr = btf->hdr;
	const char *start = btf->strs_data;
	const char *end = start + btf->hdr->str_len;

	/* split BTF doesn't have to add any strings on top of base BTF's */
	if (btf->base_btf && hdr->str_len == 0)
		return 0;
	/* last byte must be NUL so every string lookup is bounded */
	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || end[-1]) {
		pr_debug("Invalid BTF string section\n");
		return -EINVAL;
	}
	/* base BTF's string section starts with the empty string */
	if (!btf->base_btf && start[0]) {
		pr_debug("Malformed BTF string section, did you forget to provide base BTF?\n");
		return -EINVAL;
	}
	return 0;
}
291 
292 static int btf_type_size(const struct btf_type *t)
293 {
294 	const int base_size = sizeof(struct btf_type);
295 	__u16 vlen = btf_vlen(t);
296 
297 	switch (btf_kind(t)) {
298 	case BTF_KIND_FWD:
299 	case BTF_KIND_CONST:
300 	case BTF_KIND_VOLATILE:
301 	case BTF_KIND_RESTRICT:
302 	case BTF_KIND_PTR:
303 	case BTF_KIND_TYPEDEF:
304 	case BTF_KIND_FUNC:
305 	case BTF_KIND_FLOAT:
306 	case BTF_KIND_TYPE_TAG:
307 		return base_size;
308 	case BTF_KIND_INT:
309 		return base_size + sizeof(__u32);
310 	case BTF_KIND_ENUM:
311 		return base_size + vlen * sizeof(struct btf_enum);
312 	case BTF_KIND_ENUM64:
313 		return base_size + vlen * sizeof(struct btf_enum64);
314 	case BTF_KIND_ARRAY:
315 		return base_size + sizeof(struct btf_array);
316 	case BTF_KIND_STRUCT:
317 	case BTF_KIND_UNION:
318 		return base_size + vlen * sizeof(struct btf_member);
319 	case BTF_KIND_FUNC_PROTO:
320 		return base_size + vlen * sizeof(struct btf_param);
321 	case BTF_KIND_VAR:
322 		return base_size + sizeof(struct btf_var);
323 	case BTF_KIND_DATASEC:
324 		return base_size + vlen * sizeof(struct btf_var_secinfo);
325 	case BTF_KIND_DECL_TAG:
326 		return base_size + sizeof(struct btf_decl_tag);
327 	default:
328 		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
329 		return -EINVAL;
330 	}
331 }
332 
/* Byte-swap the fixed-size prefix common to all BTF type records. Note
 * that t->type shares a union with t->size, so swapping t->type covers
 * both interpretations.
 */
static void btf_bswap_type_base(struct btf_type *t)
{
	t->name_off = bswap_32(t->name_off);
	t->info = bswap_32(t->info);
	t->type = bswap_32(t->type);
}
339 
/* Byte-swap kind-specific data that follows the common btf_type prefix
 * (the prefix itself is swapped by btf_bswap_type_base()).
 * Returns 0 on success, -EINVAL for unrecognized kinds.
 */
static int btf_bswap_type_rest(struct btf_type *t)
{
	struct btf_var_secinfo *v;
	struct btf_enum64 *e64;
	struct btf_member *m;
	struct btf_array *a;
	struct btf_param *p;
	struct btf_enum *e;
	__u16 vlen = btf_vlen(t);
	int i;

	switch (btf_kind(t)) {
	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_FLOAT:
	case BTF_KIND_TYPE_TAG:
		/* no kind-specific trailing data */
		return 0;
	case BTF_KIND_INT:
		/* single __u32 immediately following the type record */
		*(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1));
		return 0;
	case BTF_KIND_ENUM:
		for (i = 0, e = btf_enum(t); i < vlen; i++, e++) {
			e->name_off = bswap_32(e->name_off);
			e->val = bswap_32(e->val);
		}
		return 0;
	case BTF_KIND_ENUM64:
		for (i = 0, e64 = btf_enum64(t); i < vlen; i++, e64++) {
			e64->name_off = bswap_32(e64->name_off);
			e64->val_lo32 = bswap_32(e64->val_lo32);
			e64->val_hi32 = bswap_32(e64->val_hi32);
		}
		return 0;
	case BTF_KIND_ARRAY:
		a = btf_array(t);
		a->type = bswap_32(a->type);
		a->index_type = bswap_32(a->index_type);
		a->nelems = bswap_32(a->nelems);
		return 0;
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		for (i = 0, m = btf_members(t); i < vlen; i++, m++) {
			m->name_off = bswap_32(m->name_off);
			m->type = bswap_32(m->type);
			m->offset = bswap_32(m->offset);
		}
		return 0;
	case BTF_KIND_FUNC_PROTO:
		for (i = 0, p = btf_params(t); i < vlen; i++, p++) {
			p->name_off = bswap_32(p->name_off);
			p->type = bswap_32(p->type);
		}
		return 0;
	case BTF_KIND_VAR:
		btf_var(t)->linkage = bswap_32(btf_var(t)->linkage);
		return 0;
	case BTF_KIND_DATASEC:
		for (i = 0, v = btf_var_secinfos(t); i < vlen; i++, v++) {
			v->type = bswap_32(v->type);
			v->offset = bswap_32(v->offset);
			v->size = bswap_32(v->size);
		}
		return 0;
	case BTF_KIND_DECL_TAG:
		btf_decl_tag(t)->component_idx = bswap_32(btf_decl_tag(t)->component_idx);
		return 0;
	default:
		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
		return -EINVAL;
	}
}
416 
/* Walk the entire type section, byte-swapping records if needed and
 * recording each type's offset into the type_offs index for later
 * ID -> record lookups. Returns 0 on success, negative error on
 * malformed data.
 */
static int btf_parse_type_sec(struct btf *btf)
{
	struct btf_header *hdr = btf->hdr;
	void *next_type = btf->types_data;
	void *end_type = next_type + hdr->type_len;
	int err, type_size;

	while (next_type + sizeof(struct btf_type) <= end_type) {
		/* the common prefix must be swapped first, as the record's
		 * total size depends on its (swapped) kind and vlen
		 */
		if (btf->swapped_endian)
			btf_bswap_type_base(next_type);

		type_size = btf_type_size(next_type);
		if (type_size < 0)
			return type_size;
		/* kind-specific trailing data must fit within the section */
		if (next_type + type_size > end_type) {
			pr_warn("BTF type [%d] is malformed\n", btf->start_id + btf->nr_types);
			return -EINVAL;
		}

		if (btf->swapped_endian && btf_bswap_type_rest(next_type))
			return -EINVAL;

		err = btf_add_type_idx_entry(btf, next_type - btf->types_data);
		if (err)
			return err;

		next_type += type_size;
		btf->nr_types++;
	}

	/* leftover bytes that don't form a complete type record */
	if (next_type != end_type) {
		pr_warn("BTF types data is malformed\n");
		return -EINVAL;
	}

	return 0;
}
454 
455 static int btf_validate_str(const struct btf *btf, __u32 str_off, const char *what, __u32 type_id)
456 {
457 	const char *s;
458 
459 	s = btf__str_by_offset(btf, str_off);
460 	if (!s) {
461 		pr_warn("btf: type [%u]: invalid %s (string offset %u)\n", type_id, what, str_off);
462 		return -EINVAL;
463 	}
464 
465 	return 0;
466 }
467 
468 static int btf_validate_id(const struct btf *btf, __u32 id, __u32 ctx_id)
469 {
470 	const struct btf_type *t;
471 
472 	t = btf__type_by_id(btf, id);
473 	if (!t) {
474 		pr_warn("btf: type [%u]: invalid referenced type ID %u\n", ctx_id, id);
475 		return -EINVAL;
476 	}
477 
478 	return 0;
479 }
480 
/* Validate a single BTF type: its name and all string offsets and type IDs
 * it references must be resolvable. Returns 0 if valid, -EINVAL otherwise.
 */
static int btf_validate_type(const struct btf *btf, const struct btf_type *t, __u32 id)
{
	__u32 kind = btf_kind(t);
	int err, i, n;

	err = btf_validate_str(btf, t->name_off, "type name", id);
	if (err)
		return err;

	switch (kind) {
	case BTF_KIND_UNKN:
	case BTF_KIND_INT:
	case BTF_KIND_FWD:
	case BTF_KIND_FLOAT:
		/* these kinds reference no other types */
		break;
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_VAR:
	case BTF_KIND_DECL_TAG:
	case BTF_KIND_TYPE_TAG:
		/* single referenced type in t->type */
		err = btf_validate_id(btf, t->type, id);
		if (err)
			return err;
		break;
	case BTF_KIND_ARRAY: {
		const struct btf_array *a = btf_array(t);

		err = btf_validate_id(btf, a->type, id);
		err = err ?: btf_validate_id(btf, a->index_type, id);
		if (err)
			return err;
		break;
	}
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *m = btf_members(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "field name", id);
			err = err ?: btf_validate_id(btf, m->type, id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_ENUM: {
		const struct btf_enum *m = btf_enum(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "enum name", id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_ENUM64: {
		const struct btf_enum64 *m = btf_enum64(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "enum name", id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_FUNC: {
		const struct btf_type *ft;

		err = btf_validate_id(btf, t->type, id);
		if (err)
			return err;
		/* FUNC must point to a FUNC_PROTO describing its signature */
		ft = btf__type_by_id(btf, t->type);
		if (btf_kind(ft) != BTF_KIND_FUNC_PROTO) {
			pr_warn("btf: type [%u]: referenced type [%u] is not FUNC_PROTO\n", id, t->type);
			return -EINVAL;
		}
		break;
	}
	case BTF_KIND_FUNC_PROTO: {
		const struct btf_param *m = btf_params(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "param name", id);
			err = err ?: btf_validate_id(btf, m->type, id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_DATASEC: {
		const struct btf_var_secinfo *m = btf_var_secinfos(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_id(btf, m->type, id);
			if (err)
				return err;
		}
		break;
	}
	default:
		pr_warn("btf: type [%u]: unrecognized kind %u\n", id, kind);
		return -EINVAL;
	}
	return 0;
}
594 
595 /* Validate basic sanity of BTF. It's intentionally less thorough than
596  * kernel's validation and validates only properties of BTF that libbpf relies
597  * on to be correct (e.g., valid type IDs, valid string offsets, etc)
598  */
599 static int btf_sanity_check(const struct btf *btf)
600 {
601 	const struct btf_type *t;
602 	__u32 i, n = btf__type_cnt(btf);
603 	int err;
604 
605 	for (i = btf->start_id; i < n; i++) {
606 		t = btf_type_by_id(btf, i);
607 		err = btf_validate_type(btf, t, i);
608 		if (err)
609 			return err;
610 	}
611 	return 0;
612 }
613 
/* Return number of type IDs covered by this BTF instance, i.e., one past
 * the largest valid type ID (includes base BTF types for split BTF).
 */
__u32 btf__type_cnt(const struct btf *btf)
{
	return btf->start_id + btf->nr_types;
}
618 
/* Return base BTF this split BTF is built on top of, or NULL if none */
const struct btf *btf__base_btf(const struct btf *btf)
{
	return btf->base_btf;
}
623 
624 /* internal helper returning non-const pointer to a type */
625 struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id)
626 {
627 	if (type_id == 0)
628 		return &btf_void;
629 	if (type_id < btf->start_id)
630 		return btf_type_by_id(btf->base_btf, type_id);
631 	return btf->types_data + btf->type_offs[type_id - btf->start_id];
632 }
633 
634 const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
635 {
636 	if (type_id >= btf->start_id + btf->nr_types)
637 		return errno = EINVAL, NULL;
638 	return btf_type_by_id((struct btf *)btf, type_id);
639 }
640 
641 static int determine_ptr_size(const struct btf *btf)
642 {
643 	static const char * const long_aliases[] = {
644 		"long",
645 		"long int",
646 		"int long",
647 		"unsigned long",
648 		"long unsigned",
649 		"unsigned long int",
650 		"unsigned int long",
651 		"long unsigned int",
652 		"long int unsigned",
653 		"int unsigned long",
654 		"int long unsigned",
655 	};
656 	const struct btf_type *t;
657 	const char *name;
658 	int i, j, n;
659 
660 	if (btf->base_btf && btf->base_btf->ptr_sz > 0)
661 		return btf->base_btf->ptr_sz;
662 
663 	n = btf__type_cnt(btf);
664 	for (i = 1; i < n; i++) {
665 		t = btf__type_by_id(btf, i);
666 		if (!btf_is_int(t))
667 			continue;
668 
669 		if (t->size != 4 && t->size != 8)
670 			continue;
671 
672 		name = btf__name_by_offset(btf, t->name_off);
673 		if (!name)
674 			continue;
675 
676 		for (j = 0; j < ARRAY_SIZE(long_aliases); j++) {
677 			if (strcmp(name, long_aliases[j]) == 0)
678 				return t->size;
679 		}
680 	}
681 
682 	return -1;
683 }
684 
685 static size_t btf_ptr_sz(const struct btf *btf)
686 {
687 	if (!btf->ptr_sz)
688 		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
689 	return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
690 }
691 
692 /* Return pointer size this BTF instance assumes. The size is heuristically
693  * determined by looking for 'long' or 'unsigned long' integer type and
694  * recording its size in bytes. If BTF type information doesn't have any such
695  * type, this function returns 0. In the latter case, native architecture's
696  * pointer size is assumed, so will be either 4 or 8, depending on
697  * architecture that libbpf was compiled for. It's possible to override
698  * guessed value by using btf__set_pointer_size() API.
699  */
700 size_t btf__pointer_size(const struct btf *btf)
701 {
702 	if (!btf->ptr_sz)
703 		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
704 
705 	if (btf->ptr_sz < 0)
706 		/* not enough BTF type info to guess */
707 		return 0;
708 
709 	return btf->ptr_sz;
710 }
711 
712 /* Override or set pointer size in bytes. Only values of 4 and 8 are
713  * supported.
714  */
715 int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
716 {
717 	if (ptr_sz != 4 && ptr_sz != 8)
718 		return libbpf_err(-EINVAL);
719 	btf->ptr_sz = ptr_sz;
720 	return 0;
721 }
722 
/* Compile-time detection of host byte order */
static bool is_host_big_endian(void)
{
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return true;
#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return false;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
}
733 
734 enum btf_endianness btf__endianness(const struct btf *btf)
735 {
736 	if (is_host_big_endian())
737 		return btf->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
738 	else
739 		return btf->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
740 }
741 
742 int btf__set_endianness(struct btf *btf, enum btf_endianness endian)
743 {
744 	if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
745 		return libbpf_err(-EINVAL);
746 
747 	btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
748 	if (!btf->swapped_endian) {
749 		free(btf->raw_data_swapped);
750 		btf->raw_data_swapped = NULL;
751 	}
752 	return 0;
753 }
754 
755 static bool btf_type_is_void(const struct btf_type *t)
756 {
757 	return t == &btf_void || btf_is_fwd(t);
758 }
759 
/* Like btf_type_is_void(), but also tolerates a NULL type pointer */
static bool btf_type_is_void_or_null(const struct btf_type *t)
{
	if (!t)
		return true;

	return btf_type_is_void(t);
}
764 
/* cap on modifier/typedef/array chain length when resolving types */
#define MAX_RESOLVE_DEPTH 32
766 
/* Resolve byte size of a type: follow modifiers/typedefs/vars to the
 * underlying sized type and multiply out array dimensions. Returns the
 * size in bytes, or negative error for unsized, invalid, or too deeply
 * nested types, or when the total size would overflow 32 bits.
 */
__s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
{
	const struct btf_array *array;
	const struct btf_type *t;
	__u32 nelems = 1;
	__s64 size = -1;
	int i;

	t = btf__type_by_id(btf, type_id);
	for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) {
		switch (btf_kind(t)) {
		case BTF_KIND_INT:
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
		case BTF_KIND_DATASEC:
		case BTF_KIND_FLOAT:
			/* concretely sized types terminate the walk */
			size = t->size;
			goto done;
		case BTF_KIND_PTR:
			size = btf_ptr_sz(btf);
			goto done;
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_VAR:
		case BTF_KIND_DECL_TAG:
		case BTF_KIND_TYPE_TAG:
			/* transparent wrappers: follow to underlying type */
			type_id = t->type;
			break;
		case BTF_KIND_ARRAY:
			array = btf_array(t);
			/* guard total element count against u32 overflow */
			if (nelems && array->nelems > UINT32_MAX / nelems)
				return libbpf_err(-E2BIG);
			nelems *= array->nelems;
			type_id = array->type;
			break;
		default:
			return libbpf_err(-EINVAL);
		}

		t = btf__type_by_id(btf, type_id);
	}

done:
	if (size < 0)
		return libbpf_err(-EINVAL);
	/* guard nelems * size against u32 overflow */
	if (nelems && size > UINT32_MAX / nelems)
		return libbpf_err(-E2BIG);

	return nelems * size;
}
821 
/* Calculate natural alignment (in bytes) of a BTF type. Packed
 * structs/unions report alignment 1. Returns a negative error for invalid
 * members, or 0 with errno set to EINVAL for unsupported kinds.
 */
int btf__align_of(const struct btf *btf, __u32 id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);
	__u16 kind = btf_kind(t);

	switch (kind) {
	case BTF_KIND_INT:
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
	case BTF_KIND_FLOAT:
		/* scalar alignment is capped at target pointer size */
		return min(btf_ptr_sz(btf), (size_t)t->size);
	case BTF_KIND_PTR:
		return btf_ptr_sz(btf);
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPE_TAG:
		/* alignment of the wrapped type */
		return btf__align_of(btf, t->type);
	case BTF_KIND_ARRAY:
		return btf__align_of(btf, btf_array(t)->type);
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *m = btf_members(t);
		__u16 vlen = btf_vlen(t);
		int i, max_align = 1, align;

		for (i = 0; i < vlen; i++, m++) {
			align = btf__align_of(btf, m->type);
			if (align <= 0)
				return libbpf_err(align);
			max_align = max(max_align, align);

			/* if field offset isn't aligned according to field
			 * type's alignment, then struct must be packed
			 */
			if (btf_member_bitfield_size(t, i) == 0 &&
			    (m->offset % (8 * align)) != 0)
				return 1;
		}

		/* if struct/union size isn't a multiple of its alignment,
		 * then struct must be packed
		 */
		if ((t->size % max_align) != 0)
			return 1;

		return max_align;
	}
	default:
		pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
		return errno = EINVAL, 0;
	}
}
876 
877 int btf__resolve_type(const struct btf *btf, __u32 type_id)
878 {
879 	const struct btf_type *t;
880 	int depth = 0;
881 
882 	t = btf__type_by_id(btf, type_id);
883 	while (depth < MAX_RESOLVE_DEPTH &&
884 	       !btf_type_is_void_or_null(t) &&
885 	       (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
886 		type_id = t->type;
887 		t = btf__type_by_id(btf, type_id);
888 		depth++;
889 	}
890 
891 	if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
892 		return libbpf_err(-EINVAL);
893 
894 	return type_id;
895 }
896 
897 __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
898 {
899 	__u32 i, nr_types = btf__type_cnt(btf);
900 
901 	if (!strcmp(type_name, "void"))
902 		return 0;
903 
904 	for (i = 1; i < nr_types; i++) {
905 		const struct btf_type *t = btf__type_by_id(btf, i);
906 		const char *name = btf__name_by_offset(btf, t->name_off);
907 
908 		if (name && !strcmp(type_name, name))
909 			return i;
910 	}
911 
912 	return libbpf_err(-ENOENT);
913 }
914 
915 static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
916 				   const char *type_name, __u32 kind)
917 {
918 	__u32 i, nr_types = btf__type_cnt(btf);
919 
920 	if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
921 		return 0;
922 
923 	for (i = start_id; i < nr_types; i++) {
924 		const struct btf_type *t = btf__type_by_id(btf, i);
925 		const char *name;
926 
927 		if (btf_kind(t) != kind)
928 			continue;
929 		name = btf__name_by_offset(btf, t->name_off);
930 		if (name && !strcmp(type_name, name))
931 			return i;
932 	}
933 
934 	return libbpf_err(-ENOENT);
935 }
936 
/* Find type by name and kind among types owned by this (split) BTF
 * instance only, skipping base BTF types.
 */
__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
				 __u32 kind)
{
	return btf_find_by_name_kind(btf, btf->start_id, type_name, kind);
}
942 
/* Find type by name and kind, searching base BTF types (if any) as well */
__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
			     __u32 kind)
{
	return btf_find_by_name_kind(btf, 1, type_name, kind);
}
948 
949 static bool btf_is_modifiable(const struct btf *btf)
950 {
951 	return (void *)btf->hdr != btf->raw_data;
952 }
953 
/* Free BTF instance and all memory it owns; closes the kernel BTF FD, if
 * loaded, and frees base BTF if this instance owns it. Accepts NULL and
 * ERR_PTR values as no-ops.
 */
void btf__free(struct btf *btf)
{
	if (IS_ERR_OR_NULL(btf))
		return;

	if (btf->fd >= 0)
		close(btf->fd);

	if (btf_is_modifiable(btf)) {
		/* if BTF was modified after loading, it will have a split
		 * in-memory representation for header, types, and strings
		 * sections, so we need to free all of them individually. It
		 * might still have a cached contiguous raw data present,
		 * which will be unconditionally freed below.
		 */
		free(btf->hdr);
		free(btf->types_data);
		strset__free(btf->strs_set);
	}
	free(btf->raw_data);
	free(btf->raw_data_swapped);
	free(btf->type_offs);
	if (btf->owns_base)
		btf__free(btf->base_btf);
	free(btf);
}
980 
/* Create a new empty BTF instance. If base_btf is provided, the new
 * instance is split BTF whose type IDs and string offsets continue where
 * base BTF's end. Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct btf *btf_new_empty(struct btf *base_btf)
{
	struct btf *btf;

	btf = calloc(1, sizeof(*btf));
	if (!btf)
		return ERR_PTR(-ENOMEM);

	btf->nr_types = 0;
	btf->start_id = 1;
	btf->start_str_off = 0;
	btf->fd = -1;
	btf->ptr_sz = sizeof(void *);
	btf->swapped_endian = false;

	if (base_btf) {
		btf->base_btf = base_btf;
		btf->start_id = btf__type_cnt(base_btf);
		btf->start_str_off = base_btf->hdr->str_len;
		btf->swapped_endian = base_btf->swapped_endian;
	}

	/* +1 for empty string at offset 0 */
	btf->raw_size = sizeof(struct btf_header) + (base_btf ? 0 : 1);
	btf->raw_data = calloc(1, btf->raw_size);
	if (!btf->raw_data) {
		free(btf);
		return ERR_PTR(-ENOMEM);
	}

	btf->hdr = btf->raw_data;
	btf->hdr->hdr_len = sizeof(struct btf_header);
	btf->hdr->magic = BTF_MAGIC;
	btf->hdr->version = BTF_VERSION;

	/* type section is empty, so types_data and strs_data both point
	 * right past the header
	 */
	btf->types_data = btf->raw_data + btf->hdr->hdr_len;
	btf->strs_data = btf->raw_data + btf->hdr->hdr_len;
	btf->hdr->str_len = base_btf ? 0 : 1; /* empty string at offset 0 */

	return btf;
}
1022 
/* Create a new empty standalone BTF instance */
struct btf *btf__new_empty(void)
{
	return libbpf_ptr(btf_new_empty(NULL));
}
1027 
/* Create a new empty split BTF instance on top of base_btf */
struct btf *btf__new_empty_split(struct btf *base_btf)
{
	return libbpf_ptr(btf_new_empty(base_btf));
}
1032 
/* Parse raw BTF data of *size* bytes into a new BTF instance, optionally
 * as split BTF on top of base_btf. The input buffer is copied, so the
 * caller retains ownership. Returns ERR_PTR on failure.
 */
static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
{
	struct btf *btf;
	int err;

	btf = calloc(1, sizeof(struct btf));
	if (!btf)
		return ERR_PTR(-ENOMEM);

	btf->nr_types = 0;
	btf->start_id = 1;
	btf->start_str_off = 0;
	btf->fd = -1;

	if (base_btf) {
		btf->base_btf = base_btf;
		btf->start_id = btf__type_cnt(base_btf);
		btf->start_str_off = base_btf->hdr->str_len;
	}

	btf->raw_data = malloc(size);
	if (!btf->raw_data) {
		err = -ENOMEM;
		goto done;
	}
	memcpy(btf->raw_data, data, size);
	btf->raw_size = size;

	btf->hdr = btf->raw_data;
	err = btf_parse_hdr(btf);
	if (err)
		goto done;

	/* section pointers are meaningful only after the header validated */
	btf->strs_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->str_off;
	btf->types_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->type_off;

	err = btf_parse_str_sec(btf);
	err = err ?: btf_parse_type_sec(btf);
	err = err ?: btf_sanity_check(btf);
	if (err)
		goto done;

done:
	if (err) {
		btf__free(btf);
		return ERR_PTR(err);
	}

	return btf;
}
1083 
/* Parse raw BTF data into a standalone BTF instance; data is copied */
struct btf *btf__new(const void *data, __u32 size)
{
	return libbpf_ptr(btf_new(data, size, NULL));
}
1088 
/* Parse raw BTF data into a split BTF instance on top of base_btf */
struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf)
{
	return libbpf_ptr(btf_new(data, size, base_btf));
}
1093 
/* BTF-related ELF sections collected by btf_find_elf_sections() */
struct btf_elf_secs {
	Elf_Data *btf_data;	 /* section named BTF_ELF_SEC */
	Elf_Data *btf_ext_data;	 /* section named BTF_EXT_ELF_SEC */
	Elf_Data *btf_base_data; /* section named BTF_BASE_ELF_SEC */
};
1099 
/* Scan all sections of an already-opened ELF file and record data pointers
 * of BTF-related sections in *secs*. Sections that are absent simply stay
 * NULL. Returns 0 on success, -LIBBPF_ERRNO__FORMAT on malformed ELF.
 */
static int btf_find_elf_sections(Elf *elf, const char *path, struct btf_elf_secs *secs)
{
	Elf_Scn *scn = NULL;
	Elf_Data *data;
	GElf_Ehdr ehdr;
	size_t shstrndx;
	int idx = 0;

	if (!gelf_getehdr(elf, &ehdr)) {
		pr_warn("failed to get EHDR from %s\n", path);
		goto err;
	}

	if (elf_getshdrstrndx(elf, &shstrndx)) {
		pr_warn("failed to get section names section index for %s\n",
			path);
		goto err;
	}

	/* section-header string table is needed to resolve section names */
	if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
		pr_warn("failed to get e_shstrndx from %s\n", path);
		goto err;
	}

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		Elf_Data **field;
		GElf_Shdr sh;
		char *name;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warn("failed to get section(%d) header from %s\n",
				idx, path);
			goto err;
		}
		name = elf_strptr(elf, shstrndx, sh.sh_name);
		if (!name) {
			pr_warn("failed to get section(%d) name from %s\n",
				idx, path);
			goto err;
		}

		/* pick the destination slot based on section name */
		if (strcmp(name, BTF_ELF_SEC) == 0)
			field = &secs->btf_data;
		else if (strcmp(name, BTF_EXT_ELF_SEC) == 0)
			field = &secs->btf_ext_data;
		else if (strcmp(name, BTF_BASE_ELF_SEC) == 0)
			field = &secs->btf_base_data;
		else
			continue;

		if (sh.sh_type != SHT_PROGBITS) {
			pr_warn("unexpected section type (%d) of section(%d, %s) from %s\n",
				sh.sh_type, idx, name, path);
			goto err;
		}

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warn("failed to get section(%d, %s) data from %s\n",
				idx, name, path);
			goto err;
		}
		*field = data;
	}

	return 0;

err:
	return -LIBBPF_ERRNO__FORMAT;
}
1171 
1172 static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
1173 				 struct btf_ext **btf_ext)
1174 {
1175 	struct btf_elf_secs secs = {};
1176 	struct btf *dist_base_btf = NULL;
1177 	struct btf *btf = NULL;
1178 	int err = 0, fd = -1;
1179 	Elf *elf = NULL;
1180 
1181 	if (elf_version(EV_CURRENT) == EV_NONE) {
1182 		pr_warn("failed to init libelf for %s\n", path);
1183 		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
1184 	}
1185 
1186 	fd = open(path, O_RDONLY | O_CLOEXEC);
1187 	if (fd < 0) {
1188 		err = -errno;
1189 		pr_warn("failed to open %s: %s\n", path, errstr(err));
1190 		return ERR_PTR(err);
1191 	}
1192 
1193 	elf = elf_begin(fd, ELF_C_READ, NULL);
1194 	if (!elf) {
1195 		err = -LIBBPF_ERRNO__FORMAT;
1196 		pr_warn("failed to open %s as ELF file\n", path);
1197 		goto done;
1198 	}
1199 
1200 	err = btf_find_elf_sections(elf, path, &secs);
1201 	if (err)
1202 		goto done;
1203 
1204 	if (!secs.btf_data) {
1205 		pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path);
1206 		err = -ENODATA;
1207 		goto done;
1208 	}
1209 
1210 	if (secs.btf_base_data) {
1211 		dist_base_btf = btf_new(secs.btf_base_data->d_buf, secs.btf_base_data->d_size,
1212 					NULL);
1213 		if (IS_ERR(dist_base_btf)) {
1214 			err = PTR_ERR(dist_base_btf);
1215 			dist_base_btf = NULL;
1216 			goto done;
1217 		}
1218 	}
1219 
1220 	btf = btf_new(secs.btf_data->d_buf, secs.btf_data->d_size,
1221 		      dist_base_btf ?: base_btf);
1222 	if (IS_ERR(btf)) {
1223 		err = PTR_ERR(btf);
1224 		goto done;
1225 	}
1226 	if (dist_base_btf && base_btf) {
1227 		err = btf__relocate(btf, base_btf);
1228 		if (err)
1229 			goto done;
1230 		btf__free(dist_base_btf);
1231 		dist_base_btf = NULL;
1232 	}
1233 
1234 	if (dist_base_btf)
1235 		btf->owns_base = true;
1236 
1237 	switch (gelf_getclass(elf)) {
1238 	case ELFCLASS32:
1239 		btf__set_pointer_size(btf, 4);
1240 		break;
1241 	case ELFCLASS64:
1242 		btf__set_pointer_size(btf, 8);
1243 		break;
1244 	default:
1245 		pr_warn("failed to get ELF class (bitness) for %s\n", path);
1246 		break;
1247 	}
1248 
1249 	if (btf_ext && secs.btf_ext_data) {
1250 		*btf_ext = btf_ext__new(secs.btf_ext_data->d_buf, secs.btf_ext_data->d_size);
1251 		if (IS_ERR(*btf_ext)) {
1252 			err = PTR_ERR(*btf_ext);
1253 			goto done;
1254 		}
1255 	} else if (btf_ext) {
1256 		*btf_ext = NULL;
1257 	}
1258 done:
1259 	if (elf)
1260 		elf_end(elf);
1261 	close(fd);
1262 
1263 	if (!err)
1264 		return btf;
1265 
1266 	if (btf_ext)
1267 		btf_ext__free(*btf_ext);
1268 	btf__free(dist_base_btf);
1269 	btf__free(btf);
1270 
1271 	return ERR_PTR(err);
1272 }
1273 
1274 struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
1275 {
1276 	return libbpf_ptr(btf_parse_elf(path, NULL, btf_ext));
1277 }
1278 
1279 struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf)
1280 {
1281 	return libbpf_ptr(btf_parse_elf(path, base_btf, NULL));
1282 }
1283 
1284 static struct btf *btf_parse_raw(const char *path, struct btf *base_btf)
1285 {
1286 	struct btf *btf = NULL;
1287 	void *data = NULL;
1288 	FILE *f = NULL;
1289 	__u16 magic;
1290 	int err = 0;
1291 	long sz;
1292 
1293 	f = fopen(path, "rbe");
1294 	if (!f) {
1295 		err = -errno;
1296 		goto err_out;
1297 	}
1298 
1299 	/* check BTF magic */
1300 	if (fread(&magic, 1, sizeof(magic), f) < sizeof(magic)) {
1301 		err = -EIO;
1302 		goto err_out;
1303 	}
1304 	if (magic != BTF_MAGIC && magic != bswap_16(BTF_MAGIC)) {
1305 		/* definitely not a raw BTF */
1306 		err = -EPROTO;
1307 		goto err_out;
1308 	}
1309 
1310 	/* get file size */
1311 	if (fseek(f, 0, SEEK_END)) {
1312 		err = -errno;
1313 		goto err_out;
1314 	}
1315 	sz = ftell(f);
1316 	if (sz < 0) {
1317 		err = -errno;
1318 		goto err_out;
1319 	}
1320 	/* rewind to the start */
1321 	if (fseek(f, 0, SEEK_SET)) {
1322 		err = -errno;
1323 		goto err_out;
1324 	}
1325 
1326 	/* pre-alloc memory and read all of BTF data */
1327 	data = malloc(sz);
1328 	if (!data) {
1329 		err = -ENOMEM;
1330 		goto err_out;
1331 	}
1332 	if (fread(data, 1, sz, f) < sz) {
1333 		err = -EIO;
1334 		goto err_out;
1335 	}
1336 
1337 	/* finally parse BTF data */
1338 	btf = btf_new(data, sz, base_btf);
1339 
1340 err_out:
1341 	free(data);
1342 	if (f)
1343 		fclose(f);
1344 	return err ? ERR_PTR(err) : btf;
1345 }
1346 
1347 struct btf *btf__parse_raw(const char *path)
1348 {
1349 	return libbpf_ptr(btf_parse_raw(path, NULL));
1350 }
1351 
/* Parse raw (non-ELF) split BTF file on top of *base_btf*. */
struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf)
{
	struct btf *res = btf_parse_raw(path, base_btf);

	return libbpf_ptr(res);
}
1356 
1357 static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext)
1358 {
1359 	struct btf *btf;
1360 	int err;
1361 
1362 	if (btf_ext)
1363 		*btf_ext = NULL;
1364 
1365 	btf = btf_parse_raw(path, base_btf);
1366 	err = libbpf_get_error(btf);
1367 	if (!err)
1368 		return btf;
1369 	if (err != -EPROTO)
1370 		return ERR_PTR(err);
1371 	return btf_parse_elf(path, base_btf, btf_ext);
1372 }
1373 
1374 struct btf *btf__parse(const char *path, struct btf_ext **btf_ext)
1375 {
1376 	return libbpf_ptr(btf_parse(path, NULL, btf_ext));
1377 }
1378 
1379 struct btf *btf__parse_split(const char *path, struct btf *base_btf)
1380 {
1381 	return libbpf_ptr(btf_parse(path, base_btf, NULL));
1382 }
1383 
1384 static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);
1385 
/* Load *btf* into the kernel via BPF_BTF_LOAD, storing the resulting FD in
 * btf->fd. On failure the load is retried with verifier logging enabled,
 * using either the caller's *log_buf* (of *log_sz* bytes) or a progressively
 * growing internal buffer. Non-zero *token_fd* is passed as a BPF token.
 * Returns 0 on success, negative error (via libbpf_err()) otherwise.
 */
int btf_load_into_kernel(struct btf *btf,
			 char *log_buf, size_t log_sz, __u32 log_level,
			 int token_fd)
{
	LIBBPF_OPTS(bpf_btf_load_opts, opts);
	__u32 buf_sz = 0, raw_size;
	char *buf = NULL, *tmp;
	void *raw_data;
	int err = 0;

	/* refuse double-load; refuse a log size without a buffer */
	if (btf->fd >= 0)
		return libbpf_err(-EEXIST);
	if (log_sz && !log_buf)
		return libbpf_err(-EINVAL);

	/* cache native raw data representation */
	raw_data = btf_get_raw_data(btf, &raw_size, false);
	if (!raw_data) {
		err = -ENOMEM;
		goto done;
	}
	btf->raw_size = raw_size;
	btf->raw_data = raw_data;

retry_load:
	/* if log_level is 0, we won't provide log_buf/log_size to the kernel,
	 * initially. Only if BTF loading fails, we bump log_level to 1 and
	 * retry, using either auto-allocated or custom log_buf. This way
	 * non-NULL custom log_buf provides a buffer just in case, but hopes
	 * for successful load and no need for log_buf.
	 */
	if (log_level) {
		/* if caller didn't provide custom log_buf, we'll keep
		 * allocating our own progressively bigger buffers for BTF
		 * verification log
		 */
		if (!log_buf) {
			/* start at BPF_LOG_BUF_SIZE, double on each retry */
			buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2);
			tmp = realloc(buf, buf_sz);
			if (!tmp) {
				err = -ENOMEM;
				goto done;
			}
			buf = tmp;
			buf[0] = '\0';
		}

		opts.log_buf = log_buf ? log_buf : buf;
		opts.log_size = log_buf ? log_sz : buf_sz;
		opts.log_level = log_level;
	}

	opts.token_fd = token_fd;
	if (token_fd)
		opts.btf_flags |= BPF_F_TOKEN_FD;

	btf->fd = bpf_btf_load(raw_data, raw_size, &opts);
	if (btf->fd < 0) {
		/* time to turn on verbose mode and try again */
		if (log_level == 0) {
			log_level = 1;
			goto retry_load;
		}
		/* only retry if caller didn't provide custom log_buf, but
		 * make sure we can never overflow buf_sz
		 */
		if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2)
			goto retry_load;

		/* capture errno before any other call can clobber it */
		err = -errno;
		pr_warn("BTF loading error: %s\n", errstr(err));
		/* don't print out contents of custom log_buf */
		if (!log_buf && buf[0])
			pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf);
	}

done:
	free(buf);
	return libbpf_err(err);
}
1466 
1467 int btf__load_into_kernel(struct btf *btf)
1468 {
1469 	return btf_load_into_kernel(btf, NULL, 0, 0, 0);
1470 }
1471 
1472 int btf__fd(const struct btf *btf)
1473 {
1474 	return btf->fd;
1475 }
1476 
1477 void btf__set_fd(struct btf *btf, int fd)
1478 {
1479 	btf->fd = fd;
1480 }
1481 
1482 static const void *btf_strs_data(const struct btf *btf)
1483 {
1484 	return btf->strs_data ? btf->strs_data : strset__data(btf->strs_set);
1485 }
1486 
/* Produce a contiguous raw BTF image (header + types + strings) in either
 * native (*swap_endian* == false) or opposite (*swap_endian* == true)
 * endianness. Returns the cached image if one already exists; otherwise
 * allocates a new one (caching of that allocation is done by callers such
 * as btf__raw_data()). Returns NULL on allocation or byte-swap failure.
 */
static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian)
{
	struct btf_header *hdr = btf->hdr;
	struct btf_type *t;
	void *data, *p;
	__u32 data_sz;
	int i;

	/* reuse previously cached image of the requested endianness, if any */
	data = swap_endian ? btf->raw_data_swapped : btf->raw_data;
	if (data) {
		*size = btf->raw_size;
		return data;
	}

	data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len;
	data = calloc(1, data_sz);
	if (!data)
		return NULL;
	p = data;

	/* header section */
	memcpy(p, hdr, hdr->hdr_len);
	if (swap_endian)
		btf_bswap_hdr(p);
	p += hdr->hdr_len;

	/* types section */
	memcpy(p, btf->types_data, hdr->type_len);
	if (swap_endian) {
		for (i = 0; i < btf->nr_types; i++) {
			t = p + btf->type_offs[i];
			/* btf_bswap_type_rest() relies on native t->info, so
			 * we swap base type info after we swapped all the
			 * additional information
			 */
			if (btf_bswap_type_rest(t))
				goto err_out;
			btf_bswap_type_base(t);
		}
	}
	p += hdr->type_len;

	/* strings section needs no byte-swapping */
	memcpy(p, btf_strs_data(btf), hdr->str_len);
	p += hdr->str_len;

	*size = data_sz;
	return data;
err_out:
	free(data);
	return NULL;
}
1536 
1537 const void *btf__raw_data(const struct btf *btf_ro, __u32 *size)
1538 {
1539 	struct btf *btf = (struct btf *)btf_ro;
1540 	__u32 data_sz;
1541 	void *data;
1542 
1543 	data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian);
1544 	if (!data)
1545 		return errno = ENOMEM, NULL;
1546 
1547 	btf->raw_size = data_sz;
1548 	if (btf->swapped_endian)
1549 		btf->raw_data_swapped = data;
1550 	else
1551 		btf->raw_data = data;
1552 	*size = data_sz;
1553 	return data;
1554 }
1555 
1556 __attribute__((alias("btf__raw_data")))
1557 const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
1558 
1559 const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
1560 {
1561 	if (offset < btf->start_str_off)
1562 		return btf__str_by_offset(btf->base_btf, offset);
1563 	else if (offset - btf->start_str_off < btf->hdr->str_len)
1564 		return btf_strs_data(btf) + (offset - btf->start_str_off);
1565 	else
1566 		return errno = EINVAL, NULL;
1567 }
1568 
1569 const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
1570 {
1571 	return btf__str_by_offset(btf, offset);
1572 }
1573 
/* Fetch raw BTF data of *btf_fd* from the kernel and parse it into a new
 * struct btf with *base_btf* as its base. Uses a 4KiB buffer initially and
 * retries once with the exact size the kernel reported, if larger.
 * Returns ERR_PTR() on failure.
 */
struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
{
	struct bpf_btf_info btf_info;
	__u32 len = sizeof(btf_info);
	__u32 last_size;
	struct btf *btf;
	void *ptr;
	int err;

	/* we won't know btf_size until we call bpf_btf_get_info_by_fd(). so
	 * let's start with a sane default - 4KiB here - and resize it only if
	 * bpf_btf_get_info_by_fd() needs a bigger buffer.
	 */
	last_size = 4096;
	ptr = malloc(last_size);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	memset(&btf_info, 0, sizeof(btf_info));
	btf_info.btf = ptr_to_u64(ptr);
	btf_info.btf_size = last_size;
	err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);

	/* first attempt succeeded, but buffer was too small for the data --
	 * resize to the kernel-reported size and query again
	 */
	if (!err && btf_info.btf_size > last_size) {
		void *temp_ptr;

		last_size = btf_info.btf_size;
		temp_ptr = realloc(ptr, last_size);
		if (!temp_ptr) {
			btf = ERR_PTR(-ENOMEM);
			goto exit_free;
		}
		ptr = temp_ptr;

		/* re-initialize info struct for the second query */
		len = sizeof(btf_info);
		memset(&btf_info, 0, sizeof(btf_info));
		btf_info.btf = ptr_to_u64(ptr);
		btf_info.btf_size = last_size;

		err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
	}

	/* btf_size growing again between queries means racing BTF changes;
	 * treat that as -E2BIG rather than retrying indefinitely
	 */
	if (err || btf_info.btf_size > last_size) {
		btf = err ? ERR_PTR(-errno) : ERR_PTR(-E2BIG);
		goto exit_free;
	}

	btf = btf_new(ptr, btf_info.btf_size, base_btf);

exit_free:
	free(ptr);
	return btf;
}
1627 
1628 struct btf *btf_load_from_kernel(__u32 id, struct btf *base_btf, int token_fd)
1629 {
1630 	struct btf *btf;
1631 	int btf_fd;
1632 	LIBBPF_OPTS(bpf_get_fd_by_id_opts, opts);
1633 
1634 	if (token_fd) {
1635 		opts.open_flags |= BPF_F_TOKEN_FD;
1636 		opts.token_fd = token_fd;
1637 	}
1638 
1639 	btf_fd = bpf_btf_get_fd_by_id_opts(id, &opts);
1640 	if (btf_fd < 0)
1641 		return libbpf_err_ptr(-errno);
1642 
1643 	btf = btf_get_from_fd(btf_fd, base_btf);
1644 	close(btf_fd);
1645 
1646 	return libbpf_ptr(btf);
1647 }
1648 
1649 struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
1650 {
1651 	return btf_load_from_kernel(id, base_btf, 0);
1652 }
1653 
1654 struct btf *btf__load_from_kernel_by_id(__u32 id)
1655 {
1656 	return btf__load_from_kernel_by_id_split(id, NULL);
1657 }
1658 
1659 static void btf_invalidate_raw_data(struct btf *btf)
1660 {
1661 	if (btf->raw_data) {
1662 		free(btf->raw_data);
1663 		btf->raw_data = NULL;
1664 	}
1665 	if (btf->raw_data_swapped) {
1666 		free(btf->raw_data_swapped);
1667 		btf->raw_data_swapped = NULL;
1668 	}
1669 }
1670 
/* Ensure BTF is ready to be modified (by splitting into a three memory
 * regions for header, types, and strings). Also invalidate cached
 * raw_data, if any.
 * Returns 0 on success, -ENOMEM (or strset error) on failure; on failure
 * the BTF object is left unmodified.
 */
static int btf_ensure_modifiable(struct btf *btf)
{
	void *hdr, *types;
	struct strset *set = NULL;
	int err = -ENOMEM;

	if (btf_is_modifiable(btf)) {
		/* any BTF modification invalidates raw_data */
		btf_invalidate_raw_data(btf);
		return 0;
	}

	/* split raw data into three memory regions */
	hdr = malloc(btf->hdr->hdr_len);
	types = malloc(btf->hdr->type_len);
	if (!hdr || !types)
		goto err_out;

	memcpy(hdr, btf->hdr, btf->hdr->hdr_len);
	memcpy(types, btf->types_data, btf->hdr->type_len);

	/* build lookup index for all strings */
	set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr->str_len);
	if (IS_ERR(set)) {
		err = PTR_ERR(set);
		goto err_out;
	}

	/* only when everything was successful, update internal state */
	btf->hdr = hdr;
	btf->types_data = types;
	btf->types_data_cap = btf->hdr->type_len;
	/* strings now live in strs_set; strs_data is only for contiguous BTF */
	btf->strs_data = NULL;
	btf->strs_set = set;
	/* if BTF was created from scratch, all strings are guaranteed to be
	 * unique and deduplicated
	 */
	if (btf->hdr->str_len == 0)
		btf->strs_deduped = true;
	/* str_len == 1 in non-split BTF means only the initial empty string */
	if (!btf->base_btf && btf->hdr->str_len == 1)
		btf->strs_deduped = true;

	/* invalidate raw_data representation */
	btf_invalidate_raw_data(btf);

	return 0;

err_out:
	strset__free(set);
	free(hdr);
	free(types);
	return err;
}
1728 
1729 /* Find an offset in BTF string section that corresponds to a given string *s*.
1730  * Returns:
1731  *   - >0 offset into string section, if string is found;
1732  *   - -ENOENT, if string is not in the string section;
1733  *   - <0, on any other error.
1734  */
1735 int btf__find_str(struct btf *btf, const char *s)
1736 {
1737 	int off;
1738 
1739 	if (btf->base_btf) {
1740 		off = btf__find_str(btf->base_btf, s);
1741 		if (off != -ENOENT)
1742 			return off;
1743 	}
1744 
1745 	/* BTF needs to be in a modifiable state to build string lookup index */
1746 	if (btf_ensure_modifiable(btf))
1747 		return libbpf_err(-ENOMEM);
1748 
1749 	off = strset__find_str(btf->strs_set, s);
1750 	if (off < 0)
1751 		return libbpf_err(off);
1752 
1753 	return btf->start_str_off + off;
1754 }
1755 
1756 /* Add a string s to the BTF string section.
1757  * Returns:
1758  *   - > 0 offset into string section, on success;
1759  *   - < 0, on error.
1760  */
1761 int btf__add_str(struct btf *btf, const char *s)
1762 {
1763 	int off;
1764 
1765 	if (btf->base_btf) {
1766 		off = btf__find_str(btf->base_btf, s);
1767 		if (off != -ENOENT)
1768 			return off;
1769 	}
1770 
1771 	if (btf_ensure_modifiable(btf))
1772 		return libbpf_err(-ENOMEM);
1773 
1774 	off = strset__add_str(btf->strs_set, s);
1775 	if (off < 0)
1776 		return libbpf_err(off);
1777 
1778 	btf->hdr->str_len = strset__data_size(btf->strs_set);
1779 
1780 	return btf->start_str_off + off;
1781 }
1782 
1783 static void *btf_add_type_mem(struct btf *btf, size_t add_sz)
1784 {
1785 	return libbpf_add_mem(&btf->types_data, &btf->types_data_cap, 1,
1786 			      btf->hdr->type_len, UINT_MAX, add_sz);
1787 }
1788 
1789 static void btf_type_inc_vlen(struct btf_type *t)
1790 {
1791 	t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t));
1792 }
1793 
1794 static int btf_commit_type(struct btf *btf, int data_sz)
1795 {
1796 	int err;
1797 
1798 	err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
1799 	if (err)
1800 		return libbpf_err(err);
1801 
1802 	btf->hdr->type_len += data_sz;
1803 	btf->hdr->str_off += data_sz;
1804 	btf->nr_types++;
1805 	return btf->start_id + btf->nr_types - 1;
1806 }
1807 
/* Context for copying BTF types from a source BTF object into a
 * destination BTF object (used by btf__add_type() and btf__add_btf())
 */
struct btf_pipe {
	const struct btf *src;
	struct btf *dst;
	struct hashmap *str_off_map; /* map string offsets from src to dst */
};
1813 
1814 static int btf_rewrite_str(struct btf_pipe *p, __u32 *str_off)
1815 {
1816 	long mapped_off;
1817 	int off, err;
1818 
1819 	if (!*str_off) /* nothing to do for empty strings */
1820 		return 0;
1821 
1822 	if (p->str_off_map &&
1823 	    hashmap__find(p->str_off_map, *str_off, &mapped_off)) {
1824 		*str_off = mapped_off;
1825 		return 0;
1826 	}
1827 
1828 	off = btf__add_str(p->dst, btf__str_by_offset(p->src, *str_off));
1829 	if (off < 0)
1830 		return off;
1831 
1832 	/* Remember string mapping from src to dst.  It avoids
1833 	 * performing expensive string comparisons.
1834 	 */
1835 	if (p->str_off_map) {
1836 		err = hashmap__append(p->str_off_map, *str_off, off);
1837 		if (err)
1838 			return err;
1839 	}
1840 
1841 	*str_off = off;
1842 	return 0;
1843 }
1844 
1845 static int btf_add_type(struct btf_pipe *p, const struct btf_type *src_type)
1846 {
1847 	struct btf_field_iter it;
1848 	struct btf_type *t;
1849 	__u32 *str_off;
1850 	int sz, err;
1851 
1852 	sz = btf_type_size(src_type);
1853 	if (sz < 0)
1854 		return libbpf_err(sz);
1855 
1856 	/* deconstruct BTF, if necessary, and invalidate raw_data */
1857 	if (btf_ensure_modifiable(p->dst))
1858 		return libbpf_err(-ENOMEM);
1859 
1860 	t = btf_add_type_mem(p->dst, sz);
1861 	if (!t)
1862 		return libbpf_err(-ENOMEM);
1863 
1864 	memcpy(t, src_type, sz);
1865 
1866 	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
1867 	if (err)
1868 		return libbpf_err(err);
1869 
1870 	while ((str_off = btf_field_iter_next(&it))) {
1871 		err = btf_rewrite_str(p, str_off);
1872 		if (err)
1873 			return libbpf_err(err);
1874 	}
1875 
1876 	return btf_commit_type(p->dst, sz);
1877 }
1878 
1879 int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
1880 {
1881 	struct btf_pipe p = { .src = src_btf, .dst = btf };
1882 
1883 	return btf_add_type(&p, src_type);
1884 }
1885 
1886 static size_t btf_dedup_identity_hash_fn(long key, void *ctx);
1887 static bool btf_dedup_equal_fn(long k1, long k2, void *ctx);
1888 
/* Append all types from *src_btf* to *btf*, remapping referenced type IDs
 * and deduplicating/remapping referenced strings. On success returns the
 * type ID assigned to the first appended type; on error, rolls back any
 * partial changes to the strings section and returns negative error.
 */
int btf__add_btf(struct btf *btf, const struct btf *src_btf)
{
	struct btf_pipe p = { .src = src_btf, .dst = btf };
	int data_sz, sz, cnt, i, err, old_strs_len;
	__u32 *off;
	void *t;

	/* appending split BTF isn't supported yet */
	if (src_btf->base_btf)
		return libbpf_err(-ENOTSUP);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	/* remember original strings section size if we have to roll back
	 * partial strings section changes
	 */
	old_strs_len = btf->hdr->str_len;

	data_sz = src_btf->hdr->type_len;
	cnt = btf__type_cnt(src_btf) - 1;	/* not counting implicit VOID */

	/* pre-allocate enough memory for new types */
	t = btf_add_type_mem(btf, data_sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* pre-allocate enough memory for type offset index for new types */
	off = btf_add_type_offs_mem(btf, cnt);
	if (!off)
		return libbpf_err(-ENOMEM);

	/* Map the string offsets from src_btf to the offsets from btf to improve performance */
	p.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
	if (IS_ERR(p.str_off_map))
		return libbpf_err(-ENOMEM);

	/* bulk copy types data for all types from src_btf */
	memcpy(t, src_btf->types_data, data_sz);

	/* now fix up each copied type in place: record its offset, remap its
	 * strings, and shift its type ID references
	 */
	for (i = 0; i < cnt; i++) {
		struct btf_field_iter it;
		__u32 *type_id, *str_off;

		sz = btf_type_size(t);
		if (sz < 0) {
			/* unlikely, has to be corrupted src_btf */
			err = sz;
			goto err_out;
		}

		/* fill out type ID to type offset mapping for lookups by type ID */
		*off = t - btf->types_data;

		/* add, dedup, and remap strings referenced by this BTF type */
		err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
		if (err)
			goto err_out;
		while ((str_off = btf_field_iter_next(&it))) {
			err = btf_rewrite_str(&p, str_off);
			if (err)
				goto err_out;
		}

		/* remap all type IDs referenced from this BTF type */
		err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
		if (err)
			goto err_out;

		while ((type_id = btf_field_iter_next(&it))) {
			if (!*type_id) /* nothing to do for VOID references */
				continue;

			/* we haven't updated btf's type count yet, so
			 * btf->start_id + btf->nr_types - 1 is the type ID offset we should
			 * add to all newly added BTF types
			 */
			*type_id += btf->start_id + btf->nr_types - 1;
		}

		/* go to next type data and type offset index entry */
		t += sz;
		off++;
	}

	/* Up until now any of the copied type data was effectively invisible,
	 * so if we exited early before this point due to error, BTF would be
	 * effectively unmodified. There would be extra internal memory
	 * pre-allocated, but it would not be available for querying.  But now
	 * that we've copied and rewritten all the data successfully, we can
	 * update type count and various internal offsets and sizes to
	 * "commit" the changes and made them visible to the outside world.
	 */
	btf->hdr->type_len += data_sz;
	btf->hdr->str_off += data_sz;
	btf->nr_types += cnt;

	hashmap__free(p.str_off_map);

	/* return type ID of the first added BTF type */
	return btf->start_id + btf->nr_types - cnt;
err_out:
	/* zero out preallocated memory as if it was just allocated with
	 * libbpf_add_mem()
	 */
	memset(btf->types_data + btf->hdr->type_len, 0, data_sz);
	memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len);

	/* and now restore original strings section size; types data size
	 * wasn't modified, so doesn't need restoring, see big comment above
	 */
	btf->hdr->str_len = old_strs_len;

	hashmap__free(p.str_off_map);

	return libbpf_err(err);
}
2007 
2008 /*
2009  * Append new BTF_KIND_INT type with:
2010  *   - *name* - non-empty, non-NULL type name;
2011  *   - *sz* - power-of-2 (1, 2, 4, ..) size of the type, in bytes;
2012  *   - encoding is a combination of BTF_INT_SIGNED, BTF_INT_CHAR, BTF_INT_BOOL.
2013  * Returns:
2014  *   - >0, type ID of newly added BTF type;
2015  *   - <0, on error.
2016  */
2017 int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding)
2018 {
2019 	struct btf_type *t;
2020 	int sz, name_off;
2021 
2022 	/* non-empty name */
2023 	if (!name || !name[0])
2024 		return libbpf_err(-EINVAL);
2025 	/* byte_sz must be power of 2 */
2026 	if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16)
2027 		return libbpf_err(-EINVAL);
2028 	if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL))
2029 		return libbpf_err(-EINVAL);
2030 
2031 	/* deconstruct BTF, if necessary, and invalidate raw_data */
2032 	if (btf_ensure_modifiable(btf))
2033 		return libbpf_err(-ENOMEM);
2034 
2035 	sz = sizeof(struct btf_type) + sizeof(int);
2036 	t = btf_add_type_mem(btf, sz);
2037 	if (!t)
2038 		return libbpf_err(-ENOMEM);
2039 
2040 	/* if something goes wrong later, we might end up with an extra string,
2041 	 * but that shouldn't be a problem, because BTF can't be constructed
2042 	 * completely anyway and will most probably be just discarded
2043 	 */
2044 	name_off = btf__add_str(btf, name);
2045 	if (name_off < 0)
2046 		return name_off;
2047 
2048 	t->name_off = name_off;
2049 	t->info = btf_type_info(BTF_KIND_INT, 0, 0);
2050 	t->size = byte_sz;
2051 	/* set INT info, we don't allow setting legacy bit offset/size */
2052 	*(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8);
2053 
2054 	return btf_commit_type(btf, sz);
2055 }
2056 
2057 /*
2058  * Append new BTF_KIND_FLOAT type with:
2059  *   - *name* - non-empty, non-NULL type name;
2060  *   - *sz* - size of the type, in bytes;
2061  * Returns:
2062  *   - >0, type ID of newly added BTF type;
2063  *   - <0, on error.
2064  */
2065 int btf__add_float(struct btf *btf, const char *name, size_t byte_sz)
2066 {
2067 	struct btf_type *t;
2068 	int sz, name_off;
2069 
2070 	/* non-empty name */
2071 	if (!name || !name[0])
2072 		return libbpf_err(-EINVAL);
2073 
2074 	/* byte_sz must be one of the explicitly allowed values */
2075 	if (byte_sz != 2 && byte_sz != 4 && byte_sz != 8 && byte_sz != 12 &&
2076 	    byte_sz != 16)
2077 		return libbpf_err(-EINVAL);
2078 
2079 	if (btf_ensure_modifiable(btf))
2080 		return libbpf_err(-ENOMEM);
2081 
2082 	sz = sizeof(struct btf_type);
2083 	t = btf_add_type_mem(btf, sz);
2084 	if (!t)
2085 		return libbpf_err(-ENOMEM);
2086 
2087 	name_off = btf__add_str(btf, name);
2088 	if (name_off < 0)
2089 		return name_off;
2090 
2091 	t->name_off = name_off;
2092 	t->info = btf_type_info(BTF_KIND_FLOAT, 0, 0);
2093 	t->size = byte_sz;
2094 
2095 	return btf_commit_type(btf, sz);
2096 }
2097 
2098 /* it's completely legal to append BTF types with type IDs pointing forward to
2099  * types that haven't been appended yet, so we only make sure that id looks
2100  * sane, we can't guarantee that ID will always be valid
2101  */
2102 static int validate_type_id(int id)
2103 {
2104 	if (id < 0 || id > BTF_MAX_NR_TYPES)
2105 		return -EINVAL;
2106 	return 0;
2107 }
2108 
2109 /* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT */
2110 static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id, int kflag)
2111 {
2112 	struct btf_type *t;
2113 	int sz, name_off = 0;
2114 
2115 	if (validate_type_id(ref_type_id))
2116 		return libbpf_err(-EINVAL);
2117 
2118 	if (btf_ensure_modifiable(btf))
2119 		return libbpf_err(-ENOMEM);
2120 
2121 	sz = sizeof(struct btf_type);
2122 	t = btf_add_type_mem(btf, sz);
2123 	if (!t)
2124 		return libbpf_err(-ENOMEM);
2125 
2126 	if (name && name[0]) {
2127 		name_off = btf__add_str(btf, name);
2128 		if (name_off < 0)
2129 			return name_off;
2130 	}
2131 
2132 	t->name_off = name_off;
2133 	t->info = btf_type_info(kind, 0, kflag);
2134 	t->type = ref_type_id;
2135 
2136 	return btf_commit_type(btf, sz);
2137 }
2138 
2139 /*
2140  * Append new BTF_KIND_PTR type with:
2141  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2142  * Returns:
2143  *   - >0, type ID of newly added BTF type;
2144  *   - <0, on error.
2145  */
2146 int btf__add_ptr(struct btf *btf, int ref_type_id)
2147 {
2148 	return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id, 0);
2149 }
2150 
2151 /*
2152  * Append new BTF_KIND_ARRAY type with:
2153  *   - *index_type_id* - type ID of the type describing array index;
2154  *   - *elem_type_id* - type ID of the type describing array element;
2155  *   - *nr_elems* - the size of the array;
2156  * Returns:
2157  *   - >0, type ID of newly added BTF type;
2158  *   - <0, on error.
2159  */
2160 int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems)
2161 {
2162 	struct btf_type *t;
2163 	struct btf_array *a;
2164 	int sz;
2165 
2166 	if (validate_type_id(index_type_id) || validate_type_id(elem_type_id))
2167 		return libbpf_err(-EINVAL);
2168 
2169 	if (btf_ensure_modifiable(btf))
2170 		return libbpf_err(-ENOMEM);
2171 
2172 	sz = sizeof(struct btf_type) + sizeof(struct btf_array);
2173 	t = btf_add_type_mem(btf, sz);
2174 	if (!t)
2175 		return libbpf_err(-ENOMEM);
2176 
2177 	t->name_off = 0;
2178 	t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0);
2179 	t->size = 0;
2180 
2181 	a = btf_array(t);
2182 	a->type = elem_type_id;
2183 	a->index_type = index_type_id;
2184 	a->nelems = nr_elems;
2185 
2186 	return btf_commit_type(btf, sz);
2187 }
2188 
2189 /* generic STRUCT/UNION append function */
static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz)
{
	struct btf_type *t;
	int sz, name_off = 0;

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* name is optional; empty/NULL name means anonymous struct/union.
	 * Note: once BTF is modifiable, strings live in a separate string
	 * set, so btf__add_str() below doesn't invalidate t.
	 */
	if (name && name[0]) {
		name_off = btf__add_str(btf, name);
		if (name_off < 0)
			return name_off;
	}

	/* start out with vlen=0 and no kflag; this will be adjusted when
	 * adding each member
	 */
	t->name_off = name_off;
	t->info = btf_type_info(kind, 0, 0);
	t->size = bytes_sz;

	return btf_commit_type(btf, sz);
}
2218 
2219 /*
2220  * Append new BTF_KIND_STRUCT type with:
2221  *   - *name* - name of the struct, can be NULL or empty for anonymous structs;
2222  *   - *byte_sz* - size of the struct, in bytes;
2223  *
2224  * Struct initially has no fields in it. Fields can be added by
2225  * btf__add_field() right after btf__add_struct() succeeds.
2226  *
2227  * Returns:
2228  *   - >0, type ID of newly added BTF type;
2229  *   - <0, on error.
2230  */
2231 int btf__add_struct(struct btf *btf, const char *name, __u32 byte_sz)
2232 {
2233 	return btf_add_composite(btf, BTF_KIND_STRUCT, name, byte_sz);
2234 }
2235 
2236 /*
2237  * Append new BTF_KIND_UNION type with:
2238  *   - *name* - name of the union, can be NULL or empty for anonymous union;
2239  *   - *byte_sz* - size of the union, in bytes;
2240  *
2241  * Union initially has no fields in it. Fields can be added by
2242  * btf__add_field() right after btf__add_union() succeeds. All fields
2243  * should have *bit_offset* of 0.
2244  *
2245  * Returns:
2246  *   - >0, type ID of newly added BTF type;
2247  *   - <0, on error.
2248  */
2249 int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz)
2250 {
2251 	return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz);
2252 }
2253 
2254 static struct btf_type *btf_last_type(struct btf *btf)
2255 {
2256 	return btf_type_by_id(btf, btf__type_cnt(btf) - 1);
2257 }
2258 
2259 /*
2260  * Append new field for the current STRUCT/UNION type with:
2261  *   - *name* - name of the field, can be NULL or empty for anonymous field;
2262  *   - *type_id* - type ID for the type describing field type;
2263  *   - *bit_offset* - bit offset of the start of the field within struct/union;
2264  *   - *bit_size* - bit size of a bitfield, 0 for non-bitfield fields;
2265  * Returns:
2266  *   -  0, on success;
2267  *   - <0, on error.
2268  */
int btf__add_field(struct btf *btf, const char *name, int type_id,
		   __u32 bit_offset, __u32 bit_size)
{
	struct btf_type *t;
	struct btf_member *m;
	bool is_bitfield;
	int sz, name_off = 0;

	/* last type should be union/struct */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_composite(t))
		return libbpf_err(-EINVAL);

	if (validate_type_id(type_id))
		return libbpf_err(-EINVAL);
	/* best-effort bit field offset/size enforcement */
	is_bitfield = bit_size || (bit_offset % 8 != 0);
	/* bit_size goes into the top 8 bits of member offset and bit_offset
	 * into the low 24 bits, hence the 255 and 0xffffff limits below
	 */
	if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff))
		return libbpf_err(-EINVAL);

	/* only offset 0 is allowed for unions */
	if (btf_is_union(t) && bit_offset)
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_member);
	m = btf_add_type_mem(btf, sz);
	if (!m)
		return libbpf_err(-ENOMEM);

	if (name && name[0]) {
		name_off = btf__add_str(btf, name);
		if (name_off < 0)
			return name_off;
	}

	m->name_off = name_off;
	m->type = type_id;
	m->offset = bit_offset | (bit_size << 24);

	/* btf_add_type_mem can invalidate t pointer */
	t = btf_last_type(btf);
	/* update parent type's vlen and kflag */
	t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t));

	/* member data grew the types section; keep header offsets in sync */
	btf->hdr->type_len += sz;
	btf->hdr->str_off += sz;
	return 0;
}
2323 
/* common helper for appending ENUM and ENUM64 types; *kind* selects which */
static int btf_add_enum_common(struct btf *btf, const char *name, __u32 byte_sz,
			       bool is_signed, __u8 kind)
{
	struct btf_type *t;
	int sz, name_off = 0;

	/* byte_sz must be power of 2 */
	if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8)
		return libbpf_err(-EINVAL);

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* empty/NULL name means an anonymous enum */
	if (name && name[0]) {
		name_off = btf__add_str(btf, name);
		if (name_off < 0)
			return name_off;
	}

	/* start out with vlen=0; it will be adjusted when adding enum values */
	t->name_off = name_off;
	t->info = btf_type_info(kind, 0, is_signed);
	t->size = byte_sz;

	return btf_commit_type(btf, sz);
}
2355 
2356 /*
2357  * Append new BTF_KIND_ENUM type with:
2358  *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
2359  *   - *byte_sz* - size of the enum, in bytes.
2360  *
2361  * Enum initially has no enum values in it (and corresponds to enum forward
2362  * declaration). Enumerator values can be added by btf__add_enum_value()
2363  * immediately after btf__add_enum() succeeds.
2364  *
2365  * Returns:
2366  *   - >0, type ID of newly added BTF type;
2367  *   - <0, on error.
2368  */
2369 int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
2370 {
2371 	/*
2372 	 * set the signedness to be unsigned, it will change to signed
2373 	 * if any later enumerator is negative.
2374 	 */
2375 	return btf_add_enum_common(btf, name, byte_sz, false, BTF_KIND_ENUM);
2376 }
2377 
2378 /*
2379  * Append new enum value for the current ENUM type with:
2380  *   - *name* - name of the enumerator value, can't be NULL or empty;
2381  *   - *value* - integer value corresponding to enum value *name*;
2382  * Returns:
2383  *   -  0, on success;
2384  *   - <0, on error.
2385  */
int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
{
	struct btf_type *t;
	struct btf_enum *v;
	int sz, name_off;

	/* last type should be BTF_KIND_ENUM */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_enum(t))
		return libbpf_err(-EINVAL);

	/* non-empty name */
	if (!name || !name[0])
		return libbpf_err(-EINVAL);
	/* btf_enum stores a 32-bit value; accept anything representable as
	 * either a signed or an unsigned 32-bit integer
	 */
	if (value < INT_MIN || value > UINT_MAX)
		return libbpf_err(-E2BIG);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_enum);
	v = btf_add_type_mem(btf, sz);
	if (!v)
		return libbpf_err(-ENOMEM);

	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	v->name_off = name_off;
	v->val = value;

	/* update parent type's vlen */
	t = btf_last_type(btf);
	btf_type_inc_vlen(t);

	/* if negative value, set signedness to signed */
	if (value < 0)
		t->info = btf_type_info(btf_kind(t), btf_vlen(t), true);

	/* enum value grew the types section; keep header offsets in sync */
	btf->hdr->type_len += sz;
	btf->hdr->str_off += sz;
	return 0;
}
2433 
2434 /*
2435  * Append new BTF_KIND_ENUM64 type with:
2436  *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
2437  *   - *byte_sz* - size of the enum, in bytes.
2438  *   - *is_signed* - whether the enum values are signed or not;
2439  *
2440  * Enum initially has no enum values in it (and corresponds to enum forward
2441  * declaration). Enumerator values can be added by btf__add_enum64_value()
2442  * immediately after btf__add_enum64() succeeds.
2443  *
2444  * Returns:
2445  *   - >0, type ID of newly added BTF type;
2446  *   - <0, on error.
2447  */
2448 int btf__add_enum64(struct btf *btf, const char *name, __u32 byte_sz,
2449 		    bool is_signed)
2450 {
2451 	return btf_add_enum_common(btf, name, byte_sz, is_signed,
2452 				   BTF_KIND_ENUM64);
2453 }
2454 
2455 /*
2456  * Append new enum value for the current ENUM64 type with:
2457  *   - *name* - name of the enumerator value, can't be NULL or empty;
2458  *   - *value* - integer value corresponding to enum value *name*;
2459  * Returns:
2460  *   -  0, on success;
2461  *   - <0, on error.
2462  */
int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value)
{
	struct btf_enum64 *v;
	struct btf_type *t;
	int sz, name_off;

	/* last type should be BTF_KIND_ENUM64 */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_enum64(t))
		return libbpf_err(-EINVAL);

	/* non-empty name */
	if (!name || !name[0])
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_enum64);
	v = btf_add_type_mem(btf, sz);
	if (!v)
		return libbpf_err(-ENOMEM);

	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	/* 64-bit value is stored as two 32-bit halves */
	v->name_off = name_off;
	v->val_lo32 = (__u32)value;
	v->val_hi32 = value >> 32;

	/* update parent type's vlen */
	t = btf_last_type(btf);
	btf_type_inc_vlen(t);

	/* enum64 value grew the types section; keep header offsets in sync */
	btf->hdr->type_len += sz;
	btf->hdr->str_off += sz;
	return 0;
}
2505 
2506 /*
2507  * Append new BTF_KIND_FWD type with:
2508  *   - *name*, non-empty/non-NULL name;
2509  *   - *fwd_kind*, kind of forward declaration, one of BTF_FWD_STRUCT,
2510  *     BTF_FWD_UNION, or BTF_FWD_ENUM;
2511  * Returns:
2512  *   - >0, type ID of newly added BTF type;
2513  *   - <0, on error.
2514  */
int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind)
{
	if (!name || !name[0])
		return libbpf_err(-EINVAL);

	switch (fwd_kind) {
	case BTF_FWD_STRUCT:
	case BTF_FWD_UNION: {
		struct btf_type *t;
		int id;

		id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0, 0);
		if (id <= 0)
			return id;
		t = btf_type_by_id(btf, id);
		/* kflag distinguishes union (1) from struct (0) forwards */
		t->info = btf_type_info(BTF_KIND_FWD, 0, fwd_kind == BTF_FWD_UNION);
		return id;
	}
	case BTF_FWD_ENUM:
		/* enum forward in BTF currently is just an enum with no enum
		 * values; we also assume a standard 4-byte size for it
		 */
		return btf__add_enum(btf, name, sizeof(int));
	default:
		return libbpf_err(-EINVAL);
	}
}
2542 
2543 /*
 * Append new BTF_KIND_TYPEDEF type with:
2545  *   - *name*, non-empty/non-NULL name;
2546  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2547  * Returns:
2548  *   - >0, type ID of newly added BTF type;
2549  *   - <0, on error.
2550  */
2551 int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id)
2552 {
2553 	if (!name || !name[0])
2554 		return libbpf_err(-EINVAL);
2555 
2556 	return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id, 0);
2557 }
2558 
2559 /*
2560  * Append new BTF_KIND_VOLATILE type with:
2561  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2562  * Returns:
2563  *   - >0, type ID of newly added BTF type;
2564  *   - <0, on error.
2565  */
2566 int btf__add_volatile(struct btf *btf, int ref_type_id)
2567 {
2568 	return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id, 0);
2569 }
2570 
2571 /*
2572  * Append new BTF_KIND_CONST type with:
2573  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2574  * Returns:
2575  *   - >0, type ID of newly added BTF type;
2576  *   - <0, on error.
2577  */
2578 int btf__add_const(struct btf *btf, int ref_type_id)
2579 {
2580 	return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id, 0);
2581 }
2582 
2583 /*
2584  * Append new BTF_KIND_RESTRICT type with:
2585  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2586  * Returns:
2587  *   - >0, type ID of newly added BTF type;
2588  *   - <0, on error.
2589  */
2590 int btf__add_restrict(struct btf *btf, int ref_type_id)
2591 {
2592 	return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id, 0);
2593 }
2594 
2595 /*
2596  * Append new BTF_KIND_TYPE_TAG type with:
2597  *   - *value*, non-empty/non-NULL tag value;
2598  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2599  * Returns:
2600  *   - >0, type ID of newly added BTF type;
2601  *   - <0, on error.
2602  */
2603 int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
2604 {
2605 	if (!value || !value[0])
2606 		return libbpf_err(-EINVAL);
2607 
2608 	return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id, 0);
2609 }
2610 
2611 /*
2612  * Append new BTF_KIND_TYPE_TAG type with:
2613  *   - *value*, non-empty/non-NULL tag value;
2614  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2615  * Set info->kflag to 1, indicating this tag is an __attribute__
2616  * Returns:
2617  *   - >0, type ID of newly added BTF type;
2618  *   - <0, on error.
2619  */
2620 int btf__add_type_attr(struct btf *btf, const char *value, int ref_type_id)
2621 {
2622 	if (!value || !value[0])
2623 		return libbpf_err(-EINVAL);
2624 
2625 	return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id, 1);
2626 }
2627 
2628 /*
2629  * Append new BTF_KIND_FUNC type with:
2630  *   - *name*, non-empty/non-NULL name;
2631  *   - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet;
2632  * Returns:
2633  *   - >0, type ID of newly added BTF type;
2634  *   - <0, on error.
2635  */
2636 int btf__add_func(struct btf *btf, const char *name,
2637 		  enum btf_func_linkage linkage, int proto_type_id)
2638 {
2639 	int id;
2640 
2641 	if (!name || !name[0])
2642 		return libbpf_err(-EINVAL);
2643 	if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL &&
2644 	    linkage != BTF_FUNC_EXTERN)
2645 		return libbpf_err(-EINVAL);
2646 
2647 	id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id, 0);
2648 	if (id > 0) {
2649 		struct btf_type *t = btf_type_by_id(btf, id);
2650 
2651 		t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0);
2652 	}
2653 	return libbpf_err(id);
2654 }
2655 
2656 /*
2657  * Append new BTF_KIND_FUNC_PROTO with:
2658  *   - *ret_type_id* - type ID for return result of a function.
2659  *
2660  * Function prototype initially has no arguments, but they can be added by
2661  * btf__add_func_param() one by one, immediately after
2662  * btf__add_func_proto() succeeded.
2663  *
2664  * Returns:
2665  *   - >0, type ID of newly added BTF type;
2666  *   - <0, on error.
2667  */
int btf__add_func_proto(struct btf *btf, int ret_type_id)
{
	struct btf_type *t;
	int sz;

	if (validate_type_id(ret_type_id))
		return libbpf_err(-EINVAL);

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* start out with vlen=0; this will be adjusted when adding function
	 * parameters (see btf__add_func_param()), if any
	 */
	t->name_off = 0;
	t->info = btf_type_info(BTF_KIND_FUNC_PROTO, 0, 0);
	t->type = ret_type_id;

	return btf_commit_type(btf, sz);
}
2693 
2694 /*
2695  * Append new function parameter for current FUNC_PROTO type with:
2696  *   - *name* - parameter name, can be NULL or empty;
2697  *   - *type_id* - type ID describing the type of the parameter.
2698  * Returns:
2699  *   -  0, on success;
2700  *   - <0, on error.
2701  */
int btf__add_func_param(struct btf *btf, const char *name, int type_id)
{
	struct btf_type *t;
	struct btf_param *p;
	int sz, name_off = 0;

	if (validate_type_id(type_id))
		return libbpf_err(-EINVAL);

	/* last type should be BTF_KIND_FUNC_PROTO */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_func_proto(t))
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_param);
	p = btf_add_type_mem(btf, sz);
	if (!p)
		return libbpf_err(-ENOMEM);

	/* parameter name is optional */
	if (name && name[0]) {
		name_off = btf__add_str(btf, name);
		if (name_off < 0)
			return name_off;
	}

	p->name_off = name_off;
	p->type = type_id;

	/* update parent type's vlen; re-fetch t, as btf_add_type_mem()
	 * above could have invalidated the old pointer
	 */
	t = btf_last_type(btf);
	btf_type_inc_vlen(t);

	/* param data grew the types section; keep header offsets in sync */
	btf->hdr->type_len += sz;
	btf->hdr->str_off += sz;
	return 0;
}
2744 
2745 /*
2746  * Append new BTF_KIND_VAR type with:
2747  *   - *name* - non-empty/non-NULL name;
2748  *   - *linkage* - variable linkage, one of BTF_VAR_STATIC,
2749  *     BTF_VAR_GLOBAL_ALLOCATED, or BTF_VAR_GLOBAL_EXTERN;
2750  *   - *type_id* - type ID of the type describing the type of the variable.
2751  * Returns:
2752  *   - >0, type ID of newly added BTF type;
2753  *   - <0, on error.
2754  */
int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id)
{
	struct btf_type *t;
	struct btf_var *v;
	int sz, name_off;

	/* non-empty name */
	if (!name || !name[0])
		return libbpf_err(-EINVAL);
	/* only the three known variable linkage kinds are accepted */
	if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED &&
	    linkage != BTF_VAR_GLOBAL_EXTERN)
		return libbpf_err(-EINVAL);
	if (validate_type_id(type_id))
		return libbpf_err(-EINVAL);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	/* VAR type descriptor is followed by one struct btf_var */
	sz = sizeof(struct btf_type) + sizeof(struct btf_var);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	t->name_off = name_off;
	t->info = btf_type_info(BTF_KIND_VAR, 0, 0);
	t->type = type_id;

	v = btf_var(t);
	v->linkage = linkage;

	return btf_commit_type(btf, sz);
}
2792 
2793 /*
2794  * Append new BTF_KIND_DATASEC type with:
2795  *   - *name* - non-empty/non-NULL name;
2796  *   - *byte_sz* - data section size, in bytes.
2797  *
2798  * Data section is initially empty. Variables info can be added with
2799  * btf__add_datasec_var_info() calls, after btf__add_datasec() succeeds.
2800  *
2801  * Returns:
2802  *   - >0, type ID of newly added BTF type;
2803  *   - <0, on error.
2804  */
int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz)
{
	struct btf_type *t;
	int sz, name_off;

	/* non-empty name */
	if (!name || !name[0])
		return libbpf_err(-EINVAL);

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	/* start with vlen=0, which will be updated as var_secinfos are added */
	t->name_off = name_off;
	t->info = btf_type_info(BTF_KIND_DATASEC, 0, 0);
	t->size = byte_sz;

	return btf_commit_type(btf, sz);
}
2833 
2834 /*
2835  * Append new data section variable information entry for current DATASEC type:
2836  *   - *var_type_id* - type ID, describing type of the variable;
2837  *   - *offset* - variable offset within data section, in bytes;
2838  *   - *byte_sz* - variable size, in bytes.
2839  *
2840  * Returns:
2841  *   -  0, on success;
2842  *   - <0, on error.
2843  */
int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz)
{
	struct btf_type *t;
	struct btf_var_secinfo *v;
	int sz;

	/* last type should be BTF_KIND_DATASEC */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_datasec(t))
		return libbpf_err(-EINVAL);

	if (validate_type_id(var_type_id))
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_var_secinfo);
	v = btf_add_type_mem(btf, sz);
	if (!v)
		return libbpf_err(-ENOMEM);

	v->type = var_type_id;
	v->offset = offset;
	v->size = byte_sz;

	/* update parent type's vlen; re-fetch t, as btf_add_type_mem()
	 * above could have invalidated the old pointer
	 */
	t = btf_last_type(btf);
	btf_type_inc_vlen(t);

	/* secinfo data grew the types section; keep header offsets in sync */
	btf->hdr->type_len += sz;
	btf->hdr->str_off += sz;
	return 0;
}
2881 
/* common helper for DECL_TAG appenders; kflag=1 marks an attribute-style tag */
static int btf_add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
			    int component_idx, int kflag)
{
	struct btf_type *t;
	int sz, value_off;

	/* component_idx == -1 tags the referenced type itself; >= 0 tags a
	 * specific member/parameter of it
	 */
	if (!value || !value[0] || component_idx < -1)
		return libbpf_err(-EINVAL);

	if (validate_type_id(ref_type_id))
		return libbpf_err(-EINVAL);

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	/* DECL_TAG type descriptor is followed by one struct btf_decl_tag */
	sz = sizeof(struct btf_type) + sizeof(struct btf_decl_tag);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	value_off = btf__add_str(btf, value);
	if (value_off < 0)
		return value_off;

	t->name_off = value_off;
	t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, kflag);
	t->type = ref_type_id;
	btf_decl_tag(t)->component_idx = component_idx;

	return btf_commit_type(btf, sz);
}
2913 
2914 /*
2915  * Append new BTF_KIND_DECL_TAG type with:
2916  *   - *value* - non-empty/non-NULL string;
2917  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2918  *   - *component_idx* - -1 for tagging reference type, otherwise struct/union
2919  *     member or function argument index;
2920  * Returns:
2921  *   - >0, type ID of newly added BTF type;
2922  *   - <0, on error.
2923  */
int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
		      int component_idx)
{
	const int kflag = 0; /* plain decl tag, not an attribute */

	return btf_add_decl_tag(btf, value, ref_type_id, component_idx, kflag);
}
2929 
2930 /*
2931  * Append new BTF_KIND_DECL_TAG type with:
2932  *   - *value* - non-empty/non-NULL string;
2933  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2934  *   - *component_idx* - -1 for tagging reference type, otherwise struct/union
2935  *     member or function argument index;
2936  * Set info->kflag to 1, indicating this tag is an __attribute__
2937  * Returns:
2938  *   - >0, type ID of newly added BTF type;
2939  *   - <0, on error.
2940  */
int btf__add_decl_attr(struct btf *btf, const char *value, int ref_type_id,
		       int component_idx)
{
	const int kflag = 1; /* attribute-style decl tag */

	return btf_add_decl_tag(btf, value, ref_type_id, component_idx, kflag);
}
2946 
/* parameters describing one .BTF.ext info subsection to be parsed */
struct btf_ext_sec_info_param {
	__u32 off;			/* subsection offset past .BTF.ext header */
	__u32 len;			/* subsection length, in bytes */
	__u32 min_rec_size;		/* minimum acceptable per-record size */
	struct btf_ext_info *ext_info;	/* where parsed sizing info is stored */
	const char *desc;		/* subsection name for log messages */
};
2954 
2955 /*
2956  * Parse a single info subsection of the BTF.ext info data:
2957  *  - validate subsection structure and elements
2958  *  - save info subsection start and sizing details in struct btf_ext
2959  *  - endian-independent operation, for calling before byte-swapping
2960  */
static int btf_ext_parse_sec_info(struct btf_ext *btf_ext,
				  struct btf_ext_sec_info_param *ext_sec,
				  bool is_native)
{
	const struct btf_ext_info_sec *sinfo;
	struct btf_ext_info *ext_info;
	__u32 info_left, record_size;
	size_t sec_cnt = 0;
	void *info;

	/* an absent subsection is valid: nothing to parse */
	if (ext_sec->len == 0)
		return 0;

	if (ext_sec->off & 0x03) {
		pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
		     ext_sec->desc);
		return -EINVAL;
	}

	/* The start of the info sec (including the __u32 record_size). */
	info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
	info_left = ext_sec->len;

	if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
		pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
			 ext_sec->desc, ext_sec->off, ext_sec->len);
		return -EINVAL;
	}

	/* At least a record size */
	if (info_left < sizeof(__u32)) {
		pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
		return -EINVAL;
	}

	/* The record size needs to meet either the minimum standard or, when
	 * handling non-native endianness data, the exact standard so as
	 * to allow safe byte-swapping.
	 */
	record_size = is_native ? *(__u32 *)info : bswap_32(*(__u32 *)info);
	if (record_size < ext_sec->min_rec_size ||
	    (!is_native && record_size != ext_sec->min_rec_size) ||
	    record_size & 0x03) {
		pr_debug("%s section in .BTF.ext has invalid record size %u\n",
			 ext_sec->desc, record_size);
		return -EINVAL;
	}

	sinfo = info + sizeof(__u32);
	info_left -= sizeof(__u32);

	/* If no records, return failure now so .BTF.ext won't be used. */
	if (!info_left) {
		pr_debug("%s section in .BTF.ext has no records\n", ext_sec->desc);
		return -EINVAL;
	}

	/* walk all per-ELF-section record groups, validating their sizing */
	while (info_left) {
		unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
		__u64 total_record_size;
		__u32 num_records;

		if (info_left < sec_hdrlen) {
			pr_debug("%s section header is not found in .BTF.ext\n",
			     ext_sec->desc);
			return -EINVAL;
		}

		num_records = is_native ? sinfo->num_info : bswap_32(sinfo->num_info);
		if (num_records == 0) {
			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
			     ext_sec->desc);
			return -EINVAL;
		}

		/* __u64 math avoids overflow of num_records * record_size */
		total_record_size = sec_hdrlen + (__u64)num_records * record_size;
		if (info_left < total_record_size) {
			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
			     ext_sec->desc);
			return -EINVAL;
		}

		info_left -= total_record_size;
		sinfo = (void *)sinfo + total_record_size;
		sec_cnt++;
	}

	/* cache subsection location and sizing for later lookups */
	ext_info = ext_sec->ext_info;
	ext_info->len = ext_sec->len - sizeof(__u32);
	ext_info->rec_size = record_size;
	ext_info->info = info + sizeof(__u32);
	ext_info->sec_cnt = sec_cnt;

	return 0;
}
3056 
3057 /* Parse all info secs in the BTF.ext info data */
static int btf_ext_parse_info(struct btf_ext *btf_ext, bool is_native)
{
	struct btf_ext_sec_info_param func_info = {
		.off = btf_ext->hdr->func_info_off,
		.len = btf_ext->hdr->func_info_len,
		.min_rec_size = sizeof(struct bpf_func_info_min),
		.ext_info = &btf_ext->func_info,
		.desc = "func_info"
	};
	struct btf_ext_sec_info_param line_info = {
		.off = btf_ext->hdr->line_info_off,
		.len = btf_ext->hdr->line_info_len,
		.min_rec_size = sizeof(struct bpf_line_info_min),
		.ext_info = &btf_ext->line_info,
		.desc = "line_info",
	};
	/* off/len left zero here; filled in below only if the header is new
	 * enough to carry core_relo fields
	 */
	struct btf_ext_sec_info_param core_relo = {
		.min_rec_size = sizeof(struct bpf_core_relo),
		.ext_info = &btf_ext->core_relo_info,
		.desc = "core_relo",
	};
	int err;

	err = btf_ext_parse_sec_info(btf_ext, &func_info, is_native);
	if (err)
		return err;

	err = btf_ext_parse_sec_info(btf_ext, &line_info, is_native);
	if (err)
		return err;

	/* older .BTF.ext headers predate CO-RE relocation info */
	if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
		return 0; /* skip core relos parsing */

	core_relo.off = btf_ext->hdr->core_relo_off;
	core_relo.len = btf_ext->hdr->core_relo_len;
	err = btf_ext_parse_sec_info(btf_ext, &core_relo, is_native);
	if (err)
		return err;

	return 0;
}
3100 
3101 /* Swap byte-order of BTF.ext header with any endianness */
/* Swap byte-order of BTF.ext header with any endianness */
static void btf_ext_bswap_hdr(struct btf_ext_header *h)
{
	/* detect current byte-order via magic to read hdr_len correctly */
	bool is_native = h->magic == BTF_MAGIC;
	__u32 hdr_len;

	/* capture native-order hdr_len before swapping the field below */
	hdr_len = is_native ? h->hdr_len : bswap_32(h->hdr_len);

	h->magic = bswap_16(h->magic);
	h->hdr_len = bswap_32(h->hdr_len);
	h->func_info_off = bswap_32(h->func_info_off);
	h->func_info_len = bswap_32(h->func_info_len);
	h->line_info_off = bswap_32(h->line_info_off);
	h->line_info_len = bswap_32(h->line_info_len);

	/* older headers end before core_relo fields; don't touch them */
	if (hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
		return;

	h->core_relo_off = bswap_32(h->core_relo_off);
	h->core_relo_len = bswap_32(h->core_relo_len);
}
3122 
3123 /* Swap byte-order of generic info subsection */
/* Swap byte-order of generic info subsection */
static void btf_ext_bswap_info_sec(void *info, __u32 len, bool is_native,
				   info_rec_bswap_fn bswap_fn)
{
	struct btf_ext_info_sec *sec;
	__u32 info_left, rec_size, *rs;

	if (len == 0)
		return;

	rs = info;				/* info record size */
	/* read rec_size in native order, then swap the stored field */
	rec_size = is_native ? *rs : bswap_32(*rs);
	*rs = bswap_32(*rs);

	sec = info + sizeof(__u32);		/* info sec #1 */
	info_left = len - sizeof(__u32);
	while (info_left) {
		unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
		__u32 i, num_recs;
		void *p;

		/* native-order record count must be read before swapping it */
		num_recs = is_native ? sec->num_info : bswap_32(sec->num_info);
		sec->sec_name_off = bswap_32(sec->sec_name_off);
		sec->num_info = bswap_32(sec->num_info);
		p = sec->data;			/* info rec #1 */
		for (i = 0; i < num_recs; i++, p += rec_size)
			bswap_fn(p);
		sec = p;			/* next section starts right after last record */
		info_left -= sec_hdrlen + (__u64)rec_size * num_recs;
	}
}
3154 
3155 /*
3156  * Swap byte-order of all info data in a BTF.ext section
3157  *  - requires BTF.ext hdr in native endianness
3158  */
static void btf_ext_bswap_info(struct btf_ext *btf_ext, void *data)
{
	/* swapped_endian is only set after initial parsing completes (see
	 * btf_ext_parse()), so at this point it reflects whether *data* is
	 * currently in native byte-order: false during initial conversion of
	 * foreign-endian input, true when swapping native data back
	 */
	const bool is_native = btf_ext->swapped_endian;
	const struct btf_ext_header *h = data;
	void *info;

	/* Swap func_info subsection byte-order */
	info = data + h->hdr_len + h->func_info_off;
	btf_ext_bswap_info_sec(info, h->func_info_len, is_native,
			       (info_rec_bswap_fn)bpf_func_info_bswap);

	/* Swap line_info subsection byte-order */
	info = data + h->hdr_len + h->line_info_off;
	btf_ext_bswap_info_sec(info, h->line_info_len, is_native,
			       (info_rec_bswap_fn)bpf_line_info_bswap);

	/* Swap core_relo subsection byte-order (if present) */
	if (h->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
		return;

	info = data + h->hdr_len + h->core_relo_off;
	btf_ext_bswap_info_sec(info, h->core_relo_len, is_native,
			       (info_rec_bswap_fn)bpf_core_relo_bswap);
}
3183 
3184 /* Parse hdr data and info sections: check and convert to native endianness */
/* Parse hdr data and info sections: check and convert to native endianness */
static int btf_ext_parse(struct btf_ext *btf_ext)
{
	__u32 hdr_len, data_size = btf_ext->data_size;
	struct btf_ext_header *hdr = btf_ext->hdr;
	bool swapped_endian = false;
	int err;

	if (data_size < offsetofend(struct btf_ext_header, hdr_len)) {
		pr_debug("BTF.ext header too short\n");
		return -EINVAL;
	}

	/* a byte-swapped magic indicates foreign-endian input data */
	hdr_len = hdr->hdr_len;
	if (hdr->magic == bswap_16(BTF_MAGIC)) {
		swapped_endian = true;
		hdr_len = bswap_32(hdr_len);
	} else if (hdr->magic != BTF_MAGIC) {
		pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
		return -EINVAL;
	}

	/* Ensure known version of structs, current BTF_VERSION == 1 */
	if (hdr->version != 1) {
		pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
		return -ENOTSUP;
	}

	/* version and flags are single bytes, so no swapping needed above */
	if (hdr->flags) {
		pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
		return -ENOTSUP;
	}

	if (data_size < hdr_len) {
		pr_debug("BTF.ext header not found\n");
		return -EINVAL;
	} else if (data_size == hdr_len) {
		pr_debug("BTF.ext has no data\n");
		return -EINVAL;
	}

	/* Verify mandatory hdr info details present */
	if (hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
		pr_warn("BTF.ext header missing func_info, line_info\n");
		return -EINVAL;
	}

	/* Keep hdr native byte-order in memory for introspection */
	if (swapped_endian)
		btf_ext_bswap_hdr(btf_ext->hdr);

	/* Validate info subsections and cache key metadata */
	err = btf_ext_parse_info(btf_ext, !swapped_endian);
	if (err)
		return err;

	/* Keep infos native byte-order in memory for introspection */
	if (swapped_endian)
		btf_ext_bswap_info(btf_ext, btf_ext->data);

	/*
	 * Set btf_ext->swapped_endian only after all header and info data has
	 * been swapped, helping bswap functions determine if their data are
	 * in native byte-order when called.
	 */
	btf_ext->swapped_endian = swapped_endian;
	return 0;
}
3252 
/* Free btf_ext and all memory it owns: per-section index arrays and both
 * (native and, if cached, swapped-endianness) raw data copies. Tolerates
 * NULL and ERR-encoded pointers so callers can free unconditionally on
 * error paths.
 */
void btf_ext__free(struct btf_ext *btf_ext)
{
	if (IS_ERR_OR_NULL(btf_ext))
		return;
	free(btf_ext->func_info.sec_idxs);
	free(btf_ext->line_info.sec_idxs);
	free(btf_ext->core_relo_info.sec_idxs);
	free(btf_ext->data);
	free(btf_ext->data_swapped);
	free(btf_ext);
}
3264 
3265 struct btf_ext *btf_ext__new(const __u8 *data, __u32 size)
3266 {
3267 	struct btf_ext *btf_ext;
3268 	int err;
3269 
3270 	btf_ext = calloc(1, sizeof(struct btf_ext));
3271 	if (!btf_ext)
3272 		return libbpf_err_ptr(-ENOMEM);
3273 
3274 	btf_ext->data_size = size;
3275 	btf_ext->data = malloc(size);
3276 	if (!btf_ext->data) {
3277 		err = -ENOMEM;
3278 		goto done;
3279 	}
3280 	memcpy(btf_ext->data, data, size);
3281 
3282 	err = btf_ext_parse(btf_ext);
3283 
3284 done:
3285 	if (err) {
3286 		btf_ext__free(btf_ext);
3287 		return libbpf_err_ptr(err);
3288 	}
3289 
3290 	return btf_ext;
3291 }
3292 
3293 static void *btf_ext_raw_data(const struct btf_ext *btf_ext_ro, bool swap_endian)
3294 {
3295 	struct btf_ext *btf_ext = (struct btf_ext *)btf_ext_ro;
3296 	const __u32 data_sz = btf_ext->data_size;
3297 	void *data;
3298 
3299 	/* Return native data (always present) or swapped data if present */
3300 	if (!swap_endian)
3301 		return btf_ext->data;
3302 	else if (btf_ext->data_swapped)
3303 		return btf_ext->data_swapped;
3304 
3305 	/* Recreate missing swapped data, then cache and return */
3306 	data = calloc(1, data_sz);
3307 	if (!data)
3308 		return NULL;
3309 	memcpy(data, btf_ext->data, data_sz);
3310 
3311 	btf_ext_bswap_info(btf_ext, data);
3312 	btf_ext_bswap_hdr(data);
3313 	btf_ext->data_swapped = data;
3314 	return data;
3315 }
3316 
3317 const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size)
3318 {
3319 	void *data;
3320 
3321 	data = btf_ext_raw_data(btf_ext, btf_ext->swapped_endian);
3322 	if (!data)
3323 		return errno = ENOMEM, NULL;
3324 
3325 	*size = btf_ext->data_size;
3326 	return data;
3327 }
3328 
/* Older API name kept as an alias of btf_ext__raw_data() for compatibility */
__attribute__((alias("btf_ext__raw_data")))
const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size);
3331 
3332 enum btf_endianness btf_ext__endianness(const struct btf_ext *btf_ext)
3333 {
3334 	if (is_host_big_endian())
3335 		return btf_ext->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
3336 	else
3337 		return btf_ext->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
3338 }
3339 
3340 int btf_ext__set_endianness(struct btf_ext *btf_ext, enum btf_endianness endian)
3341 {
3342 	if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
3343 		return libbpf_err(-EINVAL);
3344 
3345 	btf_ext->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
3346 
3347 	if (!btf_ext->swapped_endian) {
3348 		free(btf_ext->data_swapped);
3349 		btf_ext->data_swapped = NULL;
3350 	}
3351 	return 0;
3352 }
3353 
3354 struct btf_dedup;
3355 
3356 static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
3357 static void btf_dedup_free(struct btf_dedup *d);
3358 static int btf_dedup_prep(struct btf_dedup *d);
3359 static int btf_dedup_strings(struct btf_dedup *d);
3360 static int btf_dedup_prim_types(struct btf_dedup *d);
3361 static int btf_dedup_struct_types(struct btf_dedup *d);
3362 static int btf_dedup_ref_types(struct btf_dedup *d);
3363 static int btf_dedup_resolve_fwds(struct btf_dedup *d);
3364 static int btf_dedup_compact_types(struct btf_dedup *d);
3365 static int btf_dedup_remap_types(struct btf_dedup *d);
3366 
3367 /*
3368  * Deduplicate BTF types and strings.
3369  *
3370  * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF
3371  * section with all BTF type descriptors and string data. It overwrites that
3372  * memory in-place with deduplicated types and strings without any loss of
3373  * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section
3374  * is provided, all the strings referenced from .BTF.ext section are honored
3375  * and updated to point to the right offsets after deduplication.
3376  *
3377  * If function returns with error, type/string data might be garbled and should
3378  * be discarded.
3379  *
3380  * More verbose and detailed description of both problem btf_dedup is solving,
3381  * as well as solution could be found at:
3382  * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
3383  *
3384  * Problem description and justification
3385  * =====================================
3386  *
3387  * BTF type information is typically emitted either as a result of conversion
3388  * from DWARF to BTF or directly by compiler. In both cases, each compilation
3389  * unit contains information about a subset of all the types that are used
3390  * in an application. These subsets are frequently overlapping and contain a lot
3391  * of duplicated information when later concatenated together into a single
3392  * binary. This algorithm ensures that each unique type is represented by single
3393  * BTF type descriptor, greatly reducing resulting size of BTF data.
3394  *
3395  * Compilation unit isolation and subsequent duplication of data is not the only
3396  * problem. The same type hierarchy (e.g., struct and all the type that struct
3397  * references) in different compilation units can be represented in BTF to
3398  * various degrees of completeness (or, rather, incompleteness) due to
3399  * struct/union forward declarations.
3400  *
3401  * Let's take a look at an example, that we'll use to better understand the
3402  * problem (and solution). Suppose we have two compilation units, each using
3403  * same `struct S`, but each of them having incomplete type information about
3404  * struct's fields:
3405  *
3406  * // CU #1:
3407  * struct S;
3408  * struct A {
3409  *	int a;
3410  *	struct A* self;
3411  *	struct S* parent;
3412  * };
3413  * struct B;
3414  * struct S {
3415  *	struct A* a_ptr;
3416  *	struct B* b_ptr;
3417  * };
3418  *
3419  * // CU #2:
3420  * struct S;
3421  * struct A;
3422  * struct B {
3423  *	int b;
3424  *	struct B* self;
3425  *	struct S* parent;
3426  * };
3427  * struct S {
3428  *	struct A* a_ptr;
3429  *	struct B* b_ptr;
3430  * };
3431  *
 * In case of CU #1, BTF data will know only that `struct B` exists (but no
 * more), but will know the complete type information about `struct A`. While
 * for CU #2, it will know full type information about `struct B`, but will
 * only know about forward declaration of `struct A` (in BTF terms, it will
 * have `BTF_KIND_FWD` type descriptor with name `A`).
3437  *
3438  * This compilation unit isolation means that it's possible that there is no
3439  * single CU with complete type information describing structs `S`, `A`, and
3440  * `B`. Also, we might get tons of duplicated and redundant type information.
3441  *
3442  * Additional complication we need to keep in mind comes from the fact that
3443  * types, in general, can form graphs containing cycles, not just DAGs.
3444  *
 * While the algorithm does deduplication, it also merges and resolves type
 * information (unless disabled through `struct btf_dedup_opts`), whenever
 * possible.
3447  * E.g., in the example above with two compilation units having partial type
3448  * information for structs `A` and `B`, the output of algorithm will emit
3449  * a single copy of each BTF type that describes structs `A`, `B`, and `S`
3450  * (as well as type information for `int` and pointers), as if they were defined
3451  * in a single compilation unit as:
3452  *
3453  * struct A {
3454  *	int a;
3455  *	struct A* self;
3456  *	struct S* parent;
3457  * };
3458  * struct B {
3459  *	int b;
3460  *	struct B* self;
3461  *	struct S* parent;
3462  * };
3463  * struct S {
3464  *	struct A* a_ptr;
3465  *	struct B* b_ptr;
3466  * };
3467  *
3468  * Algorithm summary
3469  * =================
3470  *
3471  * Algorithm completes its work in 7 separate passes:
3472  *
3473  * 1. Strings deduplication.
3474  * 2. Primitive types deduplication (int, enum, fwd).
3475  * 3. Struct/union types deduplication.
3476  * 4. Resolve unambiguous forward declarations.
3477  * 5. Reference types deduplication (pointers, typedefs, arrays, funcs, func
3478  *    protos, and const/volatile/restrict modifiers).
3479  * 6. Types compaction.
3480  * 7. Types remapping.
3481  *
3482  * Algorithm determines canonical type descriptor, which is a single
3483  * representative type for each truly unique type. This canonical type is the
3484  * one that will go into final deduplicated BTF type information. For
3485  * struct/unions, it is also the type that algorithm will merge additional type
3486  * information into (while resolving FWDs), as it discovers it from data in
3487  * other CUs. Each input BTF type eventually gets either mapped to itself, if
3488  * that type is canonical, or to some other type, if that type is equivalent
3489  * and was chosen as canonical representative. This mapping is stored in
3490  * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that
3491  * FWD type got resolved to.
3492  *
3493  * To facilitate fast discovery of canonical types, we also maintain canonical
3494  * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash
3495  * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types
3496  * that match that signature. With sufficiently good choice of type signature
3497  * hashing function, we can limit number of canonical types for each unique type
3498  * signature to a very small number, allowing to find canonical type for any
3499  * duplicated type very quickly.
3500  *
3501  * Struct/union deduplication is the most critical part and algorithm for
3502  * deduplicating structs/unions is described in greater details in comments for
3503  * `btf_dedup_is_equiv` function.
3504  */
3505 int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
3506 {
3507 	struct btf_dedup *d;
3508 	int err;
3509 
3510 	if (!OPTS_VALID(opts, btf_dedup_opts))
3511 		return libbpf_err(-EINVAL);
3512 
3513 	d = btf_dedup_new(btf, opts);
3514 	if (IS_ERR(d)) {
3515 		pr_debug("btf_dedup_new failed: %ld\n", PTR_ERR(d));
3516 		return libbpf_err(-EINVAL);
3517 	}
3518 
3519 	if (btf_ensure_modifiable(btf)) {
3520 		err = -ENOMEM;
3521 		goto done;
3522 	}
3523 
3524 	err = btf_dedup_prep(d);
3525 	if (err) {
3526 		pr_debug("btf_dedup_prep failed: %s\n", errstr(err));
3527 		goto done;
3528 	}
3529 	err = btf_dedup_strings(d);
3530 	if (err < 0) {
3531 		pr_debug("btf_dedup_strings failed: %s\n", errstr(err));
3532 		goto done;
3533 	}
3534 	err = btf_dedup_prim_types(d);
3535 	if (err < 0) {
3536 		pr_debug("btf_dedup_prim_types failed: %s\n", errstr(err));
3537 		goto done;
3538 	}
3539 	err = btf_dedup_struct_types(d);
3540 	if (err < 0) {
3541 		pr_debug("btf_dedup_struct_types failed: %s\n", errstr(err));
3542 		goto done;
3543 	}
3544 	err = btf_dedup_resolve_fwds(d);
3545 	if (err < 0) {
3546 		pr_debug("btf_dedup_resolve_fwds failed: %s\n", errstr(err));
3547 		goto done;
3548 	}
3549 	err = btf_dedup_ref_types(d);
3550 	if (err < 0) {
3551 		pr_debug("btf_dedup_ref_types failed: %s\n", errstr(err));
3552 		goto done;
3553 	}
3554 	err = btf_dedup_compact_types(d);
3555 	if (err < 0) {
3556 		pr_debug("btf_dedup_compact_types failed: %s\n", errstr(err));
3557 		goto done;
3558 	}
3559 	err = btf_dedup_remap_types(d);
3560 	if (err < 0) {
3561 		pr_debug("btf_dedup_remap_types failed: %s\n", errstr(err));
3562 		goto done;
3563 	}
3564 
3565 done:
3566 	btf_dedup_free(d);
3567 	return libbpf_err(err);
3568 }
3569 
3570 #define BTF_UNPROCESSED_ID ((__u32)-1)
3571 #define BTF_IN_PROGRESS_ID ((__u32)-2)
3572 
struct btf_dedup {
	/* .BTF section to be deduped in-place */
	struct btf *btf;
	/*
	 * Optional .BTF.ext section. When provided, any strings referenced
	 * from it will be taken into account when deduping strings
	 */
	struct btf_ext *btf_ext;
	/*
	 * This is a map from any type's signature hash to a list of possible
	 * canonical representative type candidates. Hash collisions are
	 * ignored, so even types of various kinds can share same list of
	 * candidates, which is fine because we rely on subsequent
	 * btf_xxx_equal() checks to authoritatively verify type equality.
	 */
	struct hashmap *dedup_table;
	/* Canonical types map: maps each type ID to its canonical type ID */
	__u32 *map;
	/* Hypothetical mapping, used during type graph equivalence checks */
	__u32 *hypot_map;
	/* IDs that got an entry in hypot_map, for fast reset between checks */
	__u32 *hypot_list;
	/* number of used entries in hypot_list */
	size_t hypot_cnt;
	/* allocated capacity of hypot_list */
	size_t hypot_cap;
	/* Whether hypothetical mapping, if successful, would need to adjust
	 * already canonicalized types (due to a new forward declaration to
	 * concrete type resolution). In such case, during split BTF dedup
	 * candidate type would still be considered as different, because base
	 * BTF is considered to be immutable.
	 */
	bool hypot_adjust_canon;
	/* Various option modifying behavior of algorithm */
	struct btf_dedup_opts opts;
	/* temporary strings deduplication state */
	struct strset *strs_set;
};
3608 
/* Fold 'value' into running hash 'h' (classic multiply-by-31 step) */
static unsigned long hash_combine(unsigned long h, unsigned long value)
{
	/* h * 31 + value, with the multiply spelled as a shift-and-subtract;
	 * unsigned wrap-around makes both forms bit-identical
	 */
	return (h << 5) - h + value;
}
3613 
3614 #define for_each_dedup_cand(d, node, hash) \
3615 	hashmap__for_each_key_entry(d->dedup_table, node, hash)
3616 
/* Register type_id as another canonical candidate for given signature hash */
static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
{
	return hashmap__append(d->dedup_table, hash, type_id);
}
3621 
3622 static int btf_dedup_hypot_map_add(struct btf_dedup *d,
3623 				   __u32 from_id, __u32 to_id)
3624 {
3625 	if (d->hypot_cnt == d->hypot_cap) {
3626 		__u32 *new_list;
3627 
3628 		d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
3629 		new_list = libbpf_reallocarray(d->hypot_list, d->hypot_cap, sizeof(__u32));
3630 		if (!new_list)
3631 			return -ENOMEM;
3632 		d->hypot_list = new_list;
3633 	}
3634 	d->hypot_list[d->hypot_cnt++] = from_id;
3635 	d->hypot_map[from_id] = to_id;
3636 	return 0;
3637 }
3638 
3639 static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
3640 {
3641 	int i;
3642 
3643 	for (i = 0; i < d->hypot_cnt; i++)
3644 		d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
3645 	d->hypot_cnt = 0;
3646 	d->hypot_adjust_canon = false;
3647 }
3648 
/* Free all dedup state. Note: d->strs_set is not freed here; it is either
 * transferred to d->btf or freed by btf_dedup_strings() itself.
 */
static void btf_dedup_free(struct btf_dedup *d)
{
	hashmap__free(d->dedup_table);
	d->dedup_table = NULL;

	free(d->map);
	d->map = NULL;

	free(d->hypot_map);
	d->hypot_map = NULL;

	free(d->hypot_list);
	d->hypot_list = NULL;

	free(d);
}
3665 
/* Keys are precomputed type signature hashes already, use them directly */
static size_t btf_dedup_identity_hash_fn(long key, void *ctx)
{
	return key;
}
3670 
/* Degenerate hash forcing all keys into one bucket; selected when
 * opts.force_collisions is set (see btf_dedup_new())
 */
static size_t btf_dedup_collision_hash_fn(long key, void *ctx)
{
	return 0;
}
3675 
/* Dedup table keys match only when exactly equal */
static bool btf_dedup_equal_fn(long k1, long k2, void *ctx)
{
	return k1 == k2;
}
3680 
/* Allocate and initialize dedup state for given BTF (+ optional .BTF.ext).
 * Returns ERR-encoded pointer on failure; partially-initialized state is
 * released via btf_dedup_free().
 */
static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts)
{
	struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
	hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
	int i, err = 0, type_cnt;

	if (!d)
		return ERR_PTR(-ENOMEM);

	/* optionally force all candidates into a single hashmap bucket */
	if (OPTS_GET(opts, force_collisions, false))
		hash_fn = btf_dedup_collision_hash_fn;

	d->btf = btf;
	d->btf_ext = OPTS_GET(opts, btf_ext, NULL);

	d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
	if (IS_ERR(d->dedup_table)) {
		err = PTR_ERR(d->dedup_table);
		d->dedup_table = NULL;
		goto done;
	}

	type_cnt = btf__type_cnt(btf);
	d->map = malloc(sizeof(__u32) * type_cnt);
	if (!d->map) {
		err = -ENOMEM;
		goto done;
	}
	/* special BTF "void" type is made canonical immediately */
	d->map[0] = 0;
	for (i = 1; i < type_cnt; i++) {
		struct btf_type *t = btf_type_by_id(d->btf, i);

		/* VAR and DATASEC are never deduped and are self-canonical */
		if (btf_is_var(t) || btf_is_datasec(t))
			d->map[i] = i;
		else
			d->map[i] = BTF_UNPROCESSED_ID;
	}

	d->hypot_map = malloc(sizeof(__u32) * type_cnt);
	if (!d->hypot_map) {
		err = -ENOMEM;
		goto done;
	}
	for (i = 0; i < type_cnt; i++)
		d->hypot_map[i] = BTF_UNPROCESSED_ID;

done:
	if (err) {
		btf_dedup_free(d);
		return ERR_PTR(err);
	}

	return d;
}
3737 
3738 /*
3739  * Iterate over all possible places in .BTF and .BTF.ext that can reference
3740  * string and pass pointer to it to a provided callback `fn`.
3741  */
3742 static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *ctx)
3743 {
3744 	int i, r;
3745 
3746 	for (i = 0; i < d->btf->nr_types; i++) {
3747 		struct btf_field_iter it;
3748 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
3749 		__u32 *str_off;
3750 
3751 		r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
3752 		if (r)
3753 			return r;
3754 
3755 		while ((str_off = btf_field_iter_next(&it))) {
3756 			r = fn(str_off, ctx);
3757 			if (r)
3758 				return r;
3759 		}
3760 	}
3761 
3762 	if (!d->btf_ext)
3763 		return 0;
3764 
3765 	r = btf_ext_visit_str_offs(d->btf_ext, fn, ctx);
3766 	if (r)
3767 		return r;
3768 
3769 	return 0;
3770 }
3771 
3772 static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx)
3773 {
3774 	struct btf_dedup *d = ctx;
3775 	__u32 str_off = *str_off_ptr;
3776 	const char *s;
3777 	int off, err;
3778 
3779 	/* don't touch empty string or string in main BTF */
3780 	if (str_off == 0 || str_off < d->btf->start_str_off)
3781 		return 0;
3782 
3783 	s = btf__str_by_offset(d->btf, str_off);
3784 	if (d->btf->base_btf) {
3785 		err = btf__find_str(d->btf->base_btf, s);
3786 		if (err >= 0) {
3787 			*str_off_ptr = err;
3788 			return 0;
3789 		}
3790 		if (err != -ENOENT)
3791 			return err;
3792 	}
3793 
3794 	off = strset__add_str(d->strs_set, s);
3795 	if (off < 0)
3796 		return off;
3797 
3798 	*str_off_ptr = d->btf->start_str_off + off;
3799 	return 0;
3800 }
3801 
3802 /*
3803  * Dedup string and filter out those that are not referenced from either .BTF
3804  * or .BTF.ext (if provided) sections.
3805  *
3806  * This is done by building index of all strings in BTF's string section,
3807  * then iterating over all entities that can reference strings (e.g., type
3808  * names, struct field names, .BTF.ext line info, etc) and marking corresponding
3809  * strings as used. After that all used strings are deduped and compacted into
3810  * sequential blob of memory and new offsets are calculated. Then all the string
3811  * references are iterated again and rewritten using new offsets.
3812  */
static int btf_dedup_strings(struct btf_dedup *d)
{
	int err;

	/* strings are deduped at most once per btf instance */
	if (d->btf->strs_deduped)
		return 0;

	d->strs_set = strset__new(BTF_MAX_STR_OFFSET, NULL, 0);
	if (IS_ERR(d->strs_set)) {
		err = PTR_ERR(d->strs_set);
		goto err_out;
	}

	if (!d->btf->base_btf) {
		/* insert empty string; we won't be looking it up during strings
		 * dedup, but it's good to have it for generic BTF string lookups
		 */
		err = strset__add_str(d->strs_set, "");
		if (err < 0)
			goto err_out;
	}

	/* remap string offsets */
	err = btf_for_each_str_off(d, strs_dedup_remap_str_off, d);
	if (err)
		goto err_out;

	/* replace BTF string data and hash with deduped ones; ownership of
	 * the new strset is transferred to d->btf here
	 */
	strset__free(d->btf->strs_set);
	d->btf->hdr->str_len = strset__data_size(d->strs_set);
	d->btf->strs_set = d->strs_set;
	d->strs_set = NULL;
	d->btf->strs_deduped = true;
	return 0;

err_out:
	strset__free(d->strs_set);
	d->strs_set = NULL;

	return err;
}
3854 
3855 static long btf_hash_common(struct btf_type *t)
3856 {
3857 	long h;
3858 
3859 	h = hash_combine(0, t->name_off);
3860 	h = hash_combine(h, t->info);
3861 	h = hash_combine(h, t->size);
3862 	return h;
3863 }
3864 
3865 static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
3866 {
3867 	return t1->name_off == t2->name_off &&
3868 	       t1->info == t2->info &&
3869 	       t1->size == t2->size;
3870 }
3871 
3872 /* Calculate type signature hash of INT or TAG. */
3873 static long btf_hash_int_decl_tag(struct btf_type *t)
3874 {
3875 	__u32 info = *(__u32 *)(t + 1);
3876 	long h;
3877 
3878 	h = btf_hash_common(t);
3879 	h = hash_combine(h, info);
3880 	return h;
3881 }
3882 
3883 /* Check structural equality of two INTs or TAGs. */
3884 static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2)
3885 {
3886 	__u32 info1, info2;
3887 
3888 	if (!btf_equal_common(t1, t2))
3889 		return false;
3890 	info1 = *(__u32 *)(t1 + 1);
3891 	info2 = *(__u32 *)(t2 + 1);
3892 	return info1 == info2;
3893 }
3894 
3895 /* Calculate type signature hash of ENUM/ENUM64. */
3896 static long btf_hash_enum(struct btf_type *t)
3897 {
3898 	long h;
3899 
3900 	/* don't hash vlen, enum members and size to support enum fwd resolving */
3901 	h = hash_combine(0, t->name_off);
3902 	return h;
3903 }
3904 
3905 static bool btf_equal_enum_members(struct btf_type *t1, struct btf_type *t2)
3906 {
3907 	const struct btf_enum *m1, *m2;
3908 	__u16 vlen;
3909 	int i;
3910 
3911 	vlen = btf_vlen(t1);
3912 	m1 = btf_enum(t1);
3913 	m2 = btf_enum(t2);
3914 	for (i = 0; i < vlen; i++) {
3915 		if (m1->name_off != m2->name_off || m1->val != m2->val)
3916 			return false;
3917 		m1++;
3918 		m2++;
3919 	}
3920 	return true;
3921 }
3922 
3923 static bool btf_equal_enum64_members(struct btf_type *t1, struct btf_type *t2)
3924 {
3925 	const struct btf_enum64 *m1, *m2;
3926 	__u16 vlen;
3927 	int i;
3928 
3929 	vlen = btf_vlen(t1);
3930 	m1 = btf_enum64(t1);
3931 	m2 = btf_enum64(t2);
3932 	for (i = 0; i < vlen; i++) {
3933 		if (m1->name_off != m2->name_off || m1->val_lo32 != m2->val_lo32 ||
3934 		    m1->val_hi32 != m2->val_hi32)
3935 			return false;
3936 		m1++;
3937 		m2++;
3938 	}
3939 	return true;
3940 }
3941 
3942 /* Check structural equality of two ENUMs or ENUM64s. */
3943 static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
3944 {
3945 	if (!btf_equal_common(t1, t2))
3946 		return false;
3947 
3948 	/* t1 & t2 kinds are identical because of btf_equal_common */
3949 	if (btf_kind(t1) == BTF_KIND_ENUM)
3950 		return btf_equal_enum_members(t1, t2);
3951 	else
3952 		return btf_equal_enum64_members(t1, t2);
3953 }
3954 
3955 static inline bool btf_is_enum_fwd(struct btf_type *t)
3956 {
3957 	return btf_is_any_enum(t) && btf_vlen(t) == 0;
3958 }
3959 
3960 static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
3961 {
3962 	if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
3963 		return btf_equal_enum(t1, t2);
3964 	/* At this point either t1 or t2 or both are forward declarations, thus:
3965 	 * - skip comparing vlen because it is zero for forward declarations;
3966 	 * - skip comparing size to allow enum forward declarations
3967 	 *   to be compatible with enum64 full declarations;
3968 	 * - skip comparing kind for the same reason.
3969 	 */
3970 	return t1->name_off == t2->name_off &&
3971 	       btf_is_any_enum(t1) && btf_is_any_enum(t2);
3972 }
3973 
3974 /*
3975  * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
3976  * as referenced type IDs equivalence is established separately during type
3977  * graph equivalence check algorithm.
3978  */
3979 static long btf_hash_struct(struct btf_type *t)
3980 {
3981 	const struct btf_member *member = btf_members(t);
3982 	__u32 vlen = btf_vlen(t);
3983 	long h = btf_hash_common(t);
3984 	int i;
3985 
3986 	for (i = 0; i < vlen; i++) {
3987 		h = hash_combine(h, member->name_off);
3988 		h = hash_combine(h, member->offset);
3989 		/* no hashing of referenced type ID, it can be unresolved yet */
3990 		member++;
3991 	}
3992 	return h;
3993 }
3994 
3995 /*
3996  * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced
3997  * type IDs. This check is performed during type graph equivalence check and
3998  * referenced types equivalence is checked separately.
3999  */
4000 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
4001 {
4002 	const struct btf_member *m1, *m2;
4003 	__u16 vlen;
4004 	int i;
4005 
4006 	if (!btf_equal_common(t1, t2))
4007 		return false;
4008 
4009 	vlen = btf_vlen(t1);
4010 	m1 = btf_members(t1);
4011 	m2 = btf_members(t2);
4012 	for (i = 0; i < vlen; i++) {
4013 		if (m1->name_off != m2->name_off || m1->offset != m2->offset)
4014 			return false;
4015 		m1++;
4016 		m2++;
4017 	}
4018 	return true;
4019 }
4020 
4021 /*
4022  * Calculate type signature hash of ARRAY, including referenced type IDs,
4023  * under assumption that they were already resolved to canonical type IDs and
4024  * are not going to change.
4025  */
4026 static long btf_hash_array(struct btf_type *t)
4027 {
4028 	const struct btf_array *info = btf_array(t);
4029 	long h = btf_hash_common(t);
4030 
4031 	h = hash_combine(h, info->type);
4032 	h = hash_combine(h, info->index_type);
4033 	h = hash_combine(h, info->nelems);
4034 	return h;
4035 }
4036 
4037 /*
4038  * Check exact equality of two ARRAYs, taking into account referenced
4039  * type IDs, under assumption that they were already resolved to canonical
4040  * type IDs and are not going to change.
4041  * This function is called during reference types deduplication to compare
4042  * ARRAY to potential canonical representative.
4043  */
4044 static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
4045 {
4046 	const struct btf_array *info1, *info2;
4047 
4048 	if (!btf_equal_common(t1, t2))
4049 		return false;
4050 
4051 	info1 = btf_array(t1);
4052 	info2 = btf_array(t2);
4053 	return info1->type == info2->type &&
4054 	       info1->index_type == info2->index_type &&
4055 	       info1->nelems == info2->nelems;
4056 }
4057 
4058 /*
4059  * Check structural compatibility of two ARRAYs, ignoring referenced type
4060  * IDs. This check is performed during type graph equivalence check and
4061  * referenced types equivalence is checked separately.
4062  */
4063 static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
4064 {
4065 	if (!btf_equal_common(t1, t2))
4066 		return false;
4067 
4068 	return btf_array(t1)->nelems == btf_array(t2)->nelems;
4069 }
4070 
4071 /*
4072  * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
4073  * under assumption that they were already resolved to canonical type IDs and
4074  * are not going to change.
4075  */
4076 static long btf_hash_fnproto(struct btf_type *t)
4077 {
4078 	const struct btf_param *member = btf_params(t);
4079 	__u16 vlen = btf_vlen(t);
4080 	long h = btf_hash_common(t);
4081 	int i;
4082 
4083 	for (i = 0; i < vlen; i++) {
4084 		h = hash_combine(h, member->name_off);
4085 		h = hash_combine(h, member->type);
4086 		member++;
4087 	}
4088 	return h;
4089 }
4090 
4091 /*
4092  * Check exact equality of two FUNC_PROTOs, taking into account referenced
4093  * type IDs, under assumption that they were already resolved to canonical
4094  * type IDs and are not going to change.
4095  * This function is called during reference types deduplication to compare
4096  * FUNC_PROTO to potential canonical representative.
4097  */
4098 static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
4099 {
4100 	const struct btf_param *m1, *m2;
4101 	__u16 vlen;
4102 	int i;
4103 
4104 	if (!btf_equal_common(t1, t2))
4105 		return false;
4106 
4107 	vlen = btf_vlen(t1);
4108 	m1 = btf_params(t1);
4109 	m2 = btf_params(t2);
4110 	for (i = 0; i < vlen; i++) {
4111 		if (m1->name_off != m2->name_off || m1->type != m2->type)
4112 			return false;
4113 		m1++;
4114 		m2++;
4115 	}
4116 	return true;
4117 }
4118 
4119 /*
4120  * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
4121  * IDs. This check is performed during type graph equivalence check and
4122  * referenced types equivalence is checked separately.
4123  */
4124 static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
4125 {
4126 	const struct btf_param *m1, *m2;
4127 	__u16 vlen;
4128 	int i;
4129 
4130 	/* skip return type ID */
4131 	if (t1->name_off != t2->name_off || t1->info != t2->info)
4132 		return false;
4133 
4134 	vlen = btf_vlen(t1);
4135 	m1 = btf_params(t1);
4136 	m2 = btf_params(t2);
4137 	for (i = 0; i < vlen; i++) {
4138 		if (m1->name_off != m2->name_off)
4139 			return false;
4140 		m1++;
4141 		m2++;
4142 	}
4143 	return true;
4144 }
4145 
4146 /* Prepare split BTF for deduplication by calculating hashes of base BTF's
4147  * types and initializing the rest of the state (canonical type mapping) for
4148  * the fixed base BTF part.
4149  */
static int btf_dedup_prep(struct btf_dedup *d)
{
	struct btf_type *t;
	int type_id;
	long h;

	/* nothing to prepare unless this is split BTF on top of a base */
	if (!d->btf->base_btf)
		return 0;

	for (type_id = 1; type_id < d->btf->start_id; type_id++) {
		t = btf_type_by_id(d->btf, type_id);

		/* all base BTF types are self-canonical by definition */
		d->map[type_id] = type_id;

		/* register each base type as a dedup candidate, using the
		 * same per-kind hash functions as the dedup passes do, so
		 * that split BTF types can match against base BTF types
		 */
		switch (btf_kind(t)) {
		case BTF_KIND_VAR:
		case BTF_KIND_DATASEC:
			/* VAR and DATASEC are never hash/deduplicated */
			continue;
		case BTF_KIND_CONST:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_PTR:
		case BTF_KIND_FWD:
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_FUNC:
		case BTF_KIND_FLOAT:
		case BTF_KIND_TYPE_TAG:
			h = btf_hash_common(t);
			break;
		case BTF_KIND_INT:
		case BTF_KIND_DECL_TAG:
			h = btf_hash_int_decl_tag(t);
			break;
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
			h = btf_hash_enum(t);
			break;
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			h = btf_hash_struct(t);
			break;
		case BTF_KIND_ARRAY:
			h = btf_hash_array(t);
			break;
		case BTF_KIND_FUNC_PROTO:
			h = btf_hash_fnproto(t);
			break;
		default:
			pr_debug("unknown kind %d for type [%d]\n", btf_kind(t), type_id);
			return -EINVAL;
		}
		if (btf_dedup_table_add(d, h, type_id))
			return -ENOMEM;
	}

	return 0;
}
4209 
4210 /*
4211  * Deduplicate primitive types, that can't reference other types, by calculating
4212  * their type signature hash and comparing them with any possible canonical
4213  * candidate. If no canonical candidate matches, type itself is marked as
4214  * canonical and is added into `btf_dedup->dedup_table` as another candidate.
4215  */
4216 static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
4217 {
4218 	struct btf_type *t = btf_type_by_id(d->btf, type_id);
4219 	struct hashmap_entry *hash_entry;
4220 	struct btf_type *cand;
4221 	/* if we don't find equivalent type, then we are canonical */
4222 	__u32 new_id = type_id;
4223 	__u32 cand_id;
4224 	long h;
4225 
4226 	switch (btf_kind(t)) {
4227 	case BTF_KIND_CONST:
4228 	case BTF_KIND_VOLATILE:
4229 	case BTF_KIND_RESTRICT:
4230 	case BTF_KIND_PTR:
4231 	case BTF_KIND_TYPEDEF:
4232 	case BTF_KIND_ARRAY:
4233 	case BTF_KIND_STRUCT:
4234 	case BTF_KIND_UNION:
4235 	case BTF_KIND_FUNC:
4236 	case BTF_KIND_FUNC_PROTO:
4237 	case BTF_KIND_VAR:
4238 	case BTF_KIND_DATASEC:
4239 	case BTF_KIND_DECL_TAG:
4240 	case BTF_KIND_TYPE_TAG:
4241 		return 0;
4242 
4243 	case BTF_KIND_INT:
4244 		h = btf_hash_int_decl_tag(t);
4245 		for_each_dedup_cand(d, hash_entry, h) {
4246 			cand_id = hash_entry->value;
4247 			cand = btf_type_by_id(d->btf, cand_id);
4248 			if (btf_equal_int_tag(t, cand)) {
4249 				new_id = cand_id;
4250 				break;
4251 			}
4252 		}
4253 		break;
4254 
4255 	case BTF_KIND_ENUM:
4256 	case BTF_KIND_ENUM64:
4257 		h = btf_hash_enum(t);
4258 		for_each_dedup_cand(d, hash_entry, h) {
4259 			cand_id = hash_entry->value;
4260 			cand = btf_type_by_id(d->btf, cand_id);
4261 			if (btf_equal_enum(t, cand)) {
4262 				new_id = cand_id;
4263 				break;
4264 			}
4265 			if (btf_compat_enum(t, cand)) {
4266 				if (btf_is_enum_fwd(t)) {
4267 					/* resolve fwd to full enum */
4268 					new_id = cand_id;
4269 					break;
4270 				}
4271 				/* resolve canonical enum fwd to full enum */
4272 				d->map[cand_id] = type_id;
4273 			}
4274 		}
4275 		break;
4276 
4277 	case BTF_KIND_FWD:
4278 	case BTF_KIND_FLOAT:
4279 		h = btf_hash_common(t);
4280 		for_each_dedup_cand(d, hash_entry, h) {
4281 			cand_id = hash_entry->value;
4282 			cand = btf_type_by_id(d->btf, cand_id);
4283 			if (btf_equal_common(t, cand)) {
4284 				new_id = cand_id;
4285 				break;
4286 			}
4287 		}
4288 		break;
4289 
4290 	default:
4291 		return -EINVAL;
4292 	}
4293 
4294 	d->map[type_id] = new_id;
4295 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
4296 		return -ENOMEM;
4297 
4298 	return 0;
4299 }
4300 
4301 static int btf_dedup_prim_types(struct btf_dedup *d)
4302 {
4303 	int i, err;
4304 
4305 	for (i = 0; i < d->btf->nr_types; i++) {
4306 		err = btf_dedup_prim_type(d, d->btf->start_id + i);
4307 		if (err)
4308 			return err;
4309 	}
4310 	return 0;
4311 }
4312 
4313 /*
4314  * Check whether type is already mapped into canonical one (could be to itself).
4315  */
4316 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
4317 {
4318 	return d->map[type_id] <= BTF_MAX_NR_TYPES;
4319 }
4320 
4321 /*
4322  * Resolve type ID into its canonical type ID, if any; otherwise return original
4323  * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
4324  * STRUCT/UNION link and resolve it into canonical type ID as well.
4325  */
4326 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
4327 {
4328 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
4329 		type_id = d->map[type_id];
4330 	return type_id;
4331 }
4332 
4333 /*
4334  * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
4335  * type ID.
4336  */
4337 static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
4338 {
4339 	__u32 orig_type_id = type_id;
4340 
4341 	if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
4342 		return type_id;
4343 
4344 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
4345 		type_id = d->map[type_id];
4346 
4347 	if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
4348 		return type_id;
4349 
4350 	return orig_type_id;
4351 }
4352 
4353 
4354 static inline __u16 btf_fwd_kind(struct btf_type *t)
4355 {
4356 	return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
4357 }
4358 
4359 static bool btf_dedup_identical_types(struct btf_dedup *d, __u32 id1, __u32 id2, int depth)
4360 {
4361 	struct btf_type *t1, *t2;
4362 	int k1, k2;
4363 recur:
4364 	if (depth <= 0)
4365 		return false;
4366 
4367 	t1 = btf_type_by_id(d->btf, id1);
4368 	t2 = btf_type_by_id(d->btf, id2);
4369 
4370 	k1 = btf_kind(t1);
4371 	k2 = btf_kind(t2);
4372 	if (k1 != k2)
4373 		return false;
4374 
4375 	switch (k1) {
4376 	case BTF_KIND_UNKN: /* VOID */
4377 		return true;
4378 	case BTF_KIND_INT:
4379 		return btf_equal_int_tag(t1, t2);
4380 	case BTF_KIND_ENUM:
4381 	case BTF_KIND_ENUM64:
4382 		return btf_compat_enum(t1, t2);
4383 	case BTF_KIND_FWD:
4384 	case BTF_KIND_FLOAT:
4385 		return btf_equal_common(t1, t2);
4386 	case BTF_KIND_CONST:
4387 	case BTF_KIND_VOLATILE:
4388 	case BTF_KIND_RESTRICT:
4389 	case BTF_KIND_PTR:
4390 	case BTF_KIND_TYPEDEF:
4391 	case BTF_KIND_FUNC:
4392 	case BTF_KIND_TYPE_TAG:
4393 		if (t1->info != t2->info || t1->name_off != t2->name_off)
4394 			return false;
4395 		id1 = t1->type;
4396 		id2 = t2->type;
4397 		goto recur;
4398 	case BTF_KIND_ARRAY: {
4399 		struct btf_array *a1, *a2;
4400 
4401 		if (!btf_compat_array(t1, t2))
4402 			return false;
4403 
4404 		a1 = btf_array(t1);
4405 		a2 = btf_array(t1);
4406 
4407 		if (a1->index_type != a2->index_type &&
4408 		    !btf_dedup_identical_types(d, a1->index_type, a2->index_type, depth - 1))
4409 			return false;
4410 
4411 		if (a1->type != a2->type &&
4412 		    !btf_dedup_identical_types(d, a1->type, a2->type, depth - 1))
4413 			return false;
4414 
4415 		return true;
4416 	}
4417 	case BTF_KIND_STRUCT:
4418 	case BTF_KIND_UNION: {
4419 		const struct btf_member *m1, *m2;
4420 		int i, n;
4421 
4422 		if (!btf_shallow_equal_struct(t1, t2))
4423 			return false;
4424 
4425 		m1 = btf_members(t1);
4426 		m2 = btf_members(t2);
4427 		for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
4428 			if (m1->type == m2->type)
4429 				continue;
4430 			if (!btf_dedup_identical_types(d, m1->type, m2->type, depth - 1))
4431 				return false;
4432 		}
4433 		return true;
4434 	}
4435 	case BTF_KIND_FUNC_PROTO: {
4436 		const struct btf_param *p1, *p2;
4437 		int i, n;
4438 
4439 		if (!btf_compat_fnproto(t1, t2))
4440 			return false;
4441 
4442 		if (t1->type != t2->type &&
4443 		    !btf_dedup_identical_types(d, t1->type, t2->type, depth - 1))
4444 			return false;
4445 
4446 		p1 = btf_params(t1);
4447 		p2 = btf_params(t2);
4448 		for (i = 0, n = btf_vlen(t1); i < n; i++, p1++, p2++) {
4449 			if (p1->type == p2->type)
4450 				continue;
4451 			if (!btf_dedup_identical_types(d, p1->type, p2->type, depth - 1))
4452 				return false;
4453 		}
4454 		return true;
4455 	}
4456 	default:
4457 		return false;
4458 	}
4459 }
4460 
4461 
4462 /*
4463  * Check equivalence of BTF type graph formed by candidate struct/union (we'll
4464  * call it "candidate graph" in this description for brevity) to a type graph
4465  * formed by (potential) canonical struct/union ("canonical graph" for brevity
4466  * here, though keep in mind that not all types in canonical graph are
4467  * necessarily canonical representatives themselves, some of them might be
4468  * duplicates or its uniqueness might not have been established yet).
4469  * Returns:
4470  *  - >0, if type graphs are equivalent;
4471  *  -  0, if not equivalent;
4472  *  - <0, on error.
4473  *
4474  * Algorithm performs side-by-side DFS traversal of both type graphs and checks
4475  * equivalence of BTF types at each step. If at any point BTF types in candidate
4476  * and canonical graphs are not compatible structurally, whole graphs are
4477  * incompatible. If types are structurally equivalent (i.e., all information
4478  * except referenced type IDs is exactly the same), a mapping from `canon_id` to
 * a `cand_id` is recorded in hypothetical mapping (`btf_dedup->hypot_map`).
4480  * If a type references other types, then those referenced types are checked
4481  * for equivalence recursively.
4482  *
4483  * During DFS traversal, if we find that for current `canon_id` type we
4484  * already have some mapping in hypothetical map, we check for two possible
4485  * situations:
4486  *   - `canon_id` is mapped to exactly the same type as `cand_id`. This will
4487  *     happen when type graphs have cycles. In this case we assume those two
4488  *     types are equivalent.
4489  *   - `canon_id` is mapped to different type. This is contradiction in our
4490  *     hypothetical mapping, because same graph in canonical graph corresponds
4491  *     to two different types in candidate graph, which for equivalent type
4492  *     graphs shouldn't happen. This condition terminates equivalence check
4493  *     with negative result.
4494  *
 * If type graph traversal exhausts types to check and finds no contradiction,
4496  * then type graphs are equivalent.
4497  *
4498  * When checking types for equivalence, there is one special case: FWD types.
4499  * If FWD type resolution is allowed and one of the types (either from canonical
4500  * or candidate graph) is FWD and other is STRUCT/UNION (depending on FWD's kind
4501  * flag) and their names match, hypothetical mapping is updated to point from
4502  * FWD to STRUCT/UNION. If graphs will be determined as equivalent successfully,
4503  * this mapping will be used to record FWD -> STRUCT/UNION mapping permanently.
4504  *
4505  * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
4506  * if there are two exactly named (or anonymous) structs/unions that are
4507  * compatible structurally, one of which has FWD field, while other is concrete
4508  * STRUCT/UNION, but according to C sources they are different structs/unions
4509  * that are referencing different types with the same name. This is extremely
4510  * unlikely to happen, but btf_dedup API allows to disable FWD resolution if
4511  * this logic is causing problems.
4512  *
4513  * Doing FWD resolution means that both candidate and/or canonical graphs can
 * consist of portions of the graph that come from multiple compilation units.
4515  * This is due to the fact that types within single compilation unit are always
4516  * deduplicated and FWDs are already resolved, if referenced struct/union
4517  * definition is available. So, if we had unresolved FWD and found corresponding
4518  * STRUCT/UNION, they will be from different compilation units. This
4519  * consequently means that when we "link" FWD to corresponding STRUCT/UNION,
4520  * type graph will likely have at least two different BTF types that describe
4521  * same type (e.g., most probably there will be two different BTF types for the
4522  * same 'int' primitive type) and could even have "overlapping" parts of type
4523  * graph that describe same subset of types.
4524  *
4525  * This in turn means that our assumption that each type in canonical graph
4526  * must correspond to exactly one type in candidate graph might not hold
4527  * anymore and will make it harder to detect contradictions using hypothetical
4528  * map. To handle this problem, we allow to follow FWD -> STRUCT/UNION
4529  * resolution only in canonical graph. FWDs in candidate graphs are never
4530  * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs
4531  * that can occur:
4532  *   - Both types in canonical and candidate graphs are FWDs. If they are
4533  *     structurally equivalent, then they can either be both resolved to the
4534  *     same STRUCT/UNION or not resolved at all. In both cases they are
4535  *     equivalent and there is no need to resolve FWD on candidate side.
4536  *   - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
4537  *     so nothing to resolve as well, algorithm will check equivalence anyway.
4538  *   - Type in canonical graph is FWD, while type in candidate is concrete
4539  *     STRUCT/UNION. In this case candidate graph comes from single compilation
4540  *     unit, so there is exactly one BTF type for each unique C type. After
4541  *     resolving FWD into STRUCT/UNION, there might be more than one BTF type
4542  *     in canonical graph mapping to single BTF type in candidate graph, but
4543  *     because hypothetical mapping maps from canonical to candidate types, it's
4544  *     alright, and we still maintain the property of having single `canon_id`
4545  *     mapping to single `cand_id` (there could be two different `canon_id`
4546  *     mapped to the same `cand_id`, but it's not contradictory).
4547  *   - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
4548  *     graph is FWD. In this case we are just going to check compatibility of
4549  *     STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
4550  *     assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
4551  *     a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
4552  *     turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
4553  *     canonical graph.
4554  */
static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
			      __u32 canon_id)
{
	struct btf_type *cand_type;
	struct btf_type *canon_type;
	__u32 hypot_type_id;
	__u16 cand_kind;
	__u16 canon_kind;
	int i, eq;

	/* if both resolve to the same canonical, they must be equivalent */
	if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
		return 1;

	/* follow FWD -> STRUCT/UNION resolution on the canonical side only;
	 * candidate-side FWDs are deliberately never resolved (see the big
	 * comment above for the rationale)
	 */
	canon_id = resolve_fwd_id(d, canon_id);

	hypot_type_id = d->hypot_map[canon_id];
	if (hypot_type_id <= BTF_MAX_NR_TYPES) {
		/* canon_id already has a hypothesis: consistent mapping means
		 * equivalence (type graph cycle), inconsistent means
		 * contradiction — unless types are structurally identical
		 */
		if (hypot_type_id == cand_id)
			return 1;
		/* In some cases compiler will generate different DWARF types
		 * for *identical* array type definitions and use them for
		 * different fields within the *same* struct. This breaks type
		 * equivalence check, which makes an assumption that candidate
		 * types sub-graph has a consistent and deduped-by-compiler
		 * types within a single CU. And similar situation can happen
		 * with struct/union sometimes, and even with pointers.
		 * So accommodate cases like this doing a structural
		 * comparison recursively, but avoiding being stuck in endless
		 * loops by limiting the depth up to which we check.
		 */
		if (btf_dedup_identical_types(d, hypot_type_id, cand_id, 16))
			return 1;
		return 0;
	}

	/* record hypothesis canon_id -> cand_id before recursing, so cycles
	 * terminate via the hypot_map check above
	 */
	if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
		return -ENOMEM;

	cand_type = btf_type_by_id(d->btf, cand_id);
	canon_type = btf_type_by_id(d->btf, canon_id);
	cand_kind = btf_kind(cand_type);
	canon_kind = btf_kind(canon_type);

	if (cand_type->name_off != canon_type->name_off)
		return 0;

	/* FWD <--> STRUCT/UNION equivalence check, if enabled */
	if ((cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
	    && cand_kind != canon_kind) {
		__u16 real_kind;
		__u16 fwd_kind;

		if (cand_kind == BTF_KIND_FWD) {
			real_kind = canon_kind;
			fwd_kind = btf_fwd_kind(cand_type);
		} else {
			real_kind = cand_kind;
			fwd_kind = btf_fwd_kind(canon_type);
			/* we'd need to resolve base FWD to STRUCT/UNION;
			 * note this so btf_dedup_merge_hypot_map() only
			 * records base FWD mappings for this graph
			 */
			if (fwd_kind == real_kind && canon_id < d->btf->start_id)
				d->hypot_adjust_canon = true;
		}
		/* FWD is equivalent to the concrete type iff its kind
		 * (struct vs union) matches
		 */
		return fwd_kind == real_kind;
	}

	if (cand_kind != canon_kind)
		return 0;

	switch (cand_kind) {
	case BTF_KIND_INT:
		return btf_equal_int_tag(cand_type, canon_type);

	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		return btf_compat_enum(cand_type, canon_type);

	case BTF_KIND_FWD:
	case BTF_KIND_FLOAT:
		return btf_equal_common(cand_type, canon_type);

	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_TYPE_TAG:
		if (cand_type->info != canon_type->info)
			return 0;
		/* reference types: recurse into the single referenced type */
		return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);

	case BTF_KIND_ARRAY: {
		const struct btf_array *cand_arr, *canon_arr;

		if (!btf_compat_array(cand_type, canon_type))
			return 0;
		cand_arr = btf_array(cand_type);
		canon_arr = btf_array(canon_type);
		eq = btf_dedup_is_equiv(d, cand_arr->index_type, canon_arr->index_type);
		if (eq <= 0)
			return eq;
		return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
	}

	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *cand_m, *canon_m;
		__u16 vlen;

		/* names/sizes/offsets first; then recurse into member types */
		if (!btf_shallow_equal_struct(cand_type, canon_type))
			return 0;
		vlen = btf_vlen(cand_type);
		cand_m = btf_members(cand_type);
		canon_m = btf_members(canon_type);
		for (i = 0; i < vlen; i++) {
			eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
			if (eq <= 0)
				return eq;
			cand_m++;
			canon_m++;
		}

		return 1;
	}

	case BTF_KIND_FUNC_PROTO: {
		const struct btf_param *cand_p, *canon_p;
		__u16 vlen;

		if (!btf_compat_fnproto(cand_type, canon_type))
			return 0;
		/* check return type, then each parameter type */
		eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
		if (eq <= 0)
			return eq;
		vlen = btf_vlen(cand_type);
		cand_p = btf_params(cand_type);
		canon_p = btf_params(canon_type);
		for (i = 0; i < vlen; i++) {
			eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
			if (eq <= 0)
				return eq;
			cand_p++;
			canon_p++;
		}
		return 1;
	}

	default:
		return -EINVAL;
	}
	/* unreachable: every switch arm above returns */
	return 0;
}
4708 
4709 /*
4710  * Use hypothetical mapping, produced by successful type graph equivalence
4711  * check, to augment existing struct/union canonical mapping, where possible.
4712  *
4713  * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
4714  * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional:
4715  * it doesn't matter if FWD type was part of canonical graph or candidate one,
4716  * we are recording the mapping anyway. As opposed to carefulness required
4717  * for struct/union correspondence mapping (described below), for FWD resolution
4718  * it's not important, as by the time that FWD type (reference type) will be
4719  * deduplicated all structs/unions will be deduped already anyway.
4720  *
4721  * Recording STRUCT/UNION mapping is purely a performance optimization and is
4722  * not required for correctness. It needs to be done carefully to ensure that
4723  * struct/union from candidate's type graph is not mapped into corresponding
4724  * struct/union from canonical type graph that itself hasn't been resolved into
4725  * canonical representative. The only guarantee we have is that canonical
4726  * struct/union was determined as canonical and that won't change. But any
4727  * types referenced through that struct/union fields could have been not yet
4728  * resolved, so in case like that it's too early to establish any kind of
4729  * correspondence between structs/unions.
4730  *
 * No canonical correspondence is derived for primitive types (they are
 * already deduplicated completely anyway) or reference types (they rely on
4733  * stability of struct/union canonical relationship for equivalence checks).
4734  */
4735 static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
4736 {
4737 	__u32 canon_type_id, targ_type_id;
4738 	__u16 t_kind, c_kind;
4739 	__u32 t_id, c_id;
4740 	int i;
4741 
4742 	for (i = 0; i < d->hypot_cnt; i++) {
4743 		canon_type_id = d->hypot_list[i];
4744 		targ_type_id = d->hypot_map[canon_type_id];
4745 		t_id = resolve_type_id(d, targ_type_id);
4746 		c_id = resolve_type_id(d, canon_type_id);
4747 		t_kind = btf_kind(btf__type_by_id(d->btf, t_id));
4748 		c_kind = btf_kind(btf__type_by_id(d->btf, c_id));
4749 		/*
4750 		 * Resolve FWD into STRUCT/UNION.
4751 		 * It's ok to resolve FWD into STRUCT/UNION that's not yet
4752 		 * mapped to canonical representative (as opposed to
4753 		 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
4754 		 * eventually that struct is going to be mapped and all resolved
4755 		 * FWDs will automatically resolve to correct canonical
4756 		 * representative. This will happen before ref type deduping,
4757 		 * which critically depends on stability of these mapping. This
4758 		 * stability is not a requirement for STRUCT/UNION equivalence
4759 		 * checks, though.
4760 		 */
4761 
4762 		/* if it's the split BTF case, we still need to point base FWD
4763 		 * to STRUCT/UNION in a split BTF, because FWDs from split BTF
4764 		 * will be resolved against base FWD. If we don't point base
4765 		 * canonical FWD to the resolved STRUCT/UNION, then all the
4766 		 * FWDs in split BTF won't be correctly resolved to a proper
4767 		 * STRUCT/UNION.
4768 		 */
4769 		if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
4770 			d->map[c_id] = t_id;
4771 
4772 		/* if graph equivalence determined that we'd need to adjust
4773 		 * base canonical types, then we need to only point base FWDs
4774 		 * to STRUCTs/UNIONs and do no more modifications. For all
4775 		 * other purposes the type graphs were not equivalent.
4776 		 */
4777 		if (d->hypot_adjust_canon)
4778 			continue;
4779 
4780 		if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
4781 			d->map[t_id] = c_id;
4782 
4783 		if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
4784 		    c_kind != BTF_KIND_FWD &&
4785 		    is_type_mapped(d, c_id) &&
4786 		    !is_type_mapped(d, t_id)) {
4787 			/*
4788 			 * as a perf optimization, we can map struct/union
4789 			 * that's part of type graph we just verified for
4790 			 * equivalence. We can do that for struct/union that has
4791 			 * canonical representative only, though.
4792 			 */
4793 			d->map[t_id] = c_id;
4794 		}
4795 	}
4796 }
4797 
4798 /*
4799  * Deduplicate struct/union types.
4800  *
4801  * For each struct/union type its type signature hash is calculated, taking
4802  * into account type's name, size, number, order and names of fields, but
4803  * ignoring type ID's referenced from fields, because they might not be deduped
4804  * completely until after reference types deduplication phase. This type hash
4805  * is used to iterate over all potential canonical types, sharing same hash.
4806  * For each canonical candidate we check whether type graphs that they form
4807  * (through referenced types in fields and so on) are equivalent using algorithm
4808  * implemented in `btf_dedup_is_equiv`. If such equivalence is found and
4809  * BTF_KIND_FWD resolution is allowed, then hypothetical mapping
4810  * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence
4811  * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to
4812  * potentially map other structs/unions to their canonical representatives,
4813  * if such relationship hasn't yet been established. This speeds up algorithm
4814  * by eliminating some of the duplicate work.
4815  *
4816  * If no matching canonical representative was found, struct/union is marked
4817  * as canonical for itself and is added into btf_dedup->dedup_table hash map
4818  * for further look ups.
4819  */
4820 static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
4821 {
4822 	struct btf_type *cand_type, *t;
4823 	struct hashmap_entry *hash_entry;
4824 	/* if we don't find equivalent type, then we are canonical */
4825 	__u32 new_id = type_id;
4826 	__u16 kind;
4827 	long h;
4828 
4829 	/* already deduped or is in process of deduping (loop detected) */
4830 	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
4831 		return 0;
4832 
4833 	t = btf_type_by_id(d->btf, type_id);
4834 	kind = btf_kind(t);
4835 
4836 	if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
4837 		return 0;
4838 
4839 	h = btf_hash_struct(t);
4840 	for_each_dedup_cand(d, hash_entry, h) {
4841 		__u32 cand_id = hash_entry->value;
4842 		int eq;
4843 
4844 		/*
4845 		 * Even though btf_dedup_is_equiv() checks for
4846 		 * btf_shallow_equal_struct() internally when checking two
4847 		 * structs (unions) for equivalence, we need to guard here
4848 		 * from picking matching FWD type as a dedup candidate.
4849 		 * This can happen due to hash collision. In such case just
4850 		 * relying on btf_dedup_is_equiv() would lead to potentially
4851 		 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
4852 		 * FWD and compatible STRUCT/UNION are considered equivalent.
4853 		 */
4854 		cand_type = btf_type_by_id(d->btf, cand_id);
4855 		if (!btf_shallow_equal_struct(t, cand_type))
4856 			continue;
4857 
4858 		btf_dedup_clear_hypot_map(d);
4859 		eq = btf_dedup_is_equiv(d, type_id, cand_id);
4860 		if (eq < 0)
4861 			return eq;
4862 		if (!eq)
4863 			continue;
4864 		btf_dedup_merge_hypot_map(d);
4865 		if (d->hypot_adjust_canon) /* not really equivalent */
4866 			continue;
4867 		new_id = cand_id;
4868 		break;
4869 	}
4870 
4871 	d->map[type_id] = new_id;
4872 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
4873 		return -ENOMEM;
4874 
4875 	return 0;
4876 }
4877 
4878 static int btf_dedup_struct_types(struct btf_dedup *d)
4879 {
4880 	int i, err;
4881 
4882 	for (i = 0; i < d->btf->nr_types; i++) {
4883 		err = btf_dedup_struct_type(d, d->btf->start_id + i);
4884 		if (err)
4885 			return err;
4886 	}
4887 	return 0;
4888 }
4889 
4890 /*
4891  * Deduplicate reference type.
4892  *
4893  * Once all primitive and struct/union types got deduplicated, we can easily
4894  * deduplicate all other (reference) BTF types. This is done in two steps:
4895  *
4896  * 1. Resolve all referenced type IDs into their canonical type IDs. This
4897  * resolution can be done either immediately for primitive or struct/union types
4898  * (because they were deduped in previous two phases) or recursively for
4899  * reference types. Recursion will always terminate at either primitive or
4900  * struct/union type, at which point we can "unwind" chain of reference types
4901  * one by one. There is no danger of encountering cycles because in C type
4902  * system the only way to form type cycle is through struct/union, so any chain
4903  * of reference types, even those taking part in a type cycle, will inevitably
4904  * reach struct/union at some point.
4905  *
4906  * 2. Once all referenced type IDs are resolved into canonical ones, BTF type
4907  * becomes "stable", in the sense that no further deduplication will cause
4908  * any changes to it. With that, it's now possible to calculate type's signature
4909  * hash (this time taking into account referenced type IDs) and loop over all
4910  * potential canonical representatives. If no match was found, current type
4911  * will become canonical representative of itself and will be added into
4912  * btf_dedup->dedup_table as another possible canonical representative.
4913  */
static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
{
	struct hashmap_entry *hash_entry;
	__u32 new_id = type_id, cand_id;
	struct btf_type *t, *cand;
	/* if we don't find equivalent type, then we are representative type */
	int ref_type_id;
	long h;

	/* revisiting an in-progress type means a pure reference-type cycle,
	 * which is impossible in valid C (cycles go through struct/union)
	 */
	if (d->map[type_id] == BTF_IN_PROGRESS_ID)
		return -ELOOP;
	/* already deduped: report its canonical ID */
	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
		return resolve_type_id(d, type_id);

	t = btf_type_by_id(d->btf, type_id);
	/* mark in-progress BEFORE recursing into referenced types, so the
	 * ELOOP check above can detect cycles
	 */
	d->map[type_id] = BTF_IN_PROGRESS_ID;

	switch (btf_kind(t)) {
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_TYPE_TAG:
		/* canonicalize the single referenced type in place first */
		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		h = btf_hash_common(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_common(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	case BTF_KIND_DECL_TAG:
		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		/* DECL_TAG hash/equality additionally covers component_idx */
		h = btf_hash_int_decl_tag(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_int_tag(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	case BTF_KIND_ARRAY: {
		struct btf_array *info = btf_array(t);

		/* canonicalize both element and index types */
		ref_type_id = btf_dedup_ref_type(d, info->type);
		if (ref_type_id < 0)
			return ref_type_id;
		info->type = ref_type_id;

		ref_type_id = btf_dedup_ref_type(d, info->index_type);
		if (ref_type_id < 0)
			return ref_type_id;
		info->index_type = ref_type_id;

		h = btf_hash_array(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_array(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;
	}

	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *param;
		__u16 vlen;
		int i;

		/* canonicalize return type, then every parameter type */
		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		vlen = btf_vlen(t);
		param = btf_params(t);
		for (i = 0; i < vlen; i++) {
			ref_type_id = btf_dedup_ref_type(d, param->type);
			if (ref_type_id < 0)
				return ref_type_id;
			param->type = ref_type_id;
			param++;
		}

		h = btf_hash_fnproto(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_fnproto(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;
	}

	default:
		return -EINVAL;
	}

	/* now all referenced IDs are canonical, so the mapping is final;
	 * h was set by whichever non-default arm ran above
	 */
	d->map[type_id] = new_id;
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return new_id;
}
5039 
5040 static int btf_dedup_ref_types(struct btf_dedup *d)
5041 {
5042 	int i, err;
5043 
5044 	for (i = 0; i < d->btf->nr_types; i++) {
5045 		err = btf_dedup_ref_type(d, d->btf->start_id + i);
5046 		if (err < 0)
5047 			return err;
5048 	}
5049 	/* we won't need d->dedup_table anymore */
5050 	hashmap__free(d->dedup_table);
5051 	d->dedup_table = NULL;
5052 	return 0;
5053 }
5054 
5055 /*
5056  * Collect a map from type names to type ids for all canonical structs
5057  * and unions. If the same name is shared by several canonical types
5058  * use a special value 0 to indicate this fact.
5059  */
5060 static int btf_dedup_fill_unique_names_map(struct btf_dedup *d, struct hashmap *names_map)
5061 {
5062 	__u32 nr_types = btf__type_cnt(d->btf);
5063 	struct btf_type *t;
5064 	__u32 type_id;
5065 	__u16 kind;
5066 	int err;
5067 
5068 	/*
5069 	 * Iterate over base and split module ids in order to get all
5070 	 * available structs in the map.
5071 	 */
5072 	for (type_id = 1; type_id < nr_types; ++type_id) {
5073 		t = btf_type_by_id(d->btf, type_id);
5074 		kind = btf_kind(t);
5075 
5076 		if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
5077 			continue;
5078 
5079 		/* Skip non-canonical types */
5080 		if (type_id != d->map[type_id])
5081 			continue;
5082 
5083 		err = hashmap__add(names_map, t->name_off, type_id);
5084 		if (err == -EEXIST)
5085 			err = hashmap__set(names_map, t->name_off, 0, NULL, NULL);
5086 
5087 		if (err)
5088 			return err;
5089 	}
5090 
5091 	return 0;
5092 }
5093 
5094 static int btf_dedup_resolve_fwd(struct btf_dedup *d, struct hashmap *names_map, __u32 type_id)
5095 {
5096 	struct btf_type *t = btf_type_by_id(d->btf, type_id);
5097 	enum btf_fwd_kind fwd_kind = btf_kflag(t);
5098 	__u16 cand_kind, kind = btf_kind(t);
5099 	struct btf_type *cand_t;
5100 	uintptr_t cand_id;
5101 
5102 	if (kind != BTF_KIND_FWD)
5103 		return 0;
5104 
5105 	/* Skip if this FWD already has a mapping */
5106 	if (type_id != d->map[type_id])
5107 		return 0;
5108 
5109 	if (!hashmap__find(names_map, t->name_off, &cand_id))
5110 		return 0;
5111 
5112 	/* Zero is a special value indicating that name is not unique */
5113 	if (!cand_id)
5114 		return 0;
5115 
5116 	cand_t = btf_type_by_id(d->btf, cand_id);
5117 	cand_kind = btf_kind(cand_t);
5118 	if ((cand_kind == BTF_KIND_STRUCT && fwd_kind != BTF_FWD_STRUCT) ||
5119 	    (cand_kind == BTF_KIND_UNION && fwd_kind != BTF_FWD_UNION))
5120 		return 0;
5121 
5122 	d->map[type_id] = cand_id;
5123 
5124 	return 0;
5125 }
5126 
5127 /*
5128  * Resolve unambiguous forward declarations.
5129  *
5130  * The lion's share of all FWD declarations is resolved during
5131  * `btf_dedup_struct_types` phase when different type graphs are
5132  * compared against each other. However, if in some compilation unit a
5133  * FWD declaration is not a part of a type graph compared against
5134  * another type graph that declaration's canonical type would not be
5135  * changed. Example:
5136  *
5137  * CU #1:
5138  *
5139  * struct foo;
5140  * struct foo *some_global;
5141  *
5142  * CU #2:
5143  *
5144  * struct foo { int u; };
5145  * struct foo *another_global;
5146  *
5147  * After `btf_dedup_struct_types` the BTF looks as follows:
5148  *
5149  * [1] STRUCT 'foo' size=4 vlen=1 ...
5150  * [2] INT 'int' size=4 ...
5151  * [3] PTR '(anon)' type_id=1
5152  * [4] FWD 'foo' fwd_kind=struct
5153  * [5] PTR '(anon)' type_id=4
5154  *
5155  * This pass assumes that such FWD declarations should be mapped to
5156  * structs or unions with identical name in case if the name is not
5157  * ambiguous.
5158  */
5159 static int btf_dedup_resolve_fwds(struct btf_dedup *d)
5160 {
5161 	int i, err;
5162 	struct hashmap *names_map;
5163 
5164 	names_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
5165 	if (IS_ERR(names_map))
5166 		return PTR_ERR(names_map);
5167 
5168 	err = btf_dedup_fill_unique_names_map(d, names_map);
5169 	if (err < 0)
5170 		goto exit;
5171 
5172 	for (i = 0; i < d->btf->nr_types; i++) {
5173 		err = btf_dedup_resolve_fwd(d, names_map, d->btf->start_id + i);
5174 		if (err < 0)
5175 			break;
5176 	}
5177 
5178 exit:
5179 	hashmap__free(names_map);
5180 	return err;
5181 }
5182 
5183 /*
5184  * Compact types.
5185  *
5186  * After we established for each type its corresponding canonical representative
5187  * type, we now can eliminate types that are not canonical and leave only
5188  * canonical ones layed out sequentially in memory by copying them over
5189  * duplicates. During compaction btf_dedup->hypot_map array is reused to store
5190  * a map from original type ID to a new compacted type ID, which will be used
5191  * during next phase to "fix up" type IDs, referenced from struct/union and
5192  * reference types.
5193  */
5194 static int btf_dedup_compact_types(struct btf_dedup *d)
5195 {
5196 	__u32 *new_offs;
5197 	__u32 next_type_id = d->btf->start_id;
5198 	const struct btf_type *t;
5199 	void *p;
5200 	int i, id, len;
5201 
5202 	/* we are going to reuse hypot_map to store compaction remapping */
5203 	d->hypot_map[0] = 0;
5204 	/* base BTF types are not renumbered */
5205 	for (id = 1; id < d->btf->start_id; id++)
5206 		d->hypot_map[id] = id;
5207 	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++)
5208 		d->hypot_map[id] = BTF_UNPROCESSED_ID;
5209 
5210 	p = d->btf->types_data;
5211 
5212 	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) {
5213 		if (d->map[id] != id)
5214 			continue;
5215 
5216 		t = btf__type_by_id(d->btf, id);
5217 		len = btf_type_size(t);
5218 		if (len < 0)
5219 			return len;
5220 
5221 		memmove(p, t, len);
5222 		d->hypot_map[id] = next_type_id;
5223 		d->btf->type_offs[next_type_id - d->btf->start_id] = p - d->btf->types_data;
5224 		p += len;
5225 		next_type_id++;
5226 	}
5227 
5228 	/* shrink struct btf's internal types index and update btf_header */
5229 	d->btf->nr_types = next_type_id - d->btf->start_id;
5230 	d->btf->type_offs_cap = d->btf->nr_types;
5231 	d->btf->hdr->type_len = p - d->btf->types_data;
5232 	new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap,
5233 				       sizeof(*new_offs));
5234 	if (d->btf->type_offs_cap && !new_offs)
5235 		return -ENOMEM;
5236 	d->btf->type_offs = new_offs;
5237 	d->btf->hdr->str_off = d->btf->hdr->type_len;
5238 	d->btf->raw_size = d->btf->hdr->hdr_len + d->btf->hdr->type_len + d->btf->hdr->str_len;
5239 	return 0;
5240 }
5241 
5242 /*
5243  * Figure out final (deduplicated and compacted) type ID for provided original
5244  * `type_id` by first resolving it into corresponding canonical type ID and
5245  * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
5246  * which is populated during compaction phase.
5247  */
5248 static int btf_dedup_remap_type_id(__u32 *type_id, void *ctx)
5249 {
5250 	struct btf_dedup *d = ctx;
5251 	__u32 resolved_type_id, new_type_id;
5252 
5253 	resolved_type_id = resolve_type_id(d, *type_id);
5254 	new_type_id = d->hypot_map[resolved_type_id];
5255 	if (new_type_id > BTF_MAX_NR_TYPES)
5256 		return -EINVAL;
5257 
5258 	*type_id = new_type_id;
5259 	return 0;
5260 }
5261 
5262 /*
5263  * Remap referenced type IDs into deduped type IDs.
5264  *
5265  * After BTF types are deduplicated and compacted, their final type IDs may
5266  * differ from original ones. The map from original to a corresponding
5267  * deduped type ID is stored in btf_dedup->hypot_map and is populated during
5268  * compaction phase. During remapping phase we are rewriting all type IDs
5269  * referenced from any BTF type (e.g., struct fields, func proto args, etc) to
5270  * their final deduped type IDs.
5271  */
5272 static int btf_dedup_remap_types(struct btf_dedup *d)
5273 {
5274 	int i, r;
5275 
5276 	for (i = 0; i < d->btf->nr_types; i++) {
5277 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
5278 		struct btf_field_iter it;
5279 		__u32 *type_id;
5280 
5281 		r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
5282 		if (r)
5283 			return r;
5284 
5285 		while ((type_id = btf_field_iter_next(&it))) {
5286 			__u32 resolved_id, new_id;
5287 
5288 			resolved_id = resolve_type_id(d, *type_id);
5289 			new_id = d->hypot_map[resolved_id];
5290 			if (new_id > BTF_MAX_NR_TYPES)
5291 				return -EINVAL;
5292 
5293 			*type_id = new_id;
5294 		}
5295 	}
5296 
5297 	if (!d->btf_ext)
5298 		return 0;
5299 
5300 	r = btf_ext_visit_type_ids(d->btf_ext, btf_dedup_remap_type_id, d);
5301 	if (r)
5302 		return r;
5303 
5304 	return 0;
5305 }
5306 
5307 /*
5308  * Probe few well-known locations for vmlinux kernel image and try to load BTF
5309  * data out of it to use for target BTF.
5310  */
5311 struct btf *btf__load_vmlinux_btf(void)
5312 {
5313 	const char *sysfs_btf_path = "/sys/kernel/btf/vmlinux";
5314 	/* fall back locations, trying to find vmlinux on disk */
5315 	const char *locations[] = {
5316 		"/boot/vmlinux-%1$s",
5317 		"/lib/modules/%1$s/vmlinux-%1$s",
5318 		"/lib/modules/%1$s/build/vmlinux",
5319 		"/usr/lib/modules/%1$s/kernel/vmlinux",
5320 		"/usr/lib/debug/boot/vmlinux-%1$s",
5321 		"/usr/lib/debug/boot/vmlinux-%1$s.debug",
5322 		"/usr/lib/debug/lib/modules/%1$s/vmlinux",
5323 	};
5324 	char path[PATH_MAX + 1];
5325 	struct utsname buf;
5326 	struct btf *btf;
5327 	int i, err;
5328 
5329 	/* is canonical sysfs location accessible? */
5330 	if (faccessat(AT_FDCWD, sysfs_btf_path, F_OK, AT_EACCESS) < 0) {
5331 		pr_warn("kernel BTF is missing at '%s', was CONFIG_DEBUG_INFO_BTF enabled?\n",
5332 			sysfs_btf_path);
5333 	} else {
5334 		btf = btf__parse(sysfs_btf_path, NULL);
5335 		if (!btf) {
5336 			err = -errno;
5337 			pr_warn("failed to read kernel BTF from '%s': %s\n",
5338 				sysfs_btf_path, errstr(err));
5339 			return libbpf_err_ptr(err);
5340 		}
5341 		pr_debug("loaded kernel BTF from '%s'\n", sysfs_btf_path);
5342 		return btf;
5343 	}
5344 
5345 	/* try fallback locations */
5346 	uname(&buf);
5347 	for (i = 0; i < ARRAY_SIZE(locations); i++) {
5348 		snprintf(path, PATH_MAX, locations[i], buf.release);
5349 
5350 		if (faccessat(AT_FDCWD, path, R_OK, AT_EACCESS))
5351 			continue;
5352 
5353 		btf = btf__parse(path, NULL);
5354 		err = libbpf_get_error(btf);
5355 		pr_debug("loading kernel BTF '%s': %s\n", path, errstr(err));
5356 		if (err)
5357 			continue;
5358 
5359 		return btf;
5360 	}
5361 
5362 	pr_warn("failed to find valid kernel BTF\n");
5363 	return libbpf_err_ptr(-ESRCH);
5364 }
5365 
/* legacy name kept as an alias of btf__load_vmlinux_btf() for API compatibility */
struct btf *libbpf_find_kernel_btf(void) __attribute__((alias("btf__load_vmlinux_btf")));
5367 
/*
 * Load BTF of a kernel module from /sys/kernel/btf/<module_name>, using
 * provided vmlinux_btf as its base BTF.  Returns new BTF object on success
 * or an error pointer (with errno set) on failure.
 */
struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf)
{
	char path[80];
	int len;

	len = snprintf(path, sizeof(path), "/sys/kernel/btf/%s", module_name);
	/* guard against silent truncation, which would make us probe a
	 * bogus (prefix) sysfs path instead of failing loudly
	 */
	if (len < 0 || (size_t)len >= sizeof(path))
		return libbpf_err_ptr(-ENAMETOOLONG);

	return btf__parse_split(path, vmlinux_btf);
}
5375 
5376 int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
5377 {
5378 	const struct btf_ext_info *seg;
5379 	struct btf_ext_info_sec *sec;
5380 	int i, err;
5381 
5382 	seg = &btf_ext->func_info;
5383 	for_each_btf_ext_sec(seg, sec) {
5384 		struct bpf_func_info_min *rec;
5385 
5386 		for_each_btf_ext_rec(seg, sec, i, rec) {
5387 			err = visit(&rec->type_id, ctx);
5388 			if (err < 0)
5389 				return err;
5390 		}
5391 	}
5392 
5393 	seg = &btf_ext->core_relo_info;
5394 	for_each_btf_ext_sec(seg, sec) {
5395 		struct bpf_core_relo *rec;
5396 
5397 		for_each_btf_ext_rec(seg, sec, i, rec) {
5398 			err = visit(&rec->type_id, ctx);
5399 			if (err < 0)
5400 				return err;
5401 		}
5402 	}
5403 
5404 	return 0;
5405 }
5406 
5407 int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx)
5408 {
5409 	const struct btf_ext_info *seg;
5410 	struct btf_ext_info_sec *sec;
5411 	int i, err;
5412 
5413 	seg = &btf_ext->func_info;
5414 	for_each_btf_ext_sec(seg, sec) {
5415 		err = visit(&sec->sec_name_off, ctx);
5416 		if (err)
5417 			return err;
5418 	}
5419 
5420 	seg = &btf_ext->line_info;
5421 	for_each_btf_ext_sec(seg, sec) {
5422 		struct bpf_line_info_min *rec;
5423 
5424 		err = visit(&sec->sec_name_off, ctx);
5425 		if (err)
5426 			return err;
5427 
5428 		for_each_btf_ext_rec(seg, sec, i, rec) {
5429 			err = visit(&rec->file_name_off, ctx);
5430 			if (err)
5431 				return err;
5432 			err = visit(&rec->line_off, ctx);
5433 			if (err)
5434 				return err;
5435 		}
5436 	}
5437 
5438 	seg = &btf_ext->core_relo_info;
5439 	for_each_btf_ext_sec(seg, sec) {
5440 		struct bpf_core_relo *rec;
5441 
5442 		err = visit(&sec->sec_name_off, ctx);
5443 		if (err)
5444 			return err;
5445 
5446 		for_each_btf_ext_rec(seg, sec, i, rec) {
5447 			err = visit(&rec->access_str_off, ctx);
5448 			if (err)
5449 				return err;
5450 		}
5451 	}
5452 
5453 	return 0;
5454 }
5455 
/* State shared across btf__distill_base() phases */
struct btf_distill {
	struct btf_pipe pipe;		/* src BTF, current dst BTF, string offset map */
	int *id_map;			/* src type ID -> dst type ID; 0 means "not needed" */
	unsigned int split_start_id;	/* first split-BTF type ID in src BTF */
	unsigned int split_start_str;	/* first split-BTF string offset in src BTF */
	int diff_id;			/* downward ID shift for unmapped split type IDs */
};
5463 
/* Mark (in dist->id_map) every base BTF type transitively referenced from
 * split type [i], so it can later be emitted into distilled base/split BTF.
 * Returns 0 on success, negative error on unexpected base type kind or
 * iterator failure.
 */
static int btf_add_distilled_type_ids(struct btf_distill *dist, __u32 i)
{
	struct btf_type *split_t = btf_type_by_id(dist->pipe.src, i);
	struct btf_field_iter it;
	__u32 *id;
	int err;

	err = btf_field_iter_init(&it, split_t, BTF_FIELD_ITER_IDS);
	if (err)
		return err;
	while ((id = btf_field_iter_next(&it))) {
		struct btf_type *base_t;

		/* id 0 is void; nothing to collect */
		if (!*id)
			continue;
		/* split BTF id, not needed */
		if (*id >= dist->split_start_id)
			continue;
		/* already added ? */
		if (dist->id_map[*id] > 0)
			continue;

		/* only a subset of base BTF types should be referenced from
		 * split BTF; ensure nothing unexpected is referenced.
		 */
		base_t = btf_type_by_id(dist->pipe.src, *id);
		switch (btf_kind(base_t)) {
		case BTF_KIND_INT:
		case BTF_KIND_FLOAT:
		case BTF_KIND_FWD:
		case BTF_KIND_ARRAY:
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
		case BTF_KIND_PTR:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_FUNC_PROTO:
		case BTF_KIND_TYPE_TAG:
			/* identity-map for now; final dst IDs are assigned in
			 * btf_add_distilled_types()
			 */
			dist->id_map[*id] = *id;
			break;
		default:
			pr_warn("unexpected reference to base type[%u] of kind [%u] when creating distilled base BTF.\n",
				*id, btf_kind(base_t));
			return -EINVAL;
		}
		/* If a base type is used, ensure types it refers to are
		 * marked as used also; so for example if we find a PTR to INT
		 * we need both the PTR and INT.
		 *
		 * The only exception is named struct/unions, since distilled
		 * base BTF composite types have no members.
		 */
		if (btf_is_composite(base_t) && base_t->name_off)
			continue;
		/* recurse into the referenced base type's own references */
		err = btf_add_distilled_type_ids(dist, *id);
		if (err)
			return err;
	}
	return 0;
}
5528 
/* Emit types collected in dist->id_map into dist->pipe.dst, assigning their
 * final dst IDs as we go.  Called twice by btf__distill_base(): first with
 * dst == new (distilled) base BTF, then with dst == new split BTF; each call
 * picks up only the types appropriate for that destination.
 */
static int btf_add_distilled_types(struct btf_distill *dist)
{
	bool adding_to_base = dist->pipe.dst->start_id == 1;
	int id = btf__type_cnt(dist->pipe.dst);
	struct btf_type *t;
	int i, err = 0;


	/* Add types for each of the required references to either distilled
	 * base or split BTF, depending on type characteristics.
	 */
	for (i = 1; i < dist->split_start_id; i++) {
		const char *name;
		int kind;

		/* skip base types not referenced from split BTF */
		if (!dist->id_map[i])
			continue;
		t = btf_type_by_id(dist->pipe.src, i);
		kind = btf_kind(t);
		name = btf__name_by_offset(dist->pipe.src, t->name_off);

		switch (kind) {
		case BTF_KIND_INT:
		case BTF_KIND_FLOAT:
		case BTF_KIND_FWD:
			/* Named int, float, fwd are added to base. */
			if (!adding_to_base)
				continue;
			err = btf_add_type(&dist->pipe, t);
			break;
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			/* Named struct/union are added to base as 0-vlen
			 * struct/union of same size.  Anonymous struct/unions
			 * are added to split BTF as-is.
			 */
			if (adding_to_base) {
				if (!t->name_off)
					continue;
				err = btf_add_composite(dist->pipe.dst, kind, name, t->size);
			} else {
				if (t->name_off)
					continue;
				err = btf_add_type(&dist->pipe, t);
			}
			break;
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
			/* Named enum[64]s are added to base as a sized
			 * enum; relocation will match with appropriately-named
			 * and sized enum or enum64.
			 *
			 * Anonymous enums are added to split BTF as-is.
			 */
			if (adding_to_base) {
				if (!t->name_off)
					continue;
				err = btf__add_enum(dist->pipe.dst, name, t->size);
			} else {
				if (t->name_off)
					continue;
				err = btf_add_type(&dist->pipe, t);
			}
			break;
		case BTF_KIND_ARRAY:
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_PTR:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_FUNC_PROTO:
		case BTF_KIND_TYPE_TAG:
			/* All other types are added to split BTF. */
			if (adding_to_base)
				continue;
			err = btf_add_type(&dist->pipe, t);
			break;
		default:
			pr_warn("unexpected kind when adding base type '%s'[%u] of kind [%u] to distilled base BTF.\n",
				name, i, kind);
			return -EINVAL;

		}
		if (err < 0)
			break;
		/* record final dst ID for this src type */
		dist->id_map[i] = id++;
	}
	return err;
}
5618 
5619 /* Split BTF ids without a mapping will be shifted downwards since distilled
5620  * base BTF is smaller than the original base BTF.  For those that have a
5621  * mapping (either to base or updated split BTF), update the id based on
5622  * that mapping.
5623  */
5624 static int btf_update_distilled_type_ids(struct btf_distill *dist, __u32 i)
5625 {
5626 	struct btf_type *t = btf_type_by_id(dist->pipe.dst, i);
5627 	struct btf_field_iter it;
5628 	__u32 *id;
5629 	int err;
5630 
5631 	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
5632 	if (err)
5633 		return err;
5634 	while ((id = btf_field_iter_next(&it))) {
5635 		if (dist->id_map[*id])
5636 			*id = dist->id_map[*id];
5637 		else if (*id >= dist->split_start_id)
5638 			*id -= dist->diff_id;
5639 	}
5640 	return 0;
5641 }
5642 
/* Create updated split BTF with distilled base BTF; distilled base BTF
 * consists of BTF information required to clarify the types that split
 * BTF refers to, omitting unneeded details.  Specifically it will contain
 * base types and memberless definitions of named structs, unions and enumerated
 * types. Associated reference types like pointers, arrays and anonymous
 * structs, unions and enumerated types will be added to split BTF.
 * Size is recorded for named struct/unions to help guide matching to the
 * target base BTF during later relocation.
 *
 * The only case where structs, unions or enumerated types are fully represented
 * is when they are anonymous; in such cases, the anonymous type is added to
 * split BTF in full.
 *
 * We return newly-created split BTF where the split BTF refers to a newly-created
 * distilled base BTF. Both must be freed separately by the caller.
 */
int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf,
		      struct btf **new_split_btf)
{
	struct btf *new_base = NULL, *new_split = NULL;
	const struct btf *old_base;
	unsigned int n = btf__type_cnt(src_btf);
	struct btf_distill dist = {};
	struct btf_type *t;
	int i, err = 0;

	/* src BTF must be split BTF. */
	old_base = btf__base_btf(src_btf);
	if (!new_base_btf || !new_split_btf || !old_base)
		return libbpf_err(-EINVAL);

	new_base = btf__new_empty();
	if (!new_base)
		return libbpf_err(-ENOMEM);

	/* keep byte order consistent with the source BTF */
	btf__set_endianness(new_base, btf__endianness(src_btf));

	/* id_map covers all src type IDs; calloc zeroes = "not needed" */
	dist.id_map = calloc(n, sizeof(*dist.id_map));
	if (!dist.id_map) {
		err = -ENOMEM;
		goto done;
	}
	dist.pipe.src = src_btf;
	dist.pipe.dst = new_base;
	dist.pipe.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
	if (IS_ERR(dist.pipe.str_off_map)) {
		err = -ENOMEM;
		goto done;
	}
	dist.split_start_id = btf__type_cnt(old_base);
	dist.split_start_str = old_base->hdr->str_len;

	/* Pass over src split BTF; generate the list of base BTF type ids it
	 * references; these will constitute our distilled BTF set to be
	 * distributed over base and split BTF as appropriate.
	 */
	for (i = src_btf->start_id; i < n; i++) {
		err = btf_add_distilled_type_ids(&dist, i);
		if (err < 0)
			goto done;
	}
	/* Next add types for each of the required references to base BTF and split BTF
	 * in turn.
	 */
	err = btf_add_distilled_types(&dist);
	if (err < 0)
		goto done;

	/* Create new split BTF with distilled base BTF as its base; the final
	 * state is split BTF with distilled base BTF that represents enough
	 * about its base references to allow it to be relocated with the base
	 * BTF available.
	 */
	new_split = btf__new_empty_split(new_base);
	if (!new_split) {
		/* NOTE: assumes btf__new_empty_split() sets errno on failure */
		err = -errno;
		goto done;
	}
	dist.pipe.dst = new_split;
	/* First add all split types */
	for (i = src_btf->start_id; i < n; i++) {
		t = btf_type_by_id(src_btf, i);
		err = btf_add_type(&dist.pipe, t);
		if (err < 0)
			goto done;
	}
	/* Now add distilled types to split BTF that are not added to base. */
	err = btf_add_distilled_types(&dist);
	if (err < 0)
		goto done;

	/* All split BTF ids will be shifted downwards since there are less base
	 * BTF ids in distilled base BTF.
	 */
	dist.diff_id = dist.split_start_id - btf__type_cnt(new_base);

	n = btf__type_cnt(new_split);
	/* Now update base/split BTF ids. */
	for (i = 1; i < n; i++) {
		err = btf_update_distilled_type_ids(&dist, i);
		if (err < 0)
			break;
	}
done:
	/* shared cleanup path; on error both new BTFs are torn down */
	free(dist.id_map);
	hashmap__free(dist.pipe.str_off_map);
	if (err) {
		btf__free(new_split);
		btf__free(new_base);
		return libbpf_err(err);
	}
	*new_base_btf = new_base;
	*new_split_btf = new_split;

	return 0;
}
5759 
/* Internal accessor for BTF's parsed header */
const struct btf_header *btf_header(const struct btf *btf)
{
	return btf->hdr;
}
5764 
/* Re-parent split BTF @btf onto @base_btf: @btf's own type IDs and string
 * offsets then start right past the end of @base_btf's types and strings.
 */
void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
{
	btf->base_btf = (struct btf *)base_btf;
	btf->start_id = btf__type_cnt(base_btf);
	btf->start_str_off = base_btf->hdr->str_len;
}
5771 
5772 int btf__relocate(struct btf *btf, const struct btf *base_btf)
5773 {
5774 	int err = btf_relocate(btf, base_btf, NULL);
5775 
5776 	if (!err)
5777 		btf->owns_base = false;
5778 	return libbpf_err(err);
5779 }
5780