xref: /linux/tools/lib/bpf/btf.c (revision cbf33b8e0b360f667b17106c15d9e2aac77a76a1)
1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 /* Copyright (c) 2018 Facebook */
3 
4 #include <byteswap.h>
5 #include <endian.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <fcntl.h>
10 #include <unistd.h>
11 #include <errno.h>
12 #include <sys/utsname.h>
13 #include <sys/param.h>
14 #include <sys/stat.h>
15 #include <sys/mman.h>
16 #include <linux/kernel.h>
17 #include <linux/err.h>
18 #include <linux/btf.h>
19 #include <gelf.h>
20 #include "btf.h"
21 #include "bpf.h"
22 #include "libbpf.h"
23 #include "libbpf_internal.h"
24 #include "hashmap.h"
25 #include "strset.h"
26 
27 #define BTF_MAX_NR_TYPES 0x7fffffffU
28 #define BTF_MAX_STR_OFFSET 0x7fffffffU
29 
30 static struct btf_type btf_void;
31 
32 struct btf {
33 	/* raw BTF data in native endianness */
34 	void *raw_data;
35 	/* raw BTF data in non-native endianness */
36 	void *raw_data_swapped;
37 	__u32 raw_size;
38 	/* whether target endianness differs from the native one */
39 	bool swapped_endian;
40 
41 	/*
42 	 * When BTF is loaded from an ELF or raw memory it is stored
43 	 * in a contiguous memory block. The hdr, types_data, and strs_data
44 	 * point inside that memory region to their respective parts of BTF
45 	 * representation:
46 	 *
47 	 * +--------------------------------+
48 	 * |  Header  |  Types  |  Strings  |
49 	 * +--------------------------------+
50 	 * ^          ^         ^
51 	 * |          |         |
52 	 * hdr        |         |
53 	 * types_data-+         |
54 	 * strs_data------------+
55 	 *
56 	 * If BTF data is later modified, e.g., due to types added or
57 	 * removed, BTF deduplication performed, etc, this contiguous
58 	 * representation is broken up into three independently allocated
59 	 * memory regions to be able to modify them independently.
60 	 * raw_data is nulled out at that point, but can be later allocated
61 	 * and cached again if user calls btf__raw_data(), at which point
62 	 * raw_data will contain a contiguous copy of header, types, and
63 	 * strings:
64 	 *
65 	 * +----------+  +---------+  +-----------+
66 	 * |  Header  |  |  Types  |  |  Strings  |
67 	 * +----------+  +---------+  +-----------+
68 	 * ^             ^            ^
69 	 * |             |            |
70 	 * hdr           |            |
71 	 * types_data----+            |
72 	 * strset__data(strs_set)-----+
73 	 *
74 	 *               +----------+---------+-----------+
75 	 *               |  Header  |  Types  |  Strings  |
76 	 * raw_data----->+----------+---------+-----------+
77 	 */
78 	struct btf_header *hdr;
79 
80 	void *types_data;
81 	size_t types_data_cap; /* used size stored in hdr->type_len */
82 
83 	/* type ID to `struct btf_type *` lookup index
84 	 * type_offs[0] corresponds to the first non-VOID type:
85 	 *   - for base BTF it's type [1];
86 	 *   - for split BTF it's the first non-base BTF type.
87 	 */
88 	__u32 *type_offs;
89 	size_t type_offs_cap;
90 	/* number of types in this BTF instance:
91 	 *   - doesn't include special [0] void type;
92 	 *   - for split BTF counts number of types added on top of base BTF.
93 	 */
94 	__u32 nr_types;
95 	/* if not NULL, points to the base BTF on top of which the current
96 	 * split BTF is based
97 	 */
98 	struct btf *base_btf;
99 	/* BTF type ID of the first type in this BTF instance:
100 	 *   - for base BTF it's equal to 1;
101 	 *   - for split BTF it's equal to biggest type ID of base BTF plus 1.
102 	 */
103 	int start_id;
104 	/* logical string offset of this BTF instance:
105 	 *   - for base BTF it's equal to 0;
106 	 *   - for split BTF it's equal to the total size of base BTF's string section.
107 	 */
108 	int start_str_off;
109 
110 	/* only one of strs_data or strs_set can be non-NULL, depending on
111 	 * whether BTF is in a modifiable state (strs_set is used) or not
112 	 * (strs_data points inside raw_data)
113 	 */
114 	void *strs_data;
115 	/* a set of unique strings */
116 	struct strset *strs_set;
117 	/* whether strings are already deduplicated */
118 	bool strs_deduped;
119 
120 	/* whether base_btf should be freed in btf_free for this instance */
121 	bool owns_base;
122 
123 	/* whether raw_data is a (read-only) mmap */
124 	bool raw_data_is_mmap;
125 
126 	/* BTF object FD, if loaded into kernel */
127 	int fd;
128 
129 	/* Pointer size (in bytes) for a target architecture of this BTF */
130 	int ptr_sz;
131 };
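
/* Annotation (not part of the original source): a direct consequence of the
 * layout described above is that "is this BTF modifiable?" can be detected by
 * checking whether hdr still points at the start of the contiguous raw_data
 * block, which is exactly what btf_is_modifiable() further below does:
 *
 *	return (void *)btf->hdr != btf->raw_data;
 */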
132 
133 static inline __u64 ptr_to_u64(const void *ptr)
134 {
135 	return (__u64) (unsigned long) ptr;
136 }
137 
138 /* Ensure given dynamically allocated memory region pointed to by *data* with
139  * capacity of *cap_cnt* elements each taking *elem_sz* bytes has enough
140  * memory to accommodate *add_cnt* new elements, assuming *cur_cnt* elements
141  * are already used. At most *max_cnt* elements can ever be allocated.
142  * If necessary, memory is reallocated and all existing data is copied over;
143  * the new pointer to the memory region is stored at *data* and the new memory
144  * region capacity (in number of elements) is stored in *cap_cnt*.
145  * On success, a pointer to the beginning of the unused memory is returned.
146  * On error, NULL is returned.
147  */
148 void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
149 		     size_t cur_cnt, size_t max_cnt, size_t add_cnt)
150 {
151 	size_t new_cnt;
152 	void *new_data;
153 
154 	if (cur_cnt + add_cnt <= *cap_cnt)
155 		return *data + cur_cnt * elem_sz;
156 
157 	/* requested more than the set limit */
158 	if (cur_cnt + add_cnt > max_cnt)
159 		return NULL;
160 
161 	new_cnt = *cap_cnt;
162 	new_cnt += new_cnt / 4;		  /* expand by 25% */
163 	if (new_cnt < 16)		  /* but at least 16 elements */
164 		new_cnt = 16;
165 	if (new_cnt > max_cnt)		  /* but not exceeding a set limit */
166 		new_cnt = max_cnt;
167 	if (new_cnt < cur_cnt + add_cnt)  /* also ensure we have enough memory */
168 		new_cnt = cur_cnt + add_cnt;
169 
170 	new_data = libbpf_reallocarray(*data, new_cnt, elem_sz);
171 	if (!new_data)
172 		return NULL;
173 
174 	/* zero out newly allocated portion of memory */
175 	memset(new_data + (*cap_cnt) * elem_sz, 0, (new_cnt - *cap_cnt) * elem_sz);
176 
177 	*data = new_data;
178 	*cap_cnt = new_cnt;
179 	return new_data + cur_cnt * elem_sz;
180 }
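
/* Usage sketch (illustrative only, not part of the original source): appending
 * one element to a growable array with libbpf_add_mem(). The caller owns the
 * *cnt*/*cap* counters; the helper reallocates and zero-fills as needed.
 *
 *	static int push_val(int **vals, size_t *cnt, size_t *cap, int v)
 *	{
 *		int *slot;
 *
 *		slot = libbpf_add_mem((void **)vals, cap, sizeof(int),
 *				      *cnt, SIZE_MAX, 1);
 *		if (!slot)
 *			return -ENOMEM;
 *		*slot = v;
 *		(*cnt)++;
 *		return 0;
 *	}
 */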
181 
182 /* Ensure given dynamically allocated memory region has enough allocated space
183  * to accommodate *need_cnt* elements of size *elem_sz* bytes each
184  */
185 int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
186 {
187 	void *p;
188 
189 	if (need_cnt <= *cap_cnt)
190 		return 0;
191 
192 	p = libbpf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt);
193 	if (!p)
194 		return -ENOMEM;
195 
196 	return 0;
197 }
198 
199 static void *btf_add_type_offs_mem(struct btf *btf, size_t add_cnt)
200 {
201 	return libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
202 			      btf->nr_types, BTF_MAX_NR_TYPES, add_cnt);
203 }
204 
205 static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
206 {
207 	__u32 *p;
208 
209 	p = btf_add_type_offs_mem(btf, 1);
210 	if (!p)
211 		return -ENOMEM;
212 
213 	*p = type_off;
214 	return 0;
215 }
216 
217 static void btf_bswap_hdr(struct btf_header *h)
218 {
219 	h->magic = bswap_16(h->magic);
220 	h->hdr_len = bswap_32(h->hdr_len);
221 	h->type_off = bswap_32(h->type_off);
222 	h->type_len = bswap_32(h->type_len);
223 	h->str_off = bswap_32(h->str_off);
224 	h->str_len = bswap_32(h->str_len);
225 }
226 
227 static int btf_parse_hdr(struct btf *btf)
228 {
229 	struct btf_header *hdr = btf->hdr;
230 	__u32 meta_left;
231 
232 	if (btf->raw_size < sizeof(struct btf_header)) {
233 		pr_debug("BTF header not found\n");
234 		return -EINVAL;
235 	}
236 
237 	if (hdr->magic == bswap_16(BTF_MAGIC)) {
238 		btf->swapped_endian = true;
239 		if (bswap_32(hdr->hdr_len) != sizeof(struct btf_header)) {
240 			pr_warn("Can't load BTF with non-native endianness due to unsupported header length %u\n",
241 				bswap_32(hdr->hdr_len));
242 			return -ENOTSUP;
243 		}
244 		btf_bswap_hdr(hdr);
245 	} else if (hdr->magic != BTF_MAGIC) {
246 		pr_debug("Invalid BTF magic: %x\n", hdr->magic);
247 		return -EINVAL;
248 	}
249 
250 	if (btf->raw_size < hdr->hdr_len) {
251 		pr_debug("BTF header len %u larger than data size %u\n",
252 			 hdr->hdr_len, btf->raw_size);
253 		return -EINVAL;
254 	}
255 
256 	meta_left = btf->raw_size - hdr->hdr_len;
257 	if (meta_left < (long long)hdr->str_off + hdr->str_len) {
258 		pr_debug("Invalid BTF total size: %u\n", btf->raw_size);
259 		return -EINVAL;
260 	}
261 
262 	if ((long long)hdr->type_off + hdr->type_len > hdr->str_off) {
263 		pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n",
264 			 hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len);
265 		return -EINVAL;
266 	}
267 
268 	if (hdr->type_off % 4) {
269 		pr_debug("BTF type section is not aligned to 4 bytes\n");
270 		return -EINVAL;
271 	}
272 
273 	return 0;
274 }
275 
276 static int btf_parse_str_sec(struct btf *btf)
277 {
278 	const struct btf_header *hdr = btf->hdr;
279 	const char *start = btf->strs_data;
280 	const char *end = start + btf->hdr->str_len;
281 
282 	if (btf->base_btf && hdr->str_len == 0)
283 		return 0;
284 	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || end[-1]) {
285 		pr_debug("Invalid BTF string section\n");
286 		return -EINVAL;
287 	}
288 	if (!btf->base_btf && start[0]) {
289 		pr_debug("Malformed BTF string section, did you forget to provide base BTF?\n");
290 		return -EINVAL;
291 	}
292 	return 0;
293 }
294 
295 static int btf_type_size(const struct btf_type *t)
296 {
297 	const int base_size = sizeof(struct btf_type);
298 	__u16 vlen = btf_vlen(t);
299 
300 	switch (btf_kind(t)) {
301 	case BTF_KIND_FWD:
302 	case BTF_KIND_CONST:
303 	case BTF_KIND_VOLATILE:
304 	case BTF_KIND_RESTRICT:
305 	case BTF_KIND_PTR:
306 	case BTF_KIND_TYPEDEF:
307 	case BTF_KIND_FUNC:
308 	case BTF_KIND_FLOAT:
309 	case BTF_KIND_TYPE_TAG:
310 		return base_size;
311 	case BTF_KIND_INT:
312 		return base_size + sizeof(__u32);
313 	case BTF_KIND_ENUM:
314 		return base_size + vlen * sizeof(struct btf_enum);
315 	case BTF_KIND_ENUM64:
316 		return base_size + vlen * sizeof(struct btf_enum64);
317 	case BTF_KIND_ARRAY:
318 		return base_size + sizeof(struct btf_array);
319 	case BTF_KIND_STRUCT:
320 	case BTF_KIND_UNION:
321 		return base_size + vlen * sizeof(struct btf_member);
322 	case BTF_KIND_FUNC_PROTO:
323 		return base_size + vlen * sizeof(struct btf_param);
324 	case BTF_KIND_VAR:
325 		return base_size + sizeof(struct btf_var);
326 	case BTF_KIND_DATASEC:
327 		return base_size + vlen * sizeof(struct btf_var_secinfo);
328 	case BTF_KIND_DECL_TAG:
329 		return base_size + sizeof(struct btf_decl_tag);
330 	default:
331 		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
332 		return -EINVAL;
333 	}
334 }
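
/* Worked example (annotation, not part of the original source): a BTF type
 * record is the 12-byte struct btf_type plus kind-specific trailing data, so
 * for a STRUCT with vlen = 2 members:
 *
 *	sizeof(struct btf_type) + 2 * sizeof(struct btf_member)
 *		= 12 + 2 * 12 = 36 bytes
 *
 * while a PTR, TYPEDEF, or other "plain" kind above is just the 12-byte base.
 */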
335 
336 static void btf_bswap_type_base(struct btf_type *t)
337 {
338 	t->name_off = bswap_32(t->name_off);
339 	t->info = bswap_32(t->info);
340 	t->type = bswap_32(t->type);
341 }
342 
343 static int btf_bswap_type_rest(struct btf_type *t)
344 {
345 	struct btf_var_secinfo *v;
346 	struct btf_enum64 *e64;
347 	struct btf_member *m;
348 	struct btf_array *a;
349 	struct btf_param *p;
350 	struct btf_enum *e;
351 	__u16 vlen = btf_vlen(t);
352 	int i;
353 
354 	switch (btf_kind(t)) {
355 	case BTF_KIND_FWD:
356 	case BTF_KIND_CONST:
357 	case BTF_KIND_VOLATILE:
358 	case BTF_KIND_RESTRICT:
359 	case BTF_KIND_PTR:
360 	case BTF_KIND_TYPEDEF:
361 	case BTF_KIND_FUNC:
362 	case BTF_KIND_FLOAT:
363 	case BTF_KIND_TYPE_TAG:
364 		return 0;
365 	case BTF_KIND_INT:
366 		*(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1));
367 		return 0;
368 	case BTF_KIND_ENUM:
369 		for (i = 0, e = btf_enum(t); i < vlen; i++, e++) {
370 			e->name_off = bswap_32(e->name_off);
371 			e->val = bswap_32(e->val);
372 		}
373 		return 0;
374 	case BTF_KIND_ENUM64:
375 		for (i = 0, e64 = btf_enum64(t); i < vlen; i++, e64++) {
376 			e64->name_off = bswap_32(e64->name_off);
377 			e64->val_lo32 = bswap_32(e64->val_lo32);
378 			e64->val_hi32 = bswap_32(e64->val_hi32);
379 		}
380 		return 0;
381 	case BTF_KIND_ARRAY:
382 		a = btf_array(t);
383 		a->type = bswap_32(a->type);
384 		a->index_type = bswap_32(a->index_type);
385 		a->nelems = bswap_32(a->nelems);
386 		return 0;
387 	case BTF_KIND_STRUCT:
388 	case BTF_KIND_UNION:
389 		for (i = 0, m = btf_members(t); i < vlen; i++, m++) {
390 			m->name_off = bswap_32(m->name_off);
391 			m->type = bswap_32(m->type);
392 			m->offset = bswap_32(m->offset);
393 		}
394 		return 0;
395 	case BTF_KIND_FUNC_PROTO:
396 		for (i = 0, p = btf_params(t); i < vlen; i++, p++) {
397 			p->name_off = bswap_32(p->name_off);
398 			p->type = bswap_32(p->type);
399 		}
400 		return 0;
401 	case BTF_KIND_VAR:
402 		btf_var(t)->linkage = bswap_32(btf_var(t)->linkage);
403 		return 0;
404 	case BTF_KIND_DATASEC:
405 		for (i = 0, v = btf_var_secinfos(t); i < vlen; i++, v++) {
406 			v->type = bswap_32(v->type);
407 			v->offset = bswap_32(v->offset);
408 			v->size = bswap_32(v->size);
409 		}
410 		return 0;
411 	case BTF_KIND_DECL_TAG:
412 		btf_decl_tag(t)->component_idx = bswap_32(btf_decl_tag(t)->component_idx);
413 		return 0;
414 	default:
415 		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
416 		return -EINVAL;
417 	}
418 }
419 
420 static int btf_parse_type_sec(struct btf *btf)
421 {
422 	struct btf_header *hdr = btf->hdr;
423 	void *next_type = btf->types_data;
424 	void *end_type = next_type + hdr->type_len;
425 	int err, type_size;
426 
427 	while (next_type + sizeof(struct btf_type) <= end_type) {
428 		if (btf->swapped_endian)
429 			btf_bswap_type_base(next_type);
430 
431 		type_size = btf_type_size(next_type);
432 		if (type_size < 0)
433 			return type_size;
434 		if (next_type + type_size > end_type) {
435 			pr_warn("BTF type [%d] is malformed\n", btf->start_id + btf->nr_types);
436 			return -EINVAL;
437 		}
438 
439 		if (btf->swapped_endian && btf_bswap_type_rest(next_type))
440 			return -EINVAL;
441 
442 		err = btf_add_type_idx_entry(btf, next_type - btf->types_data);
443 		if (err)
444 			return err;
445 
446 		next_type += type_size;
447 		btf->nr_types++;
448 	}
449 
450 	if (next_type != end_type) {
451 		pr_warn("BTF types data is malformed\n");
452 		return -EINVAL;
453 	}
454 
455 	return 0;
456 }
457 
458 static int btf_validate_str(const struct btf *btf, __u32 str_off, const char *what, __u32 type_id)
459 {
460 	const char *s;
461 
462 	s = btf__str_by_offset(btf, str_off);
463 	if (!s) {
464 		pr_warn("btf: type [%u]: invalid %s (string offset %u)\n", type_id, what, str_off);
465 		return -EINVAL;
466 	}
467 
468 	return 0;
469 }
470 
471 static int btf_validate_id(const struct btf *btf, __u32 id, __u32 ctx_id)
472 {
473 	const struct btf_type *t;
474 
475 	t = btf__type_by_id(btf, id);
476 	if (!t) {
477 		pr_warn("btf: type [%u]: invalid referenced type ID %u\n", ctx_id, id);
478 		return -EINVAL;
479 	}
480 
481 	return 0;
482 }
483 
484 static int btf_validate_type(const struct btf *btf, const struct btf_type *t, __u32 id)
485 {
486 	__u32 kind = btf_kind(t);
487 	int err, i, n;
488 
489 	err = btf_validate_str(btf, t->name_off, "type name", id);
490 	if (err)
491 		return err;
492 
493 	switch (kind) {
494 	case BTF_KIND_UNKN:
495 	case BTF_KIND_INT:
496 	case BTF_KIND_FWD:
497 	case BTF_KIND_FLOAT:
498 		break;
499 	case BTF_KIND_PTR:
500 	case BTF_KIND_TYPEDEF:
501 	case BTF_KIND_VOLATILE:
502 	case BTF_KIND_CONST:
503 	case BTF_KIND_RESTRICT:
504 	case BTF_KIND_VAR:
505 	case BTF_KIND_DECL_TAG:
506 	case BTF_KIND_TYPE_TAG:
507 		err = btf_validate_id(btf, t->type, id);
508 		if (err)
509 			return err;
510 		break;
511 	case BTF_KIND_ARRAY: {
512 		const struct btf_array *a = btf_array(t);
513 
514 		err = btf_validate_id(btf, a->type, id);
515 		err = err ?: btf_validate_id(btf, a->index_type, id);
516 		if (err)
517 			return err;
518 		break;
519 	}
520 	case BTF_KIND_STRUCT:
521 	case BTF_KIND_UNION: {
522 		const struct btf_member *m = btf_members(t);
523 
524 		n = btf_vlen(t);
525 		for (i = 0; i < n; i++, m++) {
526 			err = btf_validate_str(btf, m->name_off, "field name", id);
527 			err = err ?: btf_validate_id(btf, m->type, id);
528 			if (err)
529 				return err;
530 		}
531 		break;
532 	}
533 	case BTF_KIND_ENUM: {
534 		const struct btf_enum *m = btf_enum(t);
535 
536 		n = btf_vlen(t);
537 		for (i = 0; i < n; i++, m++) {
538 			err = btf_validate_str(btf, m->name_off, "enum name", id);
539 			if (err)
540 				return err;
541 		}
542 		break;
543 	}
544 	case BTF_KIND_ENUM64: {
545 		const struct btf_enum64 *m = btf_enum64(t);
546 
547 		n = btf_vlen(t);
548 		for (i = 0; i < n; i++, m++) {
549 			err = btf_validate_str(btf, m->name_off, "enum name", id);
550 			if (err)
551 				return err;
552 		}
553 		break;
554 	}
555 	case BTF_KIND_FUNC: {
556 		const struct btf_type *ft;
557 
558 		err = btf_validate_id(btf, t->type, id);
559 		if (err)
560 			return err;
561 		ft = btf__type_by_id(btf, t->type);
562 		if (btf_kind(ft) != BTF_KIND_FUNC_PROTO) {
563 			pr_warn("btf: type [%u]: referenced type [%u] is not FUNC_PROTO\n", id, t->type);
564 			return -EINVAL;
565 		}
566 		break;
567 	}
568 	case BTF_KIND_FUNC_PROTO: {
569 		const struct btf_param *m = btf_params(t);
570 
571 		n = btf_vlen(t);
572 		for (i = 0; i < n; i++, m++) {
573 			err = btf_validate_str(btf, m->name_off, "param name", id);
574 			err = err ?: btf_validate_id(btf, m->type, id);
575 			if (err)
576 				return err;
577 		}
578 		break;
579 	}
580 	case BTF_KIND_DATASEC: {
581 		const struct btf_var_secinfo *m = btf_var_secinfos(t);
582 
583 		n = btf_vlen(t);
584 		for (i = 0; i < n; i++, m++) {
585 			err = btf_validate_id(btf, m->type, id);
586 			if (err)
587 				return err;
588 		}
589 		break;
590 	}
591 	default:
592 		pr_warn("btf: type [%u]: unrecognized kind %u\n", id, kind);
593 		return -EINVAL;
594 	}
595 	return 0;
596 }
597 
598 /* Validate basic sanity of BTF. It's intentionally less thorough than
599  * the kernel's validation and checks only the properties of BTF that libbpf
600  * relies on to be correct (e.g., valid type IDs, valid string offsets, etc.)
601  */
602 static int btf_sanity_check(const struct btf *btf)
603 {
604 	const struct btf_type *t;
605 	__u32 i, n = btf__type_cnt(btf);
606 	int err;
607 
608 	for (i = btf->start_id; i < n; i++) {
609 		t = btf_type_by_id(btf, i);
610 		err = btf_validate_type(btf, t, i);
611 		if (err)
612 			return err;
613 	}
614 	return 0;
615 }
616 
617 __u32 btf__type_cnt(const struct btf *btf)
618 {
619 	return btf->start_id + btf->nr_types;
620 }
621 
622 const struct btf *btf__base_btf(const struct btf *btf)
623 {
624 	return btf->base_btf;
625 }
626 
627 /* internal helper returning non-const pointer to a type */
628 struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id)
629 {
630 	if (type_id == 0)
631 		return &btf_void;
632 	if (type_id < btf->start_id)
633 		return btf_type_by_id(btf->base_btf, type_id);
634 	return btf->types_data + btf->type_offs[type_id - btf->start_id];
635 }
636 
637 const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
638 {
639 	if (type_id >= btf->start_id + btf->nr_types)
640 		return errno = EINVAL, NULL;
641 	return btf_type_by_id((struct btf *)btf, type_id);
642 }
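
/* Usage sketch (illustrative only, not part of the original source): walking
 * every type reachable through a (possibly split) BTF. For split BTF this
 * covers base BTF types too, since btf__type_by_id() transparently recurses
 * into the base:
 *
 *	__u32 id, n = btf__type_cnt(btf);
 *
 *	for (id = 1; id < n; id++) {
 *		const struct btf_type *t = btf__type_by_id(btf, id);
 *
 *		printf("[%u] kind %u\n", id, btf_kind(t));
 *	}
 */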
643 
644 static int determine_ptr_size(const struct btf *btf)
645 {
646 	static const char * const long_aliases[] = {
647 		"long",
648 		"long int",
649 		"int long",
650 		"unsigned long",
651 		"long unsigned",
652 		"unsigned long int",
653 		"unsigned int long",
654 		"long unsigned int",
655 		"long int unsigned",
656 		"int unsigned long",
657 		"int long unsigned",
658 	};
659 	const struct btf_type *t;
660 	const char *name;
661 	int i, j, n;
662 
663 	if (btf->base_btf && btf->base_btf->ptr_sz > 0)
664 		return btf->base_btf->ptr_sz;
665 
666 	n = btf__type_cnt(btf);
667 	for (i = 1; i < n; i++) {
668 		t = btf__type_by_id(btf, i);
669 		if (!btf_is_int(t))
670 			continue;
671 
672 		if (t->size != 4 && t->size != 8)
673 			continue;
674 
675 		name = btf__name_by_offset(btf, t->name_off);
676 		if (!name)
677 			continue;
678 
679 		for (j = 0; j < ARRAY_SIZE(long_aliases); j++) {
680 			if (strcmp(name, long_aliases[j]) == 0)
681 				return t->size;
682 		}
683 	}
684 
685 	return -1;
686 }
687 
688 static size_t btf_ptr_sz(const struct btf *btf)
689 {
690 	if (!btf->ptr_sz)
691 		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
692 	return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
693 }
694 
695 /* Return the pointer size this BTF instance assumes. The size is heuristically
696  * determined by looking for a 'long' or 'unsigned long' integer type and
697  * recording its size in bytes. If BTF type information doesn't have any such
698  * type, this function returns 0. In that case, the native architecture's
699  * pointer size is assumed internally, i.e., either 4 or 8, depending on the
700  * architecture libbpf was compiled for. It's possible to override the
701  * guessed value using the btf__set_pointer_size() API.
702  */
703 size_t btf__pointer_size(const struct btf *btf)
704 {
705 	if (!btf->ptr_sz)
706 		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
707 
708 	if (btf->ptr_sz < 0)
709 		/* not enough BTF type info to guess */
710 		return 0;
711 
712 	return btf->ptr_sz;
713 }
714 
715 /* Override or set pointer size in bytes. Only values of 4 and 8 are
716  * supported.
717  */
718 int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
719 {
720 	if (ptr_sz != 4 && ptr_sz != 8)
721 		return libbpf_err(-EINVAL);
722 	btf->ptr_sz = ptr_sz;
723 	return 0;
724 }
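
/* Usage sketch (illustrative only, not part of the original source): when the
 * heuristic above can't find a 'long' type to guess from (e.g. freshly
 * constructed BTF for a 64-bit target), pin the pointer size explicitly:
 *
 *	if (btf__pointer_size(btf) == 0) {
 *		err = btf__set_pointer_size(btf, 8);
 *		if (err)
 *			return err;
 *	}
 */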
725 
726 static bool is_host_big_endian(void)
727 {
728 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
729 	return false;
730 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
731 	return true;
732 #else
733 # error "Unrecognized __BYTE_ORDER__"
734 #endif
735 }
736 
737 enum btf_endianness btf__endianness(const struct btf *btf)
738 {
739 	if (is_host_big_endian())
740 		return btf->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
741 	else
742 		return btf->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
743 }
744 
745 int btf__set_endianness(struct btf *btf, enum btf_endianness endian)
746 {
747 	if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
748 		return libbpf_err(-EINVAL);
749 
750 	btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
751 	if (!btf->swapped_endian) {
752 		free(btf->raw_data_swapped);
753 		btf->raw_data_swapped = NULL;
754 	}
755 	return 0;
756 }
757 
758 static bool btf_type_is_void(const struct btf_type *t)
759 {
760 	return t == &btf_void || btf_is_fwd(t);
761 }
762 
763 static bool btf_type_is_void_or_null(const struct btf_type *t)
764 {
765 	return !t || btf_type_is_void(t);
766 }
767 
768 #define MAX_RESOLVE_DEPTH 32
769 
770 __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
771 {
772 	const struct btf_array *array;
773 	const struct btf_type *t;
774 	__u32 nelems = 1;
775 	__s64 size = -1;
776 	int i;
777 
778 	t = btf__type_by_id(btf, type_id);
779 	for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) {
780 		switch (btf_kind(t)) {
781 		case BTF_KIND_INT:
782 		case BTF_KIND_STRUCT:
783 		case BTF_KIND_UNION:
784 		case BTF_KIND_ENUM:
785 		case BTF_KIND_ENUM64:
786 		case BTF_KIND_DATASEC:
787 		case BTF_KIND_FLOAT:
788 			size = t->size;
789 			goto done;
790 		case BTF_KIND_PTR:
791 			size = btf_ptr_sz(btf);
792 			goto done;
793 		case BTF_KIND_TYPEDEF:
794 		case BTF_KIND_VOLATILE:
795 		case BTF_KIND_CONST:
796 		case BTF_KIND_RESTRICT:
797 		case BTF_KIND_VAR:
798 		case BTF_KIND_DECL_TAG:
799 		case BTF_KIND_TYPE_TAG:
800 			type_id = t->type;
801 			break;
802 		case BTF_KIND_ARRAY:
803 			array = btf_array(t);
804 			if (nelems && array->nelems > UINT32_MAX / nelems)
805 				return libbpf_err(-E2BIG);
806 			nelems *= array->nelems;
807 			type_id = array->type;
808 			break;
809 		default:
810 			return libbpf_err(-EINVAL);
811 		}
812 
813 		t = btf__type_by_id(btf, type_id);
814 	}
815 
816 done:
817 	if (size < 0)
818 		return libbpf_err(-EINVAL);
819 	if (nelems && size > UINT32_MAX / nelems)
820 		return libbpf_err(-E2BIG);
821 
822 	return nelems * size;
823 }
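
/* Worked example (annotation, not part of the original source): for
 * 'typedef int arr_t[4]' the loop above walks TYPEDEF -> ARRAY (nelems = 4)
 * -> INT (size = 4) and returns 4 * 4 = 16 bytes; for any PTR it returns the
 * target's pointer size.
 *
 *	__s64 sz = btf__resolve_size(btf, type_id);
 *
 *	if (sz < 0)
 *		return sz;
 *	printf("type [%u] occupies %lld bytes\n", type_id, (long long)sz);
 */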
824 
825 int btf__align_of(const struct btf *btf, __u32 id)
826 {
827 	const struct btf_type *t = btf__type_by_id(btf, id);
828 	__u16 kind = btf_kind(t);
829 
830 	switch (kind) {
831 	case BTF_KIND_INT:
832 	case BTF_KIND_ENUM:
833 	case BTF_KIND_ENUM64:
834 	case BTF_KIND_FLOAT:
835 		return min(btf_ptr_sz(btf), (size_t)t->size);
836 	case BTF_KIND_PTR:
837 		return btf_ptr_sz(btf);
838 	case BTF_KIND_TYPEDEF:
839 	case BTF_KIND_VOLATILE:
840 	case BTF_KIND_CONST:
841 	case BTF_KIND_RESTRICT:
842 	case BTF_KIND_TYPE_TAG:
843 		return btf__align_of(btf, t->type);
844 	case BTF_KIND_ARRAY:
845 		return btf__align_of(btf, btf_array(t)->type);
846 	case BTF_KIND_STRUCT:
847 	case BTF_KIND_UNION: {
848 		const struct btf_member *m = btf_members(t);
849 		__u16 vlen = btf_vlen(t);
850 		int i, max_align = 1, align;
851 
852 		for (i = 0; i < vlen; i++, m++) {
853 			align = btf__align_of(btf, m->type);
854 			if (align <= 0)
855 				return libbpf_err(align);
856 			max_align = max(max_align, align);
857 
858 			/* if field offset isn't aligned according to field
859 			 * type's alignment, then struct must be packed
860 			 */
861 			if (btf_member_bitfield_size(t, i) == 0 &&
862 			    (m->offset % (8 * align)) != 0)
863 				return 1;
864 		}
865 
866 		/* if struct/union size isn't a multiple of its alignment,
867 		 * then struct must be packed
868 		 */
869 		if ((t->size % max_align) != 0)
870 			return 1;
871 
872 		return max_align;
873 	}
874 	default:
875 		pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
876 		return errno = EINVAL, 0;
877 	}
878 }
879 
880 int btf__resolve_type(const struct btf *btf, __u32 type_id)
881 {
882 	const struct btf_type *t;
883 	int depth = 0;
884 
885 	t = btf__type_by_id(btf, type_id);
886 	while (depth < MAX_RESOLVE_DEPTH &&
887 	       !btf_type_is_void_or_null(t) &&
888 	       (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
889 		type_id = t->type;
890 		t = btf__type_by_id(btf, type_id);
891 		depth++;
892 	}
893 
894 	if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
895 		return libbpf_err(-EINVAL);
896 
897 	return type_id;
898 }
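
/* Usage sketch (illustrative only, not part of the original source): stripping
 * typedefs, modifiers, and vars to reach the underlying type definition
 * (*some_typedef_id* is a placeholder):
 *
 *	int id = btf__resolve_type(btf, some_typedef_id);
 *	const struct btf_type *t;
 *
 *	if (id < 0)
 *		return id;
 *	t = btf__type_by_id(btf, id);
 *	if (btf_is_struct(t))
 *		printf("resolved to a struct of %u bytes\n", t->size);
 */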
899 
900 __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
901 {
902 	__u32 i, nr_types = btf__type_cnt(btf);
903 
904 	if (!strcmp(type_name, "void"))
905 		return 0;
906 
907 	for (i = 1; i < nr_types; i++) {
908 		const struct btf_type *t = btf__type_by_id(btf, i);
909 		const char *name = btf__name_by_offset(btf, t->name_off);
910 
911 		if (name && !strcmp(type_name, name))
912 			return i;
913 	}
914 
915 	return libbpf_err(-ENOENT);
916 }
917 
918 static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
919 				   const char *type_name, __u32 kind)
920 {
921 	__u32 i, nr_types = btf__type_cnt(btf);
922 
923 	if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
924 		return 0;
925 
926 	for (i = start_id; i < nr_types; i++) {
927 		const struct btf_type *t = btf__type_by_id(btf, i);
928 		const char *name;
929 
930 		if (btf_kind(t) != kind)
931 			continue;
932 		name = btf__name_by_offset(btf, t->name_off);
933 		if (name && !strcmp(type_name, name))
934 			return i;
935 	}
936 
937 	return libbpf_err(-ENOENT);
938 }
939 
940 __s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
941 				 __u32 kind)
942 {
943 	return btf_find_by_name_kind(btf, btf->start_id, type_name, kind);
944 }
945 
946 __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
947 			     __u32 kind)
948 {
949 	return btf_find_by_name_kind(btf, 1, type_name, kind);
950 }
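
/* Usage sketch (illustrative only, not part of the original source): looking
 * up a type by name and kind, e.g. 'struct task_struct' in previously loaded
 * vmlinux BTF:
 *
 *	__s32 id = btf__find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT);
 *
 *	if (id < 0)
 *		return id;
 *	printf("task_struct is type [%d], %u bytes\n", id,
 *	       btf__type_by_id(btf, id)->size);
 */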
951 
952 static bool btf_is_modifiable(const struct btf *btf)
953 {
954 	return (void *)btf->hdr != btf->raw_data;
955 }
956 
957 static void btf_free_raw_data(struct btf *btf)
958 {
959 	if (btf->raw_data_is_mmap) {
960 		munmap(btf->raw_data, btf->raw_size);
961 		btf->raw_data_is_mmap = false;
962 	} else {
963 		free(btf->raw_data);
964 	}
965 	btf->raw_data = NULL;
966 }
967 
968 void btf__free(struct btf *btf)
969 {
970 	if (IS_ERR_OR_NULL(btf))
971 		return;
972 
973 	if (btf->fd >= 0)
974 		close(btf->fd);
975 
976 	if (btf_is_modifiable(btf)) {
977 		/* if BTF was modified after loading, it will have a split
978 		 * in-memory representation for header, types, and strings
979 		 * sections, so we need to free all of them individually. It
980 		 * might still have a cached contiguous raw data present,
981 		 * which will be unconditionally freed below.
982 		 */
983 		free(btf->hdr);
984 		free(btf->types_data);
985 		strset__free(btf->strs_set);
986 	}
987 	btf_free_raw_data(btf);
988 	free(btf->raw_data_swapped);
989 	free(btf->type_offs);
990 	if (btf->owns_base)
991 		btf__free(btf->base_btf);
992 	free(btf);
993 }
994 
995 static struct btf *btf_new_empty(struct btf *base_btf)
996 {
997 	struct btf *btf;
998 
999 	btf = calloc(1, sizeof(*btf));
1000 	if (!btf)
1001 		return ERR_PTR(-ENOMEM);
1002 
1003 	btf->nr_types = 0;
1004 	btf->start_id = 1;
1005 	btf->start_str_off = 0;
1006 	btf->fd = -1;
1007 	btf->ptr_sz = sizeof(void *);
1008 	btf->swapped_endian = false;
1009 
1010 	if (base_btf) {
1011 		btf->base_btf = base_btf;
1012 		btf->start_id = btf__type_cnt(base_btf);
1013 		btf->start_str_off = base_btf->hdr->str_len + base_btf->start_str_off;
1014 		btf->swapped_endian = base_btf->swapped_endian;
1015 	}
1016 
1017 	/* +1 for empty string at offset 0 */
1018 	btf->raw_size = sizeof(struct btf_header) + (base_btf ? 0 : 1);
1019 	btf->raw_data = calloc(1, btf->raw_size);
1020 	if (!btf->raw_data) {
1021 		free(btf);
1022 		return ERR_PTR(-ENOMEM);
1023 	}
1024 
1025 	btf->hdr = btf->raw_data;
1026 	btf->hdr->hdr_len = sizeof(struct btf_header);
1027 	btf->hdr->magic = BTF_MAGIC;
1028 	btf->hdr->version = BTF_VERSION;
1029 
1030 	btf->types_data = btf->raw_data + btf->hdr->hdr_len;
1031 	btf->strs_data = btf->raw_data + btf->hdr->hdr_len;
1032 	btf->hdr->str_len = base_btf ? 0 : 1; /* empty string at offset 0 */
1033 
1034 	return btf;
1035 }
1036 
1037 struct btf *btf__new_empty(void)
1038 {
1039 	return libbpf_ptr(btf_new_empty(NULL));
1040 }
1041 
1042 struct btf *btf__new_empty_split(struct btf *base_btf)
1043 {
1044 	return libbpf_ptr(btf_new_empty(base_btf));
1045 }
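
/* Usage sketch (illustrative only, not part of the original source): building
 * minimal BTF from scratch with the empty constructor above. btf__add_ptr()
 * is part of the public libbpf API declared in btf.h:
 *
 *	struct btf *btf = btf__new_empty();
 *	int int_id, ptr_id;
 *
 *	if (!btf)
 *		return -errno;
 *	int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
 *	ptr_id = btf__add_ptr(btf, int_id);
 *	if (int_id < 0 || ptr_id < 0) {
 *		btf__free(btf);
 *		return -EINVAL;
 *	}
 */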
1046 
1047 static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf, bool is_mmap)
1048 {
1049 	struct btf *btf;
1050 	int err;
1051 
1052 	btf = calloc(1, sizeof(struct btf));
1053 	if (!btf)
1054 		return ERR_PTR(-ENOMEM);
1055 
1056 	btf->nr_types = 0;
1057 	btf->start_id = 1;
1058 	btf->start_str_off = 0;
1059 	btf->fd = -1;
1060 
1061 	if (base_btf) {
1062 		btf->base_btf = base_btf;
1063 		btf->start_id = btf__type_cnt(base_btf);
1064 		btf->start_str_off = base_btf->hdr->str_len;
1065 	}
1066 
1067 	if (is_mmap) {
1068 		btf->raw_data = (void *)data;
1069 		btf->raw_data_is_mmap = true;
1070 	} else {
1071 		btf->raw_data = malloc(size);
1072 		if (!btf->raw_data) {
1073 			err = -ENOMEM;
1074 			goto done;
1075 		}
1076 		memcpy(btf->raw_data, data, size);
1077 	}
1078 
1079 	btf->raw_size = size;
1080 
1081 	btf->hdr = btf->raw_data;
1082 	err = btf_parse_hdr(btf);
1083 	if (err)
1084 		goto done;
1085 
1086 	btf->strs_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->str_off;
1087 	btf->types_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->type_off;
1088 
1089 	err = btf_parse_str_sec(btf);
1090 	err = err ?: btf_parse_type_sec(btf);
1091 	err = err ?: btf_sanity_check(btf);
1092 	if (err)
1093 		goto done;
1094 
1095 done:
1096 	if (err) {
1097 		btf__free(btf);
1098 		return ERR_PTR(err);
1099 	}
1100 
1101 	return btf;
1102 }
1103 
1104 struct btf *btf__new(const void *data, __u32 size)
1105 {
1106 	return libbpf_ptr(btf_new(data, size, NULL, false));
1107 }
1108 
1109 struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf)
1110 {
1111 	return libbpf_ptr(btf_new(data, size, base_btf, false));
1112 }
1113 
1114 struct btf_elf_secs {
1115 	Elf_Data *btf_data;
1116 	Elf_Data *btf_ext_data;
1117 	Elf_Data *btf_base_data;
1118 };
1119 
1120 static int btf_find_elf_sections(Elf *elf, const char *path, struct btf_elf_secs *secs)
1121 {
1122 	Elf_Scn *scn = NULL;
1123 	Elf_Data *data;
1124 	GElf_Ehdr ehdr;
1125 	size_t shstrndx;
1126 	int idx = 0;
1127 
1128 	if (!gelf_getehdr(elf, &ehdr)) {
1129 		pr_warn("failed to get EHDR from %s\n", path);
1130 		goto err;
1131 	}
1132 
1133 	if (elf_getshdrstrndx(elf, &shstrndx)) {
1134 		pr_warn("failed to get section names section index for %s\n",
1135 			path);
1136 		goto err;
1137 	}
1138 
1139 	if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
1140 		pr_warn("failed to get e_shstrndx from %s\n", path);
1141 		goto err;
1142 	}
1143 
1144 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
1145 		Elf_Data **field;
1146 		GElf_Shdr sh;
1147 		char *name;
1148 
1149 		idx++;
1150 		if (gelf_getshdr(scn, &sh) != &sh) {
1151 			pr_warn("failed to get section(%d) header from %s\n",
1152 				idx, path);
1153 			goto err;
1154 		}
1155 		name = elf_strptr(elf, shstrndx, sh.sh_name);
1156 		if (!name) {
1157 			pr_warn("failed to get section(%d) name from %s\n",
1158 				idx, path);
1159 			goto err;
1160 		}
1161 
1162 		if (strcmp(name, BTF_ELF_SEC) == 0)
1163 			field = &secs->btf_data;
1164 		else if (strcmp(name, BTF_EXT_ELF_SEC) == 0)
1165 			field = &secs->btf_ext_data;
1166 		else if (strcmp(name, BTF_BASE_ELF_SEC) == 0)
1167 			field = &secs->btf_base_data;
1168 		else
1169 			continue;
1170 
1171 		if (sh.sh_type != SHT_PROGBITS) {
1172 			pr_warn("unexpected section type (%d) of section(%d, %s) from %s\n",
1173 				sh.sh_type, idx, name, path);
1174 			goto err;
1175 		}
1176 
1177 		data = elf_getdata(scn, 0);
1178 		if (!data) {
1179 			pr_warn("failed to get section(%d, %s) data from %s\n",
1180 				idx, name, path);
1181 			goto err;
1182 		}
1183 		*field = data;
1184 	}
1185 
1186 	return 0;
1187 
1188 err:
1189 	return -LIBBPF_ERRNO__FORMAT;
1190 }
1191 
1192 static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
1193 				 struct btf_ext **btf_ext)
1194 {
1195 	struct btf_elf_secs secs = {};
1196 	struct btf *dist_base_btf = NULL;
1197 	struct btf *btf = NULL;
1198 	int err = 0, fd = -1;
1199 	Elf *elf = NULL;
1200 
1201 	if (elf_version(EV_CURRENT) == EV_NONE) {
1202 		pr_warn("failed to init libelf for %s\n", path);
1203 		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
1204 	}
1205 
1206 	fd = open(path, O_RDONLY | O_CLOEXEC);
1207 	if (fd < 0) {
1208 		err = -errno;
1209 		pr_warn("failed to open %s: %s\n", path, errstr(err));
1210 		return ERR_PTR(err);
1211 	}
1212 
1213 	elf = elf_begin(fd, ELF_C_READ, NULL);
1214 	if (!elf) {
1215 		err = -LIBBPF_ERRNO__FORMAT;
1216 		pr_warn("failed to open %s as ELF file\n", path);
1217 		goto done;
1218 	}
1219 
1220 	err = btf_find_elf_sections(elf, path, &secs);
1221 	if (err)
1222 		goto done;
1223 
1224 	if (!secs.btf_data) {
1225 		pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path);
1226 		err = -ENODATA;
1227 		goto done;
1228 	}
1229 
1230 	if (secs.btf_base_data) {
1231 		dist_base_btf = btf_new(secs.btf_base_data->d_buf, secs.btf_base_data->d_size,
1232 					NULL, false);
1233 		if (IS_ERR(dist_base_btf)) {
1234 			err = PTR_ERR(dist_base_btf);
1235 			dist_base_btf = NULL;
1236 			goto done;
1237 		}
1238 	}
1239 
1240 	btf = btf_new(secs.btf_data->d_buf, secs.btf_data->d_size,
1241 		      dist_base_btf ?: base_btf, false);
1242 	if (IS_ERR(btf)) {
1243 		err = PTR_ERR(btf);
1244 		goto done;
1245 	}
1246 	if (dist_base_btf && base_btf) {
1247 		err = btf__relocate(btf, base_btf);
1248 		if (err)
1249 			goto done;
1250 		btf__free(dist_base_btf);
1251 		dist_base_btf = NULL;
1252 	}
1253 
1254 	if (dist_base_btf)
1255 		btf->owns_base = true;
1256 
1257 	switch (gelf_getclass(elf)) {
1258 	case ELFCLASS32:
1259 		btf__set_pointer_size(btf, 4);
1260 		break;
1261 	case ELFCLASS64:
1262 		btf__set_pointer_size(btf, 8);
1263 		break;
1264 	default:
1265 		pr_warn("failed to get ELF class (bitness) for %s\n", path);
1266 		break;
1267 	}
1268 
1269 	if (btf_ext && secs.btf_ext_data) {
1270 		*btf_ext = btf_ext__new(secs.btf_ext_data->d_buf, secs.btf_ext_data->d_size);
1271 		if (IS_ERR(*btf_ext)) {
1272 			err = PTR_ERR(*btf_ext);
1273 			goto done;
1274 		}
1275 	} else if (btf_ext) {
1276 		*btf_ext = NULL;
1277 	}
1278 done:
1279 	if (elf)
1280 		elf_end(elf);
1281 	close(fd);
1282 
1283 	if (!err)
1284 		return btf;
1285 
1286 	if (btf_ext)
1287 		btf_ext__free(*btf_ext);
1288 	btf__free(dist_base_btf);
1289 	btf__free(btf);
1290 
1291 	return ERR_PTR(err);
1292 }
1293 
1294 struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
1295 {
1296 	return libbpf_ptr(btf_parse_elf(path, NULL, btf_ext));
1297 }
1298 
1299 struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf)
1300 {
1301 	return libbpf_ptr(btf_parse_elf(path, base_btf, NULL));
1302 }
1303 
1304 static struct btf *btf_parse_raw(const char *path, struct btf *base_btf)
1305 {
1306 	struct btf *btf = NULL;
1307 	void *data = NULL;
1308 	FILE *f = NULL;
1309 	__u16 magic;
1310 	int err = 0;
1311 	long sz;
1312 
1313 	f = fopen(path, "rbe");
1314 	if (!f) {
1315 		err = -errno;
1316 		goto err_out;
1317 	}
1318 
1319 	/* check BTF magic */
1320 	if (fread(&magic, 1, sizeof(magic), f) < sizeof(magic)) {
1321 		err = -EIO;
1322 		goto err_out;
1323 	}
1324 	if (magic != BTF_MAGIC && magic != bswap_16(BTF_MAGIC)) {
1325 		/* definitely not a raw BTF */
1326 		err = -EPROTO;
1327 		goto err_out;
1328 	}
1329 
1330 	/* get file size */
1331 	if (fseek(f, 0, SEEK_END)) {
1332 		err = -errno;
1333 		goto err_out;
1334 	}
1335 	sz = ftell(f);
1336 	if (sz < 0) {
1337 		err = -errno;
1338 		goto err_out;
1339 	}
1340 	/* rewind to the start */
1341 	if (fseek(f, 0, SEEK_SET)) {
1342 		err = -errno;
1343 		goto err_out;
1344 	}
1345 
1346 	/* pre-alloc memory and read all of BTF data */
1347 	data = malloc(sz);
1348 	if (!data) {
1349 		err = -ENOMEM;
1350 		goto err_out;
1351 	}
1352 	if (fread(data, 1, sz, f) < sz) {
1353 		err = -EIO;
1354 		goto err_out;
1355 	}
1356 
1357 	/* finally parse BTF data */
1358 	btf = btf_new(data, sz, base_btf, false);
1359 
1360 err_out:
1361 	free(data);
1362 	if (f)
1363 		fclose(f);
1364 	return err ? ERR_PTR(err) : btf;
1365 }
1366 
1367 struct btf *btf__parse_raw(const char *path)
1368 {
1369 	return libbpf_ptr(btf_parse_raw(path, NULL));
1370 }
1371 
1372 struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf)
1373 {
1374 	return libbpf_ptr(btf_parse_raw(path, base_btf));
1375 }
1376 
1377 static struct btf *btf_parse_raw_mmap(const char *path, struct btf *base_btf)
1378 {
1379 	struct stat st;
1380 	void *data;
1381 	struct btf *btf;
1382 	int fd, err;
1383 
1384 	fd = open(path, O_RDONLY);
1385 	if (fd < 0)
1386 		return ERR_PTR(-errno);
1387 
1388 	if (fstat(fd, &st) < 0) {
1389 		err = -errno;
1390 		close(fd);
1391 		return ERR_PTR(err);
1392 	}
1393 
1394 	data = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
1395 	err = -errno;
1396 	close(fd);
1397 
1398 	if (data == MAP_FAILED)
1399 		return ERR_PTR(err);
1400 
1401 	btf = btf_new(data, st.st_size, base_btf, true);
1402 	if (IS_ERR(btf))
1403 		munmap(data, st.st_size);
1404 
1405 	return btf;
1406 }
1407 
1408 static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext)
1409 {
1410 	struct btf *btf;
1411 	int err;
1412 
1413 	if (btf_ext)
1414 		*btf_ext = NULL;
1415 
1416 	btf = btf_parse_raw(path, base_btf);
1417 	err = libbpf_get_error(btf);
1418 	if (!err)
1419 		return btf;
1420 	if (err != -EPROTO)
1421 		return ERR_PTR(err);
1422 	return btf_parse_elf(path, base_btf, btf_ext);
1423 }
1424 
1425 struct btf *btf__parse(const char *path, struct btf_ext **btf_ext)
1426 {
1427 	return libbpf_ptr(btf_parse(path, NULL, btf_ext));
1428 }
1429 
1430 struct btf *btf__parse_split(const char *path, struct btf *base_btf)
1431 {
1432 	return libbpf_ptr(btf_parse(path, base_btf, NULL));
1433 }
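
/* Usage sketch (illustrative only, not part of the original source):
 * btf__parse() auto-detects raw vs ELF input; file paths below are
 * placeholders:
 *
 *	struct btf_ext *btf_ext = NULL;
 *	struct btf *btf = btf__parse("prog.bpf.o", &btf_ext);
 *
 *	if (!btf)
 *		return -errno;
 *	printf("parsed %u types\n", btf__type_cnt(btf));
 *	btf_ext__free(btf_ext);
 *	btf__free(btf);
 *
 * Split BTF of a kernel module would similarly be loaded on top of vmlinux
 * BTF with btf__parse_split("/sys/kernel/btf/some_module", vmlinux_btf).
 */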
1434 
1435 static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);
1436 
1437 int btf_load_into_kernel(struct btf *btf,
1438 			 char *log_buf, size_t log_sz, __u32 log_level,
1439 			 int token_fd)
1440 {
1441 	LIBBPF_OPTS(bpf_btf_load_opts, opts);
1442 	__u32 buf_sz = 0, raw_size;
1443 	char *buf = NULL, *tmp;
1444 	void *raw_data;
1445 	int err = 0;
1446 
1447 	if (btf->fd >= 0)
1448 		return libbpf_err(-EEXIST);
1449 	if (log_sz && !log_buf)
1450 		return libbpf_err(-EINVAL);
1451 
1452 	/* cache native raw data representation */
1453 	raw_data = btf_get_raw_data(btf, &raw_size, false);
1454 	if (!raw_data) {
1455 		err = -ENOMEM;
1456 		goto done;
1457 	}
1458 	btf->raw_size = raw_size;
1459 	btf->raw_data = raw_data;
1460 
1461 retry_load:
1462 	/* if log_level is 0, we won't provide log_buf/log_size to the kernel
1463 	 * initially. Only if BTF loading fails do we bump log_level to 1 and
1464 	 * retry, using either the auto-allocated or the custom log_buf. This way
1465 	 * a non-NULL custom log_buf provides a buffer just in case, while we
1466 	 * still hope for a successful load with no need for the log.
1467 	 */
1468 	if (log_level) {
1469 		/* if caller didn't provide custom log_buf, we'll keep
1470 		 * allocating our own progressively bigger buffers for BTF
1471 		 * verification log
1472 		 */
1473 		if (!log_buf) {
1474 			buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2);
1475 			tmp = realloc(buf, buf_sz);
1476 			if (!tmp) {
1477 				err = -ENOMEM;
1478 				goto done;
1479 			}
1480 			buf = tmp;
1481 			buf[0] = '\0';
1482 		}
1483 
1484 		opts.log_buf = log_buf ? log_buf : buf;
1485 		opts.log_size = log_buf ? log_sz : buf_sz;
1486 		opts.log_level = log_level;
1487 	}
1488 
1489 	opts.token_fd = token_fd;
1490 	if (token_fd)
1491 		opts.btf_flags |= BPF_F_TOKEN_FD;
1492 
1493 	btf->fd = bpf_btf_load(raw_data, raw_size, &opts);
1494 	if (btf->fd < 0) {
1495 		/* time to turn on verbose mode and try again */
1496 		if (log_level == 0) {
1497 			log_level = 1;
1498 			goto retry_load;
1499 		}
1500 		/* only retry if caller didn't provide custom log_buf, but
1501 		 * make sure we can never overflow buf_sz
1502 		 */
1503 		if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2)
1504 			goto retry_load;
1505 
1506 		err = -errno;
1507 		pr_warn("BTF loading error: %s\n", errstr(err));
1508 		/* don't print out contents of custom log_buf */
1509 		if (!log_buf && buf[0])
1510 			pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf);
1511 	}
1512 
1513 done:
1514 	free(buf);
1515 	return libbpf_err(err);
1516 }
1517 
1518 int btf__load_into_kernel(struct btf *btf)
1519 {
1520 	return btf_load_into_kernel(btf, NULL, 0, 0, 0);
1521 }
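
/* Usage sketch (illustrative only, not part of the original source): loading
 * BTF into the kernel; the resulting FD stays owned by the btf object and is
 * closed by btf__free():
 *
 *	int err = btf__load_into_kernel(btf);
 *
 *	if (err)
 *		return err;
 *	printf("BTF loaded, fd=%d\n", btf__fd(btf));
 */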
1522 
1523 int btf__fd(const struct btf *btf)
1524 {
1525 	return btf->fd;
1526 }
1527 
1528 void btf__set_fd(struct btf *btf, int fd)
1529 {
1530 	btf->fd = fd;
1531 }
1532 
1533 static const void *btf_strs_data(const struct btf *btf)
1534 {
1535 	return btf->strs_data ? btf->strs_data : strset__data(btf->strs_set);
1536 }
1537 
1538 static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian)
1539 {
1540 	struct btf_header *hdr = btf->hdr;
1541 	struct btf_type *t;
1542 	void *data, *p;
1543 	__u32 data_sz;
1544 	int i;
1545 
1546 	data = swap_endian ? btf->raw_data_swapped : btf->raw_data;
1547 	if (data) {
1548 		*size = btf->raw_size;
1549 		return data;
1550 	}
1551 
1552 	data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len;
1553 	data = calloc(1, data_sz);
1554 	if (!data)
1555 		return NULL;
1556 	p = data;
1557 
1558 	memcpy(p, hdr, hdr->hdr_len);
1559 	if (swap_endian)
1560 		btf_bswap_hdr(p);
1561 	p += hdr->hdr_len;
1562 
1563 	memcpy(p, btf->types_data, hdr->type_len);
1564 	if (swap_endian) {
1565 		for (i = 0; i < btf->nr_types; i++) {
1566 			t = p + btf->type_offs[i];
1567 			/* btf_bswap_type_rest() relies on native t->info, so
1568 			 * we swap base type info after we swapped all the
1569 			 * additional information
1570 			 */
1571 			if (btf_bswap_type_rest(t))
1572 				goto err_out;
1573 			btf_bswap_type_base(t);
1574 		}
1575 	}
1576 	p += hdr->type_len;
1577 
1578 	memcpy(p, btf_strs_data(btf), hdr->str_len);
1579 	p += hdr->str_len;
1580 
1581 	*size = data_sz;
1582 	return data;
1583 err_out:
1584 	free(data);
1585 	return NULL;
1586 }
1587 
1588 const void *btf__raw_data(const struct btf *btf_ro, __u32 *size)
1589 {
1590 	struct btf *btf = (struct btf *)btf_ro;
1591 	__u32 data_sz;
1592 	void *data;
1593 
1594 	data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian);
1595 	if (!data)
1596 		return errno = ENOMEM, NULL;
1597 
1598 	btf->raw_size = data_sz;
1599 	if (btf->swapped_endian)
1600 		btf->raw_data_swapped = data;
1601 	else
1602 		btf->raw_data = data;
1603 	*size = data_sz;
1604 	return data;
1605 }
1606 
1607 __attribute__((alias("btf__raw_data")))
1608 const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
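
/* Usage sketch (illustrative only, not part of the original source):
 * serializing BTF back into a raw blob, e.g. to write it out as a standalone
 * file (*out* is an assumed open FILE pointer):
 *
 *	__u32 sz;
 *	const void *raw = btf__raw_data(btf, &sz);
 *
 *	if (!raw)
 *		return -errno;
 *	if (fwrite(raw, 1, sz, out) != sz)
 *		return -EIO;
 *
 * The returned buffer is owned and cached by the btf object; don't free it.
 */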
1609 
1610 const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
1611 {
1612 	if (offset < btf->start_str_off)
1613 		return btf__str_by_offset(btf->base_btf, offset);
1614 	else if (offset - btf->start_str_off < btf->hdr->str_len)
1615 		return btf_strs_data(btf) + (offset - btf->start_str_off);
1616 	else
1617 		return errno = EINVAL, NULL;
1618 }
1619 
1620 const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
1621 {
1622 	return btf__str_by_offset(btf, offset);
1623 }
1624 
1625 struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
1626 {
1627 	struct bpf_btf_info btf_info;
1628 	__u32 len = sizeof(btf_info);
1629 	__u32 last_size;
1630 	struct btf *btf;
1631 	void *ptr;
1632 	int err;
1633 
1634 	/* we won't know btf_size until we call bpf_btf_get_info_by_fd(), so
1635 	 * let's start with a sane default - 4KiB here - and resize it only if
1636 	 * bpf_btf_get_info_by_fd() needs a bigger buffer.
1637 	 */
1638 	last_size = 4096;
1639 	ptr = malloc(last_size);
1640 	if (!ptr)
1641 		return ERR_PTR(-ENOMEM);
1642 
1643 	memset(&btf_info, 0, sizeof(btf_info));
1644 	btf_info.btf = ptr_to_u64(ptr);
1645 	btf_info.btf_size = last_size;
1646 	err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
1647 
1648 	if (!err && btf_info.btf_size > last_size) {
1649 		void *temp_ptr;
1650 
1651 		last_size = btf_info.btf_size;
1652 		temp_ptr = realloc(ptr, last_size);
1653 		if (!temp_ptr) {
1654 			btf = ERR_PTR(-ENOMEM);
1655 			goto exit_free;
1656 		}
1657 		ptr = temp_ptr;
1658 
1659 		len = sizeof(btf_info);
1660 		memset(&btf_info, 0, sizeof(btf_info));
1661 		btf_info.btf = ptr_to_u64(ptr);
1662 		btf_info.btf_size = last_size;
1663 
1664 		err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
1665 	}
1666 
1667 	if (err || btf_info.btf_size > last_size) {
1668 		btf = err ? ERR_PTR(-errno) : ERR_PTR(-E2BIG);
1669 		goto exit_free;
1670 	}
1671 
1672 	btf = btf_new(ptr, btf_info.btf_size, base_btf, false);
1673 
1674 exit_free:
1675 	free(ptr);
1676 	return btf;
1677 }
1678 
1679 struct btf *btf_load_from_kernel(__u32 id, struct btf *base_btf, int token_fd)
1680 {
1681 	struct btf *btf;
1682 	int btf_fd;
1683 	LIBBPF_OPTS(bpf_get_fd_by_id_opts, opts);
1684 
1685 	if (token_fd) {
1686 		opts.open_flags |= BPF_F_TOKEN_FD;
1687 		opts.token_fd = token_fd;
1688 	}
1689 
1690 	btf_fd = bpf_btf_get_fd_by_id_opts(id, &opts);
1691 	if (btf_fd < 0)
1692 		return libbpf_err_ptr(-errno);
1693 
1694 	btf = btf_get_from_fd(btf_fd, base_btf);
1695 	close(btf_fd);
1696 
1697 	return libbpf_ptr(btf);
1698 }
1699 
1700 struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
1701 {
1702 	return btf_load_from_kernel(id, base_btf, 0);
1703 }
1704 
1705 struct btf *btf__load_from_kernel_by_id(__u32 id)
1706 {
1707 	return btf__load_from_kernel_by_id_split(id, NULL);
1708 }
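
/* Usage sketch (illustrative only, not part of the original source): fetching
 * BTF of an already-loaded BPF object by its BTF object ID (e.g. taken from
 * bpf_prog_info.btf_id):
 *
 *	struct btf *btf = btf__load_from_kernel_by_id(btf_id);
 *
 *	if (!btf)
 *		return -errno;
 *	printf("kernel BTF [%u] has %u types\n", btf_id, btf__type_cnt(btf));
 *	btf__free(btf);
 */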
1709 
1710 static void btf_invalidate_raw_data(struct btf *btf)
1711 {
1712 	if (btf->raw_data)
1713 		btf_free_raw_data(btf);
1714 	if (btf->raw_data_swapped) {
1715 		free(btf->raw_data_swapped);
1716 		btf->raw_data_swapped = NULL;
1717 	}
1718 }
1719 
1720 /* Ensure BTF is ready to be modified (by splitting it into three memory
1721  * regions for header, types, and strings). Also invalidate cached
1722  * raw_data, if any.
1723  */
1724 static int btf_ensure_modifiable(struct btf *btf)
1725 {
1726 	void *hdr, *types;
1727 	struct strset *set = NULL;
1728 	int err = -ENOMEM;
1729 
1730 	if (btf_is_modifiable(btf)) {
1731 		/* any BTF modification invalidates raw_data */
1732 		btf_invalidate_raw_data(btf);
1733 		return 0;
1734 	}
1735 
1736 	/* split raw data into three memory regions */
1737 	hdr = malloc(btf->hdr->hdr_len);
1738 	types = malloc(btf->hdr->type_len);
1739 	if (!hdr || !types)
1740 		goto err_out;
1741 
1742 	memcpy(hdr, btf->hdr, btf->hdr->hdr_len);
1743 	memcpy(types, btf->types_data, btf->hdr->type_len);
1744 
1745 	/* build lookup index for all strings */
1746 	set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr->str_len);
1747 	if (IS_ERR(set)) {
1748 		err = PTR_ERR(set);
1749 		goto err_out;
1750 	}
1751 
1752 	/* only when everything was successful, update internal state */
1753 	btf->hdr = hdr;
1754 	btf->types_data = types;
1755 	btf->types_data_cap = btf->hdr->type_len;
1756 	btf->strs_data = NULL;
1757 	btf->strs_set = set;
1758 	/* if BTF was created from scratch, all strings are guaranteed to be
1759 	 * unique and deduplicated
1760 	 */
1761 	if (btf->hdr->str_len == 0)
1762 		btf->strs_deduped = true;
1763 	if (!btf->base_btf && btf->hdr->str_len == 1)
1764 		btf->strs_deduped = true;
1765 
1766 	/* invalidate raw_data representation */
1767 	btf_invalidate_raw_data(btf);
1768 
1769 	return 0;
1770 
1771 err_out:
1772 	strset__free(set);
1773 	free(hdr);
1774 	free(types);
1775 	return err;
1776 }
1777 
1778 /* Find an offset in BTF string section that corresponds to a given string *s*.
1779  * Returns:
1780  *   - >0 offset into string section, if string is found;
1781  *   - -ENOENT, if string is not in the string section;
1782  *   - <0, on any other error.
1783  */
1784 int btf__find_str(struct btf *btf, const char *s)
1785 {
1786 	int off;
1787 
1788 	if (btf->base_btf) {
1789 		off = btf__find_str(btf->base_btf, s);
1790 		if (off != -ENOENT)
1791 			return off;
1792 	}
1793 
1794 	/* BTF needs to be in a modifiable state to build string lookup index */
1795 	if (btf_ensure_modifiable(btf))
1796 		return libbpf_err(-ENOMEM);
1797 
1798 	off = strset__find_str(btf->strs_set, s);
1799 	if (off < 0)
1800 		return libbpf_err(off);
1801 
1802 	return btf->start_str_off + off;
1803 }
1804 
1805 /* Add a string s to the BTF string section.
1806  * Returns:
1807  *   - > 0 offset into string section, on success;
1808  *   - < 0, on error.
1809  */
1810 int btf__add_str(struct btf *btf, const char *s)
1811 {
1812 	int off;
1813 
1814 	if (btf->base_btf) {
1815 		off = btf__find_str(btf->base_btf, s);
1816 		if (off != -ENOENT)
1817 			return off;
1818 	}
1819 
1820 	if (btf_ensure_modifiable(btf))
1821 		return libbpf_err(-ENOMEM);
1822 
1823 	off = strset__add_str(btf->strs_set, s);
1824 	if (off < 0)
1825 		return libbpf_err(off);
1826 
1827 	btf->hdr->str_len = strset__data_size(btf->strs_set);
1828 
1829 	return btf->start_str_off + off;
1830 }
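
/* Usage sketch (illustrative only, not part of the original source): interning
 * a string and reading it back; adding the same string again returns the same
 * offset thanks to the strset deduplication:
 *
 *	int off = btf__add_str(btf, "my_type_name");
 *
 *	if (off < 0)
 *		return off;
 *	printf("'%s' is at offset %d\n", btf__str_by_offset(btf, off), off);
 */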
1831 
1832 static void *btf_add_type_mem(struct btf *btf, size_t add_sz)
1833 {
1834 	return libbpf_add_mem(&btf->types_data, &btf->types_data_cap, 1,
1835 			      btf->hdr->type_len, UINT_MAX, add_sz);
1836 }
1837 
1838 static void btf_type_inc_vlen(struct btf_type *t)
1839 {
1840 	t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t));
1841 }
1842 
1843 static int btf_commit_type(struct btf *btf, int data_sz)
1844 {
1845 	int err;
1846 
1847 	err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
1848 	if (err)
1849 		return libbpf_err(err);
1850 
1851 	btf->hdr->type_len += data_sz;
1852 	btf->hdr->str_off += data_sz;
1853 	btf->nr_types++;
1854 	return btf->start_id + btf->nr_types - 1;
1855 }
1856 
1857 struct btf_pipe {
1858 	const struct btf *src;
1859 	struct btf *dst;
1860 	struct hashmap *str_off_map; /* map string offsets from src to dst */
1861 };
1862 
1863 static int btf_rewrite_str(struct btf_pipe *p, __u32 *str_off)
1864 {
1865 	long mapped_off;
1866 	int off, err;
1867 
1868 	if (!*str_off) /* nothing to do for empty strings */
1869 		return 0;
1870 
1871 	if (p->str_off_map &&
1872 	    hashmap__find(p->str_off_map, *str_off, &mapped_off)) {
1873 		*str_off = mapped_off;
1874 		return 0;
1875 	}
1876 
1877 	off = btf__add_str(p->dst, btf__str_by_offset(p->src, *str_off));
1878 	if (off < 0)
1879 		return off;
1880 
1881 	/* Remember string mapping from src to dst.  It avoids
1882 	 * performing expensive string comparisons.
1883 	 */
1884 	if (p->str_off_map) {
1885 		err = hashmap__append(p->str_off_map, *str_off, off);
1886 		if (err)
1887 			return err;
1888 	}
1889 
1890 	*str_off = off;
1891 	return 0;
1892 }
1893 
1894 static int btf_add_type(struct btf_pipe *p, const struct btf_type *src_type)
1895 {
1896 	struct btf_field_iter it;
1897 	struct btf_type *t;
1898 	__u32 *str_off;
1899 	int sz, err;
1900 
1901 	sz = btf_type_size(src_type);
1902 	if (sz < 0)
1903 		return libbpf_err(sz);
1904 
1905 	/* deconstruct BTF, if necessary, and invalidate raw_data */
1906 	if (btf_ensure_modifiable(p->dst))
1907 		return libbpf_err(-ENOMEM);
1908 
1909 	t = btf_add_type_mem(p->dst, sz);
1910 	if (!t)
1911 		return libbpf_err(-ENOMEM);
1912 
1913 	memcpy(t, src_type, sz);
1914 
1915 	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
1916 	if (err)
1917 		return libbpf_err(err);
1918 
1919 	while ((str_off = btf_field_iter_next(&it))) {
1920 		err = btf_rewrite_str(p, str_off);
1921 		if (err)
1922 			return libbpf_err(err);
1923 	}
1924 
1925 	return btf_commit_type(p->dst, sz);
1926 }
1927 
1928 int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
1929 {
1930 	struct btf_pipe p = { .src = src_btf, .dst = btf };
1931 
1932 	return btf_add_type(&p, src_type);
1933 }
1934 
1935 static size_t btf_dedup_identity_hash_fn(long key, void *ctx);
1936 static bool btf_dedup_equal_fn(long k1, long k2, void *ctx);
1937 
1938 int btf__add_btf(struct btf *btf, const struct btf *src_btf)
1939 {
1940 	struct btf_pipe p = { .src = src_btf, .dst = btf };
1941 	int data_sz, sz, cnt, i, err, old_strs_len;
1942 	__u32 *off;
1943 	void *t;
1944 
1945 	/* appending split BTF isn't supported yet */
1946 	if (src_btf->base_btf)
1947 		return libbpf_err(-ENOTSUP);
1948 
1949 	/* deconstruct BTF, if necessary, and invalidate raw_data */
1950 	if (btf_ensure_modifiable(btf))
1951 		return libbpf_err(-ENOMEM);
1952 
1953 	/* remember original strings section size if we have to roll back
1954 	 * partial strings section changes
1955 	 */
1956 	old_strs_len = btf->hdr->str_len;
1957 
1958 	data_sz = src_btf->hdr->type_len;
1959 	cnt = btf__type_cnt(src_btf) - 1;
1960 
1961 	/* pre-allocate enough memory for new types */
1962 	t = btf_add_type_mem(btf, data_sz);
1963 	if (!t)
1964 		return libbpf_err(-ENOMEM);
1965 
1966 	/* pre-allocate enough memory for type offset index for new types */
1967 	off = btf_add_type_offs_mem(btf, cnt);
1968 	if (!off)
1969 		return libbpf_err(-ENOMEM);
1970 
1971 	/* Map the string offsets from src_btf to the offsets from btf to improve performance */
1972 	p.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
1973 	if (IS_ERR(p.str_off_map))
1974 		return libbpf_err(-ENOMEM);
1975 
1976 	/* bulk copy types data for all types from src_btf */
1977 	memcpy(t, src_btf->types_data, data_sz);
1978 
1979 	for (i = 0; i < cnt; i++) {
1980 		struct btf_field_iter it;
1981 		__u32 *type_id, *str_off;
1982 
1983 		sz = btf_type_size(t);
1984 		if (sz < 0) {
1985 			/* unlikely, has to be corrupted src_btf */
1986 			err = sz;
1987 			goto err_out;
1988 		}
1989 
1990 		/* fill out type ID to type offset mapping for lookups by type ID */
1991 		*off = t - btf->types_data;
1992 
1993 		/* add, dedup, and remap strings referenced by this BTF type */
1994 		err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
1995 		if (err)
1996 			goto err_out;
1997 		while ((str_off = btf_field_iter_next(&it))) {
1998 			err = btf_rewrite_str(&p, str_off);
1999 			if (err)
2000 				goto err_out;
2001 		}
2002 
2003 		/* remap all type IDs referenced from this BTF type */
2004 		err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
2005 		if (err)
2006 			goto err_out;
2007 
2008 		while ((type_id = btf_field_iter_next(&it))) {
2009 			if (!*type_id) /* nothing to do for VOID references */
2010 				continue;
2011 
2012 			/* we haven't updated btf's type count yet, so
2013 			 * btf->start_id + btf->nr_types - 1 is the type ID offset we should
2014 			 * add to all newly added BTF types
2015 			 */
2016 			*type_id += btf->start_id + btf->nr_types - 1;
2017 		}
2018 
2019 		/* go to next type data and type offset index entry */
2020 		t += sz;
2021 		off++;
2022 	}
2023 
2024 	/* Up until now any of the copied type data was effectively invisible,
2025 	 * so if we exited early before this point due to error, BTF would be
2026 	 * effectively unmodified. There would be extra internal memory
2027 	 * pre-allocated, but it would not be available for querying.  But now
2028 	 * that we've copied and rewritten all the data successfully, we can
2029 	 * update type count and various internal offsets and sizes to
2030 	 * "commit" the changes and make them visible to the outside world.
2031 	 */
2032 	btf->hdr->type_len += data_sz;
2033 	btf->hdr->str_off += data_sz;
2034 	btf->nr_types += cnt;
2035 
2036 	hashmap__free(p.str_off_map);
2037 
2038 	/* return type ID of the first added BTF type */
2039 	return btf->start_id + btf->nr_types - cnt;
2040 err_out:
2041 	/* zero out preallocated memory as if it was just allocated with
2042 	 * libbpf_add_mem()
2043 	 */
2044 	memset(btf->types_data + btf->hdr->type_len, 0, data_sz);
2045 	memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len);
2046 
2047 	/* and now restore original strings section size; types data size
2048 	 * wasn't modified, so doesn't need restoring, see big comment above
2049 	 */
2050 	btf->hdr->str_len = old_strs_len;
2051 
2052 	hashmap__free(p.str_off_map);
2053 
2054 	return libbpf_err(err);
2055 }
2056 
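/* Usage sketch (illustrative): merge all types from *src_btf* into *btf*
 * and derive the ID remapping, per the offset logic above. A type that
 * had ID X (X >= 1) in src_btf lives at ID (first_id + X - 1) in btf
 * afterwards. Names are hypothetical placeholders; error handling is
 * elided.
 *
 *	int first_id = btf__add_btf(btf, src_btf);
 *
 *	if (first_id < 0)
 *		return first_id;
 */
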
2057 /*
2058  * Append new BTF_KIND_INT type with:
2059  *   - *name* - non-empty, non-NULL type name;
2060  *   - *byte_sz* - power-of-2 size of the type (1, 2, 4, 8, or 16), in bytes;
2061  *   - encoding is a combination of BTF_INT_SIGNED, BTF_INT_CHAR, BTF_INT_BOOL.
2062  * Returns:
2063  *   - >0, type ID of newly added BTF type;
2064  *   - <0, on error.
2065  */
2066 int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding)
2067 {
2068 	struct btf_type *t;
2069 	int sz, name_off;
2070 
2071 	/* non-empty name */
2072 	if (!name || !name[0])
2073 		return libbpf_err(-EINVAL);
2074 	/* byte_sz must be power of 2 */
2075 	if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16)
2076 		return libbpf_err(-EINVAL);
2077 	if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL))
2078 		return libbpf_err(-EINVAL);
2079 
2080 	/* deconstruct BTF, if necessary, and invalidate raw_data */
2081 	if (btf_ensure_modifiable(btf))
2082 		return libbpf_err(-ENOMEM);
2083 
2084 	sz = sizeof(struct btf_type) + sizeof(int);
2085 	t = btf_add_type_mem(btf, sz);
2086 	if (!t)
2087 		return libbpf_err(-ENOMEM);
2088 
2089 	/* if something goes wrong later, we might end up with an extra string,
2090 	 * but that shouldn't be a problem, because BTF can't be constructed
2091 	 * completely anyway and will most probably just be discarded
2092 	 */
2093 	name_off = btf__add_str(btf, name);
2094 	if (name_off < 0)
2095 		return name_off;
2096 
2097 	t->name_off = name_off;
2098 	t->info = btf_type_info(BTF_KIND_INT, 0, 0);
2099 	t->size = byte_sz;
2100 	/* set INT info, we don't allow setting legacy bit offset/size */
2101 	*(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8);
2102 
2103 	return btf_commit_type(btf, sz);
2104 }
2105 
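/* Usage sketch (illustrative): append common integer types to a freshly
 * created BTF object; error handling is elided.
 *
 *	struct btf *btf = btf__new_empty();
 *	int char_id, int_id;
 *
 *	char_id = btf__add_int(btf, "char", 1, BTF_INT_CHAR | BTF_INT_SIGNED);
 *	int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
 *	...
 *	btf__free(btf);
 */
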
2106 /*
2107  * Append new BTF_KIND_FLOAT type with:
2108  *   - *name* - non-empty, non-NULL type name;
2109  *   - *byte_sz* - size of the type, in bytes (2, 4, 8, 12, or 16);
2110  * Returns:
2111  *   - >0, type ID of newly added BTF type;
2112  *   - <0, on error.
2113  */
2114 int btf__add_float(struct btf *btf, const char *name, size_t byte_sz)
2115 {
2116 	struct btf_type *t;
2117 	int sz, name_off;
2118 
2119 	/* non-empty name */
2120 	if (!name || !name[0])
2121 		return libbpf_err(-EINVAL);
2122 
2123 	/* byte_sz must be one of the explicitly allowed values */
2124 	if (byte_sz != 2 && byte_sz != 4 && byte_sz != 8 && byte_sz != 12 &&
2125 	    byte_sz != 16)
2126 		return libbpf_err(-EINVAL);
2127 
2128 	if (btf_ensure_modifiable(btf))
2129 		return libbpf_err(-ENOMEM);
2130 
2131 	sz = sizeof(struct btf_type);
2132 	t = btf_add_type_mem(btf, sz);
2133 	if (!t)
2134 		return libbpf_err(-ENOMEM);
2135 
2136 	name_off = btf__add_str(btf, name);
2137 	if (name_off < 0)
2138 		return name_off;
2139 
2140 	t->name_off = name_off;
2141 	t->info = btf_type_info(BTF_KIND_FLOAT, 0, 0);
2142 	t->size = byte_sz;
2143 
2144 	return btf_commit_type(btf, sz);
2145 }
2146 
2147 /* it's completely legal to append BTF types with type IDs pointing forward to
2148  * types that haven't been appended yet, so we only make sure that the ID
2149  * looks sane; we can't guarantee that the ID will always be valid
2150  */
2151 static int validate_type_id(int id)
2152 {
2153 	if (id < 0 || id > BTF_MAX_NR_TYPES)
2154 		return -EINVAL;
2155 	return 0;
2156 }
2157 
2158 /* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT, FWD, FUNC, and TYPE_TAG */
2159 static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id, int kflag)
2160 {
2161 	struct btf_type *t;
2162 	int sz, name_off = 0;
2163 
2164 	if (validate_type_id(ref_type_id))
2165 		return libbpf_err(-EINVAL);
2166 
2167 	if (btf_ensure_modifiable(btf))
2168 		return libbpf_err(-ENOMEM);
2169 
2170 	sz = sizeof(struct btf_type);
2171 	t = btf_add_type_mem(btf, sz);
2172 	if (!t)
2173 		return libbpf_err(-ENOMEM);
2174 
2175 	if (name && name[0]) {
2176 		name_off = btf__add_str(btf, name);
2177 		if (name_off < 0)
2178 			return name_off;
2179 	}
2180 
2181 	t->name_off = name_off;
2182 	t->info = btf_type_info(kind, 0, kflag);
2183 	t->type = ref_type_id;
2184 
2185 	return btf_commit_type(btf, sz);
2186 }
2187 
2188 /*
2189  * Append new BTF_KIND_PTR type with:
2190  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2191  * Returns:
2192  *   - >0, type ID of newly added BTF type;
2193  *   - <0, on error.
2194  */
2195 int btf__add_ptr(struct btf *btf, int ref_type_id)
2196 {
2197 	return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id, 0);
2198 }
2199 
2200 /*
2201  * Append new BTF_KIND_ARRAY type with:
2202  *   - *index_type_id* - type ID of the type describing array index;
2203  *   - *elem_type_id* - type ID of the type describing array element;
2204  *   - *nr_elems* - number of elements in the array;
2205  * Returns:
2206  *   - >0, type ID of newly added BTF type;
2207  *   - <0, on error.
2208  */
2209 int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems)
2210 {
2211 	struct btf_type *t;
2212 	struct btf_array *a;
2213 	int sz;
2214 
2215 	if (validate_type_id(index_type_id) || validate_type_id(elem_type_id))
2216 		return libbpf_err(-EINVAL);
2217 
2218 	if (btf_ensure_modifiable(btf))
2219 		return libbpf_err(-ENOMEM);
2220 
2221 	sz = sizeof(struct btf_type) + sizeof(struct btf_array);
2222 	t = btf_add_type_mem(btf, sz);
2223 	if (!t)
2224 		return libbpf_err(-ENOMEM);
2225 
2226 	t->name_off = 0;
2227 	t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0);
2228 	t->size = 0;
2229 
2230 	a = btf_array(t);
2231 	a->type = elem_type_id;
2232 	a->index_type = index_type_id;
2233 	a->nelems = nr_elems;
2234 
2235 	return btf_commit_type(btf, sz);
2236 }
2237 
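/* Usage sketch (illustrative): describe "int arr[16]", assuming *int_id*
 * is the type ID of a previously added "int" type, which doubles as the
 * array index type here; error handling is elided.
 *
 *	int arr_id = btf__add_array(btf, int_id, int_id, 16);
 */
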
2238 /* generic STRUCT/UNION append function */
2239 static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz)
2240 {
2241 	struct btf_type *t;
2242 	int sz, name_off = 0;
2243 
2244 	if (btf_ensure_modifiable(btf))
2245 		return libbpf_err(-ENOMEM);
2246 
2247 	sz = sizeof(struct btf_type);
2248 	t = btf_add_type_mem(btf, sz);
2249 	if (!t)
2250 		return libbpf_err(-ENOMEM);
2251 
2252 	if (name && name[0]) {
2253 		name_off = btf__add_str(btf, name);
2254 		if (name_off < 0)
2255 			return name_off;
2256 	}
2257 
2258 	/* start out with vlen=0 and no kflag; these will be adjusted as
2259 	 * members are added
2260 	 */
2261 	t->name_off = name_off;
2262 	t->info = btf_type_info(kind, 0, 0);
2263 	t->size = bytes_sz;
2264 
2265 	return btf_commit_type(btf, sz);
2266 }
2267 
2268 /*
2269  * Append new BTF_KIND_STRUCT type with:
2270  *   - *name* - name of the struct, can be NULL or empty for anonymous structs;
2271  *   - *byte_sz* - size of the struct, in bytes;
2272  *
2273  * Struct initially has no fields in it. Fields can be added by
2274  * btf__add_field() right after btf__add_struct() succeeds.
2275  *
2276  * Returns:
2277  *   - >0, type ID of newly added BTF type;
2278  *   - <0, on error.
2279  */
2280 int btf__add_struct(struct btf *btf, const char *name, __u32 byte_sz)
2281 {
2282 	return btf_add_composite(btf, BTF_KIND_STRUCT, name, byte_sz);
2283 }
2284 
2285 /*
2286  * Append new BTF_KIND_UNION type with:
2287  *   - *name* - name of the union, can be NULL or empty for anonymous union;
2288  *   - *byte_sz* - size of the union, in bytes;
2289  *
2290  * Union initially has no fields in it. Fields can be added by
2291  * btf__add_field() right after btf__add_union() succeeds. All fields
2292  * should have *bit_offset* of 0.
2293  *
2294  * Returns:
2295  *   - >0, type ID of newly added BTF type;
2296  *   - <0, on error.
2297  */
2298 int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz)
2299 {
2300 	return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz);
2301 }
2302 
2303 static struct btf_type *btf_last_type(struct btf *btf)
2304 {
2305 	return btf_type_by_id(btf, btf__type_cnt(btf) - 1);
2306 }
2307 
2308 /*
2309  * Append new field for the current STRUCT/UNION type with:
2310  *   - *name* - name of the field, can be NULL or empty for anonymous field;
2311  *   - *type_id* - type ID for the type describing field type;
2312  *   - *bit_offset* - bit offset of the start of the field within struct/union;
2313  *   - *bit_size* - bit size of a bitfield, 0 for non-bitfield fields;
2314  * Returns:
2315  *   -  0, on success;
2316  *   - <0, on error.
2317  */
2318 int btf__add_field(struct btf *btf, const char *name, int type_id,
2319 		   __u32 bit_offset, __u32 bit_size)
2320 {
2321 	struct btf_type *t;
2322 	struct btf_member *m;
2323 	bool is_bitfield;
2324 	int sz, name_off = 0;
2325 
2326 	/* last type should be union/struct */
2327 	if (btf->nr_types == 0)
2328 		return libbpf_err(-EINVAL);
2329 	t = btf_last_type(btf);
2330 	if (!btf_is_composite(t))
2331 		return libbpf_err(-EINVAL);
2332 
2333 	if (validate_type_id(type_id))
2334 		return libbpf_err(-EINVAL);
2335 	/* best-effort bit field offset/size enforcement */
2336 	is_bitfield = bit_size || (bit_offset % 8 != 0);
2337 	if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff))
2338 		return libbpf_err(-EINVAL);
2339 
2340 	/* only offset 0 is allowed for unions */
2341 	if (btf_is_union(t) && bit_offset)
2342 		return libbpf_err(-EINVAL);
2343 
2344 	/* decompose and invalidate raw data */
2345 	if (btf_ensure_modifiable(btf))
2346 		return libbpf_err(-ENOMEM);
2347 
2348 	sz = sizeof(struct btf_member);
2349 	m = btf_add_type_mem(btf, sz);
2350 	if (!m)
2351 		return libbpf_err(-ENOMEM);
2352 
2353 	if (name && name[0]) {
2354 		name_off = btf__add_str(btf, name);
2355 		if (name_off < 0)
2356 			return name_off;
2357 	}
2358 
2359 	m->name_off = name_off;
2360 	m->type = type_id;
2361 	m->offset = bit_offset | (bit_size << 24);
2362 
2363 	/* btf_add_type_mem can invalidate t pointer */
2364 	t = btf_last_type(btf);
2365 	/* update parent type's vlen and kflag */
2366 	t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t));
2367 
2368 	btf->hdr->type_len += sz;
2369 	btf->hdr->str_off += sz;
2370 	return 0;
2371 }
2372 
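/* Usage sketch (illustrative): build "struct pair { int x; int y; }",
 * assuming *int_id* is the type ID of a 4-byte "int". Field offsets are
 * in bits; error handling is elided.
 *
 *	int struct_id = btf__add_struct(btf, "pair", 8);
 *
 *	btf__add_field(btf, "x", int_id, 0, 0);
 *	btf__add_field(btf, "y", int_id, 32, 0);
 */
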
2373 static int btf_add_enum_common(struct btf *btf, const char *name, __u32 byte_sz,
2374 			       bool is_signed, __u8 kind)
2375 {
2376 	struct btf_type *t;
2377 	int sz, name_off = 0;
2378 
2379 	/* byte_sz must be power of 2 */
2380 	if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8)
2381 		return libbpf_err(-EINVAL);
2382 
2383 	if (btf_ensure_modifiable(btf))
2384 		return libbpf_err(-ENOMEM);
2385 
2386 	sz = sizeof(struct btf_type);
2387 	t = btf_add_type_mem(btf, sz);
2388 	if (!t)
2389 		return libbpf_err(-ENOMEM);
2390 
2391 	if (name && name[0]) {
2392 		name_off = btf__add_str(btf, name);
2393 		if (name_off < 0)
2394 			return name_off;
2395 	}
2396 
2397 	/* start out with vlen=0; it will be adjusted when adding enum values */
2398 	t->name_off = name_off;
2399 	t->info = btf_type_info(kind, 0, is_signed);
2400 	t->size = byte_sz;
2401 
2402 	return btf_commit_type(btf, sz);
2403 }
2404 
2405 /*
2406  * Append new BTF_KIND_ENUM type with:
2407  *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
2408  *   - *byte_sz* - size of the enum, in bytes.
2409  *
2410  * Enum initially has no enum values in it (and corresponds to enum forward
2411  * declaration). Enumerator values can be added by btf__add_enum_value()
2412  * immediately after btf__add_enum() succeeds.
2413  *
2414  * Returns:
2415  *   - >0, type ID of newly added BTF type;
2416  *   - <0, on error.
2417  */
2418 int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
2419 {
2420 	/*
2421 	 * set the signedness to unsigned; it will be changed to signed
2422 	 * if any enumerator added later has a negative value.
2423 	 */
2424 	return btf_add_enum_common(btf, name, byte_sz, false, BTF_KIND_ENUM);
2425 }
2426 
2427 /*
2428  * Append new enum value for the current ENUM type with:
2429  *   - *name* - name of the enumerator value, can't be NULL or empty;
2430  *   - *value* - integer value corresponding to enum value *name*;
2431  * Returns:
2432  *   -  0, on success;
2433  *   - <0, on error.
2434  */
2435 int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
2436 {
2437 	struct btf_type *t;
2438 	struct btf_enum *v;
2439 	int sz, name_off;
2440 
2441 	/* last type should be BTF_KIND_ENUM */
2442 	if (btf->nr_types == 0)
2443 		return libbpf_err(-EINVAL);
2444 	t = btf_last_type(btf);
2445 	if (!btf_is_enum(t))
2446 		return libbpf_err(-EINVAL);
2447 
2448 	/* non-empty name */
2449 	if (!name || !name[0])
2450 		return libbpf_err(-EINVAL);
2451 	if (value < INT_MIN || value > UINT_MAX)
2452 		return libbpf_err(-E2BIG);
2453 
2454 	/* decompose and invalidate raw data */
2455 	if (btf_ensure_modifiable(btf))
2456 		return libbpf_err(-ENOMEM);
2457 
2458 	sz = sizeof(struct btf_enum);
2459 	v = btf_add_type_mem(btf, sz);
2460 	if (!v)
2461 		return libbpf_err(-ENOMEM);
2462 
2463 	name_off = btf__add_str(btf, name);
2464 	if (name_off < 0)
2465 		return name_off;
2466 
2467 	v->name_off = name_off;
2468 	v->val = value;
2469 
2470 	/* update parent type's vlen */
2471 	t = btf_last_type(btf);
2472 	btf_type_inc_vlen(t);
2473 
2474 	/* if negative value, set signedness to signed */
2475 	if (value < 0)
2476 		t->info = btf_type_info(btf_kind(t), btf_vlen(t), true);
2477 
2478 	btf->hdr->type_len += sz;
2479 	btf->hdr->str_off += sz;
2480 	return 0;
2481 }
2482 
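/* Usage sketch (illustrative): build a 4-byte "enum state { S_OFF, S_ON }";
 * signedness flips to signed automatically if a negative value is added
 * later. Error handling is elided.
 *
 *	int enum_id = btf__add_enum(btf, "state", 4);
 *
 *	btf__add_enum_value(btf, "S_OFF", 0);
 *	btf__add_enum_value(btf, "S_ON", 1);
 */
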
2483 /*
2484  * Append new BTF_KIND_ENUM64 type with:
2485  *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
2486  *   - *byte_sz* - size of the enum, in bytes.
2487  *   - *is_signed* - whether the enum values are signed or not;
2488  *
2489  * Enum initially has no enum values in it (and corresponds to enum forward
2490  * declaration). Enumerator values can be added by btf__add_enum64_value()
2491  * immediately after btf__add_enum64() succeeds.
2492  *
2493  * Returns:
2494  *   - >0, type ID of newly added BTF type;
2495  *   - <0, on error.
2496  */
2497 int btf__add_enum64(struct btf *btf, const char *name, __u32 byte_sz,
2498 		    bool is_signed)
2499 {
2500 	return btf_add_enum_common(btf, name, byte_sz, is_signed,
2501 				   BTF_KIND_ENUM64);
2502 }
2503 
2504 /*
2505  * Append new enum value for the current ENUM64 type with:
2506  *   - *name* - name of the enumerator value, can't be NULL or empty;
2507  *   - *value* - integer value corresponding to enum value *name*;
2508  * Returns:
2509  *   -  0, on success;
2510  *   - <0, on error.
2511  */
2512 int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value)
2513 {
2514 	struct btf_enum64 *v;
2515 	struct btf_type *t;
2516 	int sz, name_off;
2517 
2518 	/* last type should be BTF_KIND_ENUM64 */
2519 	if (btf->nr_types == 0)
2520 		return libbpf_err(-EINVAL);
2521 	t = btf_last_type(btf);
2522 	if (!btf_is_enum64(t))
2523 		return libbpf_err(-EINVAL);
2524 
2525 	/* non-empty name */
2526 	if (!name || !name[0])
2527 		return libbpf_err(-EINVAL);
2528 
2529 	/* decompose and invalidate raw data */
2530 	if (btf_ensure_modifiable(btf))
2531 		return libbpf_err(-ENOMEM);
2532 
2533 	sz = sizeof(struct btf_enum64);
2534 	v = btf_add_type_mem(btf, sz);
2535 	if (!v)
2536 		return libbpf_err(-ENOMEM);
2537 
2538 	name_off = btf__add_str(btf, name);
2539 	if (name_off < 0)
2540 		return name_off;
2541 
2542 	v->name_off = name_off;
2543 	v->val_lo32 = (__u32)value;
2544 	v->val_hi32 = value >> 32;
2545 
2546 	/* update parent type's vlen */
2547 	t = btf_last_type(btf);
2548 	btf_type_inc_vlen(t);
2549 
2550 	btf->hdr->type_len += sz;
2551 	btf->hdr->str_off += sz;
2552 	return 0;
2553 }
2554 
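/* Usage sketch (illustrative): 64-bit enum holding a value that doesn't
 * fit into 32 bits; the enum is created unsigned up front. Error handling
 * is elided.
 *
 *	int enum64_id = btf__add_enum64(btf, "big", 8, false);
 *
 *	btf__add_enum64_value(btf, "BIG_VAL", 0x100000000ULL);
 */
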
2555 /*
2556  * Append new BTF_KIND_FWD type with:
2557  *   - *name*, non-empty/non-NULL name;
2558  *   - *fwd_kind*, kind of forward declaration, one of BTF_FWD_STRUCT,
2559  *     BTF_FWD_UNION, or BTF_FWD_ENUM;
2560  * Returns:
2561  *   - >0, type ID of newly added BTF type;
2562  *   - <0, on error.
2563  */
2564 int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind)
2565 {
2566 	if (!name || !name[0])
2567 		return libbpf_err(-EINVAL);
2568 
2569 	switch (fwd_kind) {
2570 	case BTF_FWD_STRUCT:
2571 	case BTF_FWD_UNION: {
2572 		struct btf_type *t;
2573 		int id;
2574 
2575 		id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0, 0);
2576 		if (id <= 0)
2577 			return id;
2578 		t = btf_type_by_id(btf, id);
2579 		t->info = btf_type_info(BTF_KIND_FWD, 0, fwd_kind == BTF_FWD_UNION);
2580 		return id;
2581 	}
2582 	case BTF_FWD_ENUM:
2583 		/* an enum forward declaration in BTF currently is just an enum
2584 		 * with no values; we also assume a standard 4-byte size for it
2585 		 */
2586 		return btf__add_enum(btf, name, sizeof(int));
2587 	default:
2588 		return libbpf_err(-EINVAL);
2589 	}
2590 }
2591 
2592 /*
2593  * Append new BTF_KIND_TYPEDEF type with:
2594  *   - *name*, non-empty/non-NULL name;
2595  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2596  * Returns:
2597  *   - >0, type ID of newly added BTF type;
2598  *   - <0, on error.
2599  */
2600 int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id)
2601 {
2602 	if (!name || !name[0])
2603 		return libbpf_err(-EINVAL);
2604 
2605 	return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id, 0);
2606 }
2607 
2608 /*
2609  * Append new BTF_KIND_VOLATILE type with:
2610  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2611  * Returns:
2612  *   - >0, type ID of newly added BTF type;
2613  *   - <0, on error.
2614  */
2615 int btf__add_volatile(struct btf *btf, int ref_type_id)
2616 {
2617 	return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id, 0);
2618 }
2619 
2620 /*
2621  * Append new BTF_KIND_CONST type with:
2622  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2623  * Returns:
2624  *   - >0, type ID of newly added BTF type;
2625  *   - <0, on error.
2626  */
2627 int btf__add_const(struct btf *btf, int ref_type_id)
2628 {
2629 	return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id, 0);
2630 }
2631 
2632 /*
2633  * Append new BTF_KIND_RESTRICT type with:
2634  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2635  * Returns:
2636  *   - >0, type ID of newly added BTF type;
2637  *   - <0, on error.
2638  */
2639 int btf__add_restrict(struct btf *btf, int ref_type_id)
2640 {
2641 	return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id, 0);
2642 }
2643 
2644 /*
2645  * Append new BTF_KIND_TYPE_TAG type with:
2646  *   - *value*, non-empty/non-NULL tag value;
2647  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2648  * Returns:
2649  *   - >0, type ID of newly added BTF type;
2650  *   - <0, on error.
2651  */
2652 int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
2653 {
2654 	if (!value || !value[0])
2655 		return libbpf_err(-EINVAL);
2656 
2657 	return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id, 0);
2658 }
2659 
2660 /*
2661  * Append new BTF_KIND_TYPE_TAG type with:
2662  *   - *value*, non-empty/non-NULL tag value;
2663  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2664  * Set info->kflag to 1, indicating this tag is an __attribute__
2665  * Returns:
2666  *   - >0, type ID of newly added BTF type;
2667  *   - <0, on error.
2668  */
2669 int btf__add_type_attr(struct btf *btf, const char *value, int ref_type_id)
2670 {
2671 	if (!value || !value[0])
2672 		return libbpf_err(-EINVAL);
2673 
2674 	return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id, 1);
2675 }
2676 
2677 /*
2678  * Append new BTF_KIND_FUNC type with:
2679  *   - *name*, non-empty/non-NULL name;
2680  *   - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet;
2681  * Returns:
2682  *   - >0, type ID of newly added BTF type;
2683  *   - <0, on error.
2684  */
2685 int btf__add_func(struct btf *btf, const char *name,
2686 		  enum btf_func_linkage linkage, int proto_type_id)
2687 {
2688 	int id;
2689 
2690 	if (!name || !name[0])
2691 		return libbpf_err(-EINVAL);
2692 	if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL &&
2693 	    linkage != BTF_FUNC_EXTERN)
2694 		return libbpf_err(-EINVAL);
2695 
2696 	id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id, 0);
2697 	if (id > 0) {
2698 		struct btf_type *t = btf_type_by_id(btf, id);
2699 
2700 		t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0);
2701 	}
2702 	return libbpf_err(id);
2703 }
2704 
2705 /*
2706  * Append new BTF_KIND_FUNC_PROTO with:
2707  *   - *ret_type_id* - type ID for return result of a function.
2708  *
2709  * Function prototype initially has no arguments, but they can be added by
2710  * btf__add_func_param() one by one, immediately after
2711  * btf__add_func_proto() succeeded.
2712  *
2713  * Returns:
2714  *   - >0, type ID of newly added BTF type;
2715  *   - <0, on error.
2716  */
2717 int btf__add_func_proto(struct btf *btf, int ret_type_id)
2718 {
2719 	struct btf_type *t;
2720 	int sz;
2721 
2722 	if (validate_type_id(ret_type_id))
2723 		return libbpf_err(-EINVAL);
2724 
2725 	if (btf_ensure_modifiable(btf))
2726 		return libbpf_err(-ENOMEM);
2727 
2728 	sz = sizeof(struct btf_type);
2729 	t = btf_add_type_mem(btf, sz);
2730 	if (!t)
2731 		return libbpf_err(-ENOMEM);
2732 
2733 	/* start out with vlen=0; this will be adjusted when adding function
2734 	 * parameters, if necessary
2735 	 */
2736 	t->name_off = 0;
2737 	t->info = btf_type_info(BTF_KIND_FUNC_PROTO, 0, 0);
2738 	t->type = ret_type_id;
2739 
2740 	return btf_commit_type(btf, sz);
2741 }
2742 
2743 /*
2744  * Append new function parameter for current FUNC_PROTO type with:
2745  *   - *name* - parameter name, can be NULL or empty;
2746  *   - *type_id* - type ID describing the type of the parameter.
2747  * Returns:
2748  *   -  0, on success;
2749  *   - <0, on error.
2750  */
2751 int btf__add_func_param(struct btf *btf, const char *name, int type_id)
2752 {
2753 	struct btf_type *t;
2754 	struct btf_param *p;
2755 	int sz, name_off = 0;
2756 
2757 	if (validate_type_id(type_id))
2758 		return libbpf_err(-EINVAL);
2759 
2760 	/* last type should be BTF_KIND_FUNC_PROTO */
2761 	if (btf->nr_types == 0)
2762 		return libbpf_err(-EINVAL);
2763 	t = btf_last_type(btf);
2764 	if (!btf_is_func_proto(t))
2765 		return libbpf_err(-EINVAL);
2766 
2767 	/* decompose and invalidate raw data */
2768 	if (btf_ensure_modifiable(btf))
2769 		return libbpf_err(-ENOMEM);
2770 
2771 	sz = sizeof(struct btf_param);
2772 	p = btf_add_type_mem(btf, sz);
2773 	if (!p)
2774 		return libbpf_err(-ENOMEM);
2775 
2776 	if (name && name[0]) {
2777 		name_off = btf__add_str(btf, name);
2778 		if (name_off < 0)
2779 			return name_off;
2780 	}
2781 
2782 	p->name_off = name_off;
2783 	p->type = type_id;
2784 
2785 	/* update parent type's vlen */
2786 	t = btf_last_type(btf);
2787 	btf_type_inc_vlen(t);
2788 
2789 	btf->hdr->type_len += sz;
2790 	btf->hdr->str_off += sz;
2791 	return 0;
2792 }
2793 
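/* Usage sketch (illustrative): describe "int sum(int a, int b)", assuming
 * *int_id* is the type ID of "int". Parameters must be appended right
 * after btf__add_func_proto(), before any other type; error handling is
 * elided.
 *
 *	int proto_id = btf__add_func_proto(btf, int_id);
 *
 *	btf__add_func_param(btf, "a", int_id);
 *	btf__add_func_param(btf, "b", int_id);
 *	btf__add_func(btf, "sum", BTF_FUNC_GLOBAL, proto_id);
 */
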
2794 /*
2795  * Append new BTF_KIND_VAR type with:
2796  *   - *name* - non-empty/non-NULL name;
2797  *   - *linkage* - variable linkage, one of BTF_VAR_STATIC,
2798  *     BTF_VAR_GLOBAL_ALLOCATED, or BTF_VAR_GLOBAL_EXTERN;
2799  *   - *type_id* - type ID of the type describing the type of the variable.
2800  * Returns:
2801  *   - >0, type ID of newly added BTF type;
2802  *   - <0, on error.
2803  */
2804 int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id)
2805 {
2806 	struct btf_type *t;
2807 	struct btf_var *v;
2808 	int sz, name_off;
2809 
2810 	/* non-empty name */
2811 	if (!name || !name[0])
2812 		return libbpf_err(-EINVAL);
2813 	if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED &&
2814 	    linkage != BTF_VAR_GLOBAL_EXTERN)
2815 		return libbpf_err(-EINVAL);
2816 	if (validate_type_id(type_id))
2817 		return libbpf_err(-EINVAL);
2818 
2819 	/* deconstruct BTF, if necessary, and invalidate raw_data */
2820 	if (btf_ensure_modifiable(btf))
2821 		return libbpf_err(-ENOMEM);
2822 
2823 	sz = sizeof(struct btf_type) + sizeof(struct btf_var);
2824 	t = btf_add_type_mem(btf, sz);
2825 	if (!t)
2826 		return libbpf_err(-ENOMEM);
2827 
2828 	name_off = btf__add_str(btf, name);
2829 	if (name_off < 0)
2830 		return name_off;
2831 
2832 	t->name_off = name_off;
2833 	t->info = btf_type_info(BTF_KIND_VAR, 0, 0);
2834 	t->type = type_id;
2835 
2836 	v = btf_var(t);
2837 	v->linkage = linkage;
2838 
2839 	return btf_commit_type(btf, sz);
2840 }
2841 
2842 /*
2843  * Append new BTF_KIND_DATASEC type with:
2844  *   - *name* - non-empty/non-NULL name;
2845  *   - *byte_sz* - data section size, in bytes.
2846  *
2847  * Data section is initially empty. Variables info can be added with
2848  * btf__add_datasec_var_info() calls, after btf__add_datasec() succeeds.
2849  *
2850  * Returns:
2851  *   - >0, type ID of newly added BTF type;
2852  *   - <0, on error.
2853  */
2854 int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz)
2855 {
2856 	struct btf_type *t;
2857 	int sz, name_off;
2858 
2859 	/* non-empty name */
2860 	if (!name || !name[0])
2861 		return libbpf_err(-EINVAL);
2862 
2863 	if (btf_ensure_modifiable(btf))
2864 		return libbpf_err(-ENOMEM);
2865 
2866 	sz = sizeof(struct btf_type);
2867 	t = btf_add_type_mem(btf, sz);
2868 	if (!t)
2869 		return libbpf_err(-ENOMEM);
2870 
2871 	name_off = btf__add_str(btf, name);
2872 	if (name_off < 0)
2873 		return name_off;
2874 
2875 	/* start with vlen=0, which will be updated as var_secinfos are added */
2876 	t->name_off = name_off;
2877 	t->info = btf_type_info(BTF_KIND_DATASEC, 0, 0);
2878 	t->size = byte_sz;
2879 
2880 	return btf_commit_type(btf, sz);
2881 }
2882 
2883 /*
2884  * Append new data section variable information entry for current DATASEC type:
2885  *   - *var_type_id* - type ID, describing type of the variable;
2886  *   - *offset* - variable offset within data section, in bytes;
2887  *   - *byte_sz* - variable size, in bytes.
2888  *
2889  * Returns:
2890  *   -  0, on success;
2891  *   - <0, on error.
2892  */
2893 int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz)
2894 {
2895 	struct btf_type *t;
2896 	struct btf_var_secinfo *v;
2897 	int sz;
2898 
2899 	/* last type should be BTF_KIND_DATASEC */
2900 	if (btf->nr_types == 0)
2901 		return libbpf_err(-EINVAL);
2902 	t = btf_last_type(btf);
2903 	if (!btf_is_datasec(t))
2904 		return libbpf_err(-EINVAL);
2905 
2906 	if (validate_type_id(var_type_id))
2907 		return libbpf_err(-EINVAL);
2908 
2909 	/* decompose and invalidate raw data */
2910 	if (btf_ensure_modifiable(btf))
2911 		return libbpf_err(-ENOMEM);
2912 
2913 	sz = sizeof(struct btf_var_secinfo);
2914 	v = btf_add_type_mem(btf, sz);
2915 	if (!v)
2916 		return libbpf_err(-ENOMEM);
2917 
2918 	v->type = var_type_id;
2919 	v->offset = offset;
2920 	v->size = byte_sz;
2921 
2922 	/* update parent type's vlen */
2923 	t = btf_last_type(btf);
2924 	btf_type_inc_vlen(t);
2925 
2926 	btf->hdr->type_len += sz;
2927 	btf->hdr->str_off += sz;
2928 	return 0;
2929 }
2930 
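/* Usage sketch (illustrative): describe one 4-byte global variable placed
 * at offset 0 of a 4-byte ".data" section, assuming *int_id* is the type
 * ID of "int"; error handling is elided.
 *
 *	int var_id = btf__add_var(btf, "cnt", BTF_VAR_GLOBAL_ALLOCATED, int_id);
 *	int sec_id = btf__add_datasec(btf, ".data", 4);
 *
 *	btf__add_datasec_var_info(btf, var_id, 0, 4);
 */
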
2931 static int btf_add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
2932 			    int component_idx, int kflag)
2933 {
2934 	struct btf_type *t;
2935 	int sz, value_off;
2936 
2937 	if (!value || !value[0] || component_idx < -1)
2938 		return libbpf_err(-EINVAL);
2939 
2940 	if (validate_type_id(ref_type_id))
2941 		return libbpf_err(-EINVAL);
2942 
2943 	if (btf_ensure_modifiable(btf))
2944 		return libbpf_err(-ENOMEM);
2945 
2946 	sz = sizeof(struct btf_type) + sizeof(struct btf_decl_tag);
2947 	t = btf_add_type_mem(btf, sz);
2948 	if (!t)
2949 		return libbpf_err(-ENOMEM);
2950 
2951 	value_off = btf__add_str(btf, value);
2952 	if (value_off < 0)
2953 		return value_off;
2954 
2955 	t->name_off = value_off;
2956 	t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, kflag);
2957 	t->type = ref_type_id;
2958 	btf_decl_tag(t)->component_idx = component_idx;
2959 
2960 	return btf_commit_type(btf, sz);
2961 }
2962 
2963 /*
2964  * Append new BTF_KIND_DECL_TAG type with:
2965  *   - *value* - non-empty/non-NULL string;
2966  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2967  *   - *component_idx* - -1 for tagging reference type, otherwise struct/union
2968  *     member or function argument index;
2969  * Returns:
2970  *   - >0, type ID of newly added BTF type;
2971  *   - <0, on error.
2972  */
2973 int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
2974 		      int component_idx)
2975 {
2976 	return btf_add_decl_tag(btf, value, ref_type_id, component_idx, 0);
2977 }
2978 
2979 /*
2980  * Append new BTF_KIND_DECL_TAG type with:
2981  *   - *value* - non-empty/non-NULL string;
2982  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2983  *   - *component_idx* - -1 for tagging reference type, otherwise struct/union
2984  *     member or function argument index;
2985  * Set info->kflag to 1, indicating this tag is an __attribute__
2986  * Returns:
2987  *   - >0, type ID of newly added BTF type;
2988  *   - <0, on error.
2989  */
2990 int btf__add_decl_attr(struct btf *btf, const char *value, int ref_type_id,
2991 		       int component_idx)
2992 {
2993 	return btf_add_decl_tag(btf, value, ref_type_id, component_idx, 1);
2994 }
2995 
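/* Usage sketch (illustrative): tag the second member (component_idx 1) of
 * a struct with type ID *struct_id*; passing -1 instead would tag the
 * struct itself. Error handling is elided.
 *
 *	btf__add_decl_tag(btf, "user", struct_id, 1);
 */
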
2996 struct btf_ext_sec_info_param {
2997 	__u32 off;
2998 	__u32 len;
2999 	__u32 min_rec_size;
3000 	struct btf_ext_info *ext_info;
3001 	const char *desc;
3002 };
3003 
3004 /*
3005  * Parse a single info subsection of the BTF.ext info data:
3006  *  - validate subsection structure and elements
3007  *  - save info subsection start and sizing details in struct btf_ext
3008  *  - endian-independent operation, for calling before byte-swapping
3009  */
3010 static int btf_ext_parse_sec_info(struct btf_ext *btf_ext,
3011 				  struct btf_ext_sec_info_param *ext_sec,
3012 				  bool is_native)
3013 {
3014 	const struct btf_ext_info_sec *sinfo;
3015 	struct btf_ext_info *ext_info;
3016 	__u32 info_left, record_size;
3017 	size_t sec_cnt = 0;
3018 	void *info;
3019 
3020 	if (ext_sec->len == 0)
3021 		return 0;
3022 
3023 	if (ext_sec->off & 0x03) {
3024 		pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
3025 		     ext_sec->desc);
3026 		return -EINVAL;
3027 	}
3028 
3029 	/* The start of the info sec (including the __u32 record_size). */
3030 	info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
3031 	info_left = ext_sec->len;
3032 
3033 	if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
3034 		pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
3035 			 ext_sec->desc, ext_sec->off, ext_sec->len);
3036 		return -EINVAL;
3037 	}
3038 
3039 	/* At least a record size */
3040 	if (info_left < sizeof(__u32)) {
3041 		pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
3042 		return -EINVAL;
3043 	}
3044 
3045 	/* The record size needs to be at least the expected minimum or, when
3046 	 * handling non-native endianness data, to match that minimum exactly
3047 	 * so that records can be byte-swapped safely.
3048 	 */
3049 	record_size = is_native ? *(__u32 *)info : bswap_32(*(__u32 *)info);
3050 	if (record_size < ext_sec->min_rec_size ||
3051 	    (!is_native && record_size != ext_sec->min_rec_size) ||
3052 	    record_size & 0x03) {
3053 		pr_debug("%s section in .BTF.ext has invalid record size %u\n",
3054 			 ext_sec->desc, record_size);
3055 		return -EINVAL;
3056 	}
3057 
3058 	sinfo = info + sizeof(__u32);
3059 	info_left -= sizeof(__u32);
3060 
3061 	/* If no records, return failure now so .BTF.ext won't be used. */
3062 	if (!info_left) {
3063 		pr_debug("%s section in .BTF.ext has no records\n", ext_sec->desc);
3064 		return -EINVAL;
3065 	}
3066 
3067 	while (info_left) {
3068 		unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
3069 		__u64 total_record_size;
3070 		__u32 num_records;
3071 
3072 		if (info_left < sec_hdrlen) {
3073 			pr_debug("%s section header is not found in .BTF.ext\n",
3074 			     ext_sec->desc);
3075 			return -EINVAL;
3076 		}
3077 
3078 		num_records = is_native ? sinfo->num_info : bswap_32(sinfo->num_info);
3079 		if (num_records == 0) {
3080 			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
3081 			     ext_sec->desc);
3082 			return -EINVAL;
3083 		}
3084 
3085 		total_record_size = sec_hdrlen + (__u64)num_records * record_size;
3086 		if (info_left < total_record_size) {
3087 			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
3088 			     ext_sec->desc);
3089 			return -EINVAL;
3090 		}
3091 
3092 		info_left -= total_record_size;
3093 		sinfo = (void *)sinfo + total_record_size;
3094 		sec_cnt++;
3095 	}
3096 
3097 	ext_info = ext_sec->ext_info;
3098 	ext_info->len = ext_sec->len - sizeof(__u32);
3099 	ext_info->rec_size = record_size;
3100 	ext_info->info = info + sizeof(__u32);
3101 	ext_info->sec_cnt = sec_cnt;
3102 
3103 	return 0;
3104 }
3105 
3106 /* Parse all info secs in the BTF.ext info data */
3107 static int btf_ext_parse_info(struct btf_ext *btf_ext, bool is_native)
3108 {
3109 	struct btf_ext_sec_info_param func_info = {
3110 		.off = btf_ext->hdr->func_info_off,
3111 		.len = btf_ext->hdr->func_info_len,
3112 		.min_rec_size = sizeof(struct bpf_func_info_min),
3113 		.ext_info = &btf_ext->func_info,
3114 		.desc = "func_info"
3115 	};
3116 	struct btf_ext_sec_info_param line_info = {
3117 		.off = btf_ext->hdr->line_info_off,
3118 		.len = btf_ext->hdr->line_info_len,
3119 		.min_rec_size = sizeof(struct bpf_line_info_min),
3120 		.ext_info = &btf_ext->line_info,
3121 		.desc = "line_info",
3122 	};
3123 	struct btf_ext_sec_info_param core_relo = {
3124 		.min_rec_size = sizeof(struct bpf_core_relo),
3125 		.ext_info = &btf_ext->core_relo_info,
3126 		.desc = "core_relo",
3127 	};
3128 	int err;
3129 
3130 	err = btf_ext_parse_sec_info(btf_ext, &func_info, is_native);
3131 	if (err)
3132 		return err;
3133 
3134 	err = btf_ext_parse_sec_info(btf_ext, &line_info, is_native);
3135 	if (err)
3136 		return err;
3137 
3138 	if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
3139 		return 0; /* skip core relos parsing */
3140 
3141 	core_relo.off = btf_ext->hdr->core_relo_off;
3142 	core_relo.len = btf_ext->hdr->core_relo_len;
3143 	err = btf_ext_parse_sec_info(btf_ext, &core_relo, is_native);
3144 	if (err)
3145 		return err;
3146 
3147 	return 0;
3148 }
3149 
3150 /* Swap byte-order of a BTF.ext header, whatever its current endianness */
3151 static void btf_ext_bswap_hdr(struct btf_ext_header *h)
3152 {
3153 	bool is_native = h->magic == BTF_MAGIC;
3154 	__u32 hdr_len;
3155 
3156 	hdr_len = is_native ? h->hdr_len : bswap_32(h->hdr_len);
3157 
3158 	h->magic = bswap_16(h->magic);
3159 	h->hdr_len = bswap_32(h->hdr_len);
3160 	h->func_info_off = bswap_32(h->func_info_off);
3161 	h->func_info_len = bswap_32(h->func_info_len);
3162 	h->line_info_off = bswap_32(h->line_info_off);
3163 	h->line_info_len = bswap_32(h->line_info_len);
3164 
3165 	if (hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
3166 		return;
3167 
3168 	h->core_relo_off = bswap_32(h->core_relo_off);
3169 	h->core_relo_len = bswap_32(h->core_relo_len);
3170 }
3171 
3172 /* Swap byte-order of generic info subsection */
3173 static void btf_ext_bswap_info_sec(void *info, __u32 len, bool is_native,
3174 				   info_rec_bswap_fn bswap_fn)
3175 {
3176 	struct btf_ext_info_sec *sec;
3177 	__u32 info_left, rec_size, *rs;
3178 
3179 	if (len == 0)
3180 		return;
3181 
3182 	rs = info;				/* info record size */
3183 	rec_size = is_native ? *rs : bswap_32(*rs);
3184 	*rs = bswap_32(*rs);
3185 
3186 	sec = info + sizeof(__u32);		/* info sec #1 */
3187 	info_left = len - sizeof(__u32);
3188 	while (info_left) {
3189 		unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
3190 		__u32 i, num_recs;
3191 		void *p;
3192 
3193 		num_recs = is_native ? sec->num_info : bswap_32(sec->num_info);
3194 		sec->sec_name_off = bswap_32(sec->sec_name_off);
3195 		sec->num_info = bswap_32(sec->num_info);
3196 		p = sec->data;			/* info rec #1 */
3197 		for (i = 0; i < num_recs; i++, p += rec_size)
3198 			bswap_fn(p);
3199 		sec = p;
3200 		info_left -= sec_hdrlen + (__u64)rec_size * num_recs;
3201 	}
3202 }
3203 
3204 /*
3205  * Swap byte-order of all info data in a BTF.ext section
3206  *  - requires BTF.ext hdr in native endianness
3207  */
3208 static void btf_ext_bswap_info(struct btf_ext *btf_ext, void *data)
3209 {
3210 	const bool is_native = btf_ext->swapped_endian;
3211 	const struct btf_ext_header *h = data;
3212 	void *info;
3213 
3214 	/* Swap func_info subsection byte-order */
3215 	info = data + h->hdr_len + h->func_info_off;
3216 	btf_ext_bswap_info_sec(info, h->func_info_len, is_native,
3217 			       (info_rec_bswap_fn)bpf_func_info_bswap);
3218 
3219 	/* Swap line_info subsection byte-order */
3220 	info = data + h->hdr_len + h->line_info_off;
3221 	btf_ext_bswap_info_sec(info, h->line_info_len, is_native,
3222 			       (info_rec_bswap_fn)bpf_line_info_bswap);
3223 
3224 	/* Swap core_relo subsection byte-order (if present) */
3225 	if (h->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
3226 		return;
3227 
3228 	info = data + h->hdr_len + h->core_relo_off;
3229 	btf_ext_bswap_info_sec(info, h->core_relo_len, is_native,
3230 			       (info_rec_bswap_fn)bpf_core_relo_bswap);
3231 }
3232 
3233 /* Parse hdr data and info sections: check and convert to native endianness */
3234 static int btf_ext_parse(struct btf_ext *btf_ext)
3235 {
3236 	__u32 hdr_len, data_size = btf_ext->data_size;
3237 	struct btf_ext_header *hdr = btf_ext->hdr;
3238 	bool swapped_endian = false;
3239 	int err;
3240 
3241 	if (data_size < offsetofend(struct btf_ext_header, hdr_len)) {
3242 		pr_debug("BTF.ext header too short\n");
3243 		return -EINVAL;
3244 	}
3245 
3246 	hdr_len = hdr->hdr_len;
3247 	if (hdr->magic == bswap_16(BTF_MAGIC)) {
3248 		swapped_endian = true;
3249 		hdr_len = bswap_32(hdr_len);
3250 	} else if (hdr->magic != BTF_MAGIC) {
3251 		pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
3252 		return -EINVAL;
3253 	}
3254 
3255 	/* Ensure known version of structs, current BTF_VERSION == 1 */
3256 	if (hdr->version != 1) {
3257 		pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
3258 		return -ENOTSUP;
3259 	}
3260 
3261 	if (hdr->flags) {
3262 		pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
3263 		return -ENOTSUP;
3264 	}
3265 
3266 	if (data_size < hdr_len) {
3267 		pr_debug("BTF.ext header not found\n");
3268 		return -EINVAL;
3269 	} else if (data_size == hdr_len) {
3270 		pr_debug("BTF.ext has no data\n");
3271 		return -EINVAL;
3272 	}
3273 
3274 	/* Verify mandatory hdr info details present */
3275 	if (hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
3276 		pr_warn("BTF.ext header missing func_info, line_info\n");
3277 		return -EINVAL;
3278 	}
3279 
3280 	/* Keep hdr native byte-order in memory for introspection */
3281 	if (swapped_endian)
3282 		btf_ext_bswap_hdr(btf_ext->hdr);
3283 
3284 	/* Validate info subsections and cache key metadata */
3285 	err = btf_ext_parse_info(btf_ext, !swapped_endian);
3286 	if (err)
3287 		return err;
3288 
3289 	/* Keep infos native byte-order in memory for introspection */
3290 	if (swapped_endian)
3291 		btf_ext_bswap_info(btf_ext, btf_ext->data);
3292 
3293 	/*
3294 	 * Set btf_ext->swapped_endian only after all header and info data has
3295 	 * been swapped, helping bswap functions determine if their data are
3296 	 * in native byte-order when called.
3297 	 */
3298 	btf_ext->swapped_endian = swapped_endian;
3299 	return 0;
3300 }
3301 
3302 void btf_ext__free(struct btf_ext *btf_ext)
3303 {
3304 	if (IS_ERR_OR_NULL(btf_ext))
3305 		return;
3306 	free(btf_ext->func_info.sec_idxs);
3307 	free(btf_ext->line_info.sec_idxs);
3308 	free(btf_ext->core_relo_info.sec_idxs);
3309 	free(btf_ext->data);
3310 	free(btf_ext->data_swapped);
3311 	free(btf_ext);
3312 }
3313 
3314 struct btf_ext *btf_ext__new(const __u8 *data, __u32 size)
3315 {
3316 	struct btf_ext *btf_ext;
3317 	int err;
3318 
3319 	btf_ext = calloc(1, sizeof(struct btf_ext));
3320 	if (!btf_ext)
3321 		return libbpf_err_ptr(-ENOMEM);
3322 
3323 	btf_ext->data_size = size;
3324 	btf_ext->data = malloc(size);
3325 	if (!btf_ext->data) {
3326 		err = -ENOMEM;
3327 		goto done;
3328 	}
3329 	memcpy(btf_ext->data, data, size);
3330 
3331 	err = btf_ext_parse(btf_ext);
3332 
3333 done:
3334 	if (err) {
3335 		btf_ext__free(btf_ext);
3336 		return libbpf_err_ptr(err);
3337 	}
3338 
3339 	return btf_ext;
3340 }
3341 
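/* Usage sketch (illustrative): parse a raw .BTF.ext blob previously read
 * from an ELF section into *data*/*size* (hypothetical placeholders), and
 * release it when done.
 *
 *	struct btf_ext *ext = btf_ext__new(data, size);
 *
 *	if (libbpf_get_error(ext))
 *		return -errno;
 *	...
 *	btf_ext__free(ext);
 */
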
3342 static void *btf_ext_raw_data(const struct btf_ext *btf_ext_ro, bool swap_endian)
3343 {
3344 	struct btf_ext *btf_ext = (struct btf_ext *)btf_ext_ro;
3345 	const __u32 data_sz = btf_ext->data_size;
3346 	void *data;
3347 
3348 	/* Return native data (always present) or swapped data if present */
3349 	if (!swap_endian)
3350 		return btf_ext->data;
3351 	else if (btf_ext->data_swapped)
3352 		return btf_ext->data_swapped;
3353 
3354 	/* Recreate missing swapped data, then cache and return */
3355 	data = calloc(1, data_sz);
3356 	if (!data)
3357 		return NULL;
3358 	memcpy(data, btf_ext->data, data_sz);
3359 
3360 	btf_ext_bswap_info(btf_ext, data);
3361 	btf_ext_bswap_hdr(data);
3362 	btf_ext->data_swapped = data;
3363 	return data;
3364 }
3365 
3366 const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size)
3367 {
3368 	void *data;
3369 
3370 	data = btf_ext_raw_data(btf_ext, btf_ext->swapped_endian);
3371 	if (!data)
3372 		return errno = ENOMEM, NULL;
3373 
3374 	*size = btf_ext->data_size;
3375 	return data;
3376 }
3377 
3378 __attribute__((alias("btf_ext__raw_data")))
3379 const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size);
3380 
3381 enum btf_endianness btf_ext__endianness(const struct btf_ext *btf_ext)
3382 {
3383 	if (is_host_big_endian())
3384 		return btf_ext->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
3385 	else
3386 		return btf_ext->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
3387 }
3388 
3389 int btf_ext__set_endianness(struct btf_ext *btf_ext, enum btf_endianness endian)
3390 {
3391 	if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
3392 		return libbpf_err(-EINVAL);
3393 
3394 	btf_ext->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
3395 
3396 	if (!btf_ext->swapped_endian) {
3397 		free(btf_ext->data_swapped);
3398 		btf_ext->data_swapped = NULL;
3399 	}
3400 	return 0;
3401 }
3402 
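/* Usage sketch (illustrative): request big-endian output, e.g. when
 * cross-building for a big-endian target; the swapped raw data is then
 * produced lazily by btf_ext__raw_data(). *ext* is a hypothetical
 * placeholder.
 *
 *	__u32 sz;
 *	const void *raw;
 *
 *	btf_ext__set_endianness(ext, BTF_BIG_ENDIAN);
 *	raw = btf_ext__raw_data(ext, &sz);
 */
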
3403 struct btf_dedup;
3404 
3405 static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
3406 static void btf_dedup_free(struct btf_dedup *d);
3407 static int btf_dedup_prep(struct btf_dedup *d);
3408 static int btf_dedup_strings(struct btf_dedup *d);
3409 static int btf_dedup_prim_types(struct btf_dedup *d);
3410 static int btf_dedup_struct_types(struct btf_dedup *d);
3411 static int btf_dedup_ref_types(struct btf_dedup *d);
3412 static int btf_dedup_resolve_fwds(struct btf_dedup *d);
3413 static int btf_dedup_compact_types(struct btf_dedup *d);
3414 static int btf_dedup_remap_types(struct btf_dedup *d);
3415 
3416 /*
3417  * Deduplicate BTF types and strings.
3418  *
3419  * The BTF dedup algorithm takes as input a `struct btf` representing the
3420  * `.BTF` ELF section with all BTF type descriptors and string data. It
3421  * overwrites that memory in-place with deduplicated types and strings without
3422  * any loss of information. If an optional `struct btf_ext` representing the
3423  * '.BTF.ext' ELF section is provided, all the strings referenced from the
3424  * .BTF.ext section are honored and updated to point to the right offsets
3425  * after deduplication.
3426  *
3427  * If the function returns an error, type/string data might be garbled and
3428  * should be discarded.
3429  *
3430  * A more verbose and detailed description of both the problem btf_dedup is
3431  * solving and the solution can be found at:
3431  * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
3432  *
3433  * Problem description and justification
3434  * =====================================
3435  *
3436  * BTF type information is typically emitted either as a result of conversion
3437  * from DWARF to BTF or directly by the compiler. In both cases, each
3438  * compilation unit contains information about a subset of all the types that
3439  * are used in an application. These subsets frequently overlap and contain a
3440  * lot of duplicated information when later concatenated together into a single
3441  * binary. This algorithm ensures that each unique type is represented by a
3442  * single BTF type descriptor, greatly reducing the resulting size of BTF data.
3443  *
3444  * Compilation unit isolation and subsequent duplication of data is not the only
3445  * problem. The same type hierarchy (e.g., a struct and all the types that the
3446  * struct references) in different compilation units can be represented in BTF
3447  * to various degrees of completeness (or, rather, incompleteness) due to
3448  * struct/union forward declarations.
3449  *
3450  * Let's take a look at an example that we'll use to better understand the
3451  * problem (and solution). Suppose we have two compilation units, each using
3452  * the same `struct S`, but each having incomplete type information about the
3453  * struct's fields:
3454  *
3455  * // CU #1:
3456  * struct S;
3457  * struct A {
3458  *	int a;
3459  *	struct A* self;
3460  *	struct S* parent;
3461  * };
3462  * struct B;
3463  * struct S {
3464  *	struct A* a_ptr;
3465  *	struct B* b_ptr;
3466  * };
3467  *
3468  * // CU #2:
3469  * struct S;
3470  * struct A;
3471  * struct B {
3472  *	int b;
3473  *	struct B* self;
3474  *	struct S* parent;
3475  * };
3476  * struct S {
3477  *	struct A* a_ptr;
3478  *	struct B* b_ptr;
3479  * };
3480  *
3481  * In case of CU #1, BTF data will know only that `struct B` exists (but no
3482  * more), but will know the complete type information about `struct A`. While
3483  * for CU #2, it will know full type information about `struct B`, but will
3484  * only know about the forward declaration of `struct A` (in BTF terms, it will
3485  * have a `BTF_KIND_FWD` type descriptor with name `A`).
3486  *
3487  * This compilation unit isolation means that it's possible that there is no
3488  * single CU with complete type information describing structs `S`, `A`, and
3489  * `B`. Also, we might get tons of duplicated and redundant type information.
3490  *
3491  * An additional complication we need to keep in mind comes from the fact that
3492  * types, in general, can form graphs containing cycles, not just DAGs.
3493  *
3494  * While the algorithm does deduplication, it also merges and resolves type
3495  * information (unless disabled through `struct btf_dedup_opts`), whenever
3496  * possible. E.g., in the example above with two compilation units having
3497  * partial type information for structs `A` and `B`, the algorithm will emit
3498  * a single copy of each BTF type that describes structs `A`, `B`, and `S`
3499  * (as well as type information for `int` and pointers), as if they were defined
3500  * in a single compilation unit as:
3501  *
3502  * struct A {
3503  *	int a;
3504  *	struct A* self;
3505  *	struct S* parent;
3506  * };
3507  * struct B {
3508  *	int b;
3509  *	struct B* self;
3510  *	struct S* parent;
3511  * };
3512  * struct S {
3513  *	struct A* a_ptr;
3514  *	struct B* b_ptr;
3515  * };
3516  *
3517  * Algorithm summary
3518  * =================
3519  *
3520  * Algorithm completes its work in 7 separate passes:
3521  *
3522  * 1. Strings deduplication.
3523  * 2. Primitive types deduplication (int, enum, fwd).
3524  * 3. Struct/union types deduplication.
3525  * 4. Resolve unambiguous forward declarations.
3526  * 5. Reference types deduplication (pointers, typedefs, arrays, funcs, func
3527  *    protos, and const/volatile/restrict modifiers).
3528  * 6. Types compaction.
3529  * 7. Types remapping.
3530  *
3531  * The algorithm determines a canonical type descriptor, which is a single
3532  * representative type for each truly unique type. This canonical type is the
3533  * one that will go into the final deduplicated BTF type information. For
3534  * structs/unions, it is also the type that the algorithm will merge additional
3535  * type information into (while resolving FWDs), as it discovers it from data in
3536  * other CUs. Each input BTF type eventually gets either mapped to itself, if
3537  * that type is canonical, or to some other type, if that type is equivalent
3538  * and was chosen as the canonical representative. This mapping is stored in
3539  * the `btf_dedup->map` array. This map is also used to record the STRUCT/UNION
3540  * that a FWD type got resolved to.
3541  *
3542  * To facilitate fast discovery of canonical types, we also maintain a canonical
3543  * index (`btf_dedup->dedup_table`), which maps a type descriptor's signature
3544  * hash (i.e., hashed kind, name, size, fields, etc.) into a list of canonical
3545  * types that match that signature. With a sufficiently good choice of type
3546  * signature hashing function, we can limit the number of canonical types for
3547  * each unique type signature to a very small number, allowing us to find the
3548  * canonical type for any duplicated type very quickly.
3549  *
3550  * Struct/union deduplication is the most critical part, and the algorithm for
3551  * deduplicating structs/unions is described in greater detail in the comments
3552  * for the `btf_dedup_is_equiv` function.
3553  */
3554 int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
3555 {
3556 	struct btf_dedup *d;
3557 	int err;
3558 
3559 	if (!OPTS_VALID(opts, btf_dedup_opts))
3560 		return libbpf_err(-EINVAL);
3561 
3562 	d = btf_dedup_new(btf, opts);
3563 	if (IS_ERR(d)) {
3564 		pr_debug("btf_dedup_new failed: %ld\n", PTR_ERR(d));
3565 		return libbpf_err(-EINVAL);
3566 	}
3567 
3568 	if (btf_ensure_modifiable(btf)) {
3569 		err = -ENOMEM;
3570 		goto done;
3571 	}
3572 
3573 	err = btf_dedup_prep(d);
3574 	if (err) {
3575 		pr_debug("btf_dedup_prep failed: %s\n", errstr(err));
3576 		goto done;
3577 	}
3578 	err = btf_dedup_strings(d);
3579 	if (err < 0) {
3580 		pr_debug("btf_dedup_strings failed: %s\n", errstr(err));
3581 		goto done;
3582 	}
3583 	err = btf_dedup_prim_types(d);
3584 	if (err < 0) {
3585 		pr_debug("btf_dedup_prim_types failed: %s\n", errstr(err));
3586 		goto done;
3587 	}
3588 	err = btf_dedup_struct_types(d);
3589 	if (err < 0) {
3590 		pr_debug("btf_dedup_struct_types failed: %s\n", errstr(err));
3591 		goto done;
3592 	}
3593 	err = btf_dedup_resolve_fwds(d);
3594 	if (err < 0) {
3595 		pr_debug("btf_dedup_resolve_fwds failed: %s\n", errstr(err));
3596 		goto done;
3597 	}
3598 	err = btf_dedup_ref_types(d);
3599 	if (err < 0) {
3600 		pr_debug("btf_dedup_ref_types failed: %s\n", errstr(err));
3601 		goto done;
3602 	}
3603 	err = btf_dedup_compact_types(d);
3604 	if (err < 0) {
3605 		pr_debug("btf_dedup_compact_types failed: %s\n", errstr(err));
3606 		goto done;
3607 	}
3608 	err = btf_dedup_remap_types(d);
3609 	if (err < 0) {
3610 		pr_debug("btf_dedup_remap_types failed: %s\n", errstr(err));
3611 		goto done;
3612 	}
3613 
3614 done:
3615 	btf_dedup_free(d);
3616 	return libbpf_err(err);
3617 }
3618 
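/* Usage sketch (illustrative): dedup *btf* while keeping an accompanying
 * .BTF.ext (*ext*, hypothetical placeholder) consistent; LIBBPF_OPTS
 * zero-initializes the opts struct. Error handling is elided.
 *
 *	LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = ext);
 *	int err;
 *
 *	err = btf__dedup(btf, &opts);
 *	if (err)
 *		return err;
 */
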
3619 #define BTF_UNPROCESSED_ID ((__u32)-1)
3620 #define BTF_IN_PROGRESS_ID ((__u32)-2)
3621 
3622 struct btf_dedup {
3623 	/* .BTF section to be deduped in-place */
3624 	struct btf *btf;
3625 	/*
3626 	 * Optional .BTF.ext section. When provided, any strings referenced
3627 	 * from it will be taken into account when deduping strings
3628 	 */
3629 	struct btf_ext *btf_ext;
3630 	/*
3631 	 * This is a map from any type's signature hash to a list of possible
3632 	 * canonical representative type candidates. Hash collisions are
3633 	 * ignored, so even types of various kinds can share the same list of
3634 	 * candidates, which is fine because we rely on subsequent
3635 	 * btf_xxx_equal() checks to authoritatively verify type equality.
3636 	 */
3637 	struct hashmap *dedup_table;
3638 	/* Canonical types map */
3639 	__u32 *map;
3640 	/* Hypothetical mapping, used during type graph equivalence checks */
3641 	__u32 *hypot_map;
3642 	__u32 *hypot_list;
3643 	size_t hypot_cnt;
3644 	size_t hypot_cap;
3645 	/* Whether the hypothetical mapping, if successful, would need to adjust
3646 	 * already canonicalized types (due to a new forward declaration to
3647 	 * concrete type resolution). In such a case, during split BTF dedup
3648 	 * the candidate type would still be considered different, because base
3649 	 * BTF is considered to be immutable.
3650 	 */
3651 	bool hypot_adjust_canon;
3652 	/* Various options modifying the behavior of the algorithm */
3653 	struct btf_dedup_opts opts;
3654 	/* temporary strings deduplication state */
3655 	struct strset *strs_set;
3656 };
3657 
3658 static unsigned long hash_combine(unsigned long h, unsigned long value)
3659 {
3660 	return h * 31 + value;
3661 }
3662 
3663 #define for_each_dedup_cand(d, node, hash) \
3664 	hashmap__for_each_key_entry(d->dedup_table, node, hash)
3665 
3666 static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
3667 {
3668 	return hashmap__append(d->dedup_table, hash, type_id);
3669 }
3670 
3671 static int btf_dedup_hypot_map_add(struct btf_dedup *d,
3672 				   __u32 from_id, __u32 to_id)
3673 {
3674 	if (d->hypot_cnt == d->hypot_cap) {
3675 		__u32 *new_list;
3676 
3677 		d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
3678 		new_list = libbpf_reallocarray(d->hypot_list, d->hypot_cap, sizeof(__u32));
3679 		if (!new_list)
3680 			return -ENOMEM;
3681 		d->hypot_list = new_list;
3682 	}
3683 	d->hypot_list[d->hypot_cnt++] = from_id;
3684 	d->hypot_map[from_id] = to_id;
3685 	return 0;
3686 }
3687 
3688 static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
3689 {
3690 	int i;
3691 
3692 	for (i = 0; i < d->hypot_cnt; i++)
3693 		d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
3694 	d->hypot_cnt = 0;
3695 	d->hypot_adjust_canon = false;
3696 }
3697 
3698 static void btf_dedup_free(struct btf_dedup *d)
3699 {
3700 	hashmap__free(d->dedup_table);
3701 	d->dedup_table = NULL;
3702 
3703 	free(d->map);
3704 	d->map = NULL;
3705 
3706 	free(d->hypot_map);
3707 	d->hypot_map = NULL;
3708 
3709 	free(d->hypot_list);
3710 	d->hypot_list = NULL;
3711 
3712 	free(d);
3713 }
3714 
3715 static size_t btf_dedup_identity_hash_fn(long key, void *ctx)
3716 {
3717 	return key;
3718 }
3719 
3720 static size_t btf_dedup_collision_hash_fn(long key, void *ctx)
3721 {
3722 	return 0;
3723 }
3724 
3725 static bool btf_dedup_equal_fn(long k1, long k2, void *ctx)
3726 {
3727 	return k1 == k2;
3728 }
3729 
3730 static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts)
3731 {
3732 	struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
3733 	hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
3734 	int i, err = 0, type_cnt;
3735 
3736 	if (!d)
3737 		return ERR_PTR(-ENOMEM);
3738 
3739 	if (OPTS_GET(opts, force_collisions, false))
3740 		hash_fn = btf_dedup_collision_hash_fn;
3741 
3742 	d->btf = btf;
3743 	d->btf_ext = OPTS_GET(opts, btf_ext, NULL);
3744 
3745 	d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
3746 	if (IS_ERR(d->dedup_table)) {
3747 		err = PTR_ERR(d->dedup_table);
3748 		d->dedup_table = NULL;
3749 		goto done;
3750 	}
3751 
3752 	type_cnt = btf__type_cnt(btf);
3753 	d->map = malloc(sizeof(__u32) * type_cnt);
3754 	if (!d->map) {
3755 		err = -ENOMEM;
3756 		goto done;
3757 	}
3758 	/* special BTF "void" type is made canonical immediately */
3759 	d->map[0] = 0;
3760 	for (i = 1; i < type_cnt; i++) {
3761 		struct btf_type *t = btf_type_by_id(d->btf, i);
3762 
3763 		/* VAR and DATASEC are never deduped and are self-canonical */
3764 		if (btf_is_var(t) || btf_is_datasec(t))
3765 			d->map[i] = i;
3766 		else
3767 			d->map[i] = BTF_UNPROCESSED_ID;
3768 	}
3769 
3770 	d->hypot_map = malloc(sizeof(__u32) * type_cnt);
3771 	if (!d->hypot_map) {
3772 		err = -ENOMEM;
3773 		goto done;
3774 	}
3775 	for (i = 0; i < type_cnt; i++)
3776 		d->hypot_map[i] = BTF_UNPROCESSED_ID;
3777 
3778 done:
3779 	if (err) {
3780 		btf_dedup_free(d);
3781 		return ERR_PTR(err);
3782 	}
3783 
3784 	return d;
3785 }
3786 
3787 /*
 * Iterate over all possible places in .BTF and .BTF.ext that can reference
 * a string and pass a pointer to each such string offset to the provided
 * callback `fn`.
3790  */
3791 static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *ctx)
3792 {
3793 	int i, r;
3794 
3795 	for (i = 0; i < d->btf->nr_types; i++) {
3796 		struct btf_field_iter it;
3797 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
3798 		__u32 *str_off;
3799 
3800 		r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
3801 		if (r)
3802 			return r;
3803 
3804 		while ((str_off = btf_field_iter_next(&it))) {
3805 			r = fn(str_off, ctx);
3806 			if (r)
3807 				return r;
3808 		}
3809 	}
3810 
3811 	if (!d->btf_ext)
3812 		return 0;
3813 
3814 	r = btf_ext_visit_str_offs(d->btf_ext, fn, ctx);
3815 	if (r)
3816 		return r;
3817 
3818 	return 0;
3819 }
3820 
3821 static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx)
3822 {
3823 	struct btf_dedup *d = ctx;
3824 	__u32 str_off = *str_off_ptr;
3825 	const char *s;
3826 	int off, err;
3827 
	/* don't touch empty string or string in base BTF */
3829 	if (str_off == 0 || str_off < d->btf->start_str_off)
3830 		return 0;
3831 
3832 	s = btf__str_by_offset(d->btf, str_off);
3833 	if (d->btf->base_btf) {
3834 		err = btf__find_str(d->btf->base_btf, s);
3835 		if (err >= 0) {
3836 			*str_off_ptr = err;
3837 			return 0;
3838 		}
3839 		if (err != -ENOENT)
3840 			return err;
3841 	}
3842 
3843 	off = strset__add_str(d->strs_set, s);
3844 	if (off < 0)
3845 		return off;
3846 
3847 	*str_off_ptr = d->btf->start_str_off + off;
3848 	return 0;
3849 }
3850 
3851 /*
 * Dedup strings and filter out those that are not referenced from either .BTF
 * or .BTF.ext (if provided) sections.
 *
 * This is done by building an index of all strings in BTF's string section,
 * then iterating over all entities that can reference strings (e.g., type
 * names, struct field names, .BTF.ext line info, etc) and marking corresponding
 * strings as used. After that all used strings are deduped and compacted into
 * a sequential blob of memory and new offsets are calculated. Then all the
 * string references are iterated over again and rewritten using the new offsets.
3861  */
3862 static int btf_dedup_strings(struct btf_dedup *d)
3863 {
3864 	int err;
3865 
3866 	if (d->btf->strs_deduped)
3867 		return 0;
3868 
3869 	d->strs_set = strset__new(BTF_MAX_STR_OFFSET, NULL, 0);
3870 	if (IS_ERR(d->strs_set)) {
3871 		err = PTR_ERR(d->strs_set);
3872 		goto err_out;
3873 	}
3874 
3875 	if (!d->btf->base_btf) {
3876 		/* insert empty string; we won't be looking it up during strings
3877 		 * dedup, but it's good to have it for generic BTF string lookups
3878 		 */
3879 		err = strset__add_str(d->strs_set, "");
3880 		if (err < 0)
3881 			goto err_out;
3882 	}
3883 
3884 	/* remap string offsets */
3885 	err = btf_for_each_str_off(d, strs_dedup_remap_str_off, d);
3886 	if (err)
3887 		goto err_out;
3888 
3889 	/* replace BTF string data and hash with deduped ones */
3890 	strset__free(d->btf->strs_set);
3891 	d->btf->hdr->str_len = strset__data_size(d->strs_set);
3892 	d->btf->strs_set = d->strs_set;
3893 	d->strs_set = NULL;
3894 	d->btf->strs_deduped = true;
3895 	return 0;
3896 
3897 err_out:
3898 	strset__free(d->strs_set);
3899 	d->strs_set = NULL;
3900 
3901 	return err;
3902 }
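
/* For illustration: if the string section contains "\0int\0foo\0int\0",
 * with the second "int" coming from another compilation unit, strings dedup
 * keeps a single "int" copy and rewrites every string offset that pointed
 * at the duplicate to the offset of the surviving copy.
 */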
3903 
3904 static long btf_hash_common(struct btf_type *t)
3905 {
3906 	long h;
3907 
3908 	h = hash_combine(0, t->name_off);
3909 	h = hash_combine(h, t->info);
3910 	h = hash_combine(h, t->size);
3911 	return h;
3912 }
3913 
3914 static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
3915 {
3916 	return t1->name_off == t2->name_off &&
3917 	       t1->info == t2->info &&
3918 	       t1->size == t2->size;
3919 }
3920 
/* Calculate type signature hash of INT or DECL_TAG. */
3922 static long btf_hash_int_decl_tag(struct btf_type *t)
3923 {
3924 	__u32 info = *(__u32 *)(t + 1);
3925 	long h;
3926 
3927 	h = btf_hash_common(t);
3928 	h = hash_combine(h, info);
3929 	return h;
3930 }
3931 
/* Check structural equality of two INTs or DECL_TAGs. */
3933 static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2)
3934 {
3935 	__u32 info1, info2;
3936 
3937 	if (!btf_equal_common(t1, t2))
3938 		return false;
3939 	info1 = *(__u32 *)(t1 + 1);
3940 	info2 = *(__u32 *)(t2 + 1);
3941 	return info1 == info2;
3942 }
3943 
3944 /* Calculate type signature hash of ENUM/ENUM64. */
3945 static long btf_hash_enum(struct btf_type *t)
3946 {
3947 	long h;
3948 
3949 	/* don't hash vlen, enum members and size to support enum fwd resolving */
3950 	h = hash_combine(0, t->name_off);
3951 	return h;
3952 }
3953 
3954 static bool btf_equal_enum_members(struct btf_type *t1, struct btf_type *t2)
3955 {
3956 	const struct btf_enum *m1, *m2;
3957 	__u16 vlen;
3958 	int i;
3959 
3960 	vlen = btf_vlen(t1);
3961 	m1 = btf_enum(t1);
3962 	m2 = btf_enum(t2);
3963 	for (i = 0; i < vlen; i++) {
3964 		if (m1->name_off != m2->name_off || m1->val != m2->val)
3965 			return false;
3966 		m1++;
3967 		m2++;
3968 	}
3969 	return true;
3970 }
3971 
3972 static bool btf_equal_enum64_members(struct btf_type *t1, struct btf_type *t2)
3973 {
3974 	const struct btf_enum64 *m1, *m2;
3975 	__u16 vlen;
3976 	int i;
3977 
3978 	vlen = btf_vlen(t1);
3979 	m1 = btf_enum64(t1);
3980 	m2 = btf_enum64(t2);
3981 	for (i = 0; i < vlen; i++) {
3982 		if (m1->name_off != m2->name_off || m1->val_lo32 != m2->val_lo32 ||
3983 		    m1->val_hi32 != m2->val_hi32)
3984 			return false;
3985 		m1++;
3986 		m2++;
3987 	}
3988 	return true;
3989 }
3990 
3991 /* Check structural equality of two ENUMs or ENUM64s. */
3992 static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
3993 {
3994 	if (!btf_equal_common(t1, t2))
3995 		return false;
3996 
3997 	/* t1 & t2 kinds are identical because of btf_equal_common */
3998 	if (btf_kind(t1) == BTF_KIND_ENUM)
3999 		return btf_equal_enum_members(t1, t2);
4000 	else
4001 		return btf_equal_enum64_members(t1, t2);
4002 }
4003 
4004 static inline bool btf_is_enum_fwd(struct btf_type *t)
4005 {
4006 	return btf_is_any_enum(t) && btf_vlen(t) == 0;
4007 }
4008 
4009 static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
4010 {
4011 	if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
4012 		return btf_equal_enum(t1, t2);
4013 	/* At this point either t1 or t2 or both are forward declarations, thus:
4014 	 * - skip comparing vlen because it is zero for forward declarations;
4015 	 * - skip comparing size to allow enum forward declarations
4016 	 *   to be compatible with enum64 full declarations;
4017 	 * - skip comparing kind for the same reason.
4018 	 */
4019 	return t1->name_off == t2->name_off &&
4020 	       btf_is_any_enum(t1) && btf_is_any_enum(t2);
4021 }
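
/* E.g., btf_compat_enum() considers the BTF for these two declarations
 * compatible, which is what allows resolving the forward declaration to the
 * full enum during dedup:
 *
 *   enum foo;                 -> ENUM 'foo' vlen=0
 *   enum foo { FOO_A = 1, };  -> ENUM 'foo' size=4 vlen=1
 *
 * The same applies to an enum fwd paired with a full ENUM64 definition.
 */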
4022 
4023 /*
4024  * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
4025  * as referenced type IDs equivalence is established separately during type
4026  * graph equivalence check algorithm.
4027  */
4028 static long btf_hash_struct(struct btf_type *t)
4029 {
4030 	const struct btf_member *member = btf_members(t);
4031 	__u32 vlen = btf_vlen(t);
4032 	long h = btf_hash_common(t);
4033 	int i;
4034 
4035 	for (i = 0; i < vlen; i++) {
4036 		h = hash_combine(h, member->name_off);
4037 		h = hash_combine(h, member->offset);
4038 		/* no hashing of referenced type ID, it can be unresolved yet */
4039 		member++;
4040 	}
4041 	return h;
4042 }
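
/* For example, two compilation units each containing
 *
 *   struct dep { int x; };
 *   struct s { struct dep *d; };
 *
 * produce two STRUCT 's' types whose members reference different type IDs
 * for the 'struct dep *' pointer. Both still hash identically here, so they
 * land on the same candidate list and are later confirmed equivalent by the
 * type graph equivalence check.
 */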
4043 
4044 /*
4045  * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced
4046  * type IDs. This check is performed during type graph equivalence check and
4047  * referenced types equivalence is checked separately.
4048  */
4049 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
4050 {
4051 	const struct btf_member *m1, *m2;
4052 	__u16 vlen;
4053 	int i;
4054 
4055 	if (!btf_equal_common(t1, t2))
4056 		return false;
4057 
4058 	vlen = btf_vlen(t1);
4059 	m1 = btf_members(t1);
4060 	m2 = btf_members(t2);
4061 	for (i = 0; i < vlen; i++) {
4062 		if (m1->name_off != m2->name_off || m1->offset != m2->offset)
4063 			return false;
4064 		m1++;
4065 		m2++;
4066 	}
4067 	return true;
4068 }
4069 
4070 /*
4071  * Calculate type signature hash of ARRAY, including referenced type IDs,
4072  * under assumption that they were already resolved to canonical type IDs and
4073  * are not going to change.
4074  */
4075 static long btf_hash_array(struct btf_type *t)
4076 {
4077 	const struct btf_array *info = btf_array(t);
4078 	long h = btf_hash_common(t);
4079 
4080 	h = hash_combine(h, info->type);
4081 	h = hash_combine(h, info->index_type);
4082 	h = hash_combine(h, info->nelems);
4083 	return h;
4084 }
4085 
4086 /*
4087  * Check exact equality of two ARRAYs, taking into account referenced
4088  * type IDs, under assumption that they were already resolved to canonical
4089  * type IDs and are not going to change.
4090  * This function is called during reference types deduplication to compare
4091  * ARRAY to potential canonical representative.
4092  */
4093 static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
4094 {
4095 	const struct btf_array *info1, *info2;
4096 
4097 	if (!btf_equal_common(t1, t2))
4098 		return false;
4099 
4100 	info1 = btf_array(t1);
4101 	info2 = btf_array(t2);
4102 	return info1->type == info2->type &&
4103 	       info1->index_type == info2->index_type &&
4104 	       info1->nelems == info2->nelems;
4105 }
4106 
4107 /*
4108  * Check structural compatibility of two ARRAYs, ignoring referenced type
4109  * IDs. This check is performed during type graph equivalence check and
4110  * referenced types equivalence is checked separately.
4111  */
4112 static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
4113 {
4114 	if (!btf_equal_common(t1, t2))
4115 		return false;
4116 
4117 	return btf_array(t1)->nelems == btf_array(t2)->nelems;
4118 }
4119 
4120 /*
4121  * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
4122  * under assumption that they were already resolved to canonical type IDs and
4123  * are not going to change.
4124  */
4125 static long btf_hash_fnproto(struct btf_type *t)
4126 {
4127 	const struct btf_param *member = btf_params(t);
4128 	__u16 vlen = btf_vlen(t);
4129 	long h = btf_hash_common(t);
4130 	int i;
4131 
4132 	for (i = 0; i < vlen; i++) {
4133 		h = hash_combine(h, member->name_off);
4134 		h = hash_combine(h, member->type);
4135 		member++;
4136 	}
4137 	return h;
4138 }
4139 
4140 /*
4141  * Check exact equality of two FUNC_PROTOs, taking into account referenced
4142  * type IDs, under assumption that they were already resolved to canonical
4143  * type IDs and are not going to change.
4144  * This function is called during reference types deduplication to compare
4145  * FUNC_PROTO to potential canonical representative.
4146  */
4147 static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
4148 {
4149 	const struct btf_param *m1, *m2;
4150 	__u16 vlen;
4151 	int i;
4152 
4153 	if (!btf_equal_common(t1, t2))
4154 		return false;
4155 
4156 	vlen = btf_vlen(t1);
4157 	m1 = btf_params(t1);
4158 	m2 = btf_params(t2);
4159 	for (i = 0; i < vlen; i++) {
4160 		if (m1->name_off != m2->name_off || m1->type != m2->type)
4161 			return false;
4162 		m1++;
4163 		m2++;
4164 	}
4165 	return true;
4166 }
4167 
4168 /*
4169  * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
4170  * IDs. This check is performed during type graph equivalence check and
4171  * referenced types equivalence is checked separately.
4172  */
4173 static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
4174 {
4175 	const struct btf_param *m1, *m2;
4176 	__u16 vlen;
4177 	int i;
4178 
4179 	/* skip return type ID */
4180 	if (t1->name_off != t2->name_off || t1->info != t2->info)
4181 		return false;
4182 
4183 	vlen = btf_vlen(t1);
4184 	m1 = btf_params(t1);
4185 	m2 = btf_params(t2);
4186 	for (i = 0; i < vlen; i++) {
4187 		if (m1->name_off != m2->name_off)
4188 			return false;
4189 		m1++;
4190 		m2++;
4191 	}
4192 	return true;
4193 }
4194 
4195 /* Prepare split BTF for deduplication by calculating hashes of base BTF's
4196  * types and initializing the rest of the state (canonical type mapping) for
4197  * the fixed base BTF part.
4198  */
4199 static int btf_dedup_prep(struct btf_dedup *d)
4200 {
4201 	struct btf_type *t;
4202 	int type_id;
4203 	long h;
4204 
4205 	if (!d->btf->base_btf)
4206 		return 0;
4207 
4208 	for (type_id = 1; type_id < d->btf->start_id; type_id++) {
4209 		t = btf_type_by_id(d->btf, type_id);
4210 
4211 		/* all base BTF types are self-canonical by definition */
4212 		d->map[type_id] = type_id;
4213 
4214 		switch (btf_kind(t)) {
4215 		case BTF_KIND_VAR:
4216 		case BTF_KIND_DATASEC:
4217 			/* VAR and DATASEC are never hash/deduplicated */
4218 			continue;
4219 		case BTF_KIND_CONST:
4220 		case BTF_KIND_VOLATILE:
4221 		case BTF_KIND_RESTRICT:
4222 		case BTF_KIND_PTR:
4223 		case BTF_KIND_FWD:
4224 		case BTF_KIND_TYPEDEF:
4225 		case BTF_KIND_FUNC:
4226 		case BTF_KIND_FLOAT:
4227 		case BTF_KIND_TYPE_TAG:
4228 			h = btf_hash_common(t);
4229 			break;
4230 		case BTF_KIND_INT:
4231 		case BTF_KIND_DECL_TAG:
4232 			h = btf_hash_int_decl_tag(t);
4233 			break;
4234 		case BTF_KIND_ENUM:
4235 		case BTF_KIND_ENUM64:
4236 			h = btf_hash_enum(t);
4237 			break;
4238 		case BTF_KIND_STRUCT:
4239 		case BTF_KIND_UNION:
4240 			h = btf_hash_struct(t);
4241 			break;
4242 		case BTF_KIND_ARRAY:
4243 			h = btf_hash_array(t);
4244 			break;
4245 		case BTF_KIND_FUNC_PROTO:
4246 			h = btf_hash_fnproto(t);
4247 			break;
4248 		default:
4249 			pr_debug("unknown kind %d for type [%d]\n", btf_kind(t), type_id);
4250 			return -EINVAL;
4251 		}
4252 		if (btf_dedup_table_add(d, h, type_id))
4253 			return -ENOMEM;
4254 	}
4255 
4256 	return 0;
4257 }
4258 
4259 /*
 * Deduplicate primitive types, which can't reference other types, by
 * calculating their type signature hash and comparing it with every possible
 * canonical candidate. If no canonical candidate matches, the type is marked as
4263  * canonical and is added into `btf_dedup->dedup_table` as another candidate.
4264  */
4265 static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
4266 {
4267 	struct btf_type *t = btf_type_by_id(d->btf, type_id);
4268 	struct hashmap_entry *hash_entry;
4269 	struct btf_type *cand;
4270 	/* if we don't find equivalent type, then we are canonical */
4271 	__u32 new_id = type_id;
4272 	__u32 cand_id;
4273 	long h;
4274 
4275 	switch (btf_kind(t)) {
4276 	case BTF_KIND_CONST:
4277 	case BTF_KIND_VOLATILE:
4278 	case BTF_KIND_RESTRICT:
4279 	case BTF_KIND_PTR:
4280 	case BTF_KIND_TYPEDEF:
4281 	case BTF_KIND_ARRAY:
4282 	case BTF_KIND_STRUCT:
4283 	case BTF_KIND_UNION:
4284 	case BTF_KIND_FUNC:
4285 	case BTF_KIND_FUNC_PROTO:
4286 	case BTF_KIND_VAR:
4287 	case BTF_KIND_DATASEC:
4288 	case BTF_KIND_DECL_TAG:
4289 	case BTF_KIND_TYPE_TAG:
4290 		return 0;
4291 
4292 	case BTF_KIND_INT:
4293 		h = btf_hash_int_decl_tag(t);
4294 		for_each_dedup_cand(d, hash_entry, h) {
4295 			cand_id = hash_entry->value;
4296 			cand = btf_type_by_id(d->btf, cand_id);
4297 			if (btf_equal_int_tag(t, cand)) {
4298 				new_id = cand_id;
4299 				break;
4300 			}
4301 		}
4302 		break;
4303 
4304 	case BTF_KIND_ENUM:
4305 	case BTF_KIND_ENUM64:
4306 		h = btf_hash_enum(t);
4307 		for_each_dedup_cand(d, hash_entry, h) {
4308 			cand_id = hash_entry->value;
4309 			cand = btf_type_by_id(d->btf, cand_id);
4310 			if (btf_equal_enum(t, cand)) {
4311 				new_id = cand_id;
4312 				break;
4313 			}
4314 			if (btf_compat_enum(t, cand)) {
4315 				if (btf_is_enum_fwd(t)) {
4316 					/* resolve fwd to full enum */
4317 					new_id = cand_id;
4318 					break;
4319 				}
4320 				/* resolve canonical enum fwd to full enum */
4321 				d->map[cand_id] = type_id;
4322 			}
4323 		}
4324 		break;
4325 
4326 	case BTF_KIND_FWD:
4327 	case BTF_KIND_FLOAT:
4328 		h = btf_hash_common(t);
4329 		for_each_dedup_cand(d, hash_entry, h) {
4330 			cand_id = hash_entry->value;
4331 			cand = btf_type_by_id(d->btf, cand_id);
4332 			if (btf_equal_common(t, cand)) {
4333 				new_id = cand_id;
4334 				break;
4335 			}
4336 		}
4337 		break;
4338 
4339 	default:
4340 		return -EINVAL;
4341 	}
4342 
4343 	d->map[type_id] = new_id;
4344 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
4345 		return -ENOMEM;
4346 
4347 	return 0;
4348 }
4349 
4350 static int btf_dedup_prim_types(struct btf_dedup *d)
4351 {
4352 	int i, err;
4353 
4354 	for (i = 0; i < d->btf->nr_types; i++) {
4355 		err = btf_dedup_prim_type(d, d->btf->start_id + i);
4356 		if (err)
4357 			return err;
4358 	}
4359 	return 0;
4360 }
4361 
4362 /*
4363  * Check whether type is already mapped into canonical one (could be to itself).
4364  */
4365 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
4366 {
4367 	return d->map[type_id] <= BTF_MAX_NR_TYPES;
4368 }
4369 
4370 /*
4371  * Resolve type ID into its canonical type ID, if any; otherwise return original
4372  * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
4373  * STRUCT/UNION link and resolve it into canonical type ID as well.
4374  */
4375 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
4376 {
4377 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
4378 		type_id = d->map[type_id];
4379 	return type_id;
4380 }
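
/* E.g., with d->map = { ..., [3] = 3, [5] = 3, ... }, resolve_type_id(d, 5)
 * follows the chain 5 -> 3 and stops at 3, since 3 maps to itself (i.e., it
 * is canonical). An unmapped (BTF_UNPROCESSED_ID) entry stops the walk as
 * well, returning the last reached type ID.
 */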
4381 
4382 /*
4383  * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
4384  * type ID.
4385  */
4386 static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
4387 {
4388 	__u32 orig_type_id = type_id;
4389 
4390 	if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
4391 		return type_id;
4392 
4393 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
4394 		type_id = d->map[type_id];
4395 
4396 	if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
4397 		return type_id;
4398 
4399 	return orig_type_id;
4400 }
4403 static inline __u16 btf_fwd_kind(struct btf_type *t)
4404 {
4405 	return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
4406 }
4407 
4408 static bool btf_dedup_identical_types(struct btf_dedup *d, __u32 id1, __u32 id2, int depth)
4409 {
4410 	struct btf_type *t1, *t2;
4411 	int k1, k2;
4412 recur:
4413 	if (depth <= 0)
4414 		return false;
4415 
4416 	t1 = btf_type_by_id(d->btf, id1);
4417 	t2 = btf_type_by_id(d->btf, id2);
4418 
4419 	k1 = btf_kind(t1);
4420 	k2 = btf_kind(t2);
4421 	if (k1 != k2)
4422 		return false;
4423 
4424 	switch (k1) {
4425 	case BTF_KIND_UNKN: /* VOID */
4426 		return true;
4427 	case BTF_KIND_INT:
4428 		return btf_equal_int_tag(t1, t2);
4429 	case BTF_KIND_ENUM:
4430 	case BTF_KIND_ENUM64:
4431 		return btf_compat_enum(t1, t2);
4432 	case BTF_KIND_FWD:
4433 	case BTF_KIND_FLOAT:
4434 		return btf_equal_common(t1, t2);
4435 	case BTF_KIND_CONST:
4436 	case BTF_KIND_VOLATILE:
4437 	case BTF_KIND_RESTRICT:
4438 	case BTF_KIND_PTR:
4439 	case BTF_KIND_TYPEDEF:
4440 	case BTF_KIND_FUNC:
4441 	case BTF_KIND_TYPE_TAG:
4442 		if (t1->info != t2->info || t1->name_off != t2->name_off)
4443 			return false;
4444 		id1 = t1->type;
4445 		id2 = t2->type;
4446 		goto recur;
4447 	case BTF_KIND_ARRAY: {
4448 		struct btf_array *a1, *a2;
4449 
4450 		if (!btf_compat_array(t1, t2))
4451 			return false;
4452 
4453 		a1 = btf_array(t1);
		a2 = btf_array(t2);
4455 
4456 		if (a1->index_type != a2->index_type &&
4457 		    !btf_dedup_identical_types(d, a1->index_type, a2->index_type, depth - 1))
4458 			return false;
4459 
4460 		if (a1->type != a2->type &&
4461 		    !btf_dedup_identical_types(d, a1->type, a2->type, depth - 1))
4462 			return false;
4463 
4464 		return true;
4465 	}
4466 	case BTF_KIND_STRUCT:
4467 	case BTF_KIND_UNION: {
4468 		const struct btf_member *m1, *m2;
4469 		int i, n;
4470 
4471 		if (!btf_shallow_equal_struct(t1, t2))
4472 			return false;
4473 
4474 		m1 = btf_members(t1);
4475 		m2 = btf_members(t2);
4476 		for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
4477 			if (m1->type == m2->type)
4478 				continue;
4479 			if (!btf_dedup_identical_types(d, m1->type, m2->type, depth - 1))
4480 				return false;
4481 		}
4482 		return true;
4483 	}
4484 	case BTF_KIND_FUNC_PROTO: {
4485 		const struct btf_param *p1, *p2;
4486 		int i, n;
4487 
4488 		if (!btf_compat_fnproto(t1, t2))
4489 			return false;
4490 
4491 		if (t1->type != t2->type &&
4492 		    !btf_dedup_identical_types(d, t1->type, t2->type, depth - 1))
4493 			return false;
4494 
4495 		p1 = btf_params(t1);
4496 		p2 = btf_params(t2);
4497 		for (i = 0, n = btf_vlen(t1); i < n; i++, p1++, p2++) {
4498 			if (p1->type == p2->type)
4499 				continue;
4500 			if (!btf_dedup_identical_types(d, p1->type, p2->type, depth - 1))
4501 				return false;
4502 		}
4503 		return true;
4504 	}
4505 	default:
4506 		return false;
4507 	}
4508 }
4511 /*
4512  * Check equivalence of BTF type graph formed by candidate struct/union (we'll
4513  * call it "candidate graph" in this description for brevity) to a type graph
4514  * formed by (potential) canonical struct/union ("canonical graph" for brevity
4515  * here, though keep in mind that not all types in canonical graph are
4516  * necessarily canonical representatives themselves, some of them might be
4517  * duplicates or its uniqueness might not have been established yet).
4518  * Returns:
4519  *  - >0, if type graphs are equivalent;
4520  *  -  0, if not equivalent;
4521  *  - <0, on error.
4522  *
4523  * Algorithm performs side-by-side DFS traversal of both type graphs and checks
4524  * equivalence of BTF types at each step. If at any point BTF types in candidate
4525  * and canonical graphs are not compatible structurally, whole graphs are
4526  * incompatible. If types are structurally equivalent (i.e., all information
4527  * except referenced type IDs is exactly the same), a mapping from `canon_id` to
 * a `cand_id` is recorded in the hypothetical mapping (`btf_dedup->hypot_map`).
4529  * If a type references other types, then those referenced types are checked
4530  * for equivalence recursively.
4531  *
4532  * During DFS traversal, if we find that for current `canon_id` type we
4533  * already have some mapping in hypothetical map, we check for two possible
4534  * situations:
4535  *   - `canon_id` is mapped to exactly the same type as `cand_id`. This will
4536  *     happen when type graphs have cycles. In this case we assume those two
4537  *     types are equivalent.
 *   - `canon_id` is mapped to a different type. This is a contradiction in our
 *     hypothetical mapping, because the same type in the canonical graph would
 *     correspond to two different types in the candidate graph, which shouldn't
 *     happen for equivalent type graphs. This condition terminates the
 *     equivalence check with a negative result.
4543  *
 * If the type graph traversal exhausts all types to check and finds no
 * contradiction, then the type graphs are equivalent.
4546  *
4547  * When checking types for equivalence, there is one special case: FWD types.
4548  * If FWD type resolution is allowed and one of the types (either from canonical
4549  * or candidate graph) is FWD and other is STRUCT/UNION (depending on FWD's kind
4550  * flag) and their names match, hypothetical mapping is updated to point from
4551  * FWD to STRUCT/UNION. If graphs will be determined as equivalent successfully,
4552  * this mapping will be used to record FWD -> STRUCT/UNION mapping permanently.
4553  *
 * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
 * if there are two identically named (or anonymous) structs/unions that are
 * structurally compatible, one of which has a FWD field, while the other has
 * a concrete STRUCT/UNION field, but according to C sources they are different
 * structs/unions that reference different types with the same name. This is
 * extremely unlikely to happen, but the btf_dedup API allows disabling FWD
 * resolution if this logic is causing problems.
4561  *
 * Doing FWD resolution means that both candidate and/or canonical graphs can
 * consist of portions of the graph that come from multiple compilation units.
 * This is due to the fact that types within a single compilation unit are
 * always deduplicated and FWDs are already resolved, if the referenced
 * struct/union definition is available. So, if we had an unresolved FWD and
 * found a corresponding STRUCT/UNION, they will be from different compilation
 * units. This consequently means that when we "link" a FWD to a corresponding
 * STRUCT/UNION, the type graph will likely have at least two different BTF
 * types that describe the same type (e.g., most probably there will be two
 * different BTF types for the same 'int' primitive type) and could even have
 * "overlapping" parts of the type graph that describe the same subset of types.
4573  *
4574  * This in turn means that our assumption that each type in canonical graph
4575  * must correspond to exactly one type in candidate graph might not hold
4576  * anymore and will make it harder to detect contradictions using hypothetical
 * map. To handle this problem, we allow following FWD -> STRUCT/UNION
 * resolution only in the canonical graph. FWDs in candidate graphs are never
4579  * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs
4580  * that can occur:
4581  *   - Both types in canonical and candidate graphs are FWDs. If they are
4582  *     structurally equivalent, then they can either be both resolved to the
4583  *     same STRUCT/UNION or not resolved at all. In both cases they are
4584  *     equivalent and there is no need to resolve FWD on candidate side.
4585  *   - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
4586  *     so nothing to resolve as well, algorithm will check equivalence anyway.
4587  *   - Type in canonical graph is FWD, while type in candidate is concrete
4588  *     STRUCT/UNION. In this case candidate graph comes from single compilation
4589  *     unit, so there is exactly one BTF type for each unique C type. After
4590  *     resolving FWD into STRUCT/UNION, there might be more than one BTF type
4591  *     in canonical graph mapping to single BTF type in candidate graph, but
4592  *     because hypothetical mapping maps from canonical to candidate types, it's
4593  *     alright, and we still maintain the property of having single `canon_id`
4594  *     mapping to single `cand_id` (there could be two different `canon_id`
4595  *     mapped to the same `cand_id`, but it's not contradictory).
4596  *   - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
4597  *     graph is FWD. In this case we are just going to check compatibility of
4598  *     STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
4599  *     assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
4600  *     a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
4601  *     turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
4602  *     canonical graph.
4603  */
4604 static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
4605 			      __u32 canon_id)
4606 {
4607 	struct btf_type *cand_type;
4608 	struct btf_type *canon_type;
4609 	__u32 hypot_type_id;
4610 	__u16 cand_kind;
4611 	__u16 canon_kind;
4612 	int i, eq;
4613 
4614 	/* if both resolve to the same canonical, they must be equivalent */
4615 	if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
4616 		return 1;
4617 
4618 	canon_id = resolve_fwd_id(d, canon_id);
4619 
4620 	hypot_type_id = d->hypot_map[canon_id];
4621 	if (hypot_type_id <= BTF_MAX_NR_TYPES) {
4622 		if (hypot_type_id == cand_id)
4623 			return 1;
		/* In some cases the compiler will generate different DWARF
		 * types for *identical* array type definitions and use them
		 * for different fields within the *same* struct. This breaks
		 * the type equivalence check, which assumes that the candidate
		 * type sub-graph contains consistent, compiler-deduped types
		 * within a single CU. A similar situation can sometimes happen
		 * with structs/unions, and even with pointers. So we
		 * accommodate cases like this by doing a structural comparison
		 * recursively, while avoiding getting stuck in endless loops
		 * by limiting the depth up to which we check.
		 */
4635 		if (btf_dedup_identical_types(d, hypot_type_id, cand_id, 16))
4636 			return 1;
4637 		return 0;
4638 	}
4639 
4640 	if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
4641 		return -ENOMEM;
4642 
4643 	cand_type = btf_type_by_id(d->btf, cand_id);
4644 	canon_type = btf_type_by_id(d->btf, canon_id);
4645 	cand_kind = btf_kind(cand_type);
4646 	canon_kind = btf_kind(canon_type);
4647 
4648 	if (cand_type->name_off != canon_type->name_off)
4649 		return 0;
4650 
4651 	/* FWD <--> STRUCT/UNION equivalence check, if enabled */
4652 	if ((cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
4653 	    && cand_kind != canon_kind) {
4654 		__u16 real_kind;
4655 		__u16 fwd_kind;
4656 
4657 		if (cand_kind == BTF_KIND_FWD) {
4658 			real_kind = canon_kind;
4659 			fwd_kind = btf_fwd_kind(cand_type);
4660 		} else {
4661 			real_kind = cand_kind;
4662 			fwd_kind = btf_fwd_kind(canon_type);
4663 			/* we'd need to resolve base FWD to STRUCT/UNION */
4664 			if (fwd_kind == real_kind && canon_id < d->btf->start_id)
4665 				d->hypot_adjust_canon = true;
4666 		}
4667 		return fwd_kind == real_kind;
4668 	}
4669 
4670 	if (cand_kind != canon_kind)
4671 		return 0;
4672 
4673 	switch (cand_kind) {
4674 	case BTF_KIND_INT:
4675 		return btf_equal_int_tag(cand_type, canon_type);
4676 
4677 	case BTF_KIND_ENUM:
4678 	case BTF_KIND_ENUM64:
4679 		return btf_compat_enum(cand_type, canon_type);
4680 
4681 	case BTF_KIND_FWD:
4682 	case BTF_KIND_FLOAT:
4683 		return btf_equal_common(cand_type, canon_type);
4684 
4685 	case BTF_KIND_CONST:
4686 	case BTF_KIND_VOLATILE:
4687 	case BTF_KIND_RESTRICT:
4688 	case BTF_KIND_PTR:
4689 	case BTF_KIND_TYPEDEF:
4690 	case BTF_KIND_FUNC:
4691 	case BTF_KIND_TYPE_TAG:
4692 		if (cand_type->info != canon_type->info)
4693 			return 0;
4694 		return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
4695 
4696 	case BTF_KIND_ARRAY: {
4697 		const struct btf_array *cand_arr, *canon_arr;
4698 
4699 		if (!btf_compat_array(cand_type, canon_type))
4700 			return 0;
4701 		cand_arr = btf_array(cand_type);
4702 		canon_arr = btf_array(canon_type);
4703 		eq = btf_dedup_is_equiv(d, cand_arr->index_type, canon_arr->index_type);
4704 		if (eq <= 0)
4705 			return eq;
4706 		return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
4707 	}
4708 
4709 	case BTF_KIND_STRUCT:
4710 	case BTF_KIND_UNION: {
4711 		const struct btf_member *cand_m, *canon_m;
4712 		__u16 vlen;
4713 
4714 		if (!btf_shallow_equal_struct(cand_type, canon_type))
4715 			return 0;
4716 		vlen = btf_vlen(cand_type);
4717 		cand_m = btf_members(cand_type);
4718 		canon_m = btf_members(canon_type);
4719 		for (i = 0; i < vlen; i++) {
4720 			eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
4721 			if (eq <= 0)
4722 				return eq;
4723 			cand_m++;
4724 			canon_m++;
4725 		}
4726 
4727 		return 1;
4728 	}
4729 
4730 	case BTF_KIND_FUNC_PROTO: {
4731 		const struct btf_param *cand_p, *canon_p;
4732 		__u16 vlen;
4733 
4734 		if (!btf_compat_fnproto(cand_type, canon_type))
4735 			return 0;
4736 		eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
4737 		if (eq <= 0)
4738 			return eq;
4739 		vlen = btf_vlen(cand_type);
4740 		cand_p = btf_params(cand_type);
4741 		canon_p = btf_params(canon_type);
4742 		for (i = 0; i < vlen; i++) {
4743 			eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
4744 			if (eq <= 0)
4745 				return eq;
4746 			cand_p++;
4747 			canon_p++;
4748 		}
4749 		return 1;
4750 	}
4751 
4752 	default:
4753 		return -EINVAL;
4754 	}
4755 	return 0;
4756 }
4757 
4758 /*
4759  * Use hypothetical mapping, produced by successful type graph equivalence
4760  * check, to augment existing struct/union canonical mapping, where possible.
4761  *
4762  * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
4763  * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional:
4764  * it doesn't matter if FWD type was part of canonical graph or candidate one,
 * we are recording the mapping anyway. As opposed to the carefulness required
 * for struct/union correspondence mapping (described below), for FWD resolution
 * this is not a concern: by the time the FWD type (a reference type) gets
 * deduplicated, all structs/unions will have been deduped already anyway.
4769  *
4770  * Recording STRUCT/UNION mapping is purely a performance optimization and is
4771  * not required for correctness. It needs to be done carefully to ensure that
4772  * struct/union from candidate's type graph is not mapped into corresponding
4773  * struct/union from canonical type graph that itself hasn't been resolved into
4774  * canonical representative. The only guarantee we have is that canonical
4775  * struct/union was determined as canonical and that won't change. But any
 * types referenced through that struct/union's fields might not have been
 * resolved yet, so in a case like that it's too early to establish any kind of
4778  * correspondence between structs/unions.
4779  *
 * No canonical correspondence is derived for primitive types (they are
 * completely deduplicated already anyway) or reference types (they rely on
4782  * stability of struct/union canonical relationship for equivalence checks).
4783  */
4784 static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
4785 {
4786 	__u32 canon_type_id, targ_type_id;
4787 	__u16 t_kind, c_kind;
4788 	__u32 t_id, c_id;
4789 	int i;
4790 
4791 	for (i = 0; i < d->hypot_cnt; i++) {
4792 		canon_type_id = d->hypot_list[i];
4793 		targ_type_id = d->hypot_map[canon_type_id];
4794 		t_id = resolve_type_id(d, targ_type_id);
4795 		c_id = resolve_type_id(d, canon_type_id);
4796 		t_kind = btf_kind(btf__type_by_id(d->btf, t_id));
4797 		c_kind = btf_kind(btf__type_by_id(d->btf, c_id));
4798 		/*
4799 		 * Resolve FWD into STRUCT/UNION.
4800 		 * It's ok to resolve FWD into STRUCT/UNION that's not yet
4801 		 * mapped to canonical representative (as opposed to
4802 		 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
4803 		 * eventually that struct is going to be mapped and all resolved
4804 		 * FWDs will automatically resolve to correct canonical
4805 		 * representative. This will happen before ref type deduping,
		 * which critically depends on stability of these mappings. This
4807 		 * stability is not a requirement for STRUCT/UNION equivalence
4808 		 * checks, though.
4809 		 */
4810 
4811 		/* if it's the split BTF case, we still need to point base FWD
4812 		 * to STRUCT/UNION in a split BTF, because FWDs from split BTF
4813 		 * will be resolved against base FWD. If we don't point base
4814 		 * canonical FWD to the resolved STRUCT/UNION, then all the
4815 		 * FWDs in split BTF won't be correctly resolved to a proper
4816 		 * STRUCT/UNION.
4817 		 */
4818 		if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
4819 			d->map[c_id] = t_id;
4820 
4821 		/* if graph equivalence determined that we'd need to adjust
4822 		 * base canonical types, then we need to only point base FWDs
4823 		 * to STRUCTs/UNIONs and do no more modifications. For all
4824 		 * other purposes the type graphs were not equivalent.
4825 		 */
4826 		if (d->hypot_adjust_canon)
4827 			continue;
4828 
4829 		if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
4830 			d->map[t_id] = c_id;
4831 
4832 		if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
4833 		    c_kind != BTF_KIND_FWD &&
4834 		    is_type_mapped(d, c_id) &&
4835 		    !is_type_mapped(d, t_id)) {
4836 			/*
4837 			 * as a perf optimization, we can map struct/union
4838 			 * that's part of type graph we just verified for
4839 			 * equivalence. We can do that for struct/union that has
4840 			 * canonical representative only, though.
4841 			 */
4842 			d->map[t_id] = c_id;
4843 		}
4844 	}
4845 }
4846 
4847 /*
4848  * Deduplicate struct/union types.
4849  *
 * For each struct/union type its type signature hash is calculated, taking
 * into account the type's name, size, and the number, order, and names of its
 * fields, but ignoring type IDs referenced from fields, because they might not
 * be deduped completely until after the reference types deduplication phase.
 * This type hash is used to iterate over all potential canonical types sharing
 * the same hash.
4855  * For each canonical candidate we check whether type graphs that they form
4856  * (through referenced types in fields and so on) are equivalent using algorithm
4857  * implemented in `btf_dedup_is_equiv`. If such equivalence is found and
4858  * BTF_KIND_FWD resolution is allowed, then hypothetical mapping
4859  * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence
4860  * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to
4861  * potentially map other structs/unions to their canonical representatives,
4862  * if such relationship hasn't yet been established. This speeds up algorithm
4863  * by eliminating some of the duplicate work.
4864  *
4865  * If no matching canonical representative was found, struct/union is marked
4866  * as canonical for itself and is added into btf_dedup->dedup_table hash map
 * for further lookups.
4868  */
4869 static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
4870 {
4871 	struct btf_type *cand_type, *t;
4872 	struct hashmap_entry *hash_entry;
4873 	/* if we don't find equivalent type, then we are canonical */
4874 	__u32 new_id = type_id;
4875 	__u16 kind;
4876 	long h;
4877 
4878 	/* already deduped or is in process of deduping (loop detected) */
4879 	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
4880 		return 0;
4881 
4882 	t = btf_type_by_id(d->btf, type_id);
4883 	kind = btf_kind(t);
4884 
4885 	if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
4886 		return 0;
4887 
4888 	h = btf_hash_struct(t);
4889 	for_each_dedup_cand(d, hash_entry, h) {
4890 		__u32 cand_id = hash_entry->value;
4891 		int eq;
4892 
4893 		/*
4894 		 * Even though btf_dedup_is_equiv() checks for
4895 		 * btf_shallow_equal_struct() internally when checking two
4896 		 * structs (unions) for equivalence, we need to guard here
		 * against picking a matching FWD type as a dedup candidate.
		 * This can happen due to a hash collision. In such a case, just
4899 		 * relying on btf_dedup_is_equiv() would lead to potentially
4900 		 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
4901 		 * FWD and compatible STRUCT/UNION are considered equivalent.
4902 		 */
4903 		cand_type = btf_type_by_id(d->btf, cand_id);
4904 		if (!btf_shallow_equal_struct(t, cand_type))
4905 			continue;
4906 
4907 		btf_dedup_clear_hypot_map(d);
4908 		eq = btf_dedup_is_equiv(d, type_id, cand_id);
4909 		if (eq < 0)
4910 			return eq;
4911 		if (!eq)
4912 			continue;
4913 		btf_dedup_merge_hypot_map(d);
4914 		if (d->hypot_adjust_canon) /* not really equivalent */
4915 			continue;
4916 		new_id = cand_id;
4917 		break;
4918 	}
4919 
4920 	d->map[type_id] = new_id;
4921 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
4922 		return -ENOMEM;
4923 
4924 	return 0;
4925 }
4926 
4927 static int btf_dedup_struct_types(struct btf_dedup *d)
4928 {
4929 	int i, err;
4930 
4931 	for (i = 0; i < d->btf->nr_types; i++) {
4932 		err = btf_dedup_struct_type(d, d->btf->start_id + i);
4933 		if (err)
4934 			return err;
4935 	}
4936 	return 0;
4937 }
4938 
4939 /*
4940  * Deduplicate reference type.
4941  *
4942  * Once all primitive and struct/union types got deduplicated, we can easily
4943  * deduplicate all other (reference) BTF types. This is done in two steps:
4944  *
4945  * 1. Resolve all referenced type IDs into their canonical type IDs. This
4946  * resolution can be done either immediately for primitive or struct/union types
4947  * (because they were deduped in previous two phases) or recursively for
4948  * reference types. Recursion will always terminate at either primitive or
4949  * struct/union type, at which point we can "unwind" chain of reference types
4950  * one by one. There is no danger of encountering cycles because in C type
4951  * system the only way to form type cycle is through struct/union, so any chain
4952  * of reference types, even those taking part in a type cycle, will inevitably
4953  * reach struct/union at some point.
4954  *
4955  * 2. Once all referenced type IDs are resolved into canonical ones, BTF type
4956  * becomes "stable", in the sense that no further deduplication will cause
4957  * any changes to it. With that, it's now possible to calculate type's signature
4958  * hash (this time taking into account referenced type IDs) and loop over all
4959  * potential canonical representatives. If no match was found, current type
4960  * will become canonical representative of itself and will be added into
4961  * btf_dedup->dedup_table as another possible canonical representative.
4962  */
4963 static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
4964 {
4965 	struct hashmap_entry *hash_entry;
	/* if we don't find equivalent type, then we are representative type */
	__u32 new_id = type_id, cand_id;
	struct btf_type *t, *cand;
	int ref_type_id;
4970 	long h;
4971 
4972 	if (d->map[type_id] == BTF_IN_PROGRESS_ID)
4973 		return -ELOOP;
4974 	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
4975 		return resolve_type_id(d, type_id);
4976 
4977 	t = btf_type_by_id(d->btf, type_id);
4978 	d->map[type_id] = BTF_IN_PROGRESS_ID;
4979 
4980 	switch (btf_kind(t)) {
4981 	case BTF_KIND_CONST:
4982 	case BTF_KIND_VOLATILE:
4983 	case BTF_KIND_RESTRICT:
4984 	case BTF_KIND_PTR:
4985 	case BTF_KIND_TYPEDEF:
4986 	case BTF_KIND_FUNC:
4987 	case BTF_KIND_TYPE_TAG:
4988 		ref_type_id = btf_dedup_ref_type(d, t->type);
4989 		if (ref_type_id < 0)
4990 			return ref_type_id;
4991 		t->type = ref_type_id;
4992 
4993 		h = btf_hash_common(t);
4994 		for_each_dedup_cand(d, hash_entry, h) {
4995 			cand_id = hash_entry->value;
4996 			cand = btf_type_by_id(d->btf, cand_id);
4997 			if (btf_equal_common(t, cand)) {
4998 				new_id = cand_id;
4999 				break;
5000 			}
5001 		}
5002 		break;
5003 
5004 	case BTF_KIND_DECL_TAG:
5005 		ref_type_id = btf_dedup_ref_type(d, t->type);
5006 		if (ref_type_id < 0)
5007 			return ref_type_id;
5008 		t->type = ref_type_id;
5009 
5010 		h = btf_hash_int_decl_tag(t);
5011 		for_each_dedup_cand(d, hash_entry, h) {
5012 			cand_id = hash_entry->value;
5013 			cand = btf_type_by_id(d->btf, cand_id);
5014 			if (btf_equal_int_tag(t, cand)) {
5015 				new_id = cand_id;
5016 				break;
5017 			}
5018 		}
5019 		break;
5020 
5021 	case BTF_KIND_ARRAY: {
5022 		struct btf_array *info = btf_array(t);
5023 
5024 		ref_type_id = btf_dedup_ref_type(d, info->type);
5025 		if (ref_type_id < 0)
5026 			return ref_type_id;
5027 		info->type = ref_type_id;
5028 
5029 		ref_type_id = btf_dedup_ref_type(d, info->index_type);
5030 		if (ref_type_id < 0)
5031 			return ref_type_id;
5032 		info->index_type = ref_type_id;
5033 
5034 		h = btf_hash_array(t);
5035 		for_each_dedup_cand(d, hash_entry, h) {
5036 			cand_id = hash_entry->value;
5037 			cand = btf_type_by_id(d->btf, cand_id);
5038 			if (btf_equal_array(t, cand)) {
5039 				new_id = cand_id;
5040 				break;
5041 			}
5042 		}
5043 		break;
5044 	}
5045 
5046 	case BTF_KIND_FUNC_PROTO: {
5047 		struct btf_param *param;
5048 		__u16 vlen;
5049 		int i;
5050 
5051 		ref_type_id = btf_dedup_ref_type(d, t->type);
5052 		if (ref_type_id < 0)
5053 			return ref_type_id;
5054 		t->type = ref_type_id;
5055 
5056 		vlen = btf_vlen(t);
5057 		param = btf_params(t);
5058 		for (i = 0; i < vlen; i++) {
5059 			ref_type_id = btf_dedup_ref_type(d, param->type);
5060 			if (ref_type_id < 0)
5061 				return ref_type_id;
5062 			param->type = ref_type_id;
5063 			param++;
5064 		}
5065 
5066 		h = btf_hash_fnproto(t);
5067 		for_each_dedup_cand(d, hash_entry, h) {
5068 			cand_id = hash_entry->value;
5069 			cand = btf_type_by_id(d->btf, cand_id);
5070 			if (btf_equal_fnproto(t, cand)) {
5071 				new_id = cand_id;
5072 				break;
5073 			}
5074 		}
5075 		break;
5076 	}
5077 
5078 	default:
5079 		return -EINVAL;
5080 	}
5081 
5082 	d->map[type_id] = new_id;
5083 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
5084 		return -ENOMEM;
5085 
5086 	return new_id;
5087 }
5088 
5089 static int btf_dedup_ref_types(struct btf_dedup *d)
5090 {
5091 	int i, err;
5092 
5093 	for (i = 0; i < d->btf->nr_types; i++) {
5094 		err = btf_dedup_ref_type(d, d->btf->start_id + i);
5095 		if (err < 0)
5096 			return err;
5097 	}
5098 	/* we won't need d->dedup_table anymore */
5099 	hashmap__free(d->dedup_table);
5100 	d->dedup_table = NULL;
5101 	return 0;
5102 }
5103 
5104 /*
5105  * Collect a map from type names to type ids for all canonical structs
 * and unions. If the same name is shared by several canonical types,
 * the special value 0 is used to indicate this fact.
5108  */
5109 static int btf_dedup_fill_unique_names_map(struct btf_dedup *d, struct hashmap *names_map)
5110 {
5111 	__u32 nr_types = btf__type_cnt(d->btf);
5112 	struct btf_type *t;
5113 	__u32 type_id;
5114 	__u16 kind;
5115 	int err;
5116 
5117 	/*
	 * Iterate over both base and split BTF type ids in order to get all
	 * available structs/unions into the map.
5120 	 */
5121 	for (type_id = 1; type_id < nr_types; ++type_id) {
5122 		t = btf_type_by_id(d->btf, type_id);
5123 		kind = btf_kind(t);
5124 
5125 		if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
5126 			continue;
5127 
5128 		/* Skip non-canonical types */
5129 		if (type_id != d->map[type_id])
5130 			continue;
5131 
5132 		err = hashmap__add(names_map, t->name_off, type_id);
5133 		if (err == -EEXIST)
5134 			err = hashmap__set(names_map, t->name_off, 0, NULL, NULL);
5135 
5136 		if (err)
5137 			return err;
5138 	}
5139 
5140 	return 0;
5141 }
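
/* For instance, with canonical types [3] STRUCT 'foo', [7] UNION 'bar' and
 * [9] STRUCT 'bar', the resulting names_map (keyed by name offset) is:
 *
 *   'foo' -> 3
 *   'bar' -> 0   (ambiguous: shared by [7] and [9])
 *
 * so only the unambiguous 'foo' can later be used to resolve leftover FWDs.
 */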
5142 
5143 static int btf_dedup_resolve_fwd(struct btf_dedup *d, struct hashmap *names_map, __u32 type_id)
5144 {
5145 	struct btf_type *t = btf_type_by_id(d->btf, type_id);
5146 	enum btf_fwd_kind fwd_kind = btf_kflag(t);
5147 	__u16 cand_kind, kind = btf_kind(t);
5148 	struct btf_type *cand_t;
5149 	uintptr_t cand_id;
5150 
5151 	if (kind != BTF_KIND_FWD)
5152 		return 0;
5153 
5154 	/* Skip if this FWD already has a mapping */
5155 	if (type_id != d->map[type_id])
5156 		return 0;
5157 
5158 	if (!hashmap__find(names_map, t->name_off, &cand_id))
5159 		return 0;
5160 
5161 	/* Zero is a special value indicating that name is not unique */
5162 	if (!cand_id)
5163 		return 0;
5164 
5165 	cand_t = btf_type_by_id(d->btf, cand_id);
5166 	cand_kind = btf_kind(cand_t);
5167 	if ((cand_kind == BTF_KIND_STRUCT && fwd_kind != BTF_FWD_STRUCT) ||
5168 	    (cand_kind == BTF_KIND_UNION && fwd_kind != BTF_FWD_UNION))
5169 		return 0;
5170 
5171 	d->map[type_id] = cand_id;
5172 
5173 	return 0;
5174 }
5175 
5176 /*
5177  * Resolve unambiguous forward declarations.
5178  *
 * The lion's share of all FWD declarations is resolved during the
 * `btf_dedup_struct_types` phase, when different type graphs are
 * compared against each other. However, if in some compilation unit a
 * FWD declaration is not a part of any type graph compared against
 * another type graph, that declaration's canonical type would not be
 * changed. Example:
5185  *
5186  * CU #1:
5187  *
5188  * struct foo;
5189  * struct foo *some_global;
5190  *
5191  * CU #2:
5192  *
5193  * struct foo { int u; };
5194  * struct foo *another_global;
5195  *
5196  * After `btf_dedup_struct_types` the BTF looks as follows:
5197  *
5198  * [1] STRUCT 'foo' size=4 vlen=1 ...
5199  * [2] INT 'int' size=4 ...
5200  * [3] PTR '(anon)' type_id=1
5201  * [4] FWD 'foo' fwd_kind=struct
5202  * [5] PTR '(anon)' type_id=4
5203  *
 * This pass assumes that such FWD declarations should be mapped to
 * structs or unions with an identical name, provided that the name is
 * not ambiguous.
5207  */
5208 static int btf_dedup_resolve_fwds(struct btf_dedup *d)
5209 {
5210 	int i, err;
5211 	struct hashmap *names_map;
5212 
5213 	names_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
5214 	if (IS_ERR(names_map))
5215 		return PTR_ERR(names_map);
5216 
5217 	err = btf_dedup_fill_unique_names_map(d, names_map);
5218 	if (err < 0)
5219 		goto exit;
5220 
5221 	for (i = 0; i < d->btf->nr_types; i++) {
5222 		err = btf_dedup_resolve_fwd(d, names_map, d->btf->start_id + i);
5223 		if (err < 0)
5224 			break;
5225 	}
5226 
5227 exit:
5228 	hashmap__free(names_map);
5229 	return err;
5230 }
5231 
5232 /*
5233  * Compact types.
5234  *
 * After we have established for each type its corresponding canonical
 * representative type, we can now eliminate types that are not canonical and
 * leave only canonical ones laid out sequentially in memory by copying them
 * over duplicates. During compaction the btf_dedup->hypot_map array is reused
 * to store a map from original type ID to a new compacted type ID, which will
 * be used during the next phase to "fix up" type IDs referenced from
 * struct/union and reference types.
5242  */
5243 static int btf_dedup_compact_types(struct btf_dedup *d)
5244 {
5245 	__u32 *new_offs;
5246 	__u32 next_type_id = d->btf->start_id;
5247 	const struct btf_type *t;
5248 	void *p;
5249 	int i, id, len;
5250 
5251 	/* we are going to reuse hypot_map to store compaction remapping */
5252 	d->hypot_map[0] = 0;
5253 	/* base BTF types are not renumbered */
5254 	for (id = 1; id < d->btf->start_id; id++)
5255 		d->hypot_map[id] = id;
5256 	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++)
5257 		d->hypot_map[id] = BTF_UNPROCESSED_ID;
5258 
5259 	p = d->btf->types_data;
5260 
5261 	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) {
5262 		if (d->map[id] != id)
5263 			continue;
5264 
5265 		t = btf__type_by_id(d->btf, id);
5266 		len = btf_type_size(t);
5267 		if (len < 0)
5268 			return len;
5269 
5270 		memmove(p, t, len);
5271 		d->hypot_map[id] = next_type_id;
5272 		d->btf->type_offs[next_type_id - d->btf->start_id] = p - d->btf->types_data;
5273 		p += len;
5274 		next_type_id++;
5275 	}
5276 
5277 	/* shrink struct btf's internal types index and update btf_header */
5278 	d->btf->nr_types = next_type_id - d->btf->start_id;
5279 	d->btf->type_offs_cap = d->btf->nr_types;
5280 	d->btf->hdr->type_len = p - d->btf->types_data;
5281 	new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap,
5282 				       sizeof(*new_offs));
5283 	if (d->btf->type_offs_cap && !new_offs)
5284 		return -ENOMEM;
5285 	d->btf->type_offs = new_offs;
5286 	d->btf->hdr->str_off = d->btf->hdr->type_len;
5287 	d->btf->raw_size = d->btf->hdr->hdr_len + d->btf->hdr->type_len + d->btf->hdr->str_len;
5288 	return 0;
5289 }
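
/* A hypothetical compaction example, with d->map = { 1->1, 2->1, 3->3 }:
 * canonical types [1] and [3] are kept, the slot of duplicate [2] is
 * reclaimed, and [3] moves down to become the new [2]. The reused hypot_map
 * ends up as { 1->1, 3->2 }; during the remapping phase a reference to old
 * [2] is first resolved to its canonical [1] and then remapped via
 * hypot_map[1].
 */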
5290 
5291 /*
5292  * Figure out final (deduplicated and compacted) type ID for provided original
5293  * `type_id` by first resolving it into corresponding canonical type ID and
5294  * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
5295  * which is populated during compaction phase.
5296  */
5297 static int btf_dedup_remap_type_id(__u32 *type_id, void *ctx)
5298 {
5299 	struct btf_dedup *d = ctx;
5300 	__u32 resolved_type_id, new_type_id;
5301 
5302 	resolved_type_id = resolve_type_id(d, *type_id);
5303 	new_type_id = d->hypot_map[resolved_type_id];
5304 	if (new_type_id > BTF_MAX_NR_TYPES)
5305 		return -EINVAL;
5306 
5307 	*type_id = new_type_id;
5308 	return 0;
5309 }
5310 
5311 /*
5312  * Remap referenced type IDs into deduped type IDs.
5313  *
5314  * After BTF types are deduplicated and compacted, their final type IDs may
5315  * differ from original ones. The map from original to a corresponding
5316  * deduped type ID is stored in btf_dedup->hypot_map and is populated during
5317  * compaction phase. During remapping phase we are rewriting all type IDs
5318  * referenced from any BTF type (e.g., struct fields, func proto args, etc) to
5319  * their final deduped type IDs.
5320  */
5321 static int btf_dedup_remap_types(struct btf_dedup *d)
5322 {
5323 	int i, r;
5324 
5325 	for (i = 0; i < d->btf->nr_types; i++) {
5326 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
5327 		struct btf_field_iter it;
5328 		__u32 *type_id;
5329 
5330 		r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
5331 		if (r)
5332 			return r;
5333 
5334 		while ((type_id = btf_field_iter_next(&it))) {
5335 			__u32 resolved_id, new_id;
5336 
5337 			resolved_id = resolve_type_id(d, *type_id);
5338 			new_id = d->hypot_map[resolved_id];
5339 			if (new_id > BTF_MAX_NR_TYPES)
5340 				return -EINVAL;
5341 
5342 			*type_id = new_id;
5343 		}
5344 	}
5345 
5346 	if (!d->btf_ext)
5347 		return 0;
5348 
5349 	r = btf_ext_visit_type_ids(d->btf_ext, btf_dedup_remap_type_id, d);
5350 	if (r)
5351 		return r;
5352 
5353 	return 0;
5354 }
5355 
5356 /*
 * Probe a few well-known locations for the vmlinux kernel image and try to
 * load BTF data out of it to use as the target BTF.
5359  */
5360 struct btf *btf__load_vmlinux_btf(void)
5361 {
5362 	const char *sysfs_btf_path = "/sys/kernel/btf/vmlinux";
	/* fallback locations, trying to find vmlinux on disk */
5364 	const char *locations[] = {
5365 		"/boot/vmlinux-%1$s",
5366 		"/lib/modules/%1$s/vmlinux-%1$s",
5367 		"/lib/modules/%1$s/build/vmlinux",
5368 		"/usr/lib/modules/%1$s/kernel/vmlinux",
5369 		"/usr/lib/debug/boot/vmlinux-%1$s",
5370 		"/usr/lib/debug/boot/vmlinux-%1$s.debug",
5371 		"/usr/lib/debug/lib/modules/%1$s/vmlinux",
5372 	};
5373 	char path[PATH_MAX + 1];
5374 	struct utsname buf;
5375 	struct btf *btf;
5376 	int i, err;
5377 
5378 	/* is canonical sysfs location accessible? */
5379 	if (faccessat(AT_FDCWD, sysfs_btf_path, F_OK, AT_EACCESS) < 0) {
5380 		pr_warn("kernel BTF is missing at '%s', was CONFIG_DEBUG_INFO_BTF enabled?\n",
5381 			sysfs_btf_path);
5382 	} else {
5383 		btf = btf_parse_raw_mmap(sysfs_btf_path, NULL);
5384 		if (IS_ERR(btf))
5385 			btf = btf__parse(sysfs_btf_path, NULL);
5386 
5387 		if (!btf) {
5388 			err = -errno;
5389 			pr_warn("failed to read kernel BTF from '%s': %s\n",
5390 				sysfs_btf_path, errstr(err));
5391 			return libbpf_err_ptr(err);
5392 		}
5393 		pr_debug("loaded kernel BTF from '%s'\n", sysfs_btf_path);
5394 		return btf;
5395 	}
5396 
5397 	/* try fallback locations */
5398 	uname(&buf);
5399 	for (i = 0; i < ARRAY_SIZE(locations); i++) {
5400 		snprintf(path, PATH_MAX, locations[i], buf.release);
5401 
5402 		if (faccessat(AT_FDCWD, path, R_OK, AT_EACCESS))
5403 			continue;
5404 
5405 		btf = btf__parse(path, NULL);
5406 		err = libbpf_get_error(btf);
5407 		pr_debug("loading kernel BTF '%s': %s\n", path, errstr(err));
5408 		if (err)
5409 			continue;
5410 
5411 		return btf;
5412 	}
5413 
5414 	pr_warn("failed to find valid kernel BTF\n");
5415 	return libbpf_err_ptr(-ESRCH);
5416 }
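
/* A minimal usage sketch (illustrative only; assumes libbpf 1.0+ error
 * conventions, where NULL is returned and errno is set on failure):
 *
 *	struct btf *vmlinux_btf = btf__load_vmlinux_btf();
 *
 *	if (!vmlinux_btf)
 *		return -errno;
 *	... look up types with btf__find_by_name_kind(), etc ...
 *	btf__free(vmlinux_btf);
 */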
5417 
5418 struct btf *libbpf_find_kernel_btf(void) __attribute__((alias("btf__load_vmlinux_btf")));
5419 
5420 struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf)
5421 {
5422 	char path[80];
5423 
5424 	snprintf(path, sizeof(path), "/sys/kernel/btf/%s", module_name);
5425 	return btf__parse_split(path, vmlinux_btf);
5426 }
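
/* Usage sketch (illustrative; "nf_conntrack" is just an example module):
 *
 *	struct btf *vmlinux_btf = btf__load_vmlinux_btf();
 *	struct btf *mod_btf = btf__load_module_btf("nf_conntrack", vmlinux_btf);
 *
 *	if (!mod_btf)
 *		return -errno;
 *	... module types are split BTF on top of vmlinux BTF, so their type
 *	    IDs start right after the last vmlinux BTF type ID ...
 */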
5427 
5428 int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
5429 {
5430 	const struct btf_ext_info *seg;
5431 	struct btf_ext_info_sec *sec;
5432 	int i, err;
5433 
5434 	seg = &btf_ext->func_info;
5435 	for_each_btf_ext_sec(seg, sec) {
5436 		struct bpf_func_info_min *rec;
5437 
5438 		for_each_btf_ext_rec(seg, sec, i, rec) {
5439 			err = visit(&rec->type_id, ctx);
5440 			if (err < 0)
5441 				return err;
5442 		}
5443 	}
5444 
5445 	seg = &btf_ext->core_relo_info;
5446 	for_each_btf_ext_sec(seg, sec) {
5447 		struct bpf_core_relo *rec;
5448 
5449 		for_each_btf_ext_rec(seg, sec, i, rec) {
5450 			err = visit(&rec->type_id, ctx);
5451 			if (err < 0)
5452 				return err;
5453 		}
5454 	}
5455 
5456 	return 0;
5457 }
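
/* Example visitor (hypothetical, for illustration only): count all type ID
 * references in .BTF.ext func_info and CO-RE relocation records.
 *
 *	static int count_type_ids(__u32 *type_id, void *ctx)
 *	{
 *		(*(int *)ctx)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	int err = btf_ext_visit_type_ids(btf_ext, count_type_ids, &n);
 */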
5458 
5459 int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx)
5460 {
5461 	const struct btf_ext_info *seg;
5462 	struct btf_ext_info_sec *sec;
5463 	int i, err;
5464 
5465 	seg = &btf_ext->func_info;
5466 	for_each_btf_ext_sec(seg, sec) {
5467 		err = visit(&sec->sec_name_off, ctx);
5468 		if (err)
5469 			return err;
5470 	}
5471 
5472 	seg = &btf_ext->line_info;
5473 	for_each_btf_ext_sec(seg, sec) {
5474 		struct bpf_line_info_min *rec;
5475 
5476 		err = visit(&sec->sec_name_off, ctx);
5477 		if (err)
5478 			return err;
5479 
5480 		for_each_btf_ext_rec(seg, sec, i, rec) {
5481 			err = visit(&rec->file_name_off, ctx);
5482 			if (err)
5483 				return err;
5484 			err = visit(&rec->line_off, ctx);
5485 			if (err)
5486 				return err;
5487 		}
5488 	}
5489 
5490 	seg = &btf_ext->core_relo_info;
5491 	for_each_btf_ext_sec(seg, sec) {
5492 		struct bpf_core_relo *rec;
5493 
5494 		err = visit(&sec->sec_name_off, ctx);
5495 		if (err)
5496 			return err;
5497 
5498 		for_each_btf_ext_rec(seg, sec, i, rec) {
5499 			err = visit(&rec->access_str_off, ctx);
5500 			if (err)
5501 				return err;
5502 		}
5503 	}
5504 
5505 	return 0;
5506 }
5507 
5508 struct btf_distill {
5509 	struct btf_pipe pipe;		/* src BTF, dst BTF and string offset map */
5510 	int *id_map;			/* maps src type ID to distilled type ID */
5511 	unsigned int split_start_id;	/* first type ID of src split BTF */
5512 	unsigned int split_start_str;	/* first string offset of src split BTF */
5513 	int diff_id;			/* ID shift for unmapped split type IDs */
5514 };
5515 
5516 static int btf_add_distilled_type_ids(struct btf_distill *dist, __u32 i)
5517 {
5518 	struct btf_type *split_t = btf_type_by_id(dist->pipe.src, i);
5519 	struct btf_field_iter it;
5520 	__u32 *id;
5521 	int err;
5522 
5523 	err = btf_field_iter_init(&it, split_t, BTF_FIELD_ITER_IDS);
5524 	if (err)
5525 		return err;
5526 	while ((id = btf_field_iter_next(&it))) {
5527 		struct btf_type *base_t;
5528 
5529 		if (!*id)
5530 			continue;
5531 		/* split BTF id, not needed */
5532 		if (*id >= dist->split_start_id)
5533 			continue;
5534 		/* already added? */
5535 		if (dist->id_map[*id] > 0)
5536 			continue;
5537 
5538 		/* only a subset of base BTF types should be referenced from
5539 		 * split BTF; ensure nothing unexpected is referenced.
5540 		 */
5541 		base_t = btf_type_by_id(dist->pipe.src, *id);
5542 		switch (btf_kind(base_t)) {
5543 		case BTF_KIND_INT:
5544 		case BTF_KIND_FLOAT:
5545 		case BTF_KIND_FWD:
5546 		case BTF_KIND_ARRAY:
5547 		case BTF_KIND_STRUCT:
5548 		case BTF_KIND_UNION:
5549 		case BTF_KIND_TYPEDEF:
5550 		case BTF_KIND_ENUM:
5551 		case BTF_KIND_ENUM64:
5552 		case BTF_KIND_PTR:
5553 		case BTF_KIND_CONST:
5554 		case BTF_KIND_RESTRICT:
5555 		case BTF_KIND_VOLATILE:
5556 		case BTF_KIND_FUNC_PROTO:
5557 		case BTF_KIND_TYPE_TAG:
5558 			dist->id_map[*id] = *id;
5559 			break;
5560 		default:
5561 			pr_warn("unexpected reference to base type[%u] of kind [%u] when creating distilled base BTF.\n",
5562 				*id, btf_kind(base_t));
5563 			return -EINVAL;
5564 		}
5565 		/* If a base type is used, ensure the types it refers to are
5566 		 * also marked as used; for example, if we find a PTR to an
5567 		 * INT, we need both the PTR and the INT.
5568 		 *
5569 		 * The only exception is named structs/unions, since distilled
5570 		 * base BTF composite types have no members.
5571 		 */
5572 		if (btf_is_composite(base_t) && base_t->name_off)
5573 			continue;
5574 		err = btf_add_distilled_type_ids(dist, *id);
5575 		if (err)
5576 			return err;
5577 	}
5578 	return 0;
5579 }
5580 
5581 static int btf_add_distilled_types(struct btf_distill *dist)
5582 {
5583 	bool adding_to_base = dist->pipe.dst->start_id == 1;
5584 	int id = btf__type_cnt(dist->pipe.dst);
5585 	struct btf_type *t;
5586 	int i, err = 0;
5587 
5589 	/* Add types for each of the required references to either distilled
5590 	 * base or split BTF, depending on type characteristics.
5591 	 */
5592 	for (i = 1; i < dist->split_start_id; i++) {
5593 		const char *name;
5594 		int kind;
5595 
5596 		if (!dist->id_map[i])
5597 			continue;
5598 		t = btf_type_by_id(dist->pipe.src, i);
5599 		kind = btf_kind(t);
5600 		name = btf__name_by_offset(dist->pipe.src, t->name_off);
5601 
5602 		switch (kind) {
5603 		case BTF_KIND_INT:
5604 		case BTF_KIND_FLOAT:
5605 		case BTF_KIND_FWD:
5606 			/* Named int, float, fwd are added to base. */
5607 			if (!adding_to_base)
5608 				continue;
5609 			err = btf_add_type(&dist->pipe, t);
5610 			break;
5611 		case BTF_KIND_STRUCT:
5612 		case BTF_KIND_UNION:
5613 			/* Named structs/unions are added to base BTF as 0-vlen
5614 			 * structs/unions of the same size. Anonymous
5615 			 * structs/unions are added to split BTF as-is.
5616 			 */
5617 			if (adding_to_base) {
5618 				if (!t->name_off)
5619 					continue;
5620 				err = btf_add_composite(dist->pipe.dst, kind, name, t->size);
5621 			} else {
5622 				if (t->name_off)
5623 					continue;
5624 				err = btf_add_type(&dist->pipe, t);
5625 			}
5626 			break;
5627 		case BTF_KIND_ENUM:
5628 		case BTF_KIND_ENUM64:
5629 			/* Named enum[64]s are added to base as a sized
5630 			 * enum; relocation will match with appropriately-named
5631 			 * and sized enum or enum64.
5632 			 *
5633 			 * Anonymous enums are added to split BTF as-is.
5634 			 */
5635 			if (adding_to_base) {
5636 				if (!t->name_off)
5637 					continue;
5638 				err = btf__add_enum(dist->pipe.dst, name, t->size);
5639 			} else {
5640 				if (t->name_off)
5641 					continue;
5642 				err = btf_add_type(&dist->pipe, t);
5643 			}
5644 			break;
5645 		case BTF_KIND_ARRAY:
5646 		case BTF_KIND_TYPEDEF:
5647 		case BTF_KIND_PTR:
5648 		case BTF_KIND_CONST:
5649 		case BTF_KIND_RESTRICT:
5650 		case BTF_KIND_VOLATILE:
5651 		case BTF_KIND_FUNC_PROTO:
5652 		case BTF_KIND_TYPE_TAG:
5653 			/* All other types are added to split BTF. */
5654 			if (adding_to_base)
5655 				continue;
5656 			err = btf_add_type(&dist->pipe, t);
5657 			break;
5658 		default:
5659 			pr_warn("unexpected kind when adding base type '%s'[%u] of kind [%u] to distilled base BTF.\n",
5660 				name, i, kind);
5661 			return -EINVAL;
5663 		}
5664 		if (err < 0)
5665 			break;
5666 		dist->id_map[i] = id++;
5667 	}
5668 	return err;
5669 }
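
/* For example (illustrative): a named base BTF struct with many members,
 * say a 'struct sk_buff' of size 232, becomes a zero-member struct of the
 * same name and size in distilled base BTF, while an anonymous struct
 * referenced from split BTF is copied into split BTF in full.
 */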
5670 
5671 /* Split BTF ids without a mapping will be shifted downwards since distilled
5672  * base BTF is smaller than the original base BTF.  For those that have a
5673  * mapping (either to base or updated split BTF), update the id based on
5674  * that mapping.
5675  */
5676 static int btf_update_distilled_type_ids(struct btf_distill *dist, __u32 i)
5677 {
5678 	struct btf_type *t = btf_type_by_id(dist->pipe.dst, i);
5679 	struct btf_field_iter it;
5680 	__u32 *id;
5681 	int err;
5682 
5683 	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
5684 	if (err)
5685 		return err;
5686 	while ((id = btf_field_iter_next(&it))) {
5687 		if (dist->id_map[*id])
5688 			*id = dist->id_map[*id];
5689 		else if (*id >= dist->split_start_id)
5690 			*id -= dist->diff_id;
5691 	}
5692 	return 0;
5693 }
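
/* Illustrative numbers: if the original base BTF covered IDs [1, 100]
 * (split_start_id == 101) and distilled base BTF ends up with 20 types
 * (btf__type_cnt() == 21), then diff_id == 80; an unmapped split BTF ID
 * such as 150 is rewritten to 70, while mapped IDs take their id_map value.
 */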
5694 
5695 /* Create updated split BTF with distilled base BTF; distilled base BTF
5696  * consists of BTF information required to clarify the types that split
5697  * BTF refers to, omitting unneeded details.  Specifically it will contain
5698  * base types and memberless definitions of named structs, unions and enumerated
5699  * types. Associated reference types like pointers, arrays and anonymous
5700  * structs, unions and enumerated types will be added to split BTF.
5701  * Size is recorded for named struct/unions to help guide matching to the
5702  * target base BTF during later relocation.
5703  *
5704  * The only case where structs, unions or enumerated types are fully represented
5705  * is when they are anonymous; in such cases, the anonymous type is added to
5706  * split BTF in full.
5707  *
5708  * On success, we return newly-created split BTF that refers to a newly-created
5709  * distilled base BTF; both must be freed separately by the caller.
5710  */
5711 int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf,
5712 		      struct btf **new_split_btf)
5713 {
5714 	struct btf *new_base = NULL, *new_split = NULL;
5715 	const struct btf *old_base;
5716 	unsigned int n = btf__type_cnt(src_btf);
5717 	struct btf_distill dist = {};
5718 	struct btf_type *t;
5719 	int i, err = 0;
5720 
5721 	/* src BTF must be split BTF. */
5722 	old_base = btf__base_btf(src_btf);
5723 	if (!new_base_btf || !new_split_btf || !old_base)
5724 		return libbpf_err(-EINVAL);
5725 
5726 	new_base = btf__new_empty();
5727 	if (!new_base)
5728 		return libbpf_err(-ENOMEM);
5729 
5730 	btf__set_endianness(new_base, btf__endianness(src_btf));
5731 
5732 	dist.id_map = calloc(n, sizeof(*dist.id_map));
5733 	if (!dist.id_map) {
5734 		err = -ENOMEM;
5735 		goto done;
5736 	}
5737 	dist.pipe.src = src_btf;
5738 	dist.pipe.dst = new_base;
5739 	dist.pipe.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
5740 	if (IS_ERR(dist.pipe.str_off_map)) {
5741 		err = -ENOMEM;
5742 		goto done;
5743 	}
5744 	dist.split_start_id = btf__type_cnt(old_base);
5745 	dist.split_start_str = old_base->hdr->str_len;
5746 
5747 	/* Pass over src split BTF; generate the list of base BTF type ids it
5748 	 * references; these will constitute our distilled BTF set to be
5749 	 * distributed over base and split BTF as appropriate.
5750 	 */
5751 	for (i = src_btf->start_id; i < n; i++) {
5752 		err = btf_add_distilled_type_ids(&dist, i);
5753 		if (err < 0)
5754 			goto done;
5755 	}
5756 	/* Next add types for each of the required references to base BTF and split BTF
5757 	 * in turn.
5758 	 */
5759 	err = btf_add_distilled_types(&dist);
5760 	if (err < 0)
5761 		goto done;
5762 
5763 	/* Create new split BTF with distilled base BTF as its base; the final
5764 	 * state is split BTF with distilled base BTF that represents enough
5765 	 * about its base references to allow it to be relocated with the base
5766 	 * BTF available.
5767 	 */
5768 	new_split = btf__new_empty_split(new_base);
5769 	if (!new_split) {
5770 		err = -errno;
5771 		goto done;
5772 	}
5773 	dist.pipe.dst = new_split;
5774 	/* First add all split types */
5775 	for (i = src_btf->start_id; i < n; i++) {
5776 		t = btf_type_by_id(src_btf, i);
5777 		err = btf_add_type(&dist.pipe, t);
5778 		if (err < 0)
5779 			goto done;
5780 	}
5781 	/* Now add distilled types to split BTF that are not added to base. */
5782 	err = btf_add_distilled_types(&dist);
5783 	if (err < 0)
5784 		goto done;
5785 
5786 	/* All split BTF ids will be shifted downwards since there are fewer base
5787 	 * BTF ids in distilled base BTF.
5788 	 */
5789 	dist.diff_id = dist.split_start_id - btf__type_cnt(new_base);
5790 
5791 	n = btf__type_cnt(new_split);
5792 	/* Now update base/split BTF ids. */
5793 	for (i = 1; i < n; i++) {
5794 		err = btf_update_distilled_type_ids(&dist, i);
5795 		if (err < 0)
5796 			break;
5797 	}
5798 done:
5799 	free(dist.id_map);
5800 	hashmap__free(dist.pipe.str_off_map);
5801 	if (err) {
5802 		btf__free(new_split);
5803 		btf__free(new_base);
5804 		return libbpf_err(err);
5805 	}
5806 	*new_base_btf = new_base;
5807 	*new_split_btf = new_split;
5808 
5809 	return 0;
5810 }
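
/* A minimal usage sketch (illustrative; 'mod_btf' is assumed to be split
 * BTF parsed earlier on top of vmlinux BTF):
 *
 *	struct btf *dist_base = NULL, *dist_split = NULL;
 *
 *	if (btf__distill_base(mod_btf, &dist_base, &dist_split))
 *		return -errno;
 *	... serialize both via btf__raw_data() for later relocation ...
 *	btf__free(dist_split);
 *	btf__free(dist_base);
 */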
5811 
5812 const struct btf_header *btf_header(const struct btf *btf)
5813 {
5814 	return btf->hdr;
5815 }
5816 
5817 void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
5818 {
5819 	btf->base_btf = (struct btf *)base_btf;
5820 	btf->start_id = btf__type_cnt(base_btf);
5821 	btf->start_str_off = base_btf->hdr->str_len;
5822 }
5823 
5824 int btf__relocate(struct btf *btf, const struct btf *base_btf)
5825 {
5826 	int err = btf_relocate(btf, base_btf, NULL);
5827 
5828 	if (!err)
5829 		btf->owns_base = false;
5830 	return libbpf_err(err);
5831 }
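
/* Relocation sketch (illustrative; 'split_btf' is assumed to be split BTF
 * whose current base is distilled base BTF):
 *
 *	struct btf *vmlinux_btf = btf__load_vmlinux_btf();
 *
 *	if (!vmlinux_btf)
 *		return -errno;
 *	if (btf__relocate(split_btf, vmlinux_btf))
 *		return -errno;
 *	... split_btf is now based on vmlinux_btf, with type IDs rewritten ...
 */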
5832