// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2018 Facebook */

#include <byteswap.h>
#include <endian.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <sys/utsname.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/btf.h>
#include <gelf.h>
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "strset.h"

#define BTF_MAX_NR_TYPES 0x7fffffffU
#define BTF_MAX_STR_OFFSET 0x7fffffffU

static struct btf_type btf_void;

struct btf {
	/* raw BTF data in native endianness */
	void *raw_data;
	/* raw BTF data in non-native endianness */
	void *raw_data_swapped;
	__u32 raw_size;
	/* whether target endianness differs from the native one */
	bool swapped_endian;

	/*
	 * When BTF is loaded from an ELF or raw memory it is stored
	 * in a contiguous memory block. The hdr, types_data, and strs_data
	 * pointers point inside that memory region to their respective parts
	 * of the BTF representation:
	 *
	 * +--------------------------------+
	 * |  Header  |  Types  |  Strings  |
	 * +--------------------------------+
	 * ^          ^         ^
	 * |          |         |
	 * hdr        |         |
	 * types_data-+         |
	 * strs_data------------+
	 *
	 * If BTF data is later modified, e.g., due to types being added or
	 * removed, BTF deduplication being performed, etc., this contiguous
	 * representation is broken up into three independently allocated
	 * memory regions so that they can be modified independently.
	 * raw_data is nulled out at that point, but can be later allocated
	 * and cached again if the user calls btf__raw_data(), at which point
	 * raw_data will contain a contiguous copy of header, types, and
	 * strings:
	 *
	 * +----------+  +---------+  +-----------+
	 * |  Header  |  |  Types  |  |  Strings  |
	 * +----------+  +---------+  +-----------+
	 * ^             ^            ^
	 * |             |            |
	 * hdr           |            |
	 * types_data----+            |
	 * strset__data(strs_set)-----+
	 *
	 *               +----------+---------+-----------+
	 *               |  Header  |  Types  |  Strings  |
	 * raw_data----->+----------+---------+-----------+
	 */
	struct btf_header *hdr;

	void *types_data;
	size_t types_data_cap; /* used size stored in hdr->type_len */

	/* type ID to `struct btf_type *` lookup index
	 * type_offs[0] corresponds to the first non-VOID type:
	 *   - for base BTF it's type [1];
	 *   - for split BTF it's the first non-base BTF type.
	 */
	__u32 *type_offs;
	size_t type_offs_cap;
	/* number of types in this BTF instance:
	 *   - doesn't include special [0] void type;
	 *   - for split BTF counts number of types added on top of base BTF.
	 */
	__u32 nr_types;
	/* if not NULL, points to the base BTF on top of which the current
	 * split BTF is based
	 */
	struct btf *base_btf;
	/* BTF type ID of the first type in this BTF instance:
	 *   - for base BTF it's equal to 1;
	 *   - for split BTF it's equal to the biggest type ID of base BTF plus 1.
	 */
	int start_id;
	/* logical string offset of this BTF instance:
	 *   - for base BTF it's equal to 0;
	 *   - for split BTF it's equal to the total size of base BTF's string section.
	 */
	int start_str_off;

	/* only one of strs_data or strs_set can be non-NULL, depending on
	 * whether BTF is in a modifiable state (strs_set is used) or not
	 * (strs_data points inside raw_data)
	 */
	void *strs_data;
	/* a set of unique strings */
	struct strset *strs_set;
	/* whether strings are already deduplicated */
	bool strs_deduped;

	/* whether base_btf should be freed in btf_free for this instance */
	bool owns_base;

	/* BTF object FD, if loaded into kernel */
	int fd;

	/* Pointer size (in bytes) for a target architecture of this BTF */
	int ptr_sz;
};

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

/* Ensure that the dynamically allocated memory region pointed to by *data*,
 * with a capacity of *cap_cnt* elements, each taking *elem_sz* bytes, has
 * enough memory to accommodate *add_cnt* new elements, assuming *cur_cnt*
 * elements are already used. At most *max_cnt* elements can ever be
 * allocated. If necessary, memory is reallocated and all existing data is
 * copied over; the new pointer to the memory region is stored at *data* and
 * the new memory region capacity (in number of elements) is stored in
 * *cap_cnt*.
 * On success, a pointer to the beginning of unused memory is returned.
 * On error, NULL is returned.
 */
void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
		     size_t cur_cnt, size_t max_cnt, size_t add_cnt)
{
	size_t new_cnt;
	void *new_data;

	if (cur_cnt + add_cnt <= *cap_cnt)
		return *data + cur_cnt * elem_sz;

	/* requested more than the set limit */
	if (cur_cnt + add_cnt > max_cnt)
		return NULL;

	new_cnt = *cap_cnt;
	new_cnt += new_cnt / 4;		  /* expand by 25% */
	if (new_cnt < 16)		  /* but at least 16 elements */
		new_cnt = 16;
	if (new_cnt > max_cnt)		  /* but not exceeding a set limit */
		new_cnt = max_cnt;
	if (new_cnt < cur_cnt + add_cnt)  /* also ensure we have enough memory */
		new_cnt = cur_cnt + add_cnt;

	new_data = libbpf_reallocarray(*data, new_cnt, elem_sz);
	if (!new_data)
		return NULL;

	/* zero out newly allocated portion of memory */
	memset(new_data + (*cap_cnt) * elem_sz, 0, (new_cnt - *cap_cnt) * elem_sz);

	*data = new_data;
	*cap_cnt = new_cnt;
	return new_data + cur_cnt * elem_sz;
}

/* Ensure given dynamically allocated memory region has enough allocated space
 * to accommodate *need_cnt* elements of size *elem_sz* bytes each
 */
int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
{
	void *p;

	if (need_cnt <= *cap_cnt)
		return 0;

	p = libbpf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt);
	if (!p)
		return -ENOMEM;

	return 0;
}
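
/* A minimal usage sketch for the two helpers above (illustrative only, not
 * part of libbpf): growing a hypothetical dynamic array of __u32 IDs; the
 * ids/cap/cnt variables are assumptions made up for this example.
 *
 *	__u32 *ids = NULL, *slot;
 *	size_t cap = 0, cnt = 0;
 *
 *	// make room for one more element, allowing at most 1024 in total
 *	slot = libbpf_add_mem((void **)&ids, &cap, sizeof(*ids), cnt, 1024, 1);
 *	if (!slot)
 *		return -ENOMEM;
 *	*slot = 42;
 *	cnt++;
 *
 *	// or simply guarantee capacity for at least 128 elements
 *	if (libbpf_ensure_mem((void **)&ids, &cap, sizeof(*ids), 128))
 *		return -ENOMEM;
 */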

static void *btf_add_type_offs_mem(struct btf *btf, size_t add_cnt)
{
	return libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
			      btf->nr_types, BTF_MAX_NR_TYPES, add_cnt);
}

static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
{
	__u32 *p;

	p = btf_add_type_offs_mem(btf, 1);
	if (!p)
		return -ENOMEM;

	*p = type_off;
	return 0;
}

static void btf_bswap_hdr(struct btf_header *h)
{
	h->magic = bswap_16(h->magic);
	h->hdr_len = bswap_32(h->hdr_len);
	h->type_off = bswap_32(h->type_off);
	h->type_len = bswap_32(h->type_len);
	h->str_off = bswap_32(h->str_off);
	h->str_len = bswap_32(h->str_len);
}

static int btf_parse_hdr(struct btf *btf)
{
	struct btf_header *hdr = btf->hdr;
	__u32 meta_left;

	if (btf->raw_size < sizeof(struct btf_header)) {
		pr_debug("BTF header not found\n");
		return -EINVAL;
	}

	if (hdr->magic == bswap_16(BTF_MAGIC)) {
		btf->swapped_endian = true;
		if (bswap_32(hdr->hdr_len) != sizeof(struct btf_header)) {
			pr_warn("Can't load BTF with non-native endianness due to unsupported header length %u\n",
				bswap_32(hdr->hdr_len));
			return -ENOTSUP;
		}
		btf_bswap_hdr(hdr);
	} else if (hdr->magic != BTF_MAGIC) {
		pr_debug("Invalid BTF magic: %x\n", hdr->magic);
		return -EINVAL;
	}

	if (btf->raw_size < hdr->hdr_len) {
		pr_debug("BTF header len %u larger than data size %u\n",
			 hdr->hdr_len, btf->raw_size);
		return -EINVAL;
	}

	meta_left = btf->raw_size - hdr->hdr_len;
	if (meta_left < (long long)hdr->str_off + hdr->str_len) {
		pr_debug("Invalid BTF total size: %u\n", btf->raw_size);
		return -EINVAL;
	}

	if ((long long)hdr->type_off + hdr->type_len > hdr->str_off) {
		pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n",
			 hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len);
		return -EINVAL;
	}

	if (hdr->type_off % 4) {
		pr_debug("BTF type section is not aligned to 4 bytes\n");
		return -EINVAL;
	}

	return 0;
}

static int btf_parse_str_sec(struct btf *btf)
{
	const struct btf_header *hdr = btf->hdr;
	const char *start = btf->strs_data;
	const char *end = start + btf->hdr->str_len;

	if (btf->base_btf && hdr->str_len == 0)
		return 0;
	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || end[-1]) {
		pr_debug("Invalid BTF string section\n");
		return -EINVAL;
	}
	if (!btf->base_btf && start[0]) {
		pr_debug("Invalid BTF string section\n");
		return -EINVAL;
	}
	return 0;
}

static int btf_type_size(const struct btf_type *t)
{
	const int base_size = sizeof(struct btf_type);
	__u16 vlen = btf_vlen(t);

	switch (btf_kind(t)) {
	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_FLOAT:
	case BTF_KIND_TYPE_TAG:
		return base_size;
	case BTF_KIND_INT:
		return base_size + sizeof(__u32);
	case BTF_KIND_ENUM:
		return base_size + vlen * sizeof(struct btf_enum);
	case BTF_KIND_ENUM64:
		return base_size + vlen * sizeof(struct btf_enum64);
	case BTF_KIND_ARRAY:
		return base_size + sizeof(struct btf_array);
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		return base_size + vlen * sizeof(struct btf_member);
	case BTF_KIND_FUNC_PROTO:
		return base_size + vlen * sizeof(struct btf_param);
	case BTF_KIND_VAR:
		return base_size + sizeof(struct btf_var);
	case BTF_KIND_DATASEC:
		return base_size + vlen * sizeof(struct btf_var_secinfo);
	case BTF_KIND_DECL_TAG:
		return base_size + sizeof(struct btf_decl_tag);
	default:
		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
		return -EINVAL;
	}
}

static void btf_bswap_type_base(struct btf_type *t)
{
	t->name_off = bswap_32(t->name_off);
	t->info = bswap_32(t->info);
	t->type = bswap_32(t->type);
}

static int btf_bswap_type_rest(struct btf_type *t)
{
	struct btf_var_secinfo *v;
	struct btf_enum64 *e64;
	struct btf_member *m;
	struct btf_array *a;
	struct btf_param *p;
	struct btf_enum *e;
	__u16 vlen = btf_vlen(t);
	int i;

	switch (btf_kind(t)) {
	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_FLOAT:
	case BTF_KIND_TYPE_TAG:
		return 0;
	case BTF_KIND_INT:
		*(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1));
		return 0;
	case BTF_KIND_ENUM:
		for (i = 0, e = btf_enum(t); i < vlen; i++, e++) {
			e->name_off = bswap_32(e->name_off);
			e->val = bswap_32(e->val);
		}
		return 0;
	case BTF_KIND_ENUM64:
		for (i = 0, e64 = btf_enum64(t); i < vlen; i++, e64++) {
			e64->name_off = bswap_32(e64->name_off);
			e64->val_lo32 = bswap_32(e64->val_lo32);
			e64->val_hi32 = bswap_32(e64->val_hi32);
		}
		return 0;
	case BTF_KIND_ARRAY:
		a = btf_array(t);
		a->type = bswap_32(a->type);
		a->index_type = bswap_32(a->index_type);
		a->nelems = bswap_32(a->nelems);
		return 0;
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		for (i = 0, m = btf_members(t); i < vlen; i++, m++) {
			m->name_off = bswap_32(m->name_off);
			m->type = bswap_32(m->type);
			m->offset = bswap_32(m->offset);
		}
		return 0;
	case BTF_KIND_FUNC_PROTO:
		for (i = 0, p = btf_params(t); i < vlen; i++, p++) {
			p->name_off = bswap_32(p->name_off);
			p->type = bswap_32(p->type);
		}
		return 0;
	case BTF_KIND_VAR:
		btf_var(t)->linkage = bswap_32(btf_var(t)->linkage);
		return 0;
	case BTF_KIND_DATASEC:
		for (i = 0, v = btf_var_secinfos(t); i < vlen; i++, v++) {
			v->type = bswap_32(v->type);
			v->offset = bswap_32(v->offset);
			v->size = bswap_32(v->size);
		}
		return 0;
	case BTF_KIND_DECL_TAG:
		btf_decl_tag(t)->component_idx = bswap_32(btf_decl_tag(t)->component_idx);
		return 0;
	default:
		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
		return -EINVAL;
	}
}

static int btf_parse_type_sec(struct btf *btf)
{
	struct btf_header *hdr = btf->hdr;
	void *next_type = btf->types_data;
	void *end_type = next_type + hdr->type_len;
	int err, type_size;

	while (next_type + sizeof(struct btf_type) <= end_type) {
		if (btf->swapped_endian)
			btf_bswap_type_base(next_type);

		type_size = btf_type_size(next_type);
		if (type_size < 0)
			return type_size;
		if (next_type + type_size > end_type) {
			pr_warn("BTF type [%d] is malformed\n", btf->start_id + btf->nr_types);
			return -EINVAL;
		}

		if (btf->swapped_endian && btf_bswap_type_rest(next_type))
			return -EINVAL;

		err = btf_add_type_idx_entry(btf, next_type - btf->types_data);
		if (err)
			return err;

		next_type += type_size;
		btf->nr_types++;
	}

	if (next_type != end_type) {
		pr_warn("BTF types data is malformed\n");
		return -EINVAL;
	}

	return 0;
}

static int btf_validate_str(const struct btf *btf, __u32 str_off, const char *what, __u32 type_id)
{
	const char *s;

	s = btf__str_by_offset(btf, str_off);
	if (!s) {
		pr_warn("btf: type [%u]: invalid %s (string offset %u)\n", type_id, what, str_off);
		return -EINVAL;
	}

	return 0;
}

static int btf_validate_id(const struct btf *btf, __u32 id, __u32 ctx_id)
{
	const struct btf_type *t;

	t = btf__type_by_id(btf, id);
	if (!t) {
		pr_warn("btf: type [%u]: invalid referenced type ID %u\n", ctx_id, id);
		return -EINVAL;
	}

	return 0;
}

static int btf_validate_type(const struct btf *btf, const struct btf_type *t, __u32 id)
{
	__u32 kind = btf_kind(t);
	int err, i, n;

	err = btf_validate_str(btf, t->name_off, "type name", id);
	if (err)
		return err;

	switch (kind) {
	case BTF_KIND_UNKN:
	case BTF_KIND_INT:
	case BTF_KIND_FWD:
	case BTF_KIND_FLOAT:
		break;
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_VAR:
	case BTF_KIND_DECL_TAG:
	case BTF_KIND_TYPE_TAG:
		err = btf_validate_id(btf, t->type, id);
		if (err)
			return err;
		break;
	case BTF_KIND_ARRAY: {
		const struct btf_array *a = btf_array(t);

		err = btf_validate_id(btf, a->type, id);
		err = err ?: btf_validate_id(btf, a->index_type, id);
		if (err)
			return err;
		break;
	}
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *m = btf_members(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "field name", id);
			err = err ?: btf_validate_id(btf, m->type, id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_ENUM: {
		const struct btf_enum *m = btf_enum(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "enum name", id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_ENUM64: {
		const struct btf_enum64 *m = btf_enum64(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "enum name", id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_FUNC: {
		const struct btf_type *ft;

		err = btf_validate_id(btf, t->type, id);
		if (err)
			return err;
		ft = btf__type_by_id(btf, t->type);
		if (btf_kind(ft) != BTF_KIND_FUNC_PROTO) {
			pr_warn("btf: type [%u]: referenced type [%u] is not FUNC_PROTO\n", id, t->type);
			return -EINVAL;
		}
		break;
	}
	case BTF_KIND_FUNC_PROTO: {
		const struct btf_param *m = btf_params(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "param name", id);
			err = err ?: btf_validate_id(btf, m->type, id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_DATASEC: {
		const struct btf_var_secinfo *m = btf_var_secinfos(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_id(btf, m->type, id);
			if (err)
				return err;
		}
		break;
	}
	default:
		pr_warn("btf: type [%u]: unrecognized kind %u\n", id, kind);
		return -EINVAL;
	}
	return 0;
}

/* Validate basic sanity of BTF. It's intentionally less thorough than the
 * kernel's validation and validates only properties of BTF that libbpf relies
 * on to be correct (e.g., valid type IDs, valid string offsets, etc.)
 */
static int btf_sanity_check(const struct btf *btf)
{
	const struct btf_type *t;
	__u32 i, n = btf__type_cnt(btf);
	int err;

	for (i = btf->start_id; i < n; i++) {
		t = btf_type_by_id(btf, i);
		err = btf_validate_type(btf, t, i);
		if (err)
			return err;
	}
	return 0;
}

__u32 btf__type_cnt(const struct btf *btf)
{
	return btf->start_id + btf->nr_types;
}

const struct btf *btf__base_btf(const struct btf *btf)
{
	return btf->base_btf;
}

/* internal helper returning non-const pointer to a type */
struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id)
{
	if (type_id == 0)
		return &btf_void;
	if (type_id < btf->start_id)
		return btf_type_by_id(btf->base_btf, type_id);
	return btf->types_data + btf->type_offs[type_id - btf->start_id];
}

const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
{
	if (type_id >= btf->start_id + btf->nr_types)
		return errno = EINVAL, NULL;
	return btf_type_by_id((struct btf *)btf, type_id);
}
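
/* A small sketch (not part of libbpf) of how these accessors are commonly
 * combined to walk every type, including base BTF types in the split case:
 *
 *	__u32 id, n = btf__type_cnt(btf);
 *
 *	for (id = 1; id < n; id++) {
 *		const struct btf_type *t = btf__type_by_id(btf, id);
 *		const char *name = btf__name_by_offset(btf, t->name_off);
 *
 *		printf("[%u] kind %u name '%s'\n", id, btf_kind(t), name ?: "");
 *	}
 */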

static int determine_ptr_size(const struct btf *btf)
{
	static const char * const long_aliases[] = {
		"long",
		"long int",
		"int long",
		"unsigned long",
		"long unsigned",
		"unsigned long int",
		"unsigned int long",
		"long unsigned int",
		"long int unsigned",
		"int unsigned long",
		"int long unsigned",
	};
	const struct btf_type *t;
	const char *name;
	int i, j, n;

	if (btf->base_btf && btf->base_btf->ptr_sz > 0)
		return btf->base_btf->ptr_sz;

	n = btf__type_cnt(btf);
	for (i = 1; i < n; i++) {
		t = btf__type_by_id(btf, i);
		if (!btf_is_int(t))
			continue;

		if (t->size != 4 && t->size != 8)
			continue;

		name = btf__name_by_offset(btf, t->name_off);
		if (!name)
			continue;

		for (j = 0; j < ARRAY_SIZE(long_aliases); j++) {
			if (strcmp(name, long_aliases[j]) == 0)
				return t->size;
		}
	}

	return -1;
}

static size_t btf_ptr_sz(const struct btf *btf)
{
	if (!btf->ptr_sz)
		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
	return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
}

/* Return the pointer size this BTF instance assumes. The size is
 * heuristically determined by looking for a 'long' or 'unsigned long' integer
 * type and recording its size in bytes. If BTF type information doesn't have
 * any such type, this function returns 0. In the latter case, the native
 * architecture's pointer size is assumed, so it will be either 4 or 8,
 * depending on the architecture that libbpf was compiled for. It's possible
 * to override the guessed value by using the btf__set_pointer_size() API.
 */
size_t btf__pointer_size(const struct btf *btf)
{
	if (!btf->ptr_sz)
		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);

	if (btf->ptr_sz < 0)
		/* not enough BTF type info to guess */
		return 0;

	return btf->ptr_sz;
}

/* Override or set pointer size in bytes. Only values of 4 and 8 are
 * supported.
 */
int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
{
	if (ptr_sz != 4 && ptr_sz != 8)
		return libbpf_err(-EINVAL);
	btf->ptr_sz = ptr_sz;
	return 0;
}

static bool is_host_big_endian(void)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return false;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return true;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
}

enum btf_endianness btf__endianness(const struct btf *btf)
{
	if (is_host_big_endian())
		return btf->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
	else
		return btf->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
}

int btf__set_endianness(struct btf *btf, enum btf_endianness endian)
{
	if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
		return libbpf_err(-EINVAL);

	btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
	if (!btf->swapped_endian) {
		free(btf->raw_data_swapped);
		btf->raw_data_swapped = NULL;
	}
	return 0;
}
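
/* Usage sketch (illustrative, assuming `btf` is a valid instance): producing
 * raw BTF bytes in a specific target endianness regardless of the host:
 *
 *	__u32 sz;
 *	const void *raw;
 *
 *	if (btf__set_endianness(btf, BTF_BIG_ENDIAN))
 *		return -EINVAL;
 *	raw = btf__raw_data(btf, &sz);	// big-endian representation
 */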

static bool btf_type_is_void(const struct btf_type *t)
{
	return t == &btf_void || btf_is_fwd(t);
}

static bool btf_type_is_void_or_null(const struct btf_type *t)
{
	return !t || btf_type_is_void(t);
}

#define MAX_RESOLVE_DEPTH 32

__s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
{
	const struct btf_array *array;
	const struct btf_type *t;
	__u32 nelems = 1;
	__s64 size = -1;
	int i;

	t = btf__type_by_id(btf, type_id);
	for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) {
		switch (btf_kind(t)) {
		case BTF_KIND_INT:
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
		case BTF_KIND_DATASEC:
		case BTF_KIND_FLOAT:
			size = t->size;
			goto done;
		case BTF_KIND_PTR:
			size = btf_ptr_sz(btf);
			goto done;
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_VAR:
		case BTF_KIND_DECL_TAG:
		case BTF_KIND_TYPE_TAG:
			type_id = t->type;
			break;
		case BTF_KIND_ARRAY:
			array = btf_array(t);
			if (nelems && array->nelems > UINT32_MAX / nelems)
				return libbpf_err(-E2BIG);
			nelems *= array->nelems;
			type_id = array->type;
			break;
		default:
			return libbpf_err(-EINVAL);
		}

		t = btf__type_by_id(btf, type_id);
	}

done:
	if (size < 0)
		return libbpf_err(-EINVAL);
	if (nelems && size > UINT32_MAX / nelems)
		return libbpf_err(-E2BIG);

	return nelems * size;
}
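
/* Worked example (illustrative): for a C array `int arr[4][5]`, assuming
 * `arr_type_id` is its BTF type ID, btf__resolve_size() follows the two
 * ARRAY levels, accumulating nelems = 4 * 5 = 20, reaches the sized INT type
 * and returns 20 * 4 = 80 bytes:
 *
 *	__s64 sz = btf__resolve_size(btf, arr_type_id);
 *	if (sz < 0)
 *		return sz;	// negative libbpf error code
 */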

int btf__align_of(const struct btf *btf, __u32 id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);
	__u16 kind = btf_kind(t);

	switch (kind) {
	case BTF_KIND_INT:
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
	case BTF_KIND_FLOAT:
		return min(btf_ptr_sz(btf), (size_t)t->size);
	case BTF_KIND_PTR:
		return btf_ptr_sz(btf);
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPE_TAG:
		return btf__align_of(btf, t->type);
	case BTF_KIND_ARRAY:
		return btf__align_of(btf, btf_array(t)->type);
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *m = btf_members(t);
		__u16 vlen = btf_vlen(t);
		int i, max_align = 1, align;

		for (i = 0; i < vlen; i++, m++) {
			align = btf__align_of(btf, m->type);
			if (align <= 0)
				return libbpf_err(align);
			max_align = max(max_align, align);

			/* if field offset isn't aligned according to field
			 * type's alignment, then struct must be packed
			 */
			if (btf_member_bitfield_size(t, i) == 0 &&
			    (m->offset % (8 * align)) != 0)
				return 1;
		}

		/* if struct/union size isn't a multiple of its alignment,
		 * then struct must be packed
		 */
		if ((t->size % max_align) != 0)
			return 1;

		return max_align;
	}
	default:
		pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
		return errno = EINVAL, 0;
	}
}

int btf__resolve_type(const struct btf *btf, __u32 type_id)
{
	const struct btf_type *t;
	int depth = 0;

	t = btf__type_by_id(btf, type_id);
	while (depth < MAX_RESOLVE_DEPTH &&
	       !btf_type_is_void_or_null(t) &&
	       (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
		type_id = t->type;
		t = btf__type_by_id(btf, type_id);
		depth++;
	}

	if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
		return libbpf_err(-EINVAL);

	return type_id;
}

__s32 btf__find_by_name(const struct btf *btf, const char *type_name)
{
	__u32 i, nr_types = btf__type_cnt(btf);

	if (!strcmp(type_name, "void"))
		return 0;

	for (i = 1; i < nr_types; i++) {
		const struct btf_type *t = btf__type_by_id(btf, i);
		const char *name = btf__name_by_offset(btf, t->name_off);

		if (name && !strcmp(type_name, name))
			return i;
	}

	return libbpf_err(-ENOENT);
}

static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
				   const char *type_name, __u32 kind)
{
	__u32 i, nr_types = btf__type_cnt(btf);

	if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
		return 0;

	for (i = start_id; i < nr_types; i++) {
		const struct btf_type *t = btf__type_by_id(btf, i);
		const char *name;

		if (btf_kind(t) != kind)
			continue;
		name = btf__name_by_offset(btf, t->name_off);
		if (name && !strcmp(type_name, name))
			return i;
	}

	return libbpf_err(-ENOENT);
}

__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
				 __u32 kind)
{
	return btf_find_by_name_kind(btf, btf->start_id, type_name, kind);
}

__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
			     __u32 kind)
{
	return btf_find_by_name_kind(btf, 1, type_name, kind);
}
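
/* Lookup sketch (illustrative): finding the 'task_struct' STRUCT and its
 * size, assuming `btf` was parsed from vmlinux:
 *
 *	__s32 id = btf__find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT);
 *
 *	if (id > 0) {
 *		const struct btf_type *t = btf__type_by_id(btf, id);
 *
 *		printf("task_struct is %u bytes\n", t->size);
 *	}
 */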

static bool btf_is_modifiable(const struct btf *btf)
{
	return (void *)btf->hdr != btf->raw_data;
}

void btf__free(struct btf *btf)
{
	if (IS_ERR_OR_NULL(btf))
		return;

	if (btf->fd >= 0)
		close(btf->fd);

	if (btf_is_modifiable(btf)) {
		/* if BTF was modified after loading, it will have a split
		 * in-memory representation for header, types, and strings
		 * sections, so we need to free all of them individually. It
		 * might still have a cached contiguous raw data present,
		 * which will be unconditionally freed below.
		 */
		free(btf->hdr);
		free(btf->types_data);
		strset__free(btf->strs_set);
	}
	free(btf->raw_data);
	free(btf->raw_data_swapped);
	free(btf->type_offs);
	if (btf->owns_base)
		btf__free(btf->base_btf);
	free(btf);
}

static struct btf *btf_new_empty(struct btf *base_btf)
{
	struct btf *btf;

	btf = calloc(1, sizeof(*btf));
	if (!btf)
		return ERR_PTR(-ENOMEM);

	btf->nr_types = 0;
	btf->start_id = 1;
	btf->start_str_off = 0;
	btf->fd = -1;
	btf->ptr_sz = sizeof(void *);
	btf->swapped_endian = false;

	if (base_btf) {
		btf->base_btf = base_btf;
		btf->start_id = btf__type_cnt(base_btf);
		btf->start_str_off = base_btf->hdr->str_len;
		btf->swapped_endian = base_btf->swapped_endian;
	}

	/* +1 for empty string at offset 0 */
	btf->raw_size = sizeof(struct btf_header) + (base_btf ? 0 : 1);
	btf->raw_data = calloc(1, btf->raw_size);
	if (!btf->raw_data) {
		free(btf);
		return ERR_PTR(-ENOMEM);
	}

	btf->hdr = btf->raw_data;
	btf->hdr->hdr_len = sizeof(struct btf_header);
	btf->hdr->magic = BTF_MAGIC;
	btf->hdr->version = BTF_VERSION;

	btf->types_data = btf->raw_data + btf->hdr->hdr_len;
	btf->strs_data = btf->raw_data + btf->hdr->hdr_len;
	btf->hdr->str_len = base_btf ? 0 : 1; /* empty string at offset 0 */

	return btf;
}

struct btf *btf__new_empty(void)
{
	return libbpf_ptr(btf_new_empty(NULL));
}

struct btf *btf__new_empty_split(struct btf *base_btf)
{
	return libbpf_ptr(btf_new_empty(base_btf));
}

static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
{
	struct btf *btf;
	int err;

	btf = calloc(1, sizeof(struct btf));
	if (!btf)
		return ERR_PTR(-ENOMEM);

	btf->nr_types = 0;
	btf->start_id = 1;
	btf->start_str_off = 0;
	btf->fd = -1;

	if (base_btf) {
		btf->base_btf = base_btf;
		btf->start_id = btf__type_cnt(base_btf);
		btf->start_str_off = base_btf->hdr->str_len;
	}

	btf->raw_data = malloc(size);
	if (!btf->raw_data) {
		err = -ENOMEM;
		goto done;
	}
	memcpy(btf->raw_data, data, size);
	btf->raw_size = size;

	btf->hdr = btf->raw_data;
	err = btf_parse_hdr(btf);
	if (err)
		goto done;

	btf->strs_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->str_off;
	btf->types_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->type_off;

	err = btf_parse_str_sec(btf);
	err = err ?: btf_parse_type_sec(btf);
	err = err ?: btf_sanity_check(btf);
	if (err)
		goto done;

done:
	if (err) {
		btf__free(btf);
		return ERR_PTR(err);
	}

	return btf;
}

struct btf *btf__new(const void *data, __u32 size)
{
	return libbpf_ptr(btf_new(data, size, NULL));
}

struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf)
{
	return libbpf_ptr(btf_new(data, size, base_btf));
}
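
/* Construction sketch (illustrative): wrapping raw BTF bytes, e.g., read
 * from a file, into a struct btf; `buf` and `buf_sz` are assumed to hold
 * valid raw BTF data:
 *
 *	struct btf *btf = btf__new(buf, buf_sz);
 *
 *	if (!btf)
 *		return -errno;	// errno is set on failure
 *	...
 *	btf__free(btf);
 */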

struct btf_elf_secs {
	Elf_Data *btf_data;
	Elf_Data *btf_ext_data;
	Elf_Data *btf_base_data;
};

static int btf_find_elf_sections(Elf *elf, const char *path, struct btf_elf_secs *secs)
{
	Elf_Scn *scn = NULL;
	Elf_Data *data;
	GElf_Ehdr ehdr;
	size_t shstrndx;
	int idx = 0;

	if (!gelf_getehdr(elf, &ehdr)) {
		pr_warn("failed to get EHDR from %s\n", path);
		goto err;
	}

	if (elf_getshdrstrndx(elf, &shstrndx)) {
		pr_warn("failed to get section names section index for %s\n",
			path);
		goto err;
	}

	if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
		pr_warn("failed to get e_shstrndx from %s\n", path);
		goto err;
	}

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		Elf_Data **field;
		GElf_Shdr sh;
		char *name;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warn("failed to get section(%d) header from %s\n",
				idx, path);
			goto err;
		}
		name = elf_strptr(elf, shstrndx, sh.sh_name);
		if (!name) {
			pr_warn("failed to get section(%d) name from %s\n",
				idx, path);
			goto err;
		}

		if (strcmp(name, BTF_ELF_SEC) == 0)
			field = &secs->btf_data;
		else if (strcmp(name, BTF_EXT_ELF_SEC) == 0)
			field = &secs->btf_ext_data;
		else if (strcmp(name, BTF_BASE_ELF_SEC) == 0)
			field = &secs->btf_base_data;
		else
			continue;

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warn("failed to get section(%d, %s) data from %s\n",
				idx, name, path);
			goto err;
		}
		*field = data;
	}

	return 0;

err:
	return -LIBBPF_ERRNO__FORMAT;
}

static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
				 struct btf_ext **btf_ext)
{
	struct btf_elf_secs secs = {};
	struct btf *dist_base_btf = NULL;
	struct btf *btf = NULL;
	int err = 0, fd = -1;
	Elf *elf = NULL;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warn("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	fd = open(path, O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		err = -errno;
		pr_warn("failed to open %s: %s\n", path, strerror(errno));
		return ERR_PTR(err);
	}

	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf) {
		pr_warn("failed to open %s as ELF file\n", path);
		goto done;
	}

	err = btf_find_elf_sections(elf, path, &secs);
	if (err)
		goto done;

	if (!secs.btf_data) {
		pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path);
		err = -ENODATA;
		goto done;
	}

	if (secs.btf_base_data) {
		dist_base_btf = btf_new(secs.btf_base_data->d_buf, secs.btf_base_data->d_size,
					NULL);
		if (IS_ERR(dist_base_btf)) {
			err = PTR_ERR(dist_base_btf);
			dist_base_btf = NULL;
			goto done;
		}
	}

	btf = btf_new(secs.btf_data->d_buf, secs.btf_data->d_size,
		      dist_base_btf ?: base_btf);
	if (IS_ERR(btf)) {
		err = PTR_ERR(btf);
		goto done;
	}
	if (dist_base_btf && base_btf) {
		err = btf__relocate(btf, base_btf);
		if (err)
			goto done;
		btf__free(dist_base_btf);
		dist_base_btf = NULL;
	}

	if (dist_base_btf)
		btf->owns_base = true;

	switch (gelf_getclass(elf)) {
	case ELFCLASS32:
		btf__set_pointer_size(btf, 4);
		break;
	case ELFCLASS64:
		btf__set_pointer_size(btf, 8);
		break;
	default:
		pr_warn("failed to get ELF class (bitness) for %s\n", path);
		break;
	}

	if (btf_ext && secs.btf_ext_data) {
		*btf_ext = btf_ext__new(secs.btf_ext_data->d_buf, secs.btf_ext_data->d_size);
		if (IS_ERR(*btf_ext)) {
			err = PTR_ERR(*btf_ext);
			goto done;
		}
	} else if (btf_ext) {
		*btf_ext = NULL;
	}
done:
	if (elf)
		elf_end(elf);
	close(fd);

	if (!err)
		return btf;

	if (btf_ext)
		btf_ext__free(*btf_ext);
	btf__free(dist_base_btf);
	btf__free(btf);

	return ERR_PTR(err);
}

struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
{
	return libbpf_ptr(btf_parse_elf(path, NULL, btf_ext));
}

struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf)
{
	return libbpf_ptr(btf_parse_elf(path, base_btf, NULL));
}

static struct btf *btf_parse_raw(const char *path, struct btf *base_btf)
{
	struct btf *btf = NULL;
	void *data = NULL;
	FILE *f = NULL;
	__u16 magic;
	int err = 0;
	long sz;

	f = fopen(path, "rbe");
	if (!f) {
		err = -errno;
		goto err_out;
	}

	/* check BTF magic */
	if (fread(&magic, 1, sizeof(magic), f) < sizeof(magic)) {
		err = -EIO;
		goto err_out;
	}
	if (magic != BTF_MAGIC && magic != bswap_16(BTF_MAGIC)) {
		/* definitely not a raw BTF */
		err = -EPROTO;
		goto err_out;
	}

	/* get file size */
	if (fseek(f, 0, SEEK_END)) {
		err = -errno;
		goto err_out;
	}
	sz = ftell(f);
	if (sz < 0) {
		err = -errno;
		goto err_out;
	}
	/* rewind to the start */
	if (fseek(f, 0, SEEK_SET)) {
		err = -errno;
		goto err_out;
	}

	/* pre-alloc memory and read all of BTF data */
	data = malloc(sz);
	if (!data) {
		err = -ENOMEM;
		goto err_out;
	}
	if (fread(data, 1, sz, f) < sz) {
		err = -EIO;
		goto err_out;
	}

	/* finally parse BTF data */
	btf = btf_new(data, sz, base_btf);

err_out:
	free(data);
	if (f)
		fclose(f);
	return err ? ERR_PTR(err) : btf;
}

struct btf *btf__parse_raw(const char *path)
{
	return libbpf_ptr(btf_parse_raw(path, NULL));
}

struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf)
{
	return libbpf_ptr(btf_parse_raw(path, base_btf));
}

static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext)
{
	struct btf *btf;
	int err;

	if (btf_ext)
		*btf_ext = NULL;

	btf = btf_parse_raw(path, base_btf);
	err = libbpf_get_error(btf);
	if (!err)
		return btf;
	if (err != -EPROTO)
		return ERR_PTR(err);
	return btf_parse_elf(path, base_btf, btf_ext);
}

struct btf *btf__parse(const char *path, struct btf_ext **btf_ext)
{
	return libbpf_ptr(btf_parse(path, NULL, btf_ext));
}

struct btf *btf__parse_split(const char *path, struct btf *base_btf)
{
	return libbpf_ptr(btf_parse(path, base_btf, NULL));
}
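
/* Parsing sketch (illustrative): btf__parse() transparently handles both raw
 * BTF files and ELF objects with a .BTF section. Split BTF, e.g., for a
 * kernel module (module name below is made up), is parsed on top of vmlinux
 * base BTF:
 *
 *	struct btf *vmlinux_btf, *mod_btf;
 *
 *	vmlinux_btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
 *	if (!vmlinux_btf)
 *		return -errno;
 *	mod_btf = btf__parse_split("/sys/kernel/btf/some_module", vmlinux_btf);
 */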

static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);

int btf_load_into_kernel(struct btf *btf,
			 char *log_buf, size_t log_sz, __u32 log_level,
			 int token_fd)
{
	LIBBPF_OPTS(bpf_btf_load_opts, opts);
	__u32 buf_sz = 0, raw_size;
	char *buf = NULL, *tmp;
	void *raw_data;
	int err = 0;

	if (btf->fd >= 0)
		return libbpf_err(-EEXIST);
	if (log_sz && !log_buf)
		return libbpf_err(-EINVAL);

	/* cache native raw data representation */
	raw_data = btf_get_raw_data(btf, &raw_size, false);
	if (!raw_data) {
		err = -ENOMEM;
		goto done;
	}
	btf->raw_size = raw_size;
	btf->raw_data = raw_data;

retry_load:
	/* if log_level is 0, we won't provide log_buf/log_size to the kernel
	 * initially. Only if BTF loading fails do we bump log_level to 1 and
	 * retry, using either an auto-allocated or the custom log_buf. This
	 * way a non-NULL custom log_buf provides a buffer just in case, while
	 * still hoping for a successful load with no need for a log.
	 */
	if (log_level) {
		/* if caller didn't provide custom log_buf, we'll keep
		 * allocating our own progressively bigger buffers for BTF
		 * verification log
		 */
		if (!log_buf) {
			buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2);
			tmp = realloc(buf, buf_sz);
			if (!tmp) {
				err = -ENOMEM;
				goto done;
			}
			buf = tmp;
			buf[0] = '\0';
		}

		opts.log_buf = log_buf ? log_buf : buf;
		opts.log_size = log_buf ? log_sz : buf_sz;
		opts.log_level = log_level;
	}

	opts.token_fd = token_fd;
	if (token_fd)
		opts.btf_flags |= BPF_F_TOKEN_FD;

	btf->fd = bpf_btf_load(raw_data, raw_size, &opts);
	if (btf->fd < 0) {
		/* time to turn on verbose mode and try again */
		if (log_level == 0) {
			log_level = 1;
			goto retry_load;
		}
		/* only retry if caller didn't provide custom log_buf, but
		 * make sure we can never overflow buf_sz
		 */
		if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2)
			goto retry_load;

		err = -errno;
		pr_warn("BTF loading error: %d\n", err);
		/* don't print out contents of custom log_buf */
		if (!log_buf && buf[0])
			pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf);
	}

done:
	free(buf);
	return libbpf_err(err);
}

int btf__load_into_kernel(struct btf *btf)
{
	return btf_load_into_kernel(btf, NULL, 0, 0, 0);
}
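
/* Loading sketch (illustrative): validating BTF by loading it into the
 * kernel and getting back an FD:
 *
 *	if (btf__load_into_kernel(btf))
 *		return -errno;	// bump log level for verifier details
 *	printf("BTF FD: %d\n", btf__fd(btf));
 */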

int btf__fd(const struct btf *btf)
{
	return btf->fd;
}

void btf__set_fd(struct btf *btf, int fd)
{
	btf->fd = fd;
}

static const void *btf_strs_data(const struct btf *btf)
{
	return btf->strs_data ? btf->strs_data : strset__data(btf->strs_set);
}

static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian)
{
	struct btf_header *hdr = btf->hdr;
	struct btf_type *t;
	void *data, *p;
	__u32 data_sz;
	int i;

	data = swap_endian ? btf->raw_data_swapped : btf->raw_data;
	if (data) {
		*size = btf->raw_size;
		return data;
	}

	data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len;
	data = calloc(1, data_sz);
	if (!data)
		return NULL;
	p = data;

	memcpy(p, hdr, hdr->hdr_len);
	if (swap_endian)
		btf_bswap_hdr(p);
	p += hdr->hdr_len;

	memcpy(p, btf->types_data, hdr->type_len);
	if (swap_endian) {
		for (i = 0; i < btf->nr_types; i++) {
			t = p + btf->type_offs[i];
			/* btf_bswap_type_rest() relies on native t->info, so
			 * we swap base type info after we swapped all the
			 * additional information
			 */
			if (btf_bswap_type_rest(t))
				goto err_out;
			btf_bswap_type_base(t);
		}
	}
	p += hdr->type_len;

	memcpy(p, btf_strs_data(btf), hdr->str_len);
	p += hdr->str_len;

	*size = data_sz;
	return data;
err_out:
	free(data);
	return NULL;
}

const void *btf__raw_data(const struct btf *btf_ro, __u32 *size)
{
	struct btf *btf = (struct btf *)btf_ro;
	__u32 data_sz;
	void *data;

	data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian);
	if (!data)
		return errno = ENOMEM, NULL;

	btf->raw_size = data_sz;
	if (btf->swapped_endian)
		btf->raw_data_swapped = data;
	else
		btf->raw_data = data;
	*size = data_sz;
	return data;
}

__attribute__((alias("btf__raw_data")))
const void *btf__get_raw_data(const struct btf *btf, __u32 *size);

const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
{
	if (offset < btf->start_str_off)
		return btf__str_by_offset(btf->base_btf, offset);
	else if (offset - btf->start_str_off < btf->hdr->str_len)
		return btf_strs_data(btf) + (offset - btf->start_str_off);
	else
		return errno = EINVAL, NULL;
}

const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
{
	return btf__str_by_offset(btf, offset);
}

struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
{
	struct bpf_btf_info btf_info;
	__u32 len = sizeof(btf_info);
	__u32 last_size;
	struct btf *btf;
	void *ptr;
	int err;

	/* we won't know btf_size until we call bpf_btf_get_info_by_fd(). So
	 * let's start with a sane default - 4KiB here - and resize it only if
	 * bpf_btf_get_info_by_fd() needs a bigger buffer.
	 */
	last_size = 4096;
	ptr = malloc(last_size);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	memset(&btf_info, 0, sizeof(btf_info));
	btf_info.btf = ptr_to_u64(ptr);
	btf_info.btf_size = last_size;
	err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);

	if (!err && btf_info.btf_size > last_size) {
		void *temp_ptr;

		last_size = btf_info.btf_size;
		temp_ptr = realloc(ptr, last_size);
		if (!temp_ptr) {
			btf = ERR_PTR(-ENOMEM);
			goto exit_free;
		}
		ptr = temp_ptr;

		len = sizeof(btf_info);
		memset(&btf_info, 0, sizeof(btf_info));
		btf_info.btf = ptr_to_u64(ptr);
		btf_info.btf_size = last_size;

		err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
	}

	if (err || btf_info.btf_size > last_size) {
		btf = err ? ERR_PTR(-errno) : ERR_PTR(-E2BIG);
		goto exit_free;
	}

	btf = btf_new(ptr, btf_info.btf_size, base_btf);

exit_free:
	free(ptr);
	return btf;
}

struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
{
	struct btf *btf;
	int btf_fd;

	btf_fd = bpf_btf_get_fd_by_id(id);
	if (btf_fd < 0)
		return libbpf_err_ptr(-errno);

	btf = btf_get_from_fd(btf_fd, base_btf);
	close(btf_fd);

	return libbpf_ptr(btf);
}

struct btf *btf__load_from_kernel_by_id(__u32 id)
{
	return btf__load_from_kernel_by_id_split(id, NULL);
}
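
/* Retrieval sketch (illustrative): fetching kernel-side BTF by object ID,
 * e.g., an ID previously obtained via BPF object introspection; `id` is
 * assumed known:
 *
 *	struct btf *btf = btf__load_from_kernel_by_id(id);
 *
 *	if (!btf)
 *		return -errno;
 */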

static void btf_invalidate_raw_data(struct btf *btf)
{
	if (btf->raw_data) {
		free(btf->raw_data);
		btf->raw_data = NULL;
	}
	if (btf->raw_data_swapped) {
		free(btf->raw_data_swapped);
		btf->raw_data_swapped = NULL;
	}
}

/* Ensure BTF is ready to be modified (by splitting into three memory
 * regions for header, types, and strings). Also invalidate cached
 * raw_data, if any.
 */
static int btf_ensure_modifiable(struct btf *btf)
{
	void *hdr, *types;
	struct strset *set = NULL;
	int err = -ENOMEM;

	if (btf_is_modifiable(btf)) {
		/* any BTF modification invalidates raw_data */
		btf_invalidate_raw_data(btf);
		return 0;
	}

	/* split raw data into three memory regions */
	hdr = malloc(btf->hdr->hdr_len);
	types = malloc(btf->hdr->type_len);
	if (!hdr || !types)
		goto err_out;

	memcpy(hdr, btf->hdr, btf->hdr->hdr_len);
	memcpy(types, btf->types_data, btf->hdr->type_len);

	/* build lookup index for all strings */
	set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr->str_len);
	if (IS_ERR(set)) {
		err = PTR_ERR(set);
		goto err_out;
	}

	/* only when everything was successful, update internal state */
	btf->hdr = hdr;
	btf->types_data = types;
	btf->types_data_cap = btf->hdr->type_len;
	btf->strs_data = NULL;
	btf->strs_set = set;
	/* if BTF was created from scratch, all strings are guaranteed to be
	 * unique and deduplicated
	 */
	if (btf->hdr->str_len == 0)
		btf->strs_deduped = true;
	if (!btf->base_btf && btf->hdr->str_len == 1)
		btf->strs_deduped = true;

	/* invalidate raw_data representation */
	btf_invalidate_raw_data(btf);

	return 0;

err_out:
	strset__free(set);
	free(hdr);
	free(types);
	return err;
}

/* Find an offset in BTF string section that corresponds to a given string *s*.
 * Returns:
 *   - >0 offset into string section, if string is found;
 *   - -ENOENT, if string is not in the string section;
 *   - <0, on any other error.
 */
int btf__find_str(struct btf *btf, const char *s)
{
	int off;

	if (btf->base_btf) {
		off = btf__find_str(btf->base_btf, s);
		if (off != -ENOENT)
			return off;
	}

	/* BTF needs to be in a modifiable state to build string lookup index */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	off = strset__find_str(btf->strs_set, s);
	if (off < 0)
		return libbpf_err(off);

	return btf->start_str_off + off;
}

/* Add a string s to the BTF string section.
 * Returns:
 *   - > 0 offset into string section, on success;
 *   - < 0, on error.
 */
int btf__add_str(struct btf *btf, const char *s)
{
	int off;

	if (btf->base_btf) {
		off = btf__find_str(btf->base_btf, s);
		if (off != -ENOENT)
			return off;
	}

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	off = strset__add_str(btf->strs_set, s);
	if (off < 0)
		return libbpf_err(off);

	btf->hdr->str_len = strset__data_size(btf->strs_set);

	return btf->start_str_off + off;
}
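
/* Interning sketch (illustrative): adding the same string twice yields the
 * same offset, thanks to strset-based deduplication:
 *
 *	int off1 = btf__add_str(btf, "my_type");	// e.g., returns 1
 *	int off2 = btf__add_str(btf, "my_type");	// same offset as off1
 *	int off3 = btf__find_str(btf, "absent");	// -ENOENT
 */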

static void *btf_add_type_mem(struct btf *btf, size_t add_sz)
{
	return libbpf_add_mem(&btf->types_data, &btf->types_data_cap, 1,
			      btf->hdr->type_len, UINT_MAX, add_sz);
}

static void btf_type_inc_vlen(struct btf_type *t)
{
	t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t));
}

static int btf_commit_type(struct btf *btf, int data_sz)
{
	int err;

	err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
	if (err)
		return libbpf_err(err);

	btf->hdr->type_len += data_sz;
	btf->hdr->str_off += data_sz;
	btf->nr_types++;
	return btf->start_id + btf->nr_types - 1;
}

struct btf_pipe {
	const struct btf *src;
	struct btf *dst;
	struct hashmap *str_off_map; /* map string offsets from src to dst */
};

static int btf_rewrite_str(struct btf_pipe *p, __u32 *str_off)
{
	long mapped_off;
	int off, err;

	if (!*str_off) /* nothing to do for empty strings */
		return 0;

	if (p->str_off_map &&
	    hashmap__find(p->str_off_map, *str_off, &mapped_off)) {
		*str_off = mapped_off;
		return 0;
	}

	off = btf__add_str(p->dst, btf__str_by_offset(p->src, *str_off));
	if (off < 0)
		return off;

	/* Remember string mapping from src to dst.  It avoids
	 * performing expensive string comparisons.
	 */
	if (p->str_off_map) {
		err = hashmap__append(p->str_off_map, *str_off, off);
		if (err)
			return err;
	}

	*str_off = off;
	return 0;
}

static int btf_add_type(struct btf_pipe *p, const struct btf_type *src_type)
{
	struct btf_field_iter it;
	struct btf_type *t;
	__u32 *str_off;
	int sz, err;

	sz = btf_type_size(src_type);
	if (sz < 0)
		return libbpf_err(sz);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	if (btf_ensure_modifiable(p->dst))
		return libbpf_err(-ENOMEM);

	t = btf_add_type_mem(p->dst, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	memcpy(t, src_type, sz);

	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
	if (err)
		return libbpf_err(err);

	while ((str_off = btf_field_iter_next(&it))) {
		err = btf_rewrite_str(p, str_off);
		if (err)
			return libbpf_err(err);
	}

	return btf_commit_type(p->dst, sz);
}

int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
{
	struct btf_pipe p = { .src = src_btf, .dst = btf };

	return btf_add_type(&p, src_type);
}

static size_t btf_dedup_identity_hash_fn(long key, void *ctx);
static bool btf_dedup_equal_fn(long k1, long k2, void *ctx);

int btf__add_btf(struct btf *btf, const struct btf *src_btf)
{
	struct btf_pipe p = { .src = src_btf, .dst = btf };
	int data_sz, sz, cnt, i, err, old_strs_len;
	__u32 *off;
	void *t;

	/* appending split BTF isn't supported yet */
	if (src_btf->base_btf)
		return libbpf_err(-ENOTSUP);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	/* remember original strings section size if we have to roll back
	 * partial strings section changes
	 */
	old_strs_len = btf->hdr->str_len;

	data_sz = src_btf->hdr->type_len;
	cnt = btf__type_cnt(src_btf) - 1;

	/* pre-allocate enough memory for new types */
	t = btf_add_type_mem(btf, data_sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* pre-allocate enough memory for type offset index for new types */
	off = btf_add_type_offs_mem(btf, cnt);
	if (!off)
		return libbpf_err(-ENOMEM);

	/* Map the string offsets from src_btf to the offsets from btf to improve performance */
	p.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
	if (IS_ERR(p.str_off_map))
		return libbpf_err(-ENOMEM);

	/* bulk copy types data for all types from src_btf */
	memcpy(t, src_btf->types_data, data_sz);

	for (i = 0; i < cnt; i++) {
		struct btf_field_iter it;
		__u32 *type_id, *str_off;

		sz = btf_type_size(t);
		if (sz < 0) {
			/* unlikely, has to be corrupted src_btf */
			err = sz;
			goto err_out;
		}

		/* fill out type ID to type offset mapping for lookups by type ID */
		*off = t - btf->types_data;

		/* add, dedup, and remap strings referenced by this BTF type */
		err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
		if (err)
			goto err_out;
		while ((str_off = btf_field_iter_next(&it))) {
			err = btf_rewrite_str(&p, str_off);
			if (err)
				goto err_out;
		}

		/* remap all type IDs referenced from this BTF type */
		err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
		if (err)
			goto err_out;

		while ((type_id = btf_field_iter_next(&it))) {
			if (!*type_id) /* nothing to do for VOID references */
				continue;

			/* we haven't updated btf's type count yet, so
			 * btf->start_id + btf->nr_types - 1 is the type ID offset we should
			 * add to all newly added BTF types
			 */
			*type_id += btf->start_id + btf->nr_types - 1;
		}

		/* go to next type data and type offset index entry */
		t += sz;
		off++;
	}

	/* Up until now any of the copied type data was effectively invisible,
	 * so if we exited early before this point due to error, BTF would be
	 * effectively unmodified. There would be extra internal memory
	 * pre-allocated, but it would not be available for querying.  But now
	 * that we've copied and rewritten all the data successfully, we can
	 * update type count and various internal offsets and sizes to
	 * "commit" the changes and make them visible to the outside world.
	 */
	btf->hdr->type_len += data_sz;
	btf->hdr->str_off += data_sz;
	btf->nr_types += cnt;

	hashmap__free(p.str_off_map);

	/* return type ID of the first added BTF type */
	return btf->start_id + btf->nr_types - cnt;
err_out:
	/* zero out preallocated memory as if it was just allocated with
	 * libbpf_add_mem()
	 */
	memset(btf->types_data + btf->hdr->type_len, 0, data_sz);
	memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len);

	/* and now restore original strings section size; types data size
	 * wasn't modified, so doesn't need restoring, see big comment above
	 */
	btf->hdr->str_len = old_strs_len;

	hashmap__free(p.str_off_map);

	return libbpf_err(err);
}
1988 
1989 /*
1990  * Append new BTF_KIND_INT type with:
1991  *   - *name* - non-empty, non-NULL type name;
1992  *   - *byte_sz* - power-of-2 size of the type (1, 2, 4, 8, or 16), in bytes;
1993  *   - encoding is a combination of BTF_INT_SIGNED, BTF_INT_CHAR, BTF_INT_BOOL.
1994  * Returns:
1995  *   - >0, type ID of newly added BTF type;
1996  *   - <0, on error.
1997  */
1998 int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding)
1999 {
2000 	struct btf_type *t;
2001 	int sz, name_off;
2002 
2003 	/* non-empty name */
2004 	if (!name || !name[0])
2005 		return libbpf_err(-EINVAL);
2006 	/* byte_sz must be power of 2 */
2007 	if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16)
2008 		return libbpf_err(-EINVAL);
2009 	if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL))
2010 		return libbpf_err(-EINVAL);
2011 
2012 	/* deconstruct BTF, if necessary, and invalidate raw_data */
2013 	if (btf_ensure_modifiable(btf))
2014 		return libbpf_err(-ENOMEM);
2015 
2016 	sz = sizeof(struct btf_type) + sizeof(int);
2017 	t = btf_add_type_mem(btf, sz);
2018 	if (!t)
2019 		return libbpf_err(-ENOMEM);
2020 
2021 	/* if something goes wrong later, we might end up with an extra string,
2022 	 * but that shouldn't be a problem, because the BTF can't be completely
2023 	 * constructed anyway and will most probably just be discarded
2024 	 */
2025 	name_off = btf__add_str(btf, name);
2026 	if (name_off < 0)
2027 		return name_off;
2028 
2029 	t->name_off = name_off;
2030 	t->info = btf_type_info(BTF_KIND_INT, 0, 0);
2031 	t->size = byte_sz;
2032 	/* set INT info; we don't allow setting legacy bit offset/size */
2033 	*(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8);
2034 
2035 	return btf_commit_type(btf, sz);
2036 }
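
/* Usage sketch (illustrative only, not part of libbpf; assumes a writable BTF
 * object freshly created with btf__new_empty(), error handling omitted):
 *
 *	struct btf *btf = btf__new_empty();
 *	int int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
 *
 * On success, int_id is a positive type ID that later btf__add_*() calls can
 * reference as ref_type_id.
 */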
2037 
2038 /*
2039  * Append new BTF_KIND_FLOAT type with:
2040  *   - *name* - non-empty, non-NULL type name;
2041  *   - *byte_sz* - size of the type, in bytes; must be 2, 4, 8, 12 or 16;
2042  * Returns:
2043  *   - >0, type ID of newly added BTF type;
2044  *   - <0, on error.
2045  */
2046 int btf__add_float(struct btf *btf, const char *name, size_t byte_sz)
2047 {
2048 	struct btf_type *t;
2049 	int sz, name_off;
2050 
2051 	/* non-empty name */
2052 	if (!name || !name[0])
2053 		return libbpf_err(-EINVAL);
2054 
2055 	/* byte_sz must be one of the explicitly allowed values */
2056 	if (byte_sz != 2 && byte_sz != 4 && byte_sz != 8 && byte_sz != 12 &&
2057 	    byte_sz != 16)
2058 		return libbpf_err(-EINVAL);
2059 
2060 	if (btf_ensure_modifiable(btf))
2061 		return libbpf_err(-ENOMEM);
2062 
2063 	sz = sizeof(struct btf_type);
2064 	t = btf_add_type_mem(btf, sz);
2065 	if (!t)
2066 		return libbpf_err(-ENOMEM);
2067 
2068 	name_off = btf__add_str(btf, name);
2069 	if (name_off < 0)
2070 		return name_off;
2071 
2072 	t->name_off = name_off;
2073 	t->info = btf_type_info(BTF_KIND_FLOAT, 0, 0);
2074 	t->size = byte_sz;
2075 
2076 	return btf_commit_type(btf, sz);
2077 }
2078 
2079 /* It's completely legal to append BTF types with type IDs pointing forward to
2080  * types that haven't been appended yet, so we only make sure that the ID looks
2081  * sane; we can't guarantee that it will always be valid
2082  */
2083 static int validate_type_id(int id)
2084 {
2085 	if (id < 0 || id > BTF_MAX_NR_TYPES)
2086 		return -EINVAL;
2087 	return 0;
2088 }
2089 
2090 /* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT */
2091 static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id)
2092 {
2093 	struct btf_type *t;
2094 	int sz, name_off = 0;
2095 
2096 	if (validate_type_id(ref_type_id))
2097 		return libbpf_err(-EINVAL);
2098 
2099 	if (btf_ensure_modifiable(btf))
2100 		return libbpf_err(-ENOMEM);
2101 
2102 	sz = sizeof(struct btf_type);
2103 	t = btf_add_type_mem(btf, sz);
2104 	if (!t)
2105 		return libbpf_err(-ENOMEM);
2106 
2107 	if (name && name[0]) {
2108 		name_off = btf__add_str(btf, name);
2109 		if (name_off < 0)
2110 			return name_off;
2111 	}
2112 
2113 	t->name_off = name_off;
2114 	t->info = btf_type_info(kind, 0, 0);
2115 	t->type = ref_type_id;
2116 
2117 	return btf_commit_type(btf, sz);
2118 }
2119 
2120 /*
2121  * Append new BTF_KIND_PTR type with:
2122  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2123  * Returns:
2124  *   - >0, type ID of newly added BTF type;
2125  *   - <0, on error.
2126  */
2127 int btf__add_ptr(struct btf *btf, int ref_type_id)
2128 {
2129 	return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id);
2130 }
2131 
2132 /*
2133  * Append new BTF_KIND_ARRAY type with:
2134  *   - *index_type_id* - type ID of the type describing array index;
2135  *   - *elem_type_id* - type ID of the type describing array element;
2136  *   - *nr_elems* - the number of elements in the array;
2137  * Returns:
2138  *   - >0, type ID of newly added BTF type;
2139  *   - <0, on error.
2140  */
2141 int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems)
2142 {
2143 	struct btf_type *t;
2144 	struct btf_array *a;
2145 	int sz;
2146 
2147 	if (validate_type_id(index_type_id) || validate_type_id(elem_type_id))
2148 		return libbpf_err(-EINVAL);
2149 
2150 	if (btf_ensure_modifiable(btf))
2151 		return libbpf_err(-ENOMEM);
2152 
2153 	sz = sizeof(struct btf_type) + sizeof(struct btf_array);
2154 	t = btf_add_type_mem(btf, sz);
2155 	if (!t)
2156 		return libbpf_err(-ENOMEM);
2157 
2158 	t->name_off = 0;
2159 	t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0);
2160 	t->size = 0;
2161 
2162 	a = btf_array(t);
2163 	a->type = elem_type_id;
2164 	a->index_type = index_type_id;
2165 	a->nelems = nr_elems;
2166 
2167 	return btf_commit_type(btf, sz);
2168 }
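
/* Usage sketch (illustrative; int_id is assumed to be a previously added
 * BTF_KIND_INT type ID, e.g. from btf__add_int() above). Describes "int[10]",
 * reusing int both as the index and the element type:
 *
 *	int arr_id = btf__add_array(btf, int_id, int_id, 10);
 */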
2169 
2170 /* generic STRUCT/UNION append function */
2171 static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz)
2172 {
2173 	struct btf_type *t;
2174 	int sz, name_off = 0;
2175 
2176 	if (btf_ensure_modifiable(btf))
2177 		return libbpf_err(-ENOMEM);
2178 
2179 	sz = sizeof(struct btf_type);
2180 	t = btf_add_type_mem(btf, sz);
2181 	if (!t)
2182 		return libbpf_err(-ENOMEM);
2183 
2184 	if (name && name[0]) {
2185 		name_off = btf__add_str(btf, name);
2186 		if (name_off < 0)
2187 			return name_off;
2188 	}
2189 
2190 	/* start out with vlen=0 and no kflag; this will be adjusted when
2191 	 * adding each member
2192 	 */
2193 	t->name_off = name_off;
2194 	t->info = btf_type_info(kind, 0, 0);
2195 	t->size = bytes_sz;
2196 
2197 	return btf_commit_type(btf, sz);
2198 }
2199 
2200 /*
2201  * Append new BTF_KIND_STRUCT type with:
2202  *   - *name* - name of the struct, can be NULL or empty for anonymous structs;
2203  *   - *byte_sz* - size of the struct, in bytes;
2204  *
2205  * Struct initially has no fields in it. Fields can be added by
2206  * btf__add_field() right after btf__add_struct() succeeds.
2207  *
2208  * Returns:
2209  *   - >0, type ID of newly added BTF type;
2210  *   - <0, on error.
2211  */
2212 int btf__add_struct(struct btf *btf, const char *name, __u32 byte_sz)
2213 {
2214 	return btf_add_composite(btf, BTF_KIND_STRUCT, name, byte_sz);
2215 }
2216 
2217 /*
2218  * Append new BTF_KIND_UNION type with:
2219  *   - *name* - name of the union, can be NULL or empty for anonymous union;
2220  *   - *byte_sz* - size of the union, in bytes;
2221  *
2222  * Union initially has no fields in it. Fields can be added by
2223  * btf__add_field() right after btf__add_union() succeeds. All fields
2224  * should have *bit_offset* of 0.
2225  *
2226  * Returns:
2227  *   - >0, type ID of newly added BTF type;
2228  *   - <0, on error.
2229  */
2230 int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz)
2231 {
2232 	return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz);
2233 }
2234 
2235 static struct btf_type *btf_last_type(struct btf *btf)
2236 {
2237 	return btf_type_by_id(btf, btf__type_cnt(btf) - 1);
2238 }
2239 
2240 /*
2241  * Append new field for the current STRUCT/UNION type with:
2242  *   - *name* - name of the field, can be NULL or empty for anonymous field;
2243  *   - *type_id* - type ID for the type describing field type;
2244  *   - *bit_offset* - bit offset of the start of the field within struct/union;
2245  *   - *bit_size* - bit size of a bitfield, 0 for non-bitfield fields;
2246  * Returns:
2247  *   -  0, on success;
2248  *   - <0, on error.
2249  */
2250 int btf__add_field(struct btf *btf, const char *name, int type_id,
2251 		   __u32 bit_offset, __u32 bit_size)
2252 {
2253 	struct btf_type *t;
2254 	struct btf_member *m;
2255 	bool is_bitfield;
2256 	int sz, name_off = 0;
2257 
2258 	/* last type should be union/struct */
2259 	if (btf->nr_types == 0)
2260 		return libbpf_err(-EINVAL);
2261 	t = btf_last_type(btf);
2262 	if (!btf_is_composite(t))
2263 		return libbpf_err(-EINVAL);
2264 
2265 	if (validate_type_id(type_id))
2266 		return libbpf_err(-EINVAL);
2267 	/* best-effort bit field offset/size enforcement */
2268 	is_bitfield = bit_size || (bit_offset % 8 != 0);
2269 	if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff))
2270 		return libbpf_err(-EINVAL);
2271 
2272 	/* only offset 0 is allowed for unions */
2273 	if (btf_is_union(t) && bit_offset)
2274 		return libbpf_err(-EINVAL);
2275 
2276 	/* decompose and invalidate raw data */
2277 	if (btf_ensure_modifiable(btf))
2278 		return libbpf_err(-ENOMEM);
2279 
2280 	sz = sizeof(struct btf_member);
2281 	m = btf_add_type_mem(btf, sz);
2282 	if (!m)
2283 		return libbpf_err(-ENOMEM);
2284 
2285 	if (name && name[0]) {
2286 		name_off = btf__add_str(btf, name);
2287 		if (name_off < 0)
2288 			return name_off;
2289 	}
2290 
2291 	m->name_off = name_off;
2292 	m->type = type_id;
2293 	m->offset = bit_offset | (bit_size << 24);
2294 
2295 	/* btf_add_type_mem can invalidate t pointer */
2296 	t = btf_last_type(btf);
2297 	/* update parent type's vlen and kflag */
2298 	t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t));
2299 
2300 	btf->hdr->type_len += sz;
2301 	btf->hdr->str_off += sz;
2302 	return 0;
2303 }
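
/* Usage sketch (illustrative; int_id is an assumed, previously added 4-byte
 * int type ID, error handling omitted). Describes
 * "struct point { int x; int y; }"; note that bit_offset is in bits, so the
 * second 4-byte field starts at offset 32:
 *
 *	int struct_id = btf__add_struct(btf, "point", 8);
 *	btf__add_field(btf, "x", int_id, 0, 0);
 *	btf__add_field(btf, "y", int_id, 32, 0);
 */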
2304 
2305 static int btf_add_enum_common(struct btf *btf, const char *name, __u32 byte_sz,
2306 			       bool is_signed, __u8 kind)
2307 {
2308 	struct btf_type *t;
2309 	int sz, name_off = 0;
2310 
2311 	/* byte_sz must be power of 2 */
2312 	if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8)
2313 		return libbpf_err(-EINVAL);
2314 
2315 	if (btf_ensure_modifiable(btf))
2316 		return libbpf_err(-ENOMEM);
2317 
2318 	sz = sizeof(struct btf_type);
2319 	t = btf_add_type_mem(btf, sz);
2320 	if (!t)
2321 		return libbpf_err(-ENOMEM);
2322 
2323 	if (name && name[0]) {
2324 		name_off = btf__add_str(btf, name);
2325 		if (name_off < 0)
2326 			return name_off;
2327 	}
2328 
2329 	/* start out with vlen=0; it will be adjusted when adding enum values */
2330 	t->name_off = name_off;
2331 	t->info = btf_type_info(kind, 0, is_signed);
2332 	t->size = byte_sz;
2333 
2334 	return btf_commit_type(btf, sz);
2335 }
2336 
2337 /*
2338  * Append new BTF_KIND_ENUM type with:
2339  *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
2340  *   - *byte_sz* - size of the enum, in bytes.
2341  *
2342  * Enum initially has no enum values in it (and corresponds to enum forward
2343  * declaration). Enumerator values can be added by btf__add_enum_value()
2344  * immediately after btf__add_enum() succeeds.
2345  *
2346  * Returns:
2347  *   - >0, type ID of newly added BTF type;
2348  *   - <0, on error.
2349  */
2350 int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
2351 {
2352 	/*
2353 	 * set the signedness to unsigned; it will be changed to signed
2354 	 * if any later enumerator is negative.
2355 	 */
2356 	return btf_add_enum_common(btf, name, byte_sz, false, BTF_KIND_ENUM);
2357 }
2358 
2359 /*
2360  * Append new enum value for the current ENUM type with:
2361  *   - *name* - name of the enumerator value, can't be NULL or empty;
2362  *   - *value* - integer value corresponding to enum value *name*;
2363  * Returns:
2364  *   -  0, on success;
2365  *   - <0, on error.
2366  */
2367 int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
2368 {
2369 	struct btf_type *t;
2370 	struct btf_enum *v;
2371 	int sz, name_off;
2372 
2373 	/* last type should be BTF_KIND_ENUM */
2374 	if (btf->nr_types == 0)
2375 		return libbpf_err(-EINVAL);
2376 	t = btf_last_type(btf);
2377 	if (!btf_is_enum(t))
2378 		return libbpf_err(-EINVAL);
2379 
2380 	/* non-empty name */
2381 	if (!name || !name[0])
2382 		return libbpf_err(-EINVAL);
2383 	if (value < INT_MIN || value > UINT_MAX)
2384 		return libbpf_err(-E2BIG);
2385 
2386 	/* decompose and invalidate raw data */
2387 	if (btf_ensure_modifiable(btf))
2388 		return libbpf_err(-ENOMEM);
2389 
2390 	sz = sizeof(struct btf_enum);
2391 	v = btf_add_type_mem(btf, sz);
2392 	if (!v)
2393 		return libbpf_err(-ENOMEM);
2394 
2395 	name_off = btf__add_str(btf, name);
2396 	if (name_off < 0)
2397 		return name_off;
2398 
2399 	v->name_off = name_off;
2400 	v->val = value;
2401 
2402 	/* update parent type's vlen */
2403 	t = btf_last_type(btf);
2404 	btf_type_inc_vlen(t);
2405 
2406 	/* if negative value, set signedness to signed */
2407 	if (value < 0)
2408 		t->info = btf_type_info(btf_kind(t), btf_vlen(t), true);
2409 
2410 	btf->hdr->type_len += sz;
2411 	btf->hdr->str_off += sz;
2412 	return 0;
2413 }
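
/* Usage sketch (illustrative, error handling omitted). Describes
 * "enum color { RED = 1, BLUE = 2 }" backed by a 4-byte type:
 *
 *	int enum_id = btf__add_enum(btf, "color", 4);
 *	btf__add_enum_value(btf, "RED", 1);
 *	btf__add_enum_value(btf, "BLUE", 2);
 */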
2414 
2415 /*
2416  * Append new BTF_KIND_ENUM64 type with:
2417  *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
2418  *   - *byte_sz* - size of the enum, in bytes;
2419  *   - *is_signed* - whether the enum values are signed or not.
2420  *
2421  * Enum initially has no enum values in it (and corresponds to enum forward
2422  * declaration). Enumerator values can be added by btf__add_enum64_value()
2423  * immediately after btf__add_enum64() succeeds.
2424  *
2425  * Returns:
2426  *   - >0, type ID of newly added BTF type;
2427  *   - <0, on error.
2428  */
2429 int btf__add_enum64(struct btf *btf, const char *name, __u32 byte_sz,
2430 		    bool is_signed)
2431 {
2432 	return btf_add_enum_common(btf, name, byte_sz, is_signed,
2433 				   BTF_KIND_ENUM64);
2434 }
2435 
2436 /*
2437  * Append new enum value for the current ENUM64 type with:
2438  *   - *name* - name of the enumerator value, can't be NULL or empty;
2439  *   - *value* - integer value corresponding to enum value *name*;
2440  * Returns:
2441  *   -  0, on success;
2442  *   - <0, on error.
2443  */
2444 int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value)
2445 {
2446 	struct btf_enum64 *v;
2447 	struct btf_type *t;
2448 	int sz, name_off;
2449 
2450 	/* last type should be BTF_KIND_ENUM64 */
2451 	if (btf->nr_types == 0)
2452 		return libbpf_err(-EINVAL);
2453 	t = btf_last_type(btf);
2454 	if (!btf_is_enum64(t))
2455 		return libbpf_err(-EINVAL);
2456 
2457 	/* non-empty name */
2458 	if (!name || !name[0])
2459 		return libbpf_err(-EINVAL);
2460 
2461 	/* decompose and invalidate raw data */
2462 	if (btf_ensure_modifiable(btf))
2463 		return libbpf_err(-ENOMEM);
2464 
2465 	sz = sizeof(struct btf_enum64);
2466 	v = btf_add_type_mem(btf, sz);
2467 	if (!v)
2468 		return libbpf_err(-ENOMEM);
2469 
2470 	name_off = btf__add_str(btf, name);
2471 	if (name_off < 0)
2472 		return name_off;
2473 
2474 	v->name_off = name_off;
2475 	v->val_lo32 = (__u32)value;
2476 	v->val_hi32 = value >> 32;
2477 
2478 	/* update parent type's vlen */
2479 	t = btf_last_type(btf);
2480 	btf_type_inc_vlen(t);
2481 
2482 	btf->hdr->type_len += sz;
2483 	btf->hdr->str_off += sz;
2484 	return 0;
2485 }
2486 
2487 /*
2488  * Append new BTF_KIND_FWD type with:
2489  *   - *name*, non-empty/non-NULL name;
2490  *   - *fwd_kind*, kind of forward declaration, one of BTF_FWD_STRUCT,
2491  *     BTF_FWD_UNION, or BTF_FWD_ENUM;
2492  * Returns:
2493  *   - >0, type ID of newly added BTF type;
2494  *   - <0, on error.
2495  */
2496 int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind)
2497 {
2498 	if (!name || !name[0])
2499 		return libbpf_err(-EINVAL);
2500 
2501 	switch (fwd_kind) {
2502 	case BTF_FWD_STRUCT:
2503 	case BTF_FWD_UNION: {
2504 		struct btf_type *t;
2505 		int id;
2506 
2507 		id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0);
2508 		if (id <= 0)
2509 			return id;
2510 		t = btf_type_by_id(btf, id);
2511 		t->info = btf_type_info(BTF_KIND_FWD, 0, fwd_kind == BTF_FWD_UNION);
2512 		return id;
2513 	}
2514 	case BTF_FWD_ENUM:
2515 		/* enum forward in BTF currently is just an enum with no enum
2516 		 * values; we also assume a standard 4-byte size for it
2517 		 */
2518 		return btf__add_enum(btf, name, sizeof(int));
2519 	default:
2520 		return libbpf_err(-EINVAL);
2521 	}
2522 }
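
/* Usage sketch (illustrative). Describes a "struct S;" forward declaration:
 *
 *	int fwd_id = btf__add_fwd(btf, "S", BTF_FWD_STRUCT);
 */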
2523 
2524 /*
2525  * Append new BTF_KIND_TYPEDEF type with:
2526  *   - *name*, non-empty/non-NULL name;
2527  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2528  * Returns:
2529  *   - >0, type ID of newly added BTF type;
2530  *   - <0, on error.
2531  */
2532 int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id)
2533 {
2534 	if (!name || !name[0])
2535 		return libbpf_err(-EINVAL);
2536 
2537 	return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id);
2538 }
2539 
2540 /*
2541  * Append new BTF_KIND_VOLATILE type with:
2542  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2543  * Returns:
2544  *   - >0, type ID of newly added BTF type;
2545  *   - <0, on error.
2546  */
2547 int btf__add_volatile(struct btf *btf, int ref_type_id)
2548 {
2549 	return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id);
2550 }
2551 
2552 /*
2553  * Append new BTF_KIND_CONST type with:
2554  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2555  * Returns:
2556  *   - >0, type ID of newly added BTF type;
2557  *   - <0, on error.
2558  */
2559 int btf__add_const(struct btf *btf, int ref_type_id)
2560 {
2561 	return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id);
2562 }
2563 
2564 /*
2565  * Append new BTF_KIND_RESTRICT type with:
2566  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2567  * Returns:
2568  *   - >0, type ID of newly added BTF type;
2569  *   - <0, on error.
2570  */
2571 int btf__add_restrict(struct btf *btf, int ref_type_id)
2572 {
2573 	return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id);
2574 }
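
/* Usage sketch (illustrative; int_id is an assumed, previously added int type
 * ID). Modifiers chain through ref_type_id, so "const volatile int" is built
 * inside out:
 *
 *	int vol_id = btf__add_volatile(btf, int_id);
 *	int cv_id = btf__add_const(btf, vol_id);
 */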
2575 
2576 /*
2577  * Append new BTF_KIND_TYPE_TAG type with:
2578  *   - *value*, non-empty/non-NULL tag value;
2579  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2580  * Returns:
2581  *   - >0, type ID of newly added BTF type;
2582  *   - <0, on error.
2583  */
2584 int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
2585 {
2586 	if (!value || !value[0])
2587 		return libbpf_err(-EINVAL);
2588 
2589 	return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id);
2590 }
2591 
2592 /*
2593  * Append new BTF_KIND_FUNC type with:
2594  *   - *name*, non-empty/non-NULL name;
 *   - *linkage* - function linkage, one of BTF_FUNC_STATIC, BTF_FUNC_GLOBAL or BTF_FUNC_EXTERN;
2595  *   - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet;
2596  * Returns:
2597  *   - >0, type ID of newly added BTF type;
2598  *   - <0, on error.
2599  */
2600 int btf__add_func(struct btf *btf, const char *name,
2601 		  enum btf_func_linkage linkage, int proto_type_id)
2602 {
2603 	int id;
2604 
2605 	if (!name || !name[0])
2606 		return libbpf_err(-EINVAL);
2607 	if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL &&
2608 	    linkage != BTF_FUNC_EXTERN)
2609 		return libbpf_err(-EINVAL);
2610 
2611 	id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id);
2612 	if (id > 0) {
2613 		struct btf_type *t = btf_type_by_id(btf, id);
2614 
2615 		t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0);
2616 	}
2617 	return libbpf_err(id);
2618 }
2619 
2620 /*
2621  * Append new BTF_KIND_FUNC_PROTO with:
2622  *   - *ret_type_id* - type ID for return result of a function.
2623  *
2624  * Function prototype initially has no arguments, but they can be added by
2625  * btf__add_func_param() one by one, immediately after
2626  * btf__add_func_proto() succeeded.
2627  *
2628  * Returns:
2629  *   - >0, type ID of newly added BTF type;
2630  *   - <0, on error.
2631  */
2632 int btf__add_func_proto(struct btf *btf, int ret_type_id)
2633 {
2634 	struct btf_type *t;
2635 	int sz;
2636 
2637 	if (validate_type_id(ret_type_id))
2638 		return libbpf_err(-EINVAL);
2639 
2640 	if (btf_ensure_modifiable(btf))
2641 		return libbpf_err(-ENOMEM);
2642 
2643 	sz = sizeof(struct btf_type);
2644 	t = btf_add_type_mem(btf, sz);
2645 	if (!t)
2646 		return libbpf_err(-ENOMEM);
2647 
2648 	/* start out with vlen=0; this will be adjusted when adding function
2649 	 * parameters, if necessary
2650 	 */
2651 	t->name_off = 0;
2652 	t->info = btf_type_info(BTF_KIND_FUNC_PROTO, 0, 0);
2653 	t->type = ret_type_id;
2654 
2655 	return btf_commit_type(btf, sz);
2656 }
2657 
2658 /*
2659  * Append new function parameter for current FUNC_PROTO type with:
2660  *   - *name* - parameter name, can be NULL or empty;
2661  *   - *type_id* - type ID describing the type of the parameter.
2662  * Returns:
2663  *   -  0, on success;
2664  *   - <0, on error.
2665  */
2666 int btf__add_func_param(struct btf *btf, const char *name, int type_id)
2667 {
2668 	struct btf_type *t;
2669 	struct btf_param *p;
2670 	int sz, name_off = 0;
2671 
2672 	if (validate_type_id(type_id))
2673 		return libbpf_err(-EINVAL);
2674 
2675 	/* last type should be BTF_KIND_FUNC_PROTO */
2676 	if (btf->nr_types == 0)
2677 		return libbpf_err(-EINVAL);
2678 	t = btf_last_type(btf);
2679 	if (!btf_is_func_proto(t))
2680 		return libbpf_err(-EINVAL);
2681 
2682 	/* decompose and invalidate raw data */
2683 	if (btf_ensure_modifiable(btf))
2684 		return libbpf_err(-ENOMEM);
2685 
2686 	sz = sizeof(struct btf_param);
2687 	p = btf_add_type_mem(btf, sz);
2688 	if (!p)
2689 		return libbpf_err(-ENOMEM);
2690 
2691 	if (name && name[0]) {
2692 		name_off = btf__add_str(btf, name);
2693 		if (name_off < 0)
2694 			return name_off;
2695 	}
2696 
2697 	p->name_off = name_off;
2698 	p->type = type_id;
2699 
2700 	/* update parent type's vlen */
2701 	t = btf_last_type(btf);
2702 	btf_type_inc_vlen(t);
2703 
2704 	btf->hdr->type_len += sz;
2705 	btf->hdr->str_off += sz;
2706 	return 0;
2707 }
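
/* Usage sketch (illustrative; int_id is an assumed, previously added int type
 * ID, error handling omitted). Describes "int square(int x)"; parameters are
 * attached to the FUNC_PROTO before the FUNC referencing it is added:
 *
 *	int proto_id = btf__add_func_proto(btf, int_id);
 *	btf__add_func_param(btf, "x", int_id);
 *	int func_id = btf__add_func(btf, "square", BTF_FUNC_GLOBAL, proto_id);
 */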
2708 
2709 /*
2710  * Append new BTF_KIND_VAR type with:
2711  *   - *name* - non-empty/non-NULL name;
2712  *   - *linkage* - variable linkage, one of BTF_VAR_STATIC,
2713  *     BTF_VAR_GLOBAL_ALLOCATED, or BTF_VAR_GLOBAL_EXTERN;
2714  *   - *type_id* - type ID of the type describing the type of the variable.
2715  * Returns:
2716  *   - >0, type ID of newly added BTF type;
2717  *   - <0, on error.
2718  */
2719 int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id)
2720 {
2721 	struct btf_type *t;
2722 	struct btf_var *v;
2723 	int sz, name_off;
2724 
2725 	/* non-empty name */
2726 	if (!name || !name[0])
2727 		return libbpf_err(-EINVAL);
2728 	if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED &&
2729 	    linkage != BTF_VAR_GLOBAL_EXTERN)
2730 		return libbpf_err(-EINVAL);
2731 	if (validate_type_id(type_id))
2732 		return libbpf_err(-EINVAL);
2733 
2734 	/* deconstruct BTF, if necessary, and invalidate raw_data */
2735 	if (btf_ensure_modifiable(btf))
2736 		return libbpf_err(-ENOMEM);
2737 
2738 	sz = sizeof(struct btf_type) + sizeof(struct btf_var);
2739 	t = btf_add_type_mem(btf, sz);
2740 	if (!t)
2741 		return libbpf_err(-ENOMEM);
2742 
2743 	name_off = btf__add_str(btf, name);
2744 	if (name_off < 0)
2745 		return name_off;
2746 
2747 	t->name_off = name_off;
2748 	t->info = btf_type_info(BTF_KIND_VAR, 0, 0);
2749 	t->type = type_id;
2750 
2751 	v = btf_var(t);
2752 	v->linkage = linkage;
2753 
2754 	return btf_commit_type(btf, sz);
2755 }
2756 
2757 /*
2758  * Append new BTF_KIND_DATASEC type with:
2759  *   - *name* - non-empty/non-NULL name;
2760  *   - *byte_sz* - data section size, in bytes.
2761  *
2762  * Data section is initially empty. Variable info can be added with
2763  * btf__add_datasec_var_info() calls, after btf__add_datasec() succeeds.
2764  *
2765  * Returns:
2766  *   - >0, type ID of newly added BTF type;
2767  *   - <0, on error.
2768  */
2769 int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz)
2770 {
2771 	struct btf_type *t;
2772 	int sz, name_off;
2773 
2774 	/* non-empty name */
2775 	if (!name || !name[0])
2776 		return libbpf_err(-EINVAL);
2777 
2778 	if (btf_ensure_modifiable(btf))
2779 		return libbpf_err(-ENOMEM);
2780 
2781 	sz = sizeof(struct btf_type);
2782 	t = btf_add_type_mem(btf, sz);
2783 	if (!t)
2784 		return libbpf_err(-ENOMEM);
2785 
2786 	name_off = btf__add_str(btf, name);
2787 	if (name_off < 0)
2788 		return name_off;
2789 
2790 	/* start with vlen=0, which will be updated as var_secinfos are added */
2791 	t->name_off = name_off;
2792 	t->info = btf_type_info(BTF_KIND_DATASEC, 0, 0);
2793 	t->size = byte_sz;
2794 
2795 	return btf_commit_type(btf, sz);
2796 }
2797 
2798 /*
2799  * Append new data section variable information entry for current DATASEC type:
2800  *   - *var_type_id* - type ID, describing type of the variable;
2801  *   - *offset* - variable offset within data section, in bytes;
2802  *   - *byte_sz* - variable size, in bytes.
2803  *
2804  * Returns:
2805  *   -  0, on success;
2806  *   - <0, on error.
2807  */
2808 int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz)
2809 {
2810 	struct btf_type *t;
2811 	struct btf_var_secinfo *v;
2812 	int sz;
2813 
2814 	/* last type should be BTF_KIND_DATASEC */
2815 	if (btf->nr_types == 0)
2816 		return libbpf_err(-EINVAL);
2817 	t = btf_last_type(btf);
2818 	if (!btf_is_datasec(t))
2819 		return libbpf_err(-EINVAL);
2820 
2821 	if (validate_type_id(var_type_id))
2822 		return libbpf_err(-EINVAL);
2823 
2824 	/* decompose and invalidate raw data */
2825 	if (btf_ensure_modifiable(btf))
2826 		return libbpf_err(-ENOMEM);
2827 
2828 	sz = sizeof(struct btf_var_secinfo);
2829 	v = btf_add_type_mem(btf, sz);
2830 	if (!v)
2831 		return libbpf_err(-ENOMEM);
2832 
2833 	v->type = var_type_id;
2834 	v->offset = offset;
2835 	v->size = byte_sz;
2836 
2837 	/* update parent type's vlen */
2838 	t = btf_last_type(btf);
2839 	btf_type_inc_vlen(t);
2840 
2841 	btf->hdr->type_len += sz;
2842 	btf->hdr->str_off += sz;
2843 	return 0;
2844 }
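
/* Usage sketch (illustrative; int_id is an assumed, previously added 4-byte
 * int type ID, error handling omitted). Describes a 4-byte ".data" section
 * holding a single global variable at offset 0:
 *
 *	int var_id = btf__add_var(btf, "counter", BTF_VAR_GLOBAL_ALLOCATED, int_id);
 *	int sec_id = btf__add_datasec(btf, ".data", 4);
 *	btf__add_datasec_var_info(btf, var_id, 0, 4);
 */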
2845 
2846 /*
2847  * Append new BTF_KIND_DECL_TAG type with:
2848  *   - *value* - non-empty/non-NULL string;
2849  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2850  *   - *component_idx* - -1 for tagging reference type, otherwise struct/union
2851  *     member or function argument index;
2852  * Returns:
2853  *   - >0, type ID of newly added BTF type;
2854  *   - <0, on error.
2855  */
2856 int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
2857 		 int component_idx)
2858 {
2859 	struct btf_type *t;
2860 	int sz, value_off;
2861 
2862 	if (!value || !value[0] || component_idx < -1)
2863 		return libbpf_err(-EINVAL);
2864 
2865 	if (validate_type_id(ref_type_id))
2866 		return libbpf_err(-EINVAL);
2867 
2868 	if (btf_ensure_modifiable(btf))
2869 		return libbpf_err(-ENOMEM);
2870 
2871 	sz = sizeof(struct btf_type) + sizeof(struct btf_decl_tag);
2872 	t = btf_add_type_mem(btf, sz);
2873 	if (!t)
2874 		return libbpf_err(-ENOMEM);
2875 
2876 	value_off = btf__add_str(btf, value);
2877 	if (value_off < 0)
2878 		return value_off;
2879 
2880 	t->name_off = value_off;
2881 	t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, false);
2882 	t->type = ref_type_id;
2883 	btf_decl_tag(t)->component_idx = component_idx;
2884 
2885 	return btf_commit_type(btf, sz);
2886 }
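
/* Usage sketch (illustrative; struct_id is an assumed, previously added struct
 * type ID). A component_idx of -1 tags the struct itself; 0 would tag its
 * first member:
 *
 *	int tag_id = btf__add_decl_tag(btf, "user", struct_id, -1);
 */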
2887 
2888 struct btf_ext_sec_setup_param {
2889 	__u32 off;
2890 	__u32 len;
2891 	__u32 min_rec_size;
2892 	struct btf_ext_info *ext_info;
2893 	const char *desc;
2894 };
2895 
2896 static int btf_ext_setup_info(struct btf_ext *btf_ext,
2897 			      struct btf_ext_sec_setup_param *ext_sec)
2898 {
2899 	const struct btf_ext_info_sec *sinfo;
2900 	struct btf_ext_info *ext_info;
2901 	__u32 info_left, record_size;
2902 	size_t sec_cnt = 0;
2903 	/* The start of the info sec (including the __u32 record_size). */
2904 	void *info;
2905 
2906 	if (ext_sec->len == 0)
2907 		return 0;
2908 
2909 	if (ext_sec->off & 0x03) {
2910 		pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
2911 		     ext_sec->desc);
2912 		return -EINVAL;
2913 	}
2914 
2915 	info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
2916 	info_left = ext_sec->len;
2917 
2918 	if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
2919 		pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
2920 			 ext_sec->desc, ext_sec->off, ext_sec->len);
2921 		return -EINVAL;
2922 	}
2923 
2924 	/* At least a record size */
2925 	if (info_left < sizeof(__u32)) {
2926 		pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
2927 		return -EINVAL;
2928 	}
2929 
2930 	/* The record size must be at least the required minimum and a multiple of 4 */
2931 	record_size = *(__u32 *)info;
2932 	if (record_size < ext_sec->min_rec_size ||
2933 	    record_size & 0x03) {
2934 		pr_debug("%s section in .BTF.ext has invalid record size %u\n",
2935 			 ext_sec->desc, record_size);
2936 		return -EINVAL;
2937 	}
2938 
2939 	sinfo = info + sizeof(__u32);
2940 	info_left -= sizeof(__u32);
2941 
2942 	/* If no records, return failure now so .BTF.ext won't be used. */
2943 	if (!info_left) {
2944 		pr_debug("%s section in .BTF.ext has no records\n", ext_sec->desc);
2945 		return -EINVAL;
2946 	}
2947 
2948 	while (info_left) {
2949 		unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
2950 		__u64 total_record_size;
2951 		__u32 num_records;
2952 
2953 		if (info_left < sec_hdrlen) {
2954 			pr_debug("%s section header is not found in .BTF.ext\n",
2955 			     ext_sec->desc);
2956 			return -EINVAL;
2957 		}
2958 
2959 		num_records = sinfo->num_info;
2960 		if (num_records == 0) {
2961 			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
2962 			     ext_sec->desc);
2963 			return -EINVAL;
2964 		}
2965 
2966 		total_record_size = sec_hdrlen + (__u64)num_records * record_size;
2967 		if (info_left < total_record_size) {
2968 			pr_debug("%s section records go past the end of .BTF.ext\n",
2969 			     ext_sec->desc);
2970 			return -EINVAL;
2971 		}
2972 
2973 		info_left -= total_record_size;
2974 		sinfo = (void *)sinfo + total_record_size;
2975 		sec_cnt++;
2976 	}
2977 
2978 	ext_info = ext_sec->ext_info;
2979 	ext_info->len = ext_sec->len - sizeof(__u32);
2980 	ext_info->rec_size = record_size;
2981 	ext_info->info = info + sizeof(__u32);
2982 	ext_info->sec_cnt = sec_cnt;
2983 
2984 	return 0;
2985 }
2986 
2987 static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
2988 {
2989 	struct btf_ext_sec_setup_param param = {
2990 		.off = btf_ext->hdr->func_info_off,
2991 		.len = btf_ext->hdr->func_info_len,
2992 		.min_rec_size = sizeof(struct bpf_func_info_min),
2993 		.ext_info = &btf_ext->func_info,
2994 		.desc = "func_info"
2995 	};
2996 
2997 	return btf_ext_setup_info(btf_ext, &param);
2998 }
2999 
3000 static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
3001 {
3002 	struct btf_ext_sec_setup_param param = {
3003 		.off = btf_ext->hdr->line_info_off,
3004 		.len = btf_ext->hdr->line_info_len,
3005 		.min_rec_size = sizeof(struct bpf_line_info_min),
3006 		.ext_info = &btf_ext->line_info,
3007 		.desc = "line_info",
3008 	};
3009 
3010 	return btf_ext_setup_info(btf_ext, &param);
3011 }
3012 
3013 static int btf_ext_setup_core_relos(struct btf_ext *btf_ext)
3014 {
3015 	struct btf_ext_sec_setup_param param = {
3016 		.off = btf_ext->hdr->core_relo_off,
3017 		.len = btf_ext->hdr->core_relo_len,
3018 		.min_rec_size = sizeof(struct bpf_core_relo),
3019 		.ext_info = &btf_ext->core_relo_info,
3020 		.desc = "core_relo",
3021 	};
3022 
3023 	return btf_ext_setup_info(btf_ext, &param);
3024 }
3025 
3026 static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
3027 {
3028 	const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
3029 
3030 	if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
3031 	    data_size < hdr->hdr_len) {
3032 		pr_debug("BTF.ext header not found\n");
3033 		return -EINVAL;
3034 	}
3035 
3036 	if (hdr->magic == bswap_16(BTF_MAGIC)) {
3037 		pr_warn("BTF.ext in non-native endianness is not supported\n");
3038 		return -ENOTSUP;
3039 	} else if (hdr->magic != BTF_MAGIC) {
3040 		pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
3041 		return -EINVAL;
3042 	}
3043 
3044 	if (hdr->version != BTF_VERSION) {
3045 		pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
3046 		return -ENOTSUP;
3047 	}
3048 
3049 	if (hdr->flags) {
3050 		pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
3051 		return -ENOTSUP;
3052 	}
3053 
3054 	if (data_size == hdr->hdr_len) {
3055 		pr_debug("BTF.ext has no data\n");
3056 		return -EINVAL;
3057 	}
3058 
3059 	return 0;
3060 }
3061 
3062 void btf_ext__free(struct btf_ext *btf_ext)
3063 {
3064 	if (IS_ERR_OR_NULL(btf_ext))
3065 		return;
3066 	free(btf_ext->func_info.sec_idxs);
3067 	free(btf_ext->line_info.sec_idxs);
3068 	free(btf_ext->core_relo_info.sec_idxs);
3069 	free(btf_ext->data);
3070 	free(btf_ext);
3071 }
3072 
3073 struct btf_ext *btf_ext__new(const __u8 *data, __u32 size)
3074 {
3075 	struct btf_ext *btf_ext;
3076 	int err;
3077 
3078 	btf_ext = calloc(1, sizeof(struct btf_ext));
3079 	if (!btf_ext)
3080 		return libbpf_err_ptr(-ENOMEM);
3081 
3082 	btf_ext->data_size = size;
3083 	btf_ext->data = malloc(size);
3084 	if (!btf_ext->data) {
3085 		err = -ENOMEM;
3086 		goto done;
3087 	}
3088 	memcpy(btf_ext->data, data, size);
3089 
3090 	err = btf_ext_parse_hdr(btf_ext->data, size);
3091 	if (err)
3092 		goto done;
3093 
3094 	if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
3095 		err = -EINVAL;
3096 		goto done;
3097 	}
3098 
3099 	err = btf_ext_setup_func_info(btf_ext);
3100 	if (err)
3101 		goto done;
3102 
3103 	err = btf_ext_setup_line_info(btf_ext);
3104 	if (err)
3105 		goto done;
3106 
3107 	if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
3108 		goto done; /* skip core relos parsing */
3109 
3110 	err = btf_ext_setup_core_relos(btf_ext);
3111 	if (err)
3112 		goto done;
3113 
3114 done:
3115 	if (err) {
3116 		btf_ext__free(btf_ext);
3117 		return libbpf_err_ptr(err);
3118 	}
3119 
3120 	return btf_ext;
3121 }
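
/* Usage sketch (illustrative; data/size are assumed to hold the raw contents
 * of an ELF .BTF.ext section, read by the caller beforehand):
 *
 *	struct btf_ext *ext = btf_ext__new(data, size);
 *
 *	if (libbpf_get_error(ext))
 *		return -EINVAL;
 *
 * Note that btf_ext__new() copies the data, so the caller's buffer can be
 * freed right away; release the copy with btf_ext__free(ext) when done.
 */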
3122 
3123 const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size)
3124 {
3125 	*size = btf_ext->data_size;
3126 	return btf_ext->data;
3127 }
3128 
3129 __attribute__((alias("btf_ext__raw_data")))
3130 const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size);
3131 
3132 
3133 struct btf_dedup;
3134 
3135 static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
3136 static void btf_dedup_free(struct btf_dedup *d);
3137 static int btf_dedup_prep(struct btf_dedup *d);
3138 static int btf_dedup_strings(struct btf_dedup *d);
3139 static int btf_dedup_prim_types(struct btf_dedup *d);
3140 static int btf_dedup_struct_types(struct btf_dedup *d);
3141 static int btf_dedup_ref_types(struct btf_dedup *d);
3142 static int btf_dedup_resolve_fwds(struct btf_dedup *d);
3143 static int btf_dedup_compact_types(struct btf_dedup *d);
3144 static int btf_dedup_remap_types(struct btf_dedup *d);
3145 
3146 /*
3147  * Deduplicate BTF types and strings.
3148  *
3149  * The BTF dedup algorithm takes as input a `struct btf` representing the `.BTF`
3150  * ELF section with all BTF type descriptors and string data. It overwrites that
3151  * memory in-place with deduplicated types and strings without any loss of
3152  * information. If an optional `struct btf_ext` representing the '.BTF.ext' ELF
3153  * section is provided, all the strings referenced from the .BTF.ext section are
3154  * honored and updated to point to the right offsets after deduplication.
3155  *
3156  * If the function returns with an error, type/string data might be garbled and
3157  * should be discarded.
3158  *
3159  * A more verbose and detailed description of both the problem btf_dedup is
3160  * solving and its solution can be found at:
3161  * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
3162  *
3163  * Problem description and justification
3164  * =====================================
3165  *
3166  * BTF type information is typically emitted either as a result of conversion
3167  * from DWARF to BTF or directly by the compiler. In both cases, each compilation
3168  * unit contains information about a subset of all the types that are used
3169  * in an application. These subsets frequently overlap and contain a lot
3170  * of duplicated information when later concatenated together into a single
3171  * binary. This algorithm ensures that each unique type is represented by a single
3172  * BTF type descriptor, greatly reducing the resulting size of BTF data.
3173  *
3174  * Compilation unit isolation and subsequent duplication of data are not the only
3175  * problems. The same type hierarchy (e.g., a struct and all the types that the
3176  * struct references) in different compilation units can be represented in BTF to
3177  * various degrees of completeness (or, rather, incompleteness) due to
3178  * struct/union forward declarations.
3179  *
3180  * Let's take a look at an example that we'll use to better understand the
3181  * problem (and solution). Suppose we have two compilation units, each using the
3182  * same `struct S`, but each having incomplete type information about the
3183  * struct's fields:
3184  *
3185  * // CU #1:
3186  * struct S;
3187  * struct A {
3188  *	int a;
3189  *	struct A* self;
3190  *	struct S* parent;
3191  * };
3192  * struct B;
3193  * struct S {
3194  *	struct A* a_ptr;
3195  *	struct B* b_ptr;
3196  * };
3197  *
3198  * // CU #2:
3199  * struct S;
3200  * struct A;
3201  * struct B {
3202  *	int b;
3203  *	struct B* self;
3204  *	struct S* parent;
3205  * };
3206  * struct S {
3207  *	struct A* a_ptr;
3208  *	struct B* b_ptr;
3209  * };
3210  *
3211  * In the case of CU #1, BTF data will know only that `struct B` exists (but no
3212  * more), but will know the complete type information about `struct A`. While
3213  * for CU #2, it will know full type information about `struct B`, but will
3214  * only know about the forward declaration of `struct A` (in BTF terms, it will
3215  * have a `BTF_KIND_FWD` type descriptor with name `A`).
3216  *
3217  * This compilation unit isolation means that it's possible that there is no
3218  * single CU with complete type information describing structs `S`, `A`, and
3219  * `B`. Also, we might get tons of duplicated and redundant type information.
3220  *
3221  * An additional complication we need to keep in mind comes from the fact that
3222  * types, in general, can form graphs containing cycles, not just DAGs.
3223  *
3224  * While the algorithm does deduplication, it also merges and resolves type
3225  * information (unless disabled through `struct btf_dedup_opts`), whenever
3226  * possible. E.g., in the example above with two compilation units having partial
3227  * type information for structs `A` and `B`, the output of the algorithm will emit
3228  * a single copy of each BTF type that describes structs `A`, `B`, and `S`
3229  * (as well as type information for `int` and pointers), as if they were defined
3230  * in a single compilation unit as:
3231  *
3232  * struct A {
3233  *	int a;
3234  *	struct A* self;
3235  *	struct S* parent;
3236  * };
3237  * struct B {
3238  *	int b;
3239  *	struct B* self;
3240  *	struct S* parent;
3241  * };
3242  * struct S {
3243  *	struct A* a_ptr;
3244  *	struct B* b_ptr;
3245  * };
3246  *
3247  * Algorithm summary
3248  * =================
3249  *
3250  * Algorithm completes its work in 7 separate passes:
3251  *
3252  * 1. Strings deduplication.
3253  * 2. Primitive types deduplication (int, enum, fwd).
3254  * 3. Struct/union types deduplication.
3255  * 4. Resolve unambiguous forward declarations.
3256  * 5. Reference types deduplication (pointers, typedefs, arrays, funcs, func
3257  *    protos, and const/volatile/restrict modifiers).
3258  * 6. Types compaction.
3259  * 7. Types remapping.
3260  *
3261  * The algorithm determines a canonical type descriptor, which is a single
3262  * representative type for each truly unique type. This canonical type is the
3263  * one that will go into the final deduplicated BTF type information. For
3264  * structs/unions, it is also the type that the algorithm will merge additional
3265  * type information into (while resolving FWDs), as it discovers it from data in
3266  * other CUs. Each input BTF type eventually gets either mapped to itself, if
3267  * that type is canonical, or to some other type, if that type is equivalent
3268  * and was chosen as the canonical representative. This mapping is stored in the
3269  * `btf_dedup->map` array. This map is also used to record the STRUCT/UNION that
3270  * a FWD type got resolved to.
3271  *
3272  * To facilitate fast discovery of canonical types, we also maintain a canonical
3273  * index (`btf_dedup->dedup_table`), which maps a type descriptor's signature hash
3274  * (i.e., hashed kind, name, size, fields, etc) to a list of canonical types
3275  * that match that signature. With a sufficiently good choice of type signature
3276  * hashing function, we can limit the number of canonical types for each unique
3277  * type signature to a very small number, allowing us to find the canonical type
3278  * for any duplicated type very quickly.
3279  *
3280  * Struct/union deduplication is the most critical part, and the algorithm for
3281  * deduplicating structs/unions is described in greater detail in the comments
3282  * for the `btf_dedup_is_equiv` function.
3283  */
3284 int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
3285 {
3286 	struct btf_dedup *d;
3287 	int err;
3288 
3289 	if (!OPTS_VALID(opts, btf_dedup_opts))
3290 		return libbpf_err(-EINVAL);
3291 
3292 	d = btf_dedup_new(btf, opts);
3293 	if (IS_ERR(d)) {
3294 		pr_debug("btf_dedup_new failed: %ld\n", PTR_ERR(d));
3295 		return libbpf_err(-EINVAL);
3296 	}
3297 
3298 	if (btf_ensure_modifiable(btf)) {
3299 		err = -ENOMEM;
3300 		goto done;
3301 	}
3302 
3303 	err = btf_dedup_prep(d);
3304 	if (err) {
3305 		pr_debug("btf_dedup_prep failed:%d\n", err);
3306 		goto done;
3307 	}
3308 	err = btf_dedup_strings(d);
3309 	if (err < 0) {
3310 		pr_debug("btf_dedup_strings failed:%d\n", err);
3311 		goto done;
3312 	}
3313 	err = btf_dedup_prim_types(d);
3314 	if (err < 0) {
3315 		pr_debug("btf_dedup_prim_types failed:%d\n", err);
3316 		goto done;
3317 	}
3318 	err = btf_dedup_struct_types(d);
3319 	if (err < 0) {
3320 		pr_debug("btf_dedup_struct_types failed:%d\n", err);
3321 		goto done;
3322 	}
3323 	err = btf_dedup_resolve_fwds(d);
3324 	if (err < 0) {
3325 		pr_debug("btf_dedup_resolve_fwds failed:%d\n", err);
3326 		goto done;
3327 	}
3328 	err = btf_dedup_ref_types(d);
3329 	if (err < 0) {
3330 		pr_debug("btf_dedup_ref_types failed:%d\n", err);
3331 		goto done;
3332 	}
3333 	err = btf_dedup_compact_types(d);
3334 	if (err < 0) {
3335 		pr_debug("btf_dedup_compact_types failed:%d\n", err);
3336 		goto done;
3337 	}
3338 	err = btf_dedup_remap_types(d);
3339 	if (err < 0) {
3340 		pr_debug("btf_dedup_remap_types failed:%d\n", err);
3341 		goto done;
3342 	}
3343 
3344 done:
3345 	btf_dedup_free(d);
3346 	return libbpf_err(err);
3347 }
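
/* Usage sketch (illustrative, error handling simplified). Dedup a BTF object
 * together with its accompanying .BTF.ext string references:
 *
 *	LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = ext);
 *	int err = btf__dedup(btf, &opts);
 *
 * Passing NULL opts is also valid and dedups just the BTF object itself.
 */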
3348 
3349 #define BTF_UNPROCESSED_ID ((__u32)-1)
3350 #define BTF_IN_PROGRESS_ID ((__u32)-2)
3351 
3352 struct btf_dedup {
3353 	/* .BTF section to be deduped in-place */
3354 	struct btf *btf;
3355 	/*
3356 	 * Optional .BTF.ext section. When provided, any strings referenced
3357 	 * from it will be taken into account when deduping strings
3358 	 */
3359 	struct btf_ext *btf_ext;
3360 	/*
3361 	 * This is a map from any type's signature hash to a list of possible
3362 	 * canonical representative type candidates. Hash collisions are
3363 	 * ignored, so even types of various kinds can share the same list of
3364 	 * candidates, which is fine because we rely on subsequent
3365 	 * btf_xxx_equal() checks to authoritatively verify type equality.
3366 	 */
3367 	struct hashmap *dedup_table;
3368 	/* Canonical types map */
3369 	__u32 *map;
3370 	/* Hypothetical mapping, used during type graph equivalence checks */
3371 	__u32 *hypot_map;
3372 	__u32 *hypot_list;
3373 	size_t hypot_cnt;
3374 	size_t hypot_cap;
3375 	/* Whether the hypothetical mapping, if successful, would need to adjust
3376 	 * already canonicalized types (due to a new forward declaration to
3377 	 * concrete type resolution). In such a case, during split BTF dedup the
3378 	 * candidate type would still be considered different, because the base
3379 	 * BTF is considered to be immutable.
3380 	 */
3381 	bool hypot_adjust_canon;
3382 	/* Various option modifying behavior of algorithm */
3383 	struct btf_dedup_opts opts;
3384 	/* temporary strings deduplication state */
3385 	struct strset *strs_set;
3386 };
3387 
3388 static long hash_combine(long h, long value)
3389 {
3390 	return h * 31 + value;
3391 }
3392 
3393 #define for_each_dedup_cand(d, node, hash) \
3394 	hashmap__for_each_key_entry(d->dedup_table, node, hash)
3395 
3396 static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
3397 {
3398 	return hashmap__append(d->dedup_table, hash, type_id);
3399 }
3400 
3401 static int btf_dedup_hypot_map_add(struct btf_dedup *d,
3402 				   __u32 from_id, __u32 to_id)
3403 {
3404 	if (d->hypot_cnt == d->hypot_cap) {
3405 		__u32 *new_list;
3406 
3407 		d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
3408 		new_list = libbpf_reallocarray(d->hypot_list, d->hypot_cap, sizeof(__u32));
3409 		if (!new_list)
3410 			return -ENOMEM;
3411 		d->hypot_list = new_list;
3412 	}
3413 	d->hypot_list[d->hypot_cnt++] = from_id;
3414 	d->hypot_map[from_id] = to_id;
3415 	return 0;
3416 }
3417 
3418 static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
3419 {
3420 	int i;
3421 
3422 	for (i = 0; i < d->hypot_cnt; i++)
3423 		d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
3424 	d->hypot_cnt = 0;
3425 	d->hypot_adjust_canon = false;
3426 }
3427 
3428 static void btf_dedup_free(struct btf_dedup *d)
3429 {
3430 	hashmap__free(d->dedup_table);
3431 	d->dedup_table = NULL;
3432 
3433 	free(d->map);
3434 	d->map = NULL;
3435 
3436 	free(d->hypot_map);
3437 	d->hypot_map = NULL;
3438 
3439 	free(d->hypot_list);
3440 	d->hypot_list = NULL;
3441 
3442 	free(d);
3443 }
3444 
3445 static size_t btf_dedup_identity_hash_fn(long key, void *ctx)
3446 {
3447 	return key;
3448 }
3449 
3450 static size_t btf_dedup_collision_hash_fn(long key, void *ctx)
3451 {
3452 	return 0;
3453 }
3454 
3455 static bool btf_dedup_equal_fn(long k1, long k2, void *ctx)
3456 {
3457 	return k1 == k2;
3458 }
3459 
3460 static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts)
3461 {
3462 	struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
3463 	hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
3464 	int i, err = 0, type_cnt;
3465 
3466 	if (!d)
3467 		return ERR_PTR(-ENOMEM);
3468 
3469 	if (OPTS_GET(opts, force_collisions, false))
3470 		hash_fn = btf_dedup_collision_hash_fn;
3471 
3472 	d->btf = btf;
3473 	d->btf_ext = OPTS_GET(opts, btf_ext, NULL);
3474 
3475 	d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
3476 	if (IS_ERR(d->dedup_table)) {
3477 		err = PTR_ERR(d->dedup_table);
3478 		d->dedup_table = NULL;
3479 		goto done;
3480 	}
3481 
3482 	type_cnt = btf__type_cnt(btf);
3483 	d->map = malloc(sizeof(__u32) * type_cnt);
3484 	if (!d->map) {
3485 		err = -ENOMEM;
3486 		goto done;
3487 	}
3488 	/* special BTF "void" type is made canonical immediately */
3489 	d->map[0] = 0;
3490 	for (i = 1; i < type_cnt; i++) {
3491 		struct btf_type *t = btf_type_by_id(d->btf, i);
3492 
3493 		/* VAR and DATASEC are never deduped and are self-canonical */
3494 		if (btf_is_var(t) || btf_is_datasec(t))
3495 			d->map[i] = i;
3496 		else
3497 			d->map[i] = BTF_UNPROCESSED_ID;
3498 	}
3499 
3500 	d->hypot_map = malloc(sizeof(__u32) * type_cnt);
3501 	if (!d->hypot_map) {
3502 		err = -ENOMEM;
3503 		goto done;
3504 	}
3505 	for (i = 0; i < type_cnt; i++)
3506 		d->hypot_map[i] = BTF_UNPROCESSED_ID;
3507 
3508 done:
3509 	if (err) {
3510 		btf_dedup_free(d);
3511 		return ERR_PTR(err);
3512 	}
3513 
3514 	return d;
3515 }
3516 
3517 /*
3518  * Iterate over all possible places in .BTF and .BTF.ext that can reference a
3519  * string and pass a pointer to it to the provided callback `fn`.
3520  */
3521 static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *ctx)
3522 {
3523 	int i, r;
3524 
3525 	for (i = 0; i < d->btf->nr_types; i++) {
3526 		struct btf_field_iter it;
3527 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
3528 		__u32 *str_off;
3529 
3530 		r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
3531 		if (r)
3532 			return r;
3533 
3534 		while ((str_off = btf_field_iter_next(&it))) {
3535 			r = fn(str_off, ctx);
3536 			if (r)
3537 				return r;
3538 		}
3539 	}
3540 
3541 	if (!d->btf_ext)
3542 		return 0;
3543 
3544 	r = btf_ext_visit_str_offs(d->btf_ext, fn, ctx);
3545 	if (r)
3546 		return r;
3547 
3548 	return 0;
3549 }
3550 
3551 static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx)
3552 {
3553 	struct btf_dedup *d = ctx;
3554 	__u32 str_off = *str_off_ptr;
3555 	const char *s;
3556 	int off, err;
3557 
3558 	/* don't touch empty string or string in main BTF */
3559 	if (str_off == 0 || str_off < d->btf->start_str_off)
3560 		return 0;
3561 
3562 	s = btf__str_by_offset(d->btf, str_off);
3563 	if (d->btf->base_btf) {
3564 		err = btf__find_str(d->btf->base_btf, s);
3565 		if (err >= 0) {
3566 			*str_off_ptr = err;
3567 			return 0;
3568 		}
3569 		if (err != -ENOENT)
3570 			return err;
3571 	}
3572 
3573 	off = strset__add_str(d->strs_set, s);
3574 	if (off < 0)
3575 		return off;
3576 
3577 	*str_off_ptr = d->btf->start_str_off + off;
3578 	return 0;
3579 }
3580 
3581 /*
3582  * Dedup strings and filter out those that are not referenced from either the
3583  * .BTF or .BTF.ext (if provided) sections.
3584  *
3585  * This is done by building an index of all strings in the BTF string section,
3586  * then iterating over all entities that can reference strings (e.g., type
3587  * names, struct field names, .BTF.ext line info, etc) and marking corresponding
3588  * strings as used. After that, all used strings are deduped and compacted into
3589  * a sequential blob of memory and new offsets are calculated. Then all the
3590  * string references are iterated over again and rewritten using the new offsets.
3591  */
3592 static int btf_dedup_strings(struct btf_dedup *d)
3593 {
3594 	int err;
3595 
3596 	if (d->btf->strs_deduped)
3597 		return 0;
3598 
3599 	d->strs_set = strset__new(BTF_MAX_STR_OFFSET, NULL, 0);
3600 	if (IS_ERR(d->strs_set)) {
3601 		err = PTR_ERR(d->strs_set);
3602 		goto err_out;
3603 	}
3604 
3605 	if (!d->btf->base_btf) {
3606 		/* insert empty string; we won't be looking it up during strings
3607 		 * dedup, but it's good to have it for generic BTF string lookups
3608 		 */
3609 		err = strset__add_str(d->strs_set, "");
3610 		if (err < 0)
3611 			goto err_out;
3612 	}
3613 
3614 	/* remap string offsets */
3615 	err = btf_for_each_str_off(d, strs_dedup_remap_str_off, d);
3616 	if (err)
3617 		goto err_out;
3618 
3619 	/* replace BTF string data and hash with deduped ones */
3620 	strset__free(d->btf->strs_set);
3621 	d->btf->hdr->str_len = strset__data_size(d->strs_set);
3622 	d->btf->strs_set = d->strs_set;
3623 	d->strs_set = NULL;
3624 	d->btf->strs_deduped = true;
3625 	return 0;
3626 
3627 err_out:
3628 	strset__free(d->strs_set);
3629 	d->strs_set = NULL;
3630 
3631 	return err;
3632 }
3633 
3634 static long btf_hash_common(struct btf_type *t)
3635 {
3636 	long h;
3637 
3638 	h = hash_combine(0, t->name_off);
3639 	h = hash_combine(h, t->info);
3640 	h = hash_combine(h, t->size);
3641 	return h;
3642 }
3643 
3644 static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
3645 {
3646 	return t1->name_off == t2->name_off &&
3647 	       t1->info == t2->info &&
3648 	       t1->size == t2->size;
3649 }
3650 
3651 /* Calculate type signature hash of INT or TAG. */
3652 static long btf_hash_int_decl_tag(struct btf_type *t)
3653 {
3654 	__u32 info = *(__u32 *)(t + 1);
3655 	long h;
3656 
3657 	h = btf_hash_common(t);
3658 	h = hash_combine(h, info);
3659 	return h;
3660 }
3661 
3662 /* Check structural equality of two INTs or TAGs. */
3663 static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2)
3664 {
3665 	__u32 info1, info2;
3666 
3667 	if (!btf_equal_common(t1, t2))
3668 		return false;
3669 	info1 = *(__u32 *)(t1 + 1);
3670 	info2 = *(__u32 *)(t2 + 1);
3671 	return info1 == info2;
3672 }
3673 
3674 /* Calculate type signature hash of ENUM/ENUM64. */
3675 static long btf_hash_enum(struct btf_type *t)
3676 {
3677 	long h;
3678 
3679 	/* don't hash vlen, enum members and size to support enum fwd resolving */
3680 	h = hash_combine(0, t->name_off);
3681 	return h;
3682 }
3683 
3684 static bool btf_equal_enum_members(struct btf_type *t1, struct btf_type *t2)
3685 {
3686 	const struct btf_enum *m1, *m2;
3687 	__u16 vlen;
3688 	int i;
3689 
3690 	vlen = btf_vlen(t1);
3691 	m1 = btf_enum(t1);
3692 	m2 = btf_enum(t2);
3693 	for (i = 0; i < vlen; i++) {
3694 		if (m1->name_off != m2->name_off || m1->val != m2->val)
3695 			return false;
3696 		m1++;
3697 		m2++;
3698 	}
3699 	return true;
3700 }
3701 
3702 static bool btf_equal_enum64_members(struct btf_type *t1, struct btf_type *t2)
3703 {
3704 	const struct btf_enum64 *m1, *m2;
3705 	__u16 vlen;
3706 	int i;
3707 
3708 	vlen = btf_vlen(t1);
3709 	m1 = btf_enum64(t1);
3710 	m2 = btf_enum64(t2);
3711 	for (i = 0; i < vlen; i++) {
3712 		if (m1->name_off != m2->name_off || m1->val_lo32 != m2->val_lo32 ||
3713 		    m1->val_hi32 != m2->val_hi32)
3714 			return false;
3715 		m1++;
3716 		m2++;
3717 	}
3718 	return true;
3719 }
3720 
3721 /* Check structural equality of two ENUMs or ENUM64s. */
3722 static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
3723 {
3724 	if (!btf_equal_common(t1, t2))
3725 		return false;
3726 
3727 	/* t1 & t2 kinds are identical because of btf_equal_common */
3728 	if (btf_kind(t1) == BTF_KIND_ENUM)
3729 		return btf_equal_enum_members(t1, t2);
3730 	else
3731 		return btf_equal_enum64_members(t1, t2);
3732 }
3733 
3734 static inline bool btf_is_enum_fwd(struct btf_type *t)
3735 {
3736 	return btf_is_any_enum(t) && btf_vlen(t) == 0;
3737 }
3738 
3739 static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
3740 {
3741 	if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
3742 		return btf_equal_enum(t1, t2);
3743 	/* At this point either t1 or t2 or both are forward declarations, thus:
3744 	 * - skip comparing vlen because it is zero for forward declarations;
3745 	 * - skip comparing size to allow enum forward declarations
3746 	 *   to be compatible with enum64 full declarations;
3747 	 * - skip comparing kind for the same reason.
3748 	 */
3749 	return t1->name_off == t2->name_off &&
3750 	       btf_is_any_enum(t1) && btf_is_any_enum(t2);
3751 }
3752 
3753 /*
3754  * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
3755  * as referenced type ID equivalence is established separately during the type
3756  * graph equivalence check.
3757  */
3758 static long btf_hash_struct(struct btf_type *t)
3759 {
3760 	const struct btf_member *member = btf_members(t);
3761 	__u32 vlen = btf_vlen(t);
3762 	long h = btf_hash_common(t);
3763 	int i;
3764 
3765 	for (i = 0; i < vlen; i++) {
3766 		h = hash_combine(h, member->name_off);
3767 		h = hash_combine(h, member->offset);
3768 		/* no hashing of referenced type ID, it can be unresolved yet */
3769 		member++;
3770 	}
3771 	return h;
3772 }
3773 
3774 /*
3775  * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced
3776  * type IDs. This check is performed during type graph equivalence check and
3777  * referenced types equivalence is checked separately.
3778  */
3779 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
3780 {
3781 	const struct btf_member *m1, *m2;
3782 	__u16 vlen;
3783 	int i;
3784 
3785 	if (!btf_equal_common(t1, t2))
3786 		return false;
3787 
3788 	vlen = btf_vlen(t1);
3789 	m1 = btf_members(t1);
3790 	m2 = btf_members(t2);
3791 	for (i = 0; i < vlen; i++) {
3792 		if (m1->name_off != m2->name_off || m1->offset != m2->offset)
3793 			return false;
3794 		m1++;
3795 		m2++;
3796 	}
3797 	return true;
3798 }
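
/* Illustrative sketch, not part of libbpf: two BTF copies of
 *
 *	struct sample { int x; };
 *
 * coming from different CUs are shallow-equal even if each copy's 'x'
 * member references a different (not yet deduplicated) 'int' type ID:
 * only member names and offsets are compared here, while referenced type
 * IDs are verified separately by the type graph equivalence algorithm.
 */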
3799 
3800 /*
3801  * Calculate type signature hash of ARRAY, including referenced type IDs,
3802  * under the assumption that they were already resolved to canonical type IDs
3803  * are not going to change.
3804  */
3805 static long btf_hash_array(struct btf_type *t)
3806 {
3807 	const struct btf_array *info = btf_array(t);
3808 	long h = btf_hash_common(t);
3809 
3810 	h = hash_combine(h, info->type);
3811 	h = hash_combine(h, info->index_type);
3812 	h = hash_combine(h, info->nelems);
3813 	return h;
3814 }
3815 
3816 /*
3817  * Check exact equality of two ARRAYs, taking into account referenced
3818  * type IDs, under the assumption that they were already resolved to canonical
3819  * type IDs and are not going to change.
3820  * This function is called during reference types deduplication to compare
3821  * ARRAY to potential canonical representative.
3822  */
3823 static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
3824 {
3825 	const struct btf_array *info1, *info2;
3826 
3827 	if (!btf_equal_common(t1, t2))
3828 		return false;
3829 
3830 	info1 = btf_array(t1);
3831 	info2 = btf_array(t2);
3832 	return info1->type == info2->type &&
3833 	       info1->index_type == info2->index_type &&
3834 	       info1->nelems == info2->nelems;
3835 }
3836 
3837 /*
3838  * Check structural compatibility of two ARRAYs, ignoring referenced type
3839  * IDs. This check is performed during type graph equivalence check and
3840  * referenced types equivalence is checked separately.
3841  */
3842 static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
3843 {
3844 	if (!btf_equal_common(t1, t2))
3845 		return false;
3846 
3847 	return btf_array(t1)->nelems == btf_array(t2)->nelems;
3848 }
3849 
3850 /*
3851  * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
3852  * under the assumption that they were already resolved to canonical type IDs
3853  * are not going to change.
3854  */
3855 static long btf_hash_fnproto(struct btf_type *t)
3856 {
3857 	const struct btf_param *member = btf_params(t);
3858 	__u16 vlen = btf_vlen(t);
3859 	long h = btf_hash_common(t);
3860 	int i;
3861 
3862 	for (i = 0; i < vlen; i++) {
3863 		h = hash_combine(h, member->name_off);
3864 		h = hash_combine(h, member->type);
3865 		member++;
3866 	}
3867 	return h;
3868 }
3869 
3870 /*
3871  * Check exact equality of two FUNC_PROTOs, taking into account referenced
3872  * type IDs, under the assumption that they were already resolved to canonical
3873  * type IDs and are not going to change.
3874  * This function is called during reference types deduplication to compare
3875  * FUNC_PROTO to potential canonical representative.
3876  */
3877 static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
3878 {
3879 	const struct btf_param *m1, *m2;
3880 	__u16 vlen;
3881 	int i;
3882 
3883 	if (!btf_equal_common(t1, t2))
3884 		return false;
3885 
3886 	vlen = btf_vlen(t1);
3887 	m1 = btf_params(t1);
3888 	m2 = btf_params(t2);
3889 	for (i = 0; i < vlen; i++) {
3890 		if (m1->name_off != m2->name_off || m1->type != m2->type)
3891 			return false;
3892 		m1++;
3893 		m2++;
3894 	}
3895 	return true;
3896 }
3897 
3898 /*
3899  * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
3900  * IDs. This check is performed during type graph equivalence check and
3901  * referenced types equivalence is checked separately.
3902  */
3903 static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
3904 {
3905 	const struct btf_param *m1, *m2;
3906 	__u16 vlen;
3907 	int i;
3908 
3909 	/* skip return type ID */
3910 	if (t1->name_off != t2->name_off || t1->info != t2->info)
3911 		return false;
3912 
3913 	vlen = btf_vlen(t1);
3914 	m1 = btf_params(t1);
3915 	m2 = btf_params(t2);
3916 	for (i = 0; i < vlen; i++) {
3917 		if (m1->name_off != m2->name_off)
3918 			return false;
3919 		m1++;
3920 		m2++;
3921 	}
3922 	return true;
3923 }
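
/* Illustrative sketch, not part of libbpf: for two BTF representations of
 *
 *	int parse(const char *buf, int len);
 *
 * originating from different CUs, btf_compat_fnproto() succeeds as long
 * as parameter counts and names match, even while the return and
 * parameter type IDs still point to not-yet-deduplicated duplicates;
 * those IDs are checked recursively by btf_dedup_is_equiv().
 */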
3924 
3925 /* Prepare split BTF for deduplication by calculating hashes of base BTF's
3926  * types and initializing the rest of the state (canonical type mapping) for
3927  * the fixed base BTF part.
3928  */
3929 static int btf_dedup_prep(struct btf_dedup *d)
3930 {
3931 	struct btf_type *t;
3932 	int type_id;
3933 	long h;
3934 
3935 	if (!d->btf->base_btf)
3936 		return 0;
3937 
3938 	for (type_id = 1; type_id < d->btf->start_id; type_id++) {
3939 		t = btf_type_by_id(d->btf, type_id);
3940 
3941 		/* all base BTF types are self-canonical by definition */
3942 		d->map[type_id] = type_id;
3943 
3944 		switch (btf_kind(t)) {
3945 		case BTF_KIND_VAR:
3946 		case BTF_KIND_DATASEC:
3947 			/* VAR and DATASEC are never hashed/deduplicated */
3948 			continue;
3949 		case BTF_KIND_CONST:
3950 		case BTF_KIND_VOLATILE:
3951 		case BTF_KIND_RESTRICT:
3952 		case BTF_KIND_PTR:
3953 		case BTF_KIND_FWD:
3954 		case BTF_KIND_TYPEDEF:
3955 		case BTF_KIND_FUNC:
3956 		case BTF_KIND_FLOAT:
3957 		case BTF_KIND_TYPE_TAG:
3958 			h = btf_hash_common(t);
3959 			break;
3960 		case BTF_KIND_INT:
3961 		case BTF_KIND_DECL_TAG:
3962 			h = btf_hash_int_decl_tag(t);
3963 			break;
3964 		case BTF_KIND_ENUM:
3965 		case BTF_KIND_ENUM64:
3966 			h = btf_hash_enum(t);
3967 			break;
3968 		case BTF_KIND_STRUCT:
3969 		case BTF_KIND_UNION:
3970 			h = btf_hash_struct(t);
3971 			break;
3972 		case BTF_KIND_ARRAY:
3973 			h = btf_hash_array(t);
3974 			break;
3975 		case BTF_KIND_FUNC_PROTO:
3976 			h = btf_hash_fnproto(t);
3977 			break;
3978 		default:
3979 			pr_debug("unknown kind %d for type [%d]\n", btf_kind(t), type_id);
3980 			return -EINVAL;
3981 		}
3982 		if (btf_dedup_table_add(d, h, type_id))
3983 			return -ENOMEM;
3984 	}
3985 
3986 	return 0;
3987 }
3988 
3989 /*
3990  * Deduplicate primitive types, which can't reference other types, by calculating
3991  * their type signature hash and comparing them with any possible canonical
3992  * candidate. If no canonical candidate matches, the type itself is marked as
3993  * canonical and is added into `btf_dedup->dedup_table` as another candidate.
3994  */
3995 static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
3996 {
3997 	struct btf_type *t = btf_type_by_id(d->btf, type_id);
3998 	struct hashmap_entry *hash_entry;
3999 	struct btf_type *cand;
4000 	/* if we don't find an equivalent type, then we are canonical */
4001 	__u32 new_id = type_id;
4002 	__u32 cand_id;
4003 	long h;
4004 
4005 	switch (btf_kind(t)) {
4006 	case BTF_KIND_CONST:
4007 	case BTF_KIND_VOLATILE:
4008 	case BTF_KIND_RESTRICT:
4009 	case BTF_KIND_PTR:
4010 	case BTF_KIND_TYPEDEF:
4011 	case BTF_KIND_ARRAY:
4012 	case BTF_KIND_STRUCT:
4013 	case BTF_KIND_UNION:
4014 	case BTF_KIND_FUNC:
4015 	case BTF_KIND_FUNC_PROTO:
4016 	case BTF_KIND_VAR:
4017 	case BTF_KIND_DATASEC:
4018 	case BTF_KIND_DECL_TAG:
4019 	case BTF_KIND_TYPE_TAG:
4020 		return 0;
4021 
4022 	case BTF_KIND_INT:
4023 		h = btf_hash_int_decl_tag(t);
4024 		for_each_dedup_cand(d, hash_entry, h) {
4025 			cand_id = hash_entry->value;
4026 			cand = btf_type_by_id(d->btf, cand_id);
4027 			if (btf_equal_int_tag(t, cand)) {
4028 				new_id = cand_id;
4029 				break;
4030 			}
4031 		}
4032 		break;
4033 
4034 	case BTF_KIND_ENUM:
4035 	case BTF_KIND_ENUM64:
4036 		h = btf_hash_enum(t);
4037 		for_each_dedup_cand(d, hash_entry, h) {
4038 			cand_id = hash_entry->value;
4039 			cand = btf_type_by_id(d->btf, cand_id);
4040 			if (btf_equal_enum(t, cand)) {
4041 				new_id = cand_id;
4042 				break;
4043 			}
4044 			if (btf_compat_enum(t, cand)) {
4045 				if (btf_is_enum_fwd(t)) {
4046 					/* resolve fwd to full enum */
4047 					new_id = cand_id;
4048 					break;
4049 				}
4050 				/* resolve canonical enum fwd to full enum */
4051 				d->map[cand_id] = type_id;
4052 			}
4053 		}
4054 		break;
4055 
4056 	case BTF_KIND_FWD:
4057 	case BTF_KIND_FLOAT:
4058 		h = btf_hash_common(t);
4059 		for_each_dedup_cand(d, hash_entry, h) {
4060 			cand_id = hash_entry->value;
4061 			cand = btf_type_by_id(d->btf, cand_id);
4062 			if (btf_equal_common(t, cand)) {
4063 				new_id = cand_id;
4064 				break;
4065 			}
4066 		}
4067 		break;
4068 
4069 	default:
4070 		return -EINVAL;
4071 	}
4072 
4073 	d->map[type_id] = new_id;
4074 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
4075 		return -ENOMEM;
4076 
4077 	return 0;
4078 }
4079 
4080 static int btf_dedup_prim_types(struct btf_dedup *d)
4081 {
4082 	int i, err;
4083 
4084 	for (i = 0; i < d->btf->nr_types; i++) {
4085 		err = btf_dedup_prim_type(d, d->btf->start_id + i);
4086 		if (err)
4087 			return err;
4088 	}
4089 	return 0;
4090 }
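
/* Illustrative sketch, not part of libbpf: if two CUs each contributed
 * their own BTF type for the 'int' primitive, e.g.:
 *
 *	[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
 *	[7] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
 *
 * this pass hashes both identically, finds [1] as a canonical candidate
 * for [7], and records d->map[7] = 1, while d->map[1] stays 1.
 */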
4091 
4092 /*
4093  * Check whether a type is already mapped to a canonical one (possibly to itself).
4094  */
4095 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
4096 {
4097 	return d->map[type_id] <= BTF_MAX_NR_TYPES;
4098 }
4099 
4100 /*
4101  * Resolve type ID into its canonical type ID, if any; otherwise return original
4102  * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
4103  * STRUCT/UNION link and resolve it into canonical type ID as well.
4104  */
4105 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
4106 {
4107 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
4108 		type_id = d->map[type_id];
4109 	return type_id;
4110 }
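
/* Illustrative sketch, not part of libbpf: with a map of
 * { 9 -> 5, 5 -> 2, 2 -> 2 }, resolve_type_id(d, 9) follows the chain
 * until it reaches the self-mapped entry and returns 2.
 */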
4111 
4112 /*
4113  * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
4114  * type ID.
4115  */
4116 static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
4117 {
4118 	__u32 orig_type_id = type_id;
4119 
4120 	if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
4121 		return type_id;
4122 
4123 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
4124 		type_id = d->map[type_id];
4125 
4126 	if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
4127 		return type_id;
4128 
4129 	return orig_type_id;
4130 }
4131 
4133 static inline __u16 btf_fwd_kind(struct btf_type *t)
4134 {
4135 	return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
4136 }
4137 
4138 /* Check if given two types are identical ARRAY definitions */
4139 static bool btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
4140 {
4141 	struct btf_type *t1, *t2;
4142 
4143 	t1 = btf_type_by_id(d->btf, id1);
4144 	t2 = btf_type_by_id(d->btf, id2);
4145 	if (!btf_is_array(t1) || !btf_is_array(t2))
4146 		return false;
4147 
4148 	return btf_equal_array(t1, t2);
4149 }
4150 
4151 /* Check if given two types are identical STRUCT/UNION definitions */
4152 static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id2)
4153 {
4154 	const struct btf_member *m1, *m2;
4155 	struct btf_type *t1, *t2;
4156 	int n, i;
4157 
4158 	t1 = btf_type_by_id(d->btf, id1);
4159 	t2 = btf_type_by_id(d->btf, id2);
4160 
4161 	if (!btf_is_composite(t1) || btf_kind(t1) != btf_kind(t2))
4162 		return false;
4163 
4164 	if (!btf_shallow_equal_struct(t1, t2))
4165 		return false;
4166 
4167 	m1 = btf_members(t1);
4168 	m2 = btf_members(t2);
4169 	for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
4170 		if (m1->type != m2->type &&
4171 		    !btf_dedup_identical_arrays(d, m1->type, m2->type) &&
4172 		    !btf_dedup_identical_structs(d, m1->type, m2->type))
4173 			return false;
4174 	}
4175 	return true;
4176 }
4177 
4178 /*
4179  * Check equivalence of BTF type graph formed by candidate struct/union (we'll
4180  * call it "candidate graph" in this description for brevity) to a type graph
4181  * formed by (potential) canonical struct/union ("canonical graph" for brevity
4182  * here, though keep in mind that not all types in canonical graph are
4183  * necessarily canonical representatives themselves, some of them might be
4184  * duplicates or their uniqueness might not have been established yet).
4185  * Returns:
4186  *  - >0, if type graphs are equivalent;
4187  *  -  0, if not equivalent;
4188  *  - <0, on error.
4189  *
4190  * Algorithm performs side-by-side DFS traversal of both type graphs and checks
4191  * equivalence of BTF types at each step. If at any point BTF types in candidate
4192  * and canonical graphs are not compatible structurally, whole graphs are
4193  * incompatible. If types are structurally equivalent (i.e., all information
4194  * except referenced type IDs is exactly the same), a mapping from `canon_id` to
4195  * a `cand_id` is recorded in the hypothetical mapping (`btf_dedup->hypot_map`).
4196  * If a type references other types, then those referenced types are checked
4197  * for equivalence recursively.
4198  *
4199  * During DFS traversal, if we find that for current `canon_id` type we
4200  * already have some mapping in hypothetical map, we check for two possible
4201  * situations:
4202  *   - `canon_id` is mapped to exactly the same type as `cand_id`. This will
4203  *     happen when type graphs have cycles. In this case we assume those two
4204  *     types are equivalent.
4205  *   - `canon_id` is mapped to a different type. This is a contradiction in our
4206  *     hypothetical mapping, because the same type in the canonical graph would
4207  *     correspond to two different types in the candidate graph, which shouldn't
4208  *     happen for equivalent type graphs. This condition terminates the
4209  *     equivalence check with a negative result.
4210  *
4211  * If type graph traversal exhausts all types to check and finds no
4212  * contradiction, then the type graphs are equivalent.
4213  *
4214  * When checking types for equivalence, there is one special case: FWD types.
4215  * If FWD type resolution is allowed and one of the types (either from the
4216  * canonical or the candidate graph) is FWD and the other is STRUCT/UNION
4217  * (matching FWD's kind flag) and their names match, the hypothetical mapping is
4218  * updated to point from FWD to STRUCT/UNION. If the graphs are later determined
4219  * to be equivalent, this mapping is used to record FWD -> STRUCT/UNION permanently.
4220  *
4221  * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
4222  * if there are two identically named (or anonymous) structs/unions that are
4223  * structurally compatible, one of which has a FWD field, while the other is a
4224  * concrete STRUCT/UNION, but according to C sources they are different
4225  * structs/unions that reference different types with the same name. This is
4226  * extremely unlikely to happen, but the btf_dedup API allows disabling FWD
4227  * resolution if this logic is causing problems.
4228  *
4229  * Doing FWD resolution means that both candidate and canonical graphs can
4230  * consist of portions of the graph that come from multiple compilation units.
4231  * This is due to the fact that types within a single compilation unit are
4232  * always deduplicated and FWDs are already resolved, if the referenced
4233  * struct/union definition is available. So, if we had an unresolved FWD and
4234  * found a corresponding STRUCT/UNION, they will be from different compilation
4235  * units. This consequently means that when we "link" FWD to the corresponding
4236  * STRUCT/UNION, the type graph will likely have at least two different BTF
4237  * types that describe the same type (e.g., most probably there will be two
4238  * different BTF types for the same 'int' primitive type) and could even have
4239  * "overlapping" parts of the type graph that describe the same subset of types.
4240  *
4241  * This in turn means that our assumption that each type in the canonical graph
4242  * must correspond to exactly one type in the candidate graph might not hold
4243  * anymore and will make it harder to detect contradictions using the
4244  * hypothetical map. To handle this problem, we allow following FWD ->
4245  * STRUCT/UNION resolution only in the canonical graph. FWDs in candidate
4246  * graphs are never resolved. To see why this is OK, let's check all possible
4247  * situations w.r.t. FWDs that can occur:
4248  *   - Both types in canonical and candidate graphs are FWDs. If they are
4249  *     structurally equivalent, then they can either be both resolved to the
4250  *     same STRUCT/UNION or not resolved at all. In both cases they are
4251  *     equivalent and there is no need to resolve FWD on candidate side.
4252  *   - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
4253  *     so there is nothing to resolve; the algorithm checks equivalence anyway.
4254  *   - Type in canonical graph is FWD, while type in candidate is concrete
4255  *     STRUCT/UNION. In this case the candidate graph comes from a single
4256  *     compilation unit, so there is exactly one BTF type for each unique C
4257  *     type. After resolving FWD into STRUCT/UNION, there might be more than
4258  *     one BTF type in the canonical graph mapping to a single BTF type in
4259  *     the candidate graph, but because the hypothetical mapping maps from
4260  *     canonical to candidate types, that's alright: we still maintain the
4261  *     property of a single `canon_id` mapping to a single `cand_id` (two
4262  *     different `canon_id` may map to the same `cand_id`; not contradictory).
4263  *   - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
4264  *     graph is FWD. In this case we are just going to check compatibility of
4265  *     STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
4266  *     assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
4267  *     a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
4268  *     turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
4269  *     canonical graph.
4270  */
4271 static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
4272 			      __u32 canon_id)
4273 {
4274 	struct btf_type *cand_type;
4275 	struct btf_type *canon_type;
4276 	__u32 hypot_type_id;
4277 	__u16 cand_kind;
4278 	__u16 canon_kind;
4279 	int i, eq;
4280 
4281 	/* if both resolve to the same canonical, they must be equivalent */
4282 	if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
4283 		return 1;
4284 
4285 	canon_id = resolve_fwd_id(d, canon_id);
4286 
4287 	hypot_type_id = d->hypot_map[canon_id];
4288 	if (hypot_type_id <= BTF_MAX_NR_TYPES) {
4289 		if (hypot_type_id == cand_id)
4290 			return 1;
4291 		/* In some cases compiler will generate different DWARF types
4292 		 * for *identical* array type definitions and use them for
4293 		 * different fields within the *same* struct. This breaks type
4294 		 * equivalence check, which makes an assumption that candidate
4295 		 * types sub-graph has consistent and deduped-by-compiler
4296 		 * types within a single CU. So work around that by explicitly
4297 		 * allowing identical array types here.
4298 		 */
4299 		if (btf_dedup_identical_arrays(d, hypot_type_id, cand_id))
4300 			return 1;
4301 		/* It turns out that a similar situation can happen with
4302 		 * struct/union sometimes, sigh... Handle the case where
4303 		 * structs/unions are exactly the same, down to the referenced
4304 		 * type IDs. Anything more complicated (e.g., if referenced
4305 		 * types are different, but equivalent) is *way more*
4306 		 * complicated and requires a many-to-many equivalence mapping.
4307 		 */
4308 		if (btf_dedup_identical_structs(d, hypot_type_id, cand_id))
4309 			return 1;
4310 		return 0;
4311 	}
4312 
4313 	if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
4314 		return -ENOMEM;
4315 
4316 	cand_type = btf_type_by_id(d->btf, cand_id);
4317 	canon_type = btf_type_by_id(d->btf, canon_id);
4318 	cand_kind = btf_kind(cand_type);
4319 	canon_kind = btf_kind(canon_type);
4320 
4321 	if (cand_type->name_off != canon_type->name_off)
4322 		return 0;
4323 
4324 	/* FWD <--> STRUCT/UNION equivalence check, if enabled */
4325 	if ((cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
4326 	    && cand_kind != canon_kind) {
4327 		__u16 real_kind;
4328 		__u16 fwd_kind;
4329 
4330 		if (cand_kind == BTF_KIND_FWD) {
4331 			real_kind = canon_kind;
4332 			fwd_kind = btf_fwd_kind(cand_type);
4333 		} else {
4334 			real_kind = cand_kind;
4335 			fwd_kind = btf_fwd_kind(canon_type);
4336 			/* we'd need to resolve base FWD to STRUCT/UNION */
4337 			if (fwd_kind == real_kind && canon_id < d->btf->start_id)
4338 				d->hypot_adjust_canon = true;
4339 		}
4340 		return fwd_kind == real_kind;
4341 	}
4342 
4343 	if (cand_kind != canon_kind)
4344 		return 0;
4345 
4346 	switch (cand_kind) {
4347 	case BTF_KIND_INT:
4348 		return btf_equal_int_tag(cand_type, canon_type);
4349 
4350 	case BTF_KIND_ENUM:
4351 	case BTF_KIND_ENUM64:
4352 		return btf_compat_enum(cand_type, canon_type);
4353 
4354 	case BTF_KIND_FWD:
4355 	case BTF_KIND_FLOAT:
4356 		return btf_equal_common(cand_type, canon_type);
4357 
4358 	case BTF_KIND_CONST:
4359 	case BTF_KIND_VOLATILE:
4360 	case BTF_KIND_RESTRICT:
4361 	case BTF_KIND_PTR:
4362 	case BTF_KIND_TYPEDEF:
4363 	case BTF_KIND_FUNC:
4364 	case BTF_KIND_TYPE_TAG:
4365 		if (cand_type->info != canon_type->info)
4366 			return 0;
4367 		return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
4368 
4369 	case BTF_KIND_ARRAY: {
4370 		const struct btf_array *cand_arr, *canon_arr;
4371 
4372 		if (!btf_compat_array(cand_type, canon_type))
4373 			return 0;
4374 		cand_arr = btf_array(cand_type);
4375 		canon_arr = btf_array(canon_type);
4376 		eq = btf_dedup_is_equiv(d, cand_arr->index_type, canon_arr->index_type);
4377 		if (eq <= 0)
4378 			return eq;
4379 		return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
4380 	}
4381 
4382 	case BTF_KIND_STRUCT:
4383 	case BTF_KIND_UNION: {
4384 		const struct btf_member *cand_m, *canon_m;
4385 		__u16 vlen;
4386 
4387 		if (!btf_shallow_equal_struct(cand_type, canon_type))
4388 			return 0;
4389 		vlen = btf_vlen(cand_type);
4390 		cand_m = btf_members(cand_type);
4391 		canon_m = btf_members(canon_type);
4392 		for (i = 0; i < vlen; i++) {
4393 			eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
4394 			if (eq <= 0)
4395 				return eq;
4396 			cand_m++;
4397 			canon_m++;
4398 		}
4399 
4400 		return 1;
4401 	}
4402 
4403 	case BTF_KIND_FUNC_PROTO: {
4404 		const struct btf_param *cand_p, *canon_p;
4405 		__u16 vlen;
4406 
4407 		if (!btf_compat_fnproto(cand_type, canon_type))
4408 			return 0;
4409 		eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
4410 		if (eq <= 0)
4411 			return eq;
4412 		vlen = btf_vlen(cand_type);
4413 		cand_p = btf_params(cand_type);
4414 		canon_p = btf_params(canon_type);
4415 		for (i = 0; i < vlen; i++) {
4416 			eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
4417 			if (eq <= 0)
4418 				return eq;
4419 			cand_p++;
4420 			canon_p++;
4421 		}
4422 		return 1;
4423 	}
4424 
4425 	default:
4426 		return -EINVAL;
4427 	}
4428 	return 0;
4429 }
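
/* Illustrative sketch, not part of libbpf: consider two CUs each defining
 *
 *	struct node { struct node *next; };
 *
 * With candidate [10] checked against canonical [3], the first visit
 * records hypot_map[3] = 10 and recurses through the 'next' PTR back into
 * the same pair; the second visit finds hypot_map[3] == 10 and returns 1,
 * terminating the cycle. Had the canonical graph contained 'struct node;'
 * as a FWD instead, the FWD <--> STRUCT name/kind check above would accept
 * it, and btf_dedup_merge_hypot_map() would later resolve that FWD to the
 * concrete struct.
 */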
4430 
4431 /*
4432  * Use hypothetical mapping, produced by successful type graph equivalence
4433  * check, to augment existing struct/union canonical mapping, where possible.
4434  *
4435  * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
4436  * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional:
4437  * it doesn't matter if the FWD type was part of the canonical graph or the
4438  * candidate one, we record the mapping anyway. As opposed to the carefulness
4439  * required for struct/union correspondence mapping (described below), for FWD
4440  * resolution it's not important, as by the time FWD types (reference types) are
4441  * deduplicated, all structs/unions will already be deduped anyway.
4442  *
4443  * Recording STRUCT/UNION mapping is purely a performance optimization and is
4444  * not required for correctness. It needs to be done carefully to ensure that
4445  * a struct/union from the candidate's type graph is not mapped to a
4446  * corresponding struct/union from the canonical type graph that itself hasn't
4447  * been resolved into a canonical representative. The only guarantee we have is
4448  * that the canonical struct/union was determined as canonical and that won't
4449  * change. But any types referenced through that struct/union's fields might
4450  * not have been resolved yet, so in a case like that it's too early to
4451  * establish any kind of correspondence between structs/unions.
4452  *
4453  * No canonical correspondence is derived for primitive types (they are
4454  * completely deduplicated already anyway) or reference types (they rely on
4455  * stability of struct/union canonical relationship for equivalence checks).
4456  */
4457 static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
4458 {
4459 	__u32 canon_type_id, targ_type_id;
4460 	__u16 t_kind, c_kind;
4461 	__u32 t_id, c_id;
4462 	int i;
4463 
4464 	for (i = 0; i < d->hypot_cnt; i++) {
4465 		canon_type_id = d->hypot_list[i];
4466 		targ_type_id = d->hypot_map[canon_type_id];
4467 		t_id = resolve_type_id(d, targ_type_id);
4468 		c_id = resolve_type_id(d, canon_type_id);
4469 		t_kind = btf_kind(btf__type_by_id(d->btf, t_id));
4470 		c_kind = btf_kind(btf__type_by_id(d->btf, c_id));
4471 		/*
4472 		 * Resolve FWD into STRUCT/UNION.
4473 		 * It's ok to resolve FWD into STRUCT/UNION that's not yet
4474 		 * mapped to canonical representative (as opposed to
4475 		 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
4476 		 * eventually that struct is going to be mapped and all resolved
4477 		 * FWDs will automatically resolve to correct canonical
4478 		 * representative. This will happen before ref type deduping,
4479 		 * which critically depends on the stability of these mappings. This
4480 		 * stability is not a requirement for STRUCT/UNION equivalence
4481 		 * checks, though.
4482 		 */
4483 
4484 		/* if it's the split BTF case, we still need to point base FWD
4485 		 * to STRUCT/UNION in a split BTF, because FWDs from split BTF
4486 		 * will be resolved against base FWD. If we don't point base
4487 		 * canonical FWD to the resolved STRUCT/UNION, then all the
4488 		 * FWDs in split BTF won't be correctly resolved to a proper
4489 		 * STRUCT/UNION.
4490 		 */
4491 		if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
4492 			d->map[c_id] = t_id;
4493 
4494 		/* if graph equivalence determined that we'd need to adjust
4495 		 * base canonical types, then we need to only point base FWDs
4496 		 * to STRUCTs/UNIONs and do no more modifications. For all
4497 		 * other purposes the type graphs were not equivalent.
4498 		 */
4499 		if (d->hypot_adjust_canon)
4500 			continue;
4501 
4502 		if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
4503 			d->map[t_id] = c_id;
4504 
4505 		if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
4506 		    c_kind != BTF_KIND_FWD &&
4507 		    is_type_mapped(d, c_id) &&
4508 		    !is_type_mapped(d, t_id)) {
4509 			/*
4510 			 * as a perf optimization, we can map struct/union
4511 			 * that's part of type graph we just verified for
4512 			 * equivalence. We can do that for struct/union that has
4513 			 * canonical representative only, though.
4514 			 */
4515 			d->map[t_id] = c_id;
4516 		}
4517 	}
4518 }
4519 
4520 /*
4521  * Deduplicate struct/union types.
4522  *
4523  * For each struct/union type its type signature hash is calculated, taking
4524  * into account the type's name, size, and the number, order and names of its
4525  * fields, but ignoring type IDs referenced from fields, because they might not
4526  * be deduped completely until after the reference types deduplication phase.
4527  * This type hash is used to iterate over all potential canonical types sharing
4528  * the same hash. For each canonical candidate we check whether the type graphs
4529  * they form (through referenced types in fields and so on) are equivalent,
4530  * using the algorithm implemented in `btf_dedup_is_equiv`. If such equivalence
4531  * is found and BTF_KIND_FWD resolution is allowed, then the hypothetical
4532  * mapping (btf_dedup->hypot_map) produced by the aforementioned type graph
4533  * equivalence algorithm is used to record the FWD -> STRUCT/UNION mapping.
4534  * It's also used to potentially map other structs/unions to their canonical
4535  * representatives, if such a relationship hasn't yet been established. This
4536  * speeds up the algorithm by eliminating some of the duplicate work.
4537  *
4538  * If no matching canonical representative is found, the struct/union is marked
4539  * as canonical for itself and is added into the btf_dedup->dedup_table hash map
4540  * for further lookups.
4541  */
4542 static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
4543 {
4544 	struct btf_type *cand_type, *t;
4545 	struct hashmap_entry *hash_entry;
4546 	/* if we don't find an equivalent type, then we are canonical */
4547 	__u32 new_id = type_id;
4548 	__u16 kind;
4549 	long h;
4550 
4551 	/* already deduped or in the process of deduping (loop detected) */
4552 	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
4553 		return 0;
4554 
4555 	t = btf_type_by_id(d->btf, type_id);
4556 	kind = btf_kind(t);
4557 
4558 	if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
4559 		return 0;
4560 
4561 	h = btf_hash_struct(t);
4562 	for_each_dedup_cand(d, hash_entry, h) {
4563 		__u32 cand_id = hash_entry->value;
4564 		int eq;
4565 
4566 		/*
4567 		 * Even though btf_dedup_is_equiv() checks for
4568 		 * btf_shallow_equal_struct() internally when checking two
4569 		 * structs (unions) for equivalence, we need to guard here
4570 		 * from picking matching FWD type as a dedup candidate.
4571 		 * This can happen due to a hash collision. In such a case, just
4572 		 * relying on btf_dedup_is_equiv() would lead to potentially
4573 		 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
4574 		 * FWD and compatible STRUCT/UNION are considered equivalent.
4575 		 */
4576 		cand_type = btf_type_by_id(d->btf, cand_id);
4577 		if (!btf_shallow_equal_struct(t, cand_type))
4578 			continue;
4579 
4580 		btf_dedup_clear_hypot_map(d);
4581 		eq = btf_dedup_is_equiv(d, type_id, cand_id);
4582 		if (eq < 0)
4583 			return eq;
4584 		if (!eq)
4585 			continue;
4586 		btf_dedup_merge_hypot_map(d);
4587 		if (d->hypot_adjust_canon) /* not really equivalent */
4588 			continue;
4589 		new_id = cand_id;
4590 		break;
4591 	}
4592 
4593 	d->map[type_id] = new_id;
4594 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
4595 		return -ENOMEM;
4596 
4597 	return 0;
4598 }
4599 
4600 static int btf_dedup_struct_types(struct btf_dedup *d)
4601 {
4602 	int i, err;
4603 
4604 	for (i = 0; i < d->btf->nr_types; i++) {
4605 		err = btf_dedup_struct_type(d, d->btf->start_id + i);
4606 		if (err)
4607 			return err;
4608 	}
4609 	return 0;
4610 }
4611 
4612 /*
4613  * Deduplicate reference type.
4614  *
4615  * Once all primitive and struct/union types are deduplicated, we can easily
4616  * deduplicate all other (reference) BTF types. This is done in two steps:
4617  *
4618  * 1. Resolve all referenced type IDs into their canonical type IDs. This
4619  * resolution can be done either immediately for primitive or struct/union types
4620  * (because they were deduped in the previous two phases) or recursively for
4621  * reference types. Recursion will always terminate at either primitive or
4622  * struct/union type, at which point we can "unwind" chain of reference types
4623  * one by one. There is no danger of encountering cycles because in the C type
4624  * system the only way to form a type cycle is through a struct/union, so any
4625  * chain of reference types, even those taking part in a type cycle, will
4626  * inevitably reach a struct/union at some point.
4627  *
4628  * 2. Once all referenced type IDs are resolved into canonical ones, BTF type
4629  * becomes "stable", in the sense that no further deduplication will cause
4630  * any changes to it. With that, it's now possible to calculate type's signature
4631  * hash (this time taking into account referenced type IDs) and loop over all
4632  * potential canonical representatives. If no match is found, the current type
4633  * becomes the canonical representative of itself and is added into
4634  * btf_dedup->dedup_table as another possible canonical representative.
4635  */
4636 static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
4637 {
4638 	struct hashmap_entry *hash_entry;
4639 	__u32 new_id = type_id, cand_id;
4640 	struct btf_type *t, *cand;
4641 	/* if we don't find an equivalent type, then we are the representative type */
4642 	int ref_type_id;
4643 	long h;
4644 
4645 	if (d->map[type_id] == BTF_IN_PROGRESS_ID)
4646 		return -ELOOP;
4647 	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
4648 		return resolve_type_id(d, type_id);
4649 
4650 	t = btf_type_by_id(d->btf, type_id);
4651 	d->map[type_id] = BTF_IN_PROGRESS_ID;
4652 
4653 	switch (btf_kind(t)) {
4654 	case BTF_KIND_CONST:
4655 	case BTF_KIND_VOLATILE:
4656 	case BTF_KIND_RESTRICT:
4657 	case BTF_KIND_PTR:
4658 	case BTF_KIND_TYPEDEF:
4659 	case BTF_KIND_FUNC:
4660 	case BTF_KIND_TYPE_TAG:
4661 		ref_type_id = btf_dedup_ref_type(d, t->type);
4662 		if (ref_type_id < 0)
4663 			return ref_type_id;
4664 		t->type = ref_type_id;
4665 
4666 		h = btf_hash_common(t);
4667 		for_each_dedup_cand(d, hash_entry, h) {
4668 			cand_id = hash_entry->value;
4669 			cand = btf_type_by_id(d->btf, cand_id);
4670 			if (btf_equal_common(t, cand)) {
4671 				new_id = cand_id;
4672 				break;
4673 			}
4674 		}
4675 		break;
4676 
4677 	case BTF_KIND_DECL_TAG:
4678 		ref_type_id = btf_dedup_ref_type(d, t->type);
4679 		if (ref_type_id < 0)
4680 			return ref_type_id;
4681 		t->type = ref_type_id;
4682 
4683 		h = btf_hash_int_decl_tag(t);
4684 		for_each_dedup_cand(d, hash_entry, h) {
4685 			cand_id = hash_entry->value;
4686 			cand = btf_type_by_id(d->btf, cand_id);
4687 			if (btf_equal_int_tag(t, cand)) {
4688 				new_id = cand_id;
4689 				break;
4690 			}
4691 		}
4692 		break;
4693 
4694 	case BTF_KIND_ARRAY: {
4695 		struct btf_array *info = btf_array(t);
4696 
4697 		ref_type_id = btf_dedup_ref_type(d, info->type);
4698 		if (ref_type_id < 0)
4699 			return ref_type_id;
4700 		info->type = ref_type_id;
4701 
4702 		ref_type_id = btf_dedup_ref_type(d, info->index_type);
4703 		if (ref_type_id < 0)
4704 			return ref_type_id;
4705 		info->index_type = ref_type_id;
4706 
4707 		h = btf_hash_array(t);
4708 		for_each_dedup_cand(d, hash_entry, h) {
4709 			cand_id = hash_entry->value;
4710 			cand = btf_type_by_id(d->btf, cand_id);
4711 			if (btf_equal_array(t, cand)) {
4712 				new_id = cand_id;
4713 				break;
4714 			}
4715 		}
4716 		break;
4717 	}
4718 
4719 	case BTF_KIND_FUNC_PROTO: {
4720 		struct btf_param *param;
4721 		__u16 vlen;
4722 		int i;
4723 
4724 		ref_type_id = btf_dedup_ref_type(d, t->type);
4725 		if (ref_type_id < 0)
4726 			return ref_type_id;
4727 		t->type = ref_type_id;
4728 
4729 		vlen = btf_vlen(t);
4730 		param = btf_params(t);
4731 		for (i = 0; i < vlen; i++) {
4732 			ref_type_id = btf_dedup_ref_type(d, param->type);
4733 			if (ref_type_id < 0)
4734 				return ref_type_id;
4735 			param->type = ref_type_id;
4736 			param++;
4737 		}
4738 
4739 		h = btf_hash_fnproto(t);
4740 		for_each_dedup_cand(d, hash_entry, h) {
4741 			cand_id = hash_entry->value;
4742 			cand = btf_type_by_id(d->btf, cand_id);
4743 			if (btf_equal_fnproto(t, cand)) {
4744 				new_id = cand_id;
4745 				break;
4746 			}
4747 		}
4748 		break;
4749 	}
4750 
4751 	default:
4752 		return -EINVAL;
4753 	}
4754 
4755 	d->map[type_id] = new_id;
4756 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
4757 		return -ENOMEM;
4758 
4759 	return new_id;
4760 }
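
/* Illustrative sketch, not part of libbpf: for a reference chain like
 *
 *	PTR -> CONST -> 'struct s'
 *
 * the recursion first rewrites CONST's referenced ID to the canonical
 * 'struct s' (already deduped in the previous phase) and dedupes the
 * CONST against existing candidates, then unwinds to the PTR, whose
 * referenced ID now points at the canonical CONST, and dedupes the PTR
 * the same way.
 */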
4761 
4762 static int btf_dedup_ref_types(struct btf_dedup *d)
4763 {
4764 	int i, err;
4765 
4766 	for (i = 0; i < d->btf->nr_types; i++) {
4767 		err = btf_dedup_ref_type(d, d->btf->start_id + i);
4768 		if (err < 0)
4769 			return err;
4770 	}
4771 	/* we won't need d->dedup_table anymore */
4772 	hashmap__free(d->dedup_table);
4773 	d->dedup_table = NULL;
4774 	return 0;
4775 }
4776 
4777 /*
4778  * Collect a map from type names to type ids for all canonical structs
4779  * and unions. If the same name is shared by several canonical types,
4780  * use a special value 0 to indicate this fact.
4781  */
4782 static int btf_dedup_fill_unique_names_map(struct btf_dedup *d, struct hashmap *names_map)
4783 {
4784 	__u32 nr_types = btf__type_cnt(d->btf);
4785 	struct btf_type *t;
4786 	__u32 type_id;
4787 	__u16 kind;
4788 	int err;
4789 
4790 	/*
4791 	 * Iterate over base and split module ids in order to get all
4792 	 * available structs and unions into the map.
4793 	 */
4794 	for (type_id = 1; type_id < nr_types; ++type_id) {
4795 		t = btf_type_by_id(d->btf, type_id);
4796 		kind = btf_kind(t);
4797 
4798 		if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
4799 			continue;
4800 
4801 		/* Skip non-canonical types */
4802 		if (type_id != d->map[type_id])
4803 			continue;
4804 
4805 		err = hashmap__add(names_map, t->name_off, type_id);
4806 		if (err == -EEXIST)
4807 			err = hashmap__set(names_map, t->name_off, 0, NULL, NULL);
4808 
4809 		if (err)
4810 			return err;
4811 	}
4812 
4813 	return 0;
4814 }
4815 
4816 static int btf_dedup_resolve_fwd(struct btf_dedup *d, struct hashmap *names_map, __u32 type_id)
4817 {
4818 	struct btf_type *t = btf_type_by_id(d->btf, type_id);
4819 	enum btf_fwd_kind fwd_kind = btf_kflag(t);
4820 	__u16 cand_kind, kind = btf_kind(t);
4821 	struct btf_type *cand_t;
4822 	uintptr_t cand_id;
4823 
4824 	if (kind != BTF_KIND_FWD)
4825 		return 0;
4826 
4827 	/* Skip if this FWD already has a mapping */
4828 	if (type_id != d->map[type_id])
4829 		return 0;
4830 
4831 	if (!hashmap__find(names_map, t->name_off, &cand_id))
4832 		return 0;
4833 
4834 	/* Zero is a special value indicating that name is not unique */
4835 	if (!cand_id)
4836 		return 0;
4837 
4838 	cand_t = btf_type_by_id(d->btf, cand_id);
4839 	cand_kind = btf_kind(cand_t);
4840 	if ((cand_kind == BTF_KIND_STRUCT && fwd_kind != BTF_FWD_STRUCT) ||
4841 	    (cand_kind == BTF_KIND_UNION && fwd_kind != BTF_FWD_UNION))
4842 		return 0;
4843 
4844 	d->map[type_id] = cand_id;
4845 
4846 	return 0;
4847 }
4848 
4849 /*
4850  * Resolve unambiguous forward declarations.
4851  *
4852  * The lion's share of all FWD declarations is resolved during
4853  * `btf_dedup_struct_types` phase when different type graphs are
4854  * compared against each other. However, if in some compilation unit a
4855  * FWD declaration is not a part of a type graph compared against
4856  * another type graph, that declaration's canonical type would not be
4857  * changed. Example:
4858  *
4859  * CU #1:
4860  *
4861  * struct foo;
4862  * struct foo *some_global;
4863  *
4864  * CU #2:
4865  *
4866  * struct foo { int u; };
4867  * struct foo *another_global;
4868  *
4869  * After `btf_dedup_struct_types` the BTF looks as follows:
4870  *
4871  * [1] STRUCT 'foo' size=4 vlen=1 ...
4872  * [2] INT 'int' size=4 ...
4873  * [3] PTR '(anon)' type_id=1
4874  * [4] FWD 'foo' fwd_kind=struct
4875  * [5] PTR '(anon)' type_id=4
4876  *
4877  * This pass assumes that such FWD declarations should be mapped to
4878  * structs or unions with an identical name, provided that the name is
4879  * not ambiguous.
4880  */
4881 static int btf_dedup_resolve_fwds(struct btf_dedup *d)
4882 {
4883 	int i, err;
4884 	struct hashmap *names_map;
4885 
4886 	names_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
4887 	if (IS_ERR(names_map))
4888 		return PTR_ERR(names_map);
4889 
4890 	err = btf_dedup_fill_unique_names_map(d, names_map);
4891 	if (err < 0)
4892 		goto exit;
4893 
4894 	for (i = 0; i < d->btf->nr_types; i++) {
4895 		err = btf_dedup_resolve_fwd(d, names_map, d->btf->start_id + i);
4896 		if (err < 0)
4897 			break;
4898 	}
4899 
4900 exit:
4901 	hashmap__free(names_map);
4902 	return err;
4903 }
4904 
4905 /*
4906  * Compact types.
4907  *
4908  * After we have established for each type its corresponding canonical
4909  * representative type, we can now eliminate types that are not canonical and
4910  * leave only canonical ones laid out sequentially in memory by copying them
4911  * over duplicates. During compaction the btf_dedup->hypot_map array is reused
4912  * to store a map from original type ID to a new compacted type ID, which will
4913  * be used during the next phase to "fix up" type IDs referenced from
4914  * struct/union and reference types.
4915  */
4916 static int btf_dedup_compact_types(struct btf_dedup *d)
4917 {
4918 	__u32 *new_offs;
4919 	__u32 next_type_id = d->btf->start_id;
4920 	const struct btf_type *t;
4921 	void *p;
4922 	int i, id, len;
4923 
4924 	/* we are going to reuse hypot_map to store compaction remapping */
4925 	d->hypot_map[0] = 0;
4926 	/* base BTF types are not renumbered */
4927 	for (id = 1; id < d->btf->start_id; id++)
4928 		d->hypot_map[id] = id;
4929 	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++)
4930 		d->hypot_map[id] = BTF_UNPROCESSED_ID;
4931 
4932 	p = d->btf->types_data;
4933 
4934 	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) {
4935 		if (d->map[id] != id)
4936 			continue;
4937 
4938 		t = btf__type_by_id(d->btf, id);
4939 		len = btf_type_size(t);
4940 		if (len < 0)
4941 			return len;
4942 
4943 		memmove(p, t, len);
4944 		d->hypot_map[id] = next_type_id;
4945 		d->btf->type_offs[next_type_id - d->btf->start_id] = p - d->btf->types_data;
4946 		p += len;
4947 		next_type_id++;
4948 	}
4949 
4950 	/* shrink struct btf's internal types index and update btf_header */
4951 	d->btf->nr_types = next_type_id - d->btf->start_id;
4952 	d->btf->type_offs_cap = d->btf->nr_types;
4953 	d->btf->hdr->type_len = p - d->btf->types_data;
4954 	new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap,
4955 				       sizeof(*new_offs));
4956 	if (d->btf->type_offs_cap && !new_offs)
4957 		return -ENOMEM;
4958 	d->btf->type_offs = new_offs;
4959 	d->btf->hdr->str_off = d->btf->hdr->type_len;
4960 	d->btf->raw_size = d->btf->hdr->hdr_len + d->btf->hdr->type_len + d->btf->hdr->str_len;
4961 	return 0;
4962 }
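
/* Illustrative sketch, not part of libbpf: if split BTF types [1]..[4]
 * (with start_id == 1) were deduped as
 * map = { 1 -> 1, 2 -> 1, 3 -> 3, 4 -> 3 }, compaction keeps only [1] and
 * [3], moves [3]'s data down right after [1]'s, and records
 * hypot_map = { 1 -> 1, 3 -> 2 } for the remapping phase that follows.
 */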
4963 
4964 /*
4965  * Figure out final (deduplicated and compacted) type ID for provided original
4966  * `type_id` by first resolving it into corresponding canonical type ID and
4967  * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
4968  * which is populated during compaction phase.
4969  */
4970 static int btf_dedup_remap_type_id(__u32 *type_id, void *ctx)
4971 {
4972 	struct btf_dedup *d = ctx;
4973 	__u32 resolved_type_id, new_type_id;
4974 
4975 	resolved_type_id = resolve_type_id(d, *type_id);
4976 	new_type_id = d->hypot_map[resolved_type_id];
4977 	if (new_type_id > BTF_MAX_NR_TYPES)
4978 		return -EINVAL;
4979 
4980 	*type_id = new_type_id;
4981 	return 0;
4982 }
4983 
4984 /*
4985  * Remap referenced type IDs into deduped type IDs.
4986  *
4987  * After BTF types are deduplicated and compacted, their final type IDs may
4988  * differ from original ones. The map from original to a corresponding
4989  * deduped type ID is stored in btf_dedup->hypot_map and is populated during
4990  * compaction phase. During remapping phase we are rewriting all type IDs
4991  * referenced from any BTF type (e.g., struct fields, func proto args, etc) to
4992  * their final deduped type IDs.
4993  */
4994 static int btf_dedup_remap_types(struct btf_dedup *d)
4995 {
4996 	int i, r;
4997 
4998 	for (i = 0; i < d->btf->nr_types; i++) {
4999 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
5000 		struct btf_field_iter it;
5001 		__u32 *type_id;
5002 
5003 		r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
5004 		if (r)
5005 			return r;
5006 
5007 		while ((type_id = btf_field_iter_next(&it))) {
5008 			__u32 resolved_id, new_id;
5009 
5010 			resolved_id = resolve_type_id(d, *type_id);
5011 			new_id = d->hypot_map[resolved_id];
5012 			if (new_id > BTF_MAX_NR_TYPES)
5013 				return -EINVAL;
5014 
5015 			*type_id = new_id;
5016 		}
5017 	}
5018 
5019 	if (!d->btf_ext)
5020 		return 0;
5021 
5022 	r = btf_ext_visit_type_ids(d->btf_ext, btf_dedup_remap_type_id, d);
5023 	if (r)
5024 		return r;
5025 
5026 	return 0;
5027 }
5028 
5029 /*
5030  * Probe a few well-known locations for the vmlinux kernel image and try to
5031  * load BTF data out of it to use as the target BTF.
5032  */
5033 struct btf *btf__load_vmlinux_btf(void)
5034 {
5035 	const char *sysfs_btf_path = "/sys/kernel/btf/vmlinux";
5036 	/* fallback locations, trying to find vmlinux on disk */
5037 	const char *locations[] = {
5038 		"/boot/vmlinux-%1$s",
5039 		"/lib/modules/%1$s/vmlinux-%1$s",
5040 		"/lib/modules/%1$s/build/vmlinux",
5041 		"/usr/lib/modules/%1$s/kernel/vmlinux",
5042 		"/usr/lib/debug/boot/vmlinux-%1$s",
5043 		"/usr/lib/debug/boot/vmlinux-%1$s.debug",
5044 		"/usr/lib/debug/lib/modules/%1$s/vmlinux",
5045 	};
5046 	char path[PATH_MAX + 1];
5047 	struct utsname buf;
5048 	struct btf *btf;
5049 	int i, err;
5050 
5051 	/* is canonical sysfs location accessible? */
5052 	if (faccessat(AT_FDCWD, sysfs_btf_path, F_OK, AT_EACCESS) < 0) {
5053 		pr_warn("kernel BTF is missing at '%s', was CONFIG_DEBUG_INFO_BTF enabled?\n",
5054 			sysfs_btf_path);
5055 	} else {
5056 		btf = btf__parse(sysfs_btf_path, NULL);
5057 		if (!btf) {
5058 			err = -errno;
5059 			pr_warn("failed to read kernel BTF from '%s': %d\n", sysfs_btf_path, err);
5060 			return libbpf_err_ptr(err);
5061 		}
5062 		pr_debug("loaded kernel BTF from '%s'\n", sysfs_btf_path);
5063 		return btf;
5064 	}
5065 
5066 	/* try fallback locations */
5067 	uname(&buf);
5068 	for (i = 0; i < ARRAY_SIZE(locations); i++) {
5069 		snprintf(path, PATH_MAX, locations[i], buf.release);
5070 
5071 		if (faccessat(AT_FDCWD, path, R_OK, AT_EACCESS))
5072 			continue;
5073 
5074 		btf = btf__parse(path, NULL);
5075 		err = libbpf_get_error(btf);
5076 		pr_debug("loading kernel BTF '%s': %d\n", path, err);
5077 		if (err)
5078 			continue;
5079 
5080 		return btf;
5081 	}
5082 
5083 	pr_warn("failed to find valid kernel BTF\n");
5084 	return libbpf_err_ptr(-ESRCH);
5085 }
5086 
5087 struct btf *libbpf_find_kernel_btf(void) __attribute__((alias("btf__load_vmlinux_btf")));
5088 
5089 struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf)
5090 {
5091 	char path[80];
5092 
5093 	snprintf(path, sizeof(path), "/sys/kernel/btf/%s", module_name);
5094 	return btf__parse_split(path, vmlinux_btf);
5095 }
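
/* Illustrative usage sketch, not part of libbpf; error handling is
 * omitted and the module name is hypothetical:
 *
 *	struct btf *vmlinux_btf = btf__load_vmlinux_btf();
 *	struct btf *mod_btf = btf__load_module_btf("nf_conntrack", vmlinux_btf);
 *	...
 *	btf__free(mod_btf);
 *	btf__free(vmlinux_btf);
 *
 * Module BTF is split BTF on top of vmlinux BTF, so vmlinux_btf has to
 * stay alive for as long as mod_btf is in use.
 */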
5096 
5097 int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
5098 {
5099 	const struct btf_ext_info *seg;
5100 	struct btf_ext_info_sec *sec;
5101 	int i, err;
5102 
5103 	seg = &btf_ext->func_info;
5104 	for_each_btf_ext_sec(seg, sec) {
5105 		struct bpf_func_info_min *rec;
5106 
5107 		for_each_btf_ext_rec(seg, sec, i, rec) {
5108 			err = visit(&rec->type_id, ctx);
5109 			if (err < 0)
5110 				return err;
5111 		}
5112 	}
5113 
5114 	seg = &btf_ext->core_relo_info;
5115 	for_each_btf_ext_sec(seg, sec) {
5116 		struct bpf_core_relo *rec;
5117 
5118 		for_each_btf_ext_rec(seg, sec, i, rec) {
5119 			err = visit(&rec->type_id, ctx);
5120 			if (err < 0)
5121 				return err;
5122 		}
5123 	}
5124 
5125 	return 0;
5126 }
5127 
5128 int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx)
5129 {
5130 	const struct btf_ext_info *seg;
5131 	struct btf_ext_info_sec *sec;
5132 	int i, err;
5133 
5134 	seg = &btf_ext->func_info;
5135 	for_each_btf_ext_sec(seg, sec) {
5136 		err = visit(&sec->sec_name_off, ctx);
5137 		if (err)
5138 			return err;
5139 	}
5140 
5141 	seg = &btf_ext->line_info;
5142 	for_each_btf_ext_sec(seg, sec) {
5143 		struct bpf_line_info_min *rec;
5144 
5145 		err = visit(&sec->sec_name_off, ctx);
5146 		if (err)
5147 			return err;
5148 
5149 		for_each_btf_ext_rec(seg, sec, i, rec) {
5150 			err = visit(&rec->file_name_off, ctx);
5151 			if (err)
5152 				return err;
5153 			err = visit(&rec->line_off, ctx);
5154 			if (err)
5155 				return err;
5156 		}
5157 	}
5158 
5159 	seg = &btf_ext->core_relo_info;
5160 	for_each_btf_ext_sec(seg, sec) {
5161 		struct bpf_core_relo *rec;
5162 
5163 		err = visit(&sec->sec_name_off, ctx);
5164 		if (err)
5165 			return err;
5166 
5167 		for_each_btf_ext_rec(seg, sec, i, rec) {
5168 			err = visit(&rec->access_str_off, ctx);
5169 			if (err)
5170 				return err;
5171 		}
5172 	}
5173 
5174 	return 0;
5175 }
5176 
5177 struct btf_distill {
5178 	struct btf_pipe pipe;
5179 	int *id_map;
5180 	unsigned int split_start_id;
5181 	unsigned int split_start_str;
5182 	int diff_id;
5183 };
5184 
5185 static int btf_add_distilled_type_ids(struct btf_distill *dist, __u32 i)
5186 {
5187 	struct btf_type *split_t = btf_type_by_id(dist->pipe.src, i);
5188 	struct btf_field_iter it;
5189 	__u32 *id;
5190 	int err;
5191 
5192 	err = btf_field_iter_init(&it, split_t, BTF_FIELD_ITER_IDS);
5193 	if (err)
5194 		return err;
5195 	while ((id = btf_field_iter_next(&it))) {
5196 		struct btf_type *base_t;
5197 
5198 		if (!*id)
5199 			continue;
5200 		/* split BTF id, not needed */
5201 		if (*id >= dist->split_start_id)
5202 			continue;
5203 		/* already added? */
5204 		if (dist->id_map[*id] > 0)
5205 			continue;
5206 
5207 		/* only a subset of base BTF types should be referenced from
5208 		 * split BTF; ensure nothing unexpected is referenced.
5209 		 */
5210 		base_t = btf_type_by_id(dist->pipe.src, *id);
5211 		switch (btf_kind(base_t)) {
5212 		case BTF_KIND_INT:
5213 		case BTF_KIND_FLOAT:
5214 		case BTF_KIND_FWD:
5215 		case BTF_KIND_ARRAY:
5216 		case BTF_KIND_STRUCT:
5217 		case BTF_KIND_UNION:
5218 		case BTF_KIND_TYPEDEF:
5219 		case BTF_KIND_ENUM:
5220 		case BTF_KIND_ENUM64:
5221 		case BTF_KIND_PTR:
5222 		case BTF_KIND_CONST:
5223 		case BTF_KIND_RESTRICT:
5224 		case BTF_KIND_VOLATILE:
5225 		case BTF_KIND_FUNC_PROTO:
5226 		case BTF_KIND_TYPE_TAG:
5227 			dist->id_map[*id] = *id;
5228 			break;
5229 		default:
5230 			pr_warn("unexpected reference to base type[%u] of kind [%u] when creating distilled base BTF.\n",
5231 				*id, btf_kind(base_t));
5232 			return -EINVAL;
5233 		}
5234 		/* If a base type is used, ensure types it refers to are
5235 		 * marked as used also; so for example if we find a PTR to INT
5236 		 * we need both the PTR and INT.
5237 		 *
5238 		 * The only exception is named struct/unions, since distilled
5239 		 * base BTF composite types have no members.
5240 		 */
5241 		if (btf_is_composite(base_t) && base_t->name_off)
5242 			continue;
5243 		err = btf_add_distilled_type_ids(dist, *id);
5244 		if (err)
5245 			return err;
5246 	}
5247 	return 0;
5248 }
5249 
5250 static int btf_add_distilled_types(struct btf_distill *dist)
5251 {
5252 	bool adding_to_base = dist->pipe.dst->start_id == 1;
5253 	int id = btf__type_cnt(dist->pipe.dst);
5254 	struct btf_type *t;
5255 	int i, err = 0;
5256 
5258 	/* Add types for each of the required references to either distilled
5259 	 * base or split BTF, depending on type characteristics.
5260 	 */
5261 	for (i = 1; i < dist->split_start_id; i++) {
5262 		const char *name;
5263 		int kind;
5264 
5265 		if (!dist->id_map[i])
5266 			continue;
5267 		t = btf_type_by_id(dist->pipe.src, i);
5268 		kind = btf_kind(t);
5269 		name = btf__name_by_offset(dist->pipe.src, t->name_off);
5270 
5271 		switch (kind) {
5272 		case BTF_KIND_INT:
5273 		case BTF_KIND_FLOAT:
5274 		case BTF_KIND_FWD:
5275 			/* Named int, float, fwd are added to base. */
5276 			if (!adding_to_base)
5277 				continue;
5278 			err = btf_add_type(&dist->pipe, t);
5279 			break;
5280 		case BTF_KIND_STRUCT:
5281 		case BTF_KIND_UNION:
5282 			/* Named struct/union are added to base as 0-vlen
5283 			 * struct/union of same size.  Anonymous struct/unions
5284 			 * are added to split BTF as-is.
5285 			 */
5286 			if (adding_to_base) {
5287 				if (!t->name_off)
5288 					continue;
5289 				err = btf_add_composite(dist->pipe.dst, kind, name, t->size);
5290 			} else {
5291 				if (t->name_off)
5292 					continue;
5293 				err = btf_add_type(&dist->pipe, t);
5294 			}
5295 			break;
5296 		case BTF_KIND_ENUM:
5297 		case BTF_KIND_ENUM64:
5298 			/* Named enum[64]s are added to base as a sized
5299 			 * enum; relocation will match with appropriately-named
5300 			 * and sized enum or enum64.
5301 			 *
5302 			 * Anonymous enums are added to split BTF as-is.
5303 			 */
5304 			if (adding_to_base) {
5305 				if (!t->name_off)
5306 					continue;
5307 				err = btf__add_enum(dist->pipe.dst, name, t->size);
5308 			} else {
5309 				if (t->name_off)
5310 					continue;
5311 				err = btf_add_type(&dist->pipe, t);
5312 			}
5313 			break;
5314 		case BTF_KIND_ARRAY:
5315 		case BTF_KIND_TYPEDEF:
5316 		case BTF_KIND_PTR:
5317 		case BTF_KIND_CONST:
5318 		case BTF_KIND_RESTRICT:
5319 		case BTF_KIND_VOLATILE:
5320 		case BTF_KIND_FUNC_PROTO:
5321 		case BTF_KIND_TYPE_TAG:
5322 			/* All other types are added to split BTF. */
5323 			if (adding_to_base)
5324 				continue;
5325 			err = btf_add_type(&dist->pipe, t);
5326 			break;
5327 		default:
5328 			pr_warn("unexpected kind when adding base type '%s'[%u] of kind [%u] to distilled base BTF.\n",
5329 				name, i, kind);
5330 			return -EINVAL;
5332 		}
5333 		if (err < 0)
5334 			break;
5335 		dist->id_map[i] = id++;
5336 	}
5337 	return err;
5338 }
5339 
5340 /* Split BTF ids without a mapping will be shifted downwards since distilled
5341  * base BTF is smaller than the original base BTF.  For those that have a
5342  * mapping (either to base or updated split BTF), update the id based on
5343  * that mapping.
5344  */
5345 static int btf_update_distilled_type_ids(struct btf_distill *dist, __u32 i)
5346 {
5347 	struct btf_type *t = btf_type_by_id(dist->pipe.dst, i);
5348 	struct btf_field_iter it;
5349 	__u32 *id;
5350 	int err;
5351 
5352 	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
5353 	if (err)
5354 		return err;
5355 	while ((id = btf_field_iter_next(&it))) {
5356 		if (dist->id_map[*id])
5357 			*id = dist->id_map[*id];
5358 		else if (*id >= dist->split_start_id)
5359 			*id -= dist->diff_id;
5360 	}
5361 	return 0;
5362 }
5363 
5364 /* Create updated split BTF with distilled base BTF; distilled base BTF
5365  * consists of BTF information required to clarify the types that split
5366  * BTF refers to, omitting unneeded details.  Specifically it will contain
5367  * base types and memberless definitions of named structs, unions and enumerated
5368  * types. Associated reference types like pointers, arrays and anonymous
5369  * structs, unions and enumerated types will be added to split BTF.
5370  * Size is recorded for named struct/unions to help guide matching to the
5371  * target base BTF during later relocation.
5372  *
5373  * The only case where structs, unions or enumerated types are fully represented
5374  * is when they are anonymous; in such cases, the anonymous type is added to
5375  * split BTF in full.
5376  *
5377  * We return newly-created split BTF where the split BTF refers to a newly-created
5378  * distilled base BTF. Both must be freed separately by the caller.
5379  */
5380 int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf,
5381 		      struct btf **new_split_btf)
5382 {
5383 	struct btf *new_base = NULL, *new_split = NULL;
5384 	const struct btf *old_base;
5385 	unsigned int n = btf__type_cnt(src_btf);
5386 	struct btf_distill dist = {};
5387 	struct btf_type *t;
5388 	int i, err = 0;
5389 
5390 	/* src BTF must be split BTF. */
5391 	old_base = btf__base_btf(src_btf);
5392 	if (!new_base_btf || !new_split_btf || !old_base)
5393 		return libbpf_err(-EINVAL);
5394 
5395 	new_base = btf__new_empty();
5396 	if (!new_base)
5397 		return libbpf_err(-ENOMEM);
5398 
5399 	btf__set_endianness(new_base, btf__endianness(src_btf));
5400 
5401 	dist.id_map = calloc(n, sizeof(*dist.id_map));
5402 	if (!dist.id_map) {
5403 		err = -ENOMEM;
5404 		goto done;
5405 	}
5406 	dist.pipe.src = src_btf;
5407 	dist.pipe.dst = new_base;
5408 	dist.pipe.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
5409 	if (IS_ERR(dist.pipe.str_off_map)) {
5410 		err = -ENOMEM;
5411 		goto done;
5412 	}
5413 	dist.split_start_id = btf__type_cnt(old_base);
5414 	dist.split_start_str = old_base->hdr->str_len;
5415 
5416 	/* Pass over src split BTF; generate the list of base BTF type ids it
5417 	 * references; these will constitute our distilled BTF set to be
5418 	 * distributed over base and split BTF as appropriate.
5419 	 */
5420 	for (i = src_btf->start_id; i < n; i++) {
5421 		err = btf_add_distilled_type_ids(&dist, i);
5422 		if (err < 0)
5423 			goto done;
5424 	}
5425 	/* Next add types for each of the required references to base BTF and split BTF
5426 	 * in turn.
5427 	 */
5428 	err = btf_add_distilled_types(&dist);
5429 	if (err < 0)
5430 		goto done;
5431 
5432 	/* Create new split BTF with distilled base BTF as its base; the final
5433 	 * state is split BTF with distilled base BTF that represents enough
5434 	 * about its base references to allow it to be relocated with the base
5435 	 * BTF available.
5436 	 */
5437 	new_split = btf__new_empty_split(new_base);
5438 	if (!new_split) {
5439 		err = -errno;
5440 		goto done;
5441 	}
5442 	dist.pipe.dst = new_split;
5443 	/* First add all split types */
5444 	for (i = src_btf->start_id; i < n; i++) {
5445 		t = btf_type_by_id(src_btf, i);
5446 		err = btf_add_type(&dist.pipe, t);
5447 		if (err < 0)
5448 			goto done;
5449 	}
5450 	/* Now add distilled types to split BTF that are not added to base. */
5451 	err = btf_add_distilled_types(&dist);
5452 	if (err < 0)
5453 		goto done;
5454 
5455 	/* All split BTF ids will be shifted downwards since there are fewer base
5456 	 * BTF ids in distilled base BTF.
5457 	 */
5458 	dist.diff_id = dist.split_start_id - btf__type_cnt(new_base);
5459 
5460 	n = btf__type_cnt(new_split);
5461 	/* Now update base/split BTF ids. */
5462 	for (i = 1; i < n; i++) {
5463 		err = btf_update_distilled_type_ids(&dist, i);
5464 		if (err < 0)
5465 			break;
5466 	}
5467 done:
5468 	free(dist.id_map);
5469 	hashmap__free(dist.pipe.str_off_map);
5470 	if (err) {
5471 		btf__free(new_split);
5472 		btf__free(new_base);
5473 		return libbpf_err(err);
5474 	}
5475 	*new_base_btf = new_base;
5476 	*new_split_btf = new_split;
5477 
5478 	return 0;
5479 }
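
/* Illustrative usage sketch, not part of libbpf; 'prog_btf' stands for
 * some pre-existing split BTF and error handling is omitted:
 *
 *	struct btf *dist_base = NULL, *dist_split = NULL;
 *
 *	if (!btf__distill_base(prog_btf, &dist_base, &dist_split)) {
 *		... persist dist_split alongside dist_base ...
 *		btf__free(dist_split);
 *		btf__free(dist_base);
 *	}
 *
 * As noted above, both output objects must be freed separately by the
 * caller.
 */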
5480 
5481 const struct btf_header *btf_header(const struct btf *btf)
5482 {
5483 	return btf->hdr;
5484 }
5485 
5486 void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
5487 {
5488 	btf->base_btf = (struct btf *)base_btf;
5489 	btf->start_id = btf__type_cnt(base_btf);
5490 	btf->start_str_off = base_btf->hdr->str_len;
5491 }
5492 
5493 int btf__relocate(struct btf *btf, const struct btf *base_btf)
5494 {
5495 	int err = btf_relocate(btf, base_btf, NULL);
5496 
5497 	if (!err)
5498 		btf->owns_base = false;
5499 	return libbpf_err(err);
5500 }
5501