xref: /linux/tools/lib/bpf/btf.c (revision 9e6d33937b42ca4867af3b341e5d09abca4a2746)
1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 /* Copyright (c) 2018 Facebook */
3 
4 #include <byteswap.h>
5 #include <endian.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <fcntl.h>
10 #include <unistd.h>
11 #include <errno.h>
12 #include <sys/utsname.h>
13 #include <sys/param.h>
14 #include <sys/stat.h>
15 #include <linux/kernel.h>
16 #include <linux/err.h>
17 #include <linux/btf.h>
18 #include <gelf.h>
19 #include "btf.h"
20 #include "bpf.h"
21 #include "libbpf.h"
22 #include "libbpf_internal.h"
23 #include "hashmap.h"
24 #include "strset.h"
25 
/* hard caps libbpf enforces on type count and string offsets (31-bit values) */
#define BTF_MAX_NR_TYPES 0x7fffffffU
#define BTF_MAX_STR_OFFSET 0x7fffffffU

/* shared singleton representing the implicit [0] "void" type */
static struct btf_type btf_void;
30 
struct btf {
	/* raw BTF data in native endianness */
	void *raw_data;
	/* raw BTF data in non-native endianness */
	void *raw_data_swapped;
	__u32 raw_size;
	/* whether target endianness differs from the native one */
	bool swapped_endian;

	/*
	 * When BTF is loaded from an ELF or raw memory it is stored
	 * in a contiguous memory block. The hdr, types_data, and strs_data
	 * point inside that memory region to their respective parts of BTF
	 * representation:
	 *
	 * +--------------------------------+
	 * |  Header  |  Types  |  Strings  |
	 * +--------------------------------+
	 * ^          ^         ^
	 * |          |         |
	 * hdr        |         |
	 * types_data-+         |
	 * strs_data------------+
	 *
	 * If BTF data is later modified, e.g., due to types added or
	 * removed, BTF deduplication performed, etc, this contiguous
	 * representation is broken up into three independently allocated
	 * memory regions to be able to modify them independently.
	 * raw_data is nulled out at that point, but can be later allocated
	 * and cached again if user calls btf__raw_data(), at which point
	 * raw_data will contain a contiguous copy of header, types, and
	 * strings:
	 *
	 * +----------+  +---------+  +-----------+
	 * |  Header  |  |  Types  |  |  Strings  |
	 * +----------+  +---------+  +-----------+
	 * ^             ^            ^
	 * |             |            |
	 * hdr           |            |
	 * types_data----+            |
	 * strset__data(strs_set)-----+
	 *
	 *               +----------+---------+-----------+
	 *               |  Header  |  Types  |  Strings  |
	 * raw_data----->+----------+---------+-----------+
	 */
	struct btf_header *hdr;

	void *types_data;
	size_t types_data_cap; /* used size stored in hdr->type_len */

	/* type ID to `struct btf_type *` lookup index
	 * type_offs[0] corresponds to the first non-VOID type:
	 *   - for base BTF it's type [1];
	 *   - for split BTF it's the first non-base BTF type.
	 */
	__u32 *type_offs;
	size_t type_offs_cap;
	/* number of types in this BTF instance:
	 *   - doesn't include special [0] void type;
	 *   - for split BTF counts number of types added on top of base BTF.
	 */
	__u32 nr_types;
	/* if not NULL, points to the base BTF on top of which the current
	 * split BTF is based
	 */
	struct btf *base_btf;
	/* BTF type ID of the first type in this BTF instance:
	 *   - for base BTF it's equal to 1;
	 *   - for split BTF it's equal to biggest type ID of base BTF plus 1.
	 */
	int start_id;
	/* logical string offset of this BTF instance:
	 *   - for base BTF it's equal to 0;
	 *   - for split BTF it's equal to the total size of base BTF's string section.
	 */
	int start_str_off;

	/* only one of strs_data or strs_set can be non-NULL, depending on
	 * whether BTF is in a modifiable state (strs_set is used) or not
	 * (strs_data points inside raw_data)
	 */
	void *strs_data;
	/* a set of unique strings */
	struct strset *strs_set;
	/* whether strings are already deduplicated */
	bool strs_deduped;

	/* BTF object FD, if loaded into kernel */
	int fd;

	/* Pointer size (in bytes) for a target architecture of this BTF */
	int ptr_sz;
};
125 
126 static inline __u64 ptr_to_u64(const void *ptr)
127 {
128 	return (__u64) (unsigned long) ptr;
129 }
130 
/* Ensure given dynamically allocated memory region pointed to by *data* with
 * capacity of *cap_cnt* elements each taking *elem_sz* bytes has enough
 * memory to accommodate *add_cnt* new elements, assuming *cur_cnt* elements
 * are already used. At most *max_cnt* elements can be ever allocated.
 * If necessary, memory is reallocated and all existing data is copied over,
 * new pointer to the memory region is stored at *data, new memory region
 * capacity (in number of elements) is stored in *cap.
 * On success, memory pointer to the beginning of unused memory is returned.
 * On error, NULL is returned.
 */
void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
		     size_t cur_cnt, size_t max_cnt, size_t add_cnt)
{
	size_t new_cnt;
	void *new_data;

	/* guard against size_t overflow of cur_cnt + add_cnt below; a
	 * wrapped-around sum could falsely pass both the capacity and the
	 * max_cnt checks and hand out a pointer past the allocation
	 */
	if (add_cnt > SIZE_MAX - cur_cnt)
		return NULL;

	if (cur_cnt + add_cnt <= *cap_cnt)
		return *data + cur_cnt * elem_sz;

	/* requested more than the set limit */
	if (cur_cnt + add_cnt > max_cnt)
		return NULL;

	new_cnt = *cap_cnt;
	new_cnt += new_cnt / 4;		  /* expand by 25% */
	if (new_cnt < 16)		  /* but at least 16 elements */
		new_cnt = 16;
	if (new_cnt > max_cnt)		  /* but not exceeding a set limit */
		new_cnt = max_cnt;
	if (new_cnt < cur_cnt + add_cnt)  /* also ensure we have enough memory */
		new_cnt = cur_cnt + add_cnt;

	new_data = libbpf_reallocarray(*data, new_cnt, elem_sz);
	if (!new_data)
		return NULL;

	/* zero out newly allocated portion of memory */
	memset(new_data + (*cap_cnt) * elem_sz, 0, (new_cnt - *cap_cnt) * elem_sz);

	*data = new_data;
	*cap_cnt = new_cnt;
	return new_data + cur_cnt * elem_sz;
}
174 
175 /* Ensure given dynamically allocated memory region has enough allocated space
176  * to accommodate *need_cnt* elements of size *elem_sz* bytes each
177  */
178 int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
179 {
180 	void *p;
181 
182 	if (need_cnt <= *cap_cnt)
183 		return 0;
184 
185 	p = libbpf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt);
186 	if (!p)
187 		return -ENOMEM;
188 
189 	return 0;
190 }
191 
192 static void *btf_add_type_offs_mem(struct btf *btf, size_t add_cnt)
193 {
194 	return libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
195 			      btf->nr_types, BTF_MAX_NR_TYPES, add_cnt);
196 }
197 
198 static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
199 {
200 	__u32 *p;
201 
202 	p = btf_add_type_offs_mem(btf, 1);
203 	if (!p)
204 		return -ENOMEM;
205 
206 	*p = type_off;
207 	return 0;
208 }
209 
/* Byte-swap all multi-byte fields of the BTF header in place; version and
 * flags are single bytes and need no swapping
 */
static void btf_bswap_hdr(struct btf_header *h)
{
	h->magic = bswap_16(h->magic);
	h->hdr_len = bswap_32(h->hdr_len);
	h->type_off = bswap_32(h->type_off);
	h->type_len = bswap_32(h->type_len);
	h->str_off = bswap_32(h->str_off);
	h->str_len = bswap_32(h->str_len);
}
219 
/* Validate BTF header and section layout; if data is in non-native
 * endianness, byte-swap the header in place and remember that the rest of
 * the data needs swapping too (btf->swapped_endian)
 */
static int btf_parse_hdr(struct btf *btf)
{
	struct btf_header *hdr = btf->hdr;
	__u32 meta_left;

	if (btf->raw_size < sizeof(struct btf_header)) {
		pr_debug("BTF header not found\n");
		return -EINVAL;
	}

	if (hdr->magic == bswap_16(BTF_MAGIC)) {
		/* byte-swapped magic: data is in non-native endianness */
		btf->swapped_endian = true;
		if (bswap_32(hdr->hdr_len) != sizeof(struct btf_header)) {
			pr_warn("Can't load BTF with non-native endianness due to unsupported header length %u\n",
				bswap_32(hdr->hdr_len));
			return -ENOTSUP;
		}
		btf_bswap_hdr(hdr);
	} else if (hdr->magic != BTF_MAGIC) {
		pr_debug("Invalid BTF magic: %x\n", hdr->magic);
		return -EINVAL;
	}

	if (btf->raw_size < hdr->hdr_len) {
		pr_debug("BTF header len %u larger than data size %u\n",
			 hdr->hdr_len, btf->raw_size);
		return -EINVAL;
	}

	/* (long long) casts below avoid __u32 overflow when adding
	 * offset + length of a section
	 */
	meta_left = btf->raw_size - hdr->hdr_len;
	if (meta_left < (long long)hdr->str_off + hdr->str_len) {
		pr_debug("Invalid BTF total size: %u\n", btf->raw_size);
		return -EINVAL;
	}

	/* type section must come before (or abut) the string section */
	if ((long long)hdr->type_off + hdr->type_len > hdr->str_off) {
		pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n",
			 hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len);
		return -EINVAL;
	}

	if (hdr->type_off % 4) {
		pr_debug("BTF type section is not aligned to 4 bytes\n");
		return -EINVAL;
	}

	return 0;
}
268 
/* Sanity-check the string section: it must be non-empty (except for split
 * BTF, which may add no strings of its own), fit under BTF_MAX_STR_OFFSET,
 * end with a NUL byte, and - for base BTF - start with the empty string
 */
static int btf_parse_str_sec(struct btf *btf)
{
	const struct btf_header *hdr = btf->hdr;
	const char *start = btf->strs_data;
	const char *end = start + btf->hdr->str_len;

	/* split BTF is allowed to have an empty string section */
	if (btf->base_btf && hdr->str_len == 0)
		return 0;
	/* last byte must be NUL so all string reads are terminated */
	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || end[-1]) {
		pr_debug("Invalid BTF string section\n");
		return -EINVAL;
	}
	/* base BTF must have the empty string at offset 0 */
	if (!btf->base_btf && start[0]) {
		pr_debug("Invalid BTF string section\n");
		return -EINVAL;
	}
	return 0;
}
287 
288 static int btf_type_size(const struct btf_type *t)
289 {
290 	const int base_size = sizeof(struct btf_type);
291 	__u16 vlen = btf_vlen(t);
292 
293 	switch (btf_kind(t)) {
294 	case BTF_KIND_FWD:
295 	case BTF_KIND_CONST:
296 	case BTF_KIND_VOLATILE:
297 	case BTF_KIND_RESTRICT:
298 	case BTF_KIND_PTR:
299 	case BTF_KIND_TYPEDEF:
300 	case BTF_KIND_FUNC:
301 	case BTF_KIND_FLOAT:
302 	case BTF_KIND_TYPE_TAG:
303 		return base_size;
304 	case BTF_KIND_INT:
305 		return base_size + sizeof(__u32);
306 	case BTF_KIND_ENUM:
307 		return base_size + vlen * sizeof(struct btf_enum);
308 	case BTF_KIND_ENUM64:
309 		return base_size + vlen * sizeof(struct btf_enum64);
310 	case BTF_KIND_ARRAY:
311 		return base_size + sizeof(struct btf_array);
312 	case BTF_KIND_STRUCT:
313 	case BTF_KIND_UNION:
314 		return base_size + vlen * sizeof(struct btf_member);
315 	case BTF_KIND_FUNC_PROTO:
316 		return base_size + vlen * sizeof(struct btf_param);
317 	case BTF_KIND_VAR:
318 		return base_size + sizeof(struct btf_var);
319 	case BTF_KIND_DATASEC:
320 		return base_size + vlen * sizeof(struct btf_var_secinfo);
321 	case BTF_KIND_DECL_TAG:
322 		return base_size + sizeof(struct btf_decl_tag);
323 	default:
324 		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
325 		return -EINVAL;
326 	}
327 }
328 
/* Byte-swap the fixed-size part (struct btf_type) of a type descriptor */
static void btf_bswap_type_base(struct btf_type *t)
{
	t->name_off = bswap_32(t->name_off);
	t->info = bswap_32(t->info);
	t->type = bswap_32(t->type);
}
335 
/* Byte-swap the kind-specific data that follows struct btf_type. Assumes
 * btf_bswap_type_base() was already applied, so btf_kind()/btf_vlen() read
 * native-endian values. Returns -EINVAL for an unrecognized kind.
 */
static int btf_bswap_type_rest(struct btf_type *t)
{
	struct btf_var_secinfo *v;
	struct btf_enum64 *e64;
	struct btf_member *m;
	struct btf_array *a;
	struct btf_param *p;
	struct btf_enum *e;
	__u16 vlen = btf_vlen(t);
	int i;

	switch (btf_kind(t)) {
	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_FLOAT:
	case BTF_KIND_TYPE_TAG:
		/* no trailing data to swap */
		return 0;
	case BTF_KIND_INT:
		/* single extra __u32 right after the base descriptor */
		*(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1));
		return 0;
	case BTF_KIND_ENUM:
		for (i = 0, e = btf_enum(t); i < vlen; i++, e++) {
			e->name_off = bswap_32(e->name_off);
			e->val = bswap_32(e->val);
		}
		return 0;
	case BTF_KIND_ENUM64:
		for (i = 0, e64 = btf_enum64(t); i < vlen; i++, e64++) {
			e64->name_off = bswap_32(e64->name_off);
			e64->val_lo32 = bswap_32(e64->val_lo32);
			e64->val_hi32 = bswap_32(e64->val_hi32);
		}
		return 0;
	case BTF_KIND_ARRAY:
		a = btf_array(t);
		a->type = bswap_32(a->type);
		a->index_type = bswap_32(a->index_type);
		a->nelems = bswap_32(a->nelems);
		return 0;
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		for (i = 0, m = btf_members(t); i < vlen; i++, m++) {
			m->name_off = bswap_32(m->name_off);
			m->type = bswap_32(m->type);
			m->offset = bswap_32(m->offset);
		}
		return 0;
	case BTF_KIND_FUNC_PROTO:
		for (i = 0, p = btf_params(t); i < vlen; i++, p++) {
			p->name_off = bswap_32(p->name_off);
			p->type = bswap_32(p->type);
		}
		return 0;
	case BTF_KIND_VAR:
		btf_var(t)->linkage = bswap_32(btf_var(t)->linkage);
		return 0;
	case BTF_KIND_DATASEC:
		for (i = 0, v = btf_var_secinfos(t); i < vlen; i++, v++) {
			v->type = bswap_32(v->type);
			v->offset = bswap_32(v->offset);
			v->size = bswap_32(v->size);
		}
		return 0;
	case BTF_KIND_DECL_TAG:
		btf_decl_tag(t)->component_idx = bswap_32(btf_decl_tag(t)->component_idx);
		return 0;
	default:
		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
		return -EINVAL;
	}
}
412 
/* Walk the type section, byte-swapping each type in place if needed,
 * validating per-type sizes, and building the type_offs index as we go.
 * The section must be consumed exactly, with no trailing bytes.
 */
static int btf_parse_type_sec(struct btf *btf)
{
	struct btf_header *hdr = btf->hdr;
	void *next_type = btf->types_data;
	void *end_type = next_type + hdr->type_len;
	int err, type_size;

	while (next_type + sizeof(struct btf_type) <= end_type) {
		/* base part must be swapped first so btf_type_size() can
		 * read kind/vlen in native endianness
		 */
		if (btf->swapped_endian)
			btf_bswap_type_base(next_type);

		type_size = btf_type_size(next_type);
		if (type_size < 0)
			return type_size;
		if (next_type + type_size > end_type) {
			pr_warn("BTF type [%d] is malformed\n", btf->start_id + btf->nr_types);
			return -EINVAL;
		}

		if (btf->swapped_endian && btf_bswap_type_rest(next_type))
			return -EINVAL;

		err = btf_add_type_idx_entry(btf, next_type - btf->types_data);
		if (err)
			return err;

		next_type += type_size;
		btf->nr_types++;
	}

	/* leftover bytes that can't hold even a btf_type header */
	if (next_type != end_type) {
		pr_warn("BTF types data is malformed\n");
		return -EINVAL;
	}

	return 0;
}
450 
451 static int btf_validate_str(const struct btf *btf, __u32 str_off, const char *what, __u32 type_id)
452 {
453 	const char *s;
454 
455 	s = btf__str_by_offset(btf, str_off);
456 	if (!s) {
457 		pr_warn("btf: type [%u]: invalid %s (string offset %u)\n", type_id, what, str_off);
458 		return -EINVAL;
459 	}
460 
461 	return 0;
462 }
463 
464 static int btf_validate_id(const struct btf *btf, __u32 id, __u32 ctx_id)
465 {
466 	const struct btf_type *t;
467 
468 	t = btf__type_by_id(btf, id);
469 	if (!t) {
470 		pr_warn("btf: type [%u]: invalid referenced type ID %u\n", ctx_id, id);
471 		return -EINVAL;
472 	}
473 
474 	return 0;
475 }
476 
/* Validate one type: its name, all string offsets it embeds, and all type
 * IDs it references. Kind-specific trailing data is checked per kind.
 */
static int btf_validate_type(const struct btf *btf, const struct btf_type *t, __u32 id)
{
	__u32 kind = btf_kind(t);
	int err, i, n;

	err = btf_validate_str(btf, t->name_off, "type name", id);
	if (err)
		return err;

	switch (kind) {
	case BTF_KIND_UNKN:
	case BTF_KIND_INT:
	case BTF_KIND_FWD:
	case BTF_KIND_FLOAT:
		/* no type references to validate */
		break;
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_VAR:
	case BTF_KIND_DECL_TAG:
	case BTF_KIND_TYPE_TAG:
		/* single referenced type in t->type */
		err = btf_validate_id(btf, t->type, id);
		if (err)
			return err;
		break;
	case BTF_KIND_ARRAY: {
		const struct btf_array *a = btf_array(t);

		err = btf_validate_id(btf, a->type, id);
		err = err ?: btf_validate_id(btf, a->index_type, id);
		if (err)
			return err;
		break;
	}
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *m = btf_members(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "field name", id);
			err = err ?: btf_validate_id(btf, m->type, id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_ENUM: {
		const struct btf_enum *m = btf_enum(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "enum name", id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_ENUM64: {
		const struct btf_enum64 *m = btf_enum64(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "enum name", id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_FUNC: {
		const struct btf_type *ft;

		err = btf_validate_id(btf, t->type, id);
		if (err)
			return err;
		/* FUNC must point at a FUNC_PROTO */
		ft = btf__type_by_id(btf, t->type);
		if (btf_kind(ft) != BTF_KIND_FUNC_PROTO) {
			pr_warn("btf: type [%u]: referenced type [%u] is not FUNC_PROTO\n", id, t->type);
			return -EINVAL;
		}
		break;
	}
	case BTF_KIND_FUNC_PROTO: {
		const struct btf_param *m = btf_params(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "param name", id);
			err = err ?: btf_validate_id(btf, m->type, id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_DATASEC: {
		const struct btf_var_secinfo *m = btf_var_secinfos(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_id(btf, m->type, id);
			if (err)
				return err;
		}
		break;
	}
	default:
		pr_warn("btf: type [%u]: unrecognized kind %u\n", id, kind);
		return -EINVAL;
	}
	return 0;
}
590 
591 /* Validate basic sanity of BTF. It's intentionally less thorough than
592  * kernel's validation and validates only properties of BTF that libbpf relies
593  * on to be correct (e.g., valid type IDs, valid string offsets, etc)
594  */
595 static int btf_sanity_check(const struct btf *btf)
596 {
597 	const struct btf_type *t;
598 	__u32 i, n = btf__type_cnt(btf);
599 	int err;
600 
601 	for (i = 1; i < n; i++) {
602 		t = btf_type_by_id(btf, i);
603 		err = btf_validate_type(btf, t, i);
604 		if (err)
605 			return err;
606 	}
607 	return 0;
608 }
609 
/* Number of type IDs covered by this BTF (i.e., max valid type ID + 1);
 * for split BTF this includes all of the base BTF's types as well
 */
__u32 btf__type_cnt(const struct btf *btf)
{
	return btf->start_id + btf->nr_types;
}
614 
/* Return base BTF this split BTF is built on top of, or NULL if this BTF
 * isn't split
 */
const struct btf *btf__base_btf(const struct btf *btf)
{
	return btf->base_btf;
}
619 
/* internal helper returning non-const pointer to a type */
struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id)
{
	if (type_id == 0)
		return &btf_void;
	/* IDs below start_id belong to the base BTF chain */
	if (type_id < btf->start_id)
		return btf_type_by_id(btf->base_btf, type_id);
	return btf->types_data + btf->type_offs[type_id - btf->start_id];
}
629 
630 const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
631 {
632 	if (type_id >= btf->start_id + btf->nr_types)
633 		return errno = EINVAL, NULL;
634 	return btf_type_by_id((struct btf *)btf, type_id);
635 }
636 
637 static int determine_ptr_size(const struct btf *btf)
638 {
639 	static const char * const long_aliases[] = {
640 		"long",
641 		"long int",
642 		"int long",
643 		"unsigned long",
644 		"long unsigned",
645 		"unsigned long int",
646 		"unsigned int long",
647 		"long unsigned int",
648 		"long int unsigned",
649 		"int unsigned long",
650 		"int long unsigned",
651 	};
652 	const struct btf_type *t;
653 	const char *name;
654 	int i, j, n;
655 
656 	if (btf->base_btf && btf->base_btf->ptr_sz > 0)
657 		return btf->base_btf->ptr_sz;
658 
659 	n = btf__type_cnt(btf);
660 	for (i = 1; i < n; i++) {
661 		t = btf__type_by_id(btf, i);
662 		if (!btf_is_int(t))
663 			continue;
664 
665 		if (t->size != 4 && t->size != 8)
666 			continue;
667 
668 		name = btf__name_by_offset(btf, t->name_off);
669 		if (!name)
670 			continue;
671 
672 		for (j = 0; j < ARRAY_SIZE(long_aliases); j++) {
673 			if (strcmp(name, long_aliases[j]) == 0)
674 				return t->size;
675 		}
676 	}
677 
678 	return -1;
679 }
680 
/* Lazily determine and cache pointer size; fall back to the native pointer
 * size when it can't be inferred from BTF (ptr_sz stays negative)
 */
static size_t btf_ptr_sz(const struct btf *btf)
{
	if (!btf->ptr_sz) /* cast away const only to cache the result */
		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
	return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
}
687 
688 /* Return pointer size this BTF instance assumes. The size is heuristically
689  * determined by looking for 'long' or 'unsigned long' integer type and
690  * recording its size in bytes. If BTF type information doesn't have any such
691  * type, this function returns 0. In the latter case, native architecture's
692  * pointer size is assumed, so will be either 4 or 8, depending on
693  * architecture that libbpf was compiled for. It's possible to override
694  * guessed value by using btf__set_pointer_size() API.
695  */
696 size_t btf__pointer_size(const struct btf *btf)
697 {
698 	if (!btf->ptr_sz)
699 		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
700 
701 	if (btf->ptr_sz < 0)
702 		/* not enough BTF type info to guess */
703 		return 0;
704 
705 	return btf->ptr_sz;
706 }
707 
708 /* Override or set pointer size in bytes. Only values of 4 and 8 are
709  * supported.
710  */
711 int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
712 {
713 	if (ptr_sz != 4 && ptr_sz != 8)
714 		return libbpf_err(-EINVAL);
715 	btf->ptr_sz = ptr_sz;
716 	return 0;
717 }
718 
/* Compile-time detection of host byte order via the compiler-provided
 * __BYTE_ORDER__ macro
 */
static bool is_host_big_endian(void)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return false;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return true;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
}
729 
730 enum btf_endianness btf__endianness(const struct btf *btf)
731 {
732 	if (is_host_big_endian())
733 		return btf->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
734 	else
735 		return btf->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
736 }
737 
738 int btf__set_endianness(struct btf *btf, enum btf_endianness endian)
739 {
740 	if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
741 		return libbpf_err(-EINVAL);
742 
743 	btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
744 	if (!btf->swapped_endian) {
745 		free(btf->raw_data_swapped);
746 		btf->raw_data_swapped = NULL;
747 	}
748 	return 0;
749 }
750 
/* "void" for size-resolution purposes: the special [0] type or any forward
 * declaration (both have no resolvable size)
 */
static bool btf_type_is_void(const struct btf_type *t)
{
	return t == &btf_void || btf_is_fwd(t);
}
755 
/* NULL-tolerant variant of btf_type_is_void() */
static bool btf_type_is_void_or_null(const struct btf_type *t)
{
	return !t || btf_type_is_void(t);
}
760 
/* bound on modifier/typedef/array chains to defend against reference cycles */
#define MAX_RESOLVE_DEPTH 32

/* Resolve byte size of a type, following modifiers/typedefs/vars and
 * multiplying out array dimensions. Returns negative error for void-like,
 * cyclic, or overflowing types.
 */
__s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
{
	const struct btf_array *array;
	const struct btf_type *t;
	__u32 nelems = 1;
	__s64 size = -1;
	int i;

	t = btf__type_by_id(btf, type_id);
	for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) {
		switch (btf_kind(t)) {
		case BTF_KIND_INT:
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
		case BTF_KIND_DATASEC:
		case BTF_KIND_FLOAT:
			/* kinds with explicit size */
			size = t->size;
			goto done;
		case BTF_KIND_PTR:
			size = btf_ptr_sz(btf);
			goto done;
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_VAR:
		case BTF_KIND_DECL_TAG:
		case BTF_KIND_TYPE_TAG:
			/* transparent wrappers: follow the referenced type */
			type_id = t->type;
			break;
		case BTF_KIND_ARRAY:
			array = btf_array(t);
			/* guard __u32 overflow of accumulated element count */
			if (nelems && array->nelems > UINT32_MAX / nelems)
				return libbpf_err(-E2BIG);
			nelems *= array->nelems;
			type_id = array->type;
			break;
		default:
			return libbpf_err(-EINVAL);
		}

		t = btf__type_by_id(btf, type_id);
	}

done:
	if (size < 0)
		return libbpf_err(-EINVAL);
	/* guard overflow of nelems * size as well */
	if (nelems && size > UINT32_MAX / nelems)
		return libbpf_err(-E2BIG);

	return nelems * size;
}
817 
/* Calculate natural alignment (in bytes) of a type; returns 1 for structs
 * detected as packed, 0 with errno=EINVAL for unsupported kinds
 */
int btf__align_of(const struct btf *btf, __u32 id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);
	__u16 kind = btf_kind(t);

	switch (kind) {
	case BTF_KIND_INT:
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
	case BTF_KIND_FLOAT:
		/* scalars align to their size, capped at pointer size */
		return min(btf_ptr_sz(btf), (size_t)t->size);
	case BTF_KIND_PTR:
		return btf_ptr_sz(btf);
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPE_TAG:
		/* modifiers don't affect alignment */
		return btf__align_of(btf, t->type);
	case BTF_KIND_ARRAY:
		return btf__align_of(btf, btf_array(t)->type);
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *m = btf_members(t);
		__u16 vlen = btf_vlen(t);
		int i, max_align = 1, align;

		for (i = 0; i < vlen; i++, m++) {
			align = btf__align_of(btf, m->type);
			if (align <= 0)
				return libbpf_err(align);
			max_align = max(max_align, align);

			/* if field offset isn't aligned according to field
			 * type's alignment, then struct must be packed
			 */
			if (btf_member_bitfield_size(t, i) == 0 &&
			    (m->offset % (8 * align)) != 0)
				return 1;
		}

		/* if struct/union size isn't a multiple of its alignment,
		 * then struct must be packed
		 */
		if ((t->size % max_align) != 0)
			return 1;

		return max_align;
	}
	default:
		pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
		return errno = EINVAL, 0;
	}
}
872 
873 int btf__resolve_type(const struct btf *btf, __u32 type_id)
874 {
875 	const struct btf_type *t;
876 	int depth = 0;
877 
878 	t = btf__type_by_id(btf, type_id);
879 	while (depth < MAX_RESOLVE_DEPTH &&
880 	       !btf_type_is_void_or_null(t) &&
881 	       (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
882 		type_id = t->type;
883 		t = btf__type_by_id(btf, type_id);
884 		depth++;
885 	}
886 
887 	if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
888 		return libbpf_err(-EINVAL);
889 
890 	return type_id;
891 }
892 
893 __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
894 {
895 	__u32 i, nr_types = btf__type_cnt(btf);
896 
897 	if (!strcmp(type_name, "void"))
898 		return 0;
899 
900 	for (i = 1; i < nr_types; i++) {
901 		const struct btf_type *t = btf__type_by_id(btf, i);
902 		const char *name = btf__name_by_offset(btf, t->name_off);
903 
904 		if (name && !strcmp(type_name, name))
905 			return i;
906 	}
907 
908 	return libbpf_err(-ENOENT);
909 }
910 
911 static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
912 				   const char *type_name, __u32 kind)
913 {
914 	__u32 i, nr_types = btf__type_cnt(btf);
915 
916 	if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
917 		return 0;
918 
919 	for (i = start_id; i < nr_types; i++) {
920 		const struct btf_type *t = btf__type_by_id(btf, i);
921 		const char *name;
922 
923 		if (btf_kind(t) != kind)
924 			continue;
925 		name = btf__name_by_offset(btf, t->name_off);
926 		if (name && !strcmp(type_name, name))
927 			return i;
928 	}
929 
930 	return libbpf_err(-ENOENT);
931 }
932 
/* Like btf__find_by_name_kind(), but search only types added by this BTF
 * instance itself, skipping base BTF types
 */
__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
				 __u32 kind)
{
	return btf_find_by_name_kind(btf, btf->start_id, type_name, kind);
}
938 
/* Find type ID by name and kind across the entire BTF, base types included */
__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
			     __u32 kind)
{
	return btf_find_by_name_kind(btf, 1, type_name, kind);
}
944 
/* BTF is in a modifiable state once hdr no longer points into the
 * contiguous raw_data blob (see the layout comment in struct btf)
 */
static bool btf_is_modifiable(const struct btf *btf)
{
	return (void *)btf->hdr != btf->raw_data;
}
949 
/* Free BTF object and all memory it owns; NULL and error-encoded pointers
 * are silently accepted
 */
void btf__free(struct btf *btf)
{
	if (IS_ERR_OR_NULL(btf))
		return;

	/* release kernel-side object, if it was loaded */
	if (btf->fd >= 0)
		close(btf->fd);

	if (btf_is_modifiable(btf)) {
		/* if BTF was modified after loading, it will have a split
		 * in-memory representation for header, types, and strings
		 * sections, so we need to free all of them individually. It
		 * might still have a cached contiguous raw data present,
		 * which will be unconditionally freed below.
		 */
		free(btf->hdr);
		free(btf->types_data);
		strset__free(btf->strs_set);
	}
	free(btf->raw_data);
	free(btf->raw_data_swapped);
	free(btf->type_offs);
	free(btf);
}
974 
/* Allocate a fresh BTF instance with no types; if base_btf is given, the new
 * instance is split BTF whose IDs and string offsets continue the base's
 */
static struct btf *btf_new_empty(struct btf *base_btf)
{
	struct btf *btf;

	btf = calloc(1, sizeof(*btf));
	if (!btf)
		return ERR_PTR(-ENOMEM);

	btf->nr_types = 0;
	btf->start_id = 1;
	btf->start_str_off = 0;
	btf->fd = -1;
	btf->ptr_sz = sizeof(void *);
	btf->swapped_endian = false;

	if (base_btf) {
		/* continue numbering where base BTF left off */
		btf->base_btf = base_btf;
		btf->start_id = btf__type_cnt(base_btf);
		btf->start_str_off = base_btf->hdr->str_len;
	}

	/* +1 for empty string at offset 0 */
	btf->raw_size = sizeof(struct btf_header) + (base_btf ? 0 : 1);
	btf->raw_data = calloc(1, btf->raw_size);
	if (!btf->raw_data) {
		free(btf);
		return ERR_PTR(-ENOMEM);
	}

	btf->hdr = btf->raw_data;
	btf->hdr->hdr_len = sizeof(struct btf_header);
	btf->hdr->magic = BTF_MAGIC;
	btf->hdr->version = BTF_VERSION;

	/* both sections are empty, so they alias right past the header */
	btf->types_data = btf->raw_data + btf->hdr->hdr_len;
	btf->strs_data = btf->raw_data + btf->hdr->hdr_len;
	btf->hdr->str_len = base_btf ? 0 : 1; /* empty string at offset 0 */

	return btf;
}
1015 
/* Public wrapper: create empty base BTF */
struct btf *btf__new_empty(void)
{
	return libbpf_ptr(btf_new_empty(NULL));
}
1020 
/* Public wrapper: create empty split BTF on top of base_btf */
struct btf *btf__new_empty_split(struct btf *base_btf)
{
	return libbpf_ptr(btf_new_empty(base_btf));
}
1025 
/* Parse raw BTF bytes into a new BTF instance. Data is copied, so the caller
 * keeps ownership of *data*. Returns ERR_PTR-encoded error on failure.
 */
static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
{
	struct btf *btf;
	int err;

	btf = calloc(1, sizeof(struct btf));
	if (!btf)
		return ERR_PTR(-ENOMEM);

	btf->nr_types = 0;
	btf->start_id = 1;
	btf->start_str_off = 0;
	btf->fd = -1;

	if (base_btf) {
		/* split BTF: continue IDs/string offsets of the base */
		btf->base_btf = base_btf;
		btf->start_id = btf__type_cnt(base_btf);
		btf->start_str_off = base_btf->hdr->str_len;
	}

	/* keep a private copy of the raw data */
	btf->raw_data = malloc(size);
	if (!btf->raw_data) {
		err = -ENOMEM;
		goto done;
	}
	memcpy(btf->raw_data, data, size);
	btf->raw_size = size;

	btf->hdr = btf->raw_data;
	err = btf_parse_hdr(btf);
	if (err)
		goto done;

	btf->strs_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->str_off;
	btf->types_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->type_off;

	err = btf_parse_str_sec(btf);
	err = err ?: btf_parse_type_sec(btf);
	err = err ?: btf_sanity_check(btf);
	if (err)
		goto done;

done:
	/* success path falls through here with err == 0 */
	if (err) {
		btf__free(btf);
		return ERR_PTR(err);
	}

	return btf;
}
1076 
/* Public wrapper: parse raw BTF data as base BTF */
struct btf *btf__new(const void *data, __u32 size)
{
	return libbpf_ptr(btf_new(data, size, NULL));
}
1081 
/* Public wrapper: parse raw BTF data as split BTF on top of base_btf */
struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf)
{
	return libbpf_ptr(btf_new(data, size, base_btf));
}
1086 
1087 static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
1088 				 struct btf_ext **btf_ext)
1089 {
1090 	Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
1091 	int err = 0, fd = -1, idx = 0;
1092 	struct btf *btf = NULL;
1093 	Elf_Scn *scn = NULL;
1094 	Elf *elf = NULL;
1095 	GElf_Ehdr ehdr;
1096 	size_t shstrndx;
1097 
1098 	if (elf_version(EV_CURRENT) == EV_NONE) {
1099 		pr_warn("failed to init libelf for %s\n", path);
1100 		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
1101 	}
1102 
1103 	fd = open(path, O_RDONLY | O_CLOEXEC);
1104 	if (fd < 0) {
1105 		err = -errno;
1106 		pr_warn("failed to open %s: %s\n", path, strerror(errno));
1107 		return ERR_PTR(err);
1108 	}
1109 
1110 	err = -LIBBPF_ERRNO__FORMAT;
1111 
1112 	elf = elf_begin(fd, ELF_C_READ, NULL);
1113 	if (!elf) {
1114 		pr_warn("failed to open %s as ELF file\n", path);
1115 		goto done;
1116 	}
1117 	if (!gelf_getehdr(elf, &ehdr)) {
1118 		pr_warn("failed to get EHDR from %s\n", path);
1119 		goto done;
1120 	}
1121 
1122 	if (elf_getshdrstrndx(elf, &shstrndx)) {
1123 		pr_warn("failed to get section names section index for %s\n",
1124 			path);
1125 		goto done;
1126 	}
1127 
1128 	if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
1129 		pr_warn("failed to get e_shstrndx from %s\n", path);
1130 		goto done;
1131 	}
1132 
1133 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
1134 		GElf_Shdr sh;
1135 		char *name;
1136 
1137 		idx++;
1138 		if (gelf_getshdr(scn, &sh) != &sh) {
1139 			pr_warn("failed to get section(%d) header from %s\n",
1140 				idx, path);
1141 			goto done;
1142 		}
1143 		name = elf_strptr(elf, shstrndx, sh.sh_name);
1144 		if (!name) {
1145 			pr_warn("failed to get section(%d) name from %s\n",
1146 				idx, path);
1147 			goto done;
1148 		}
1149 		if (strcmp(name, BTF_ELF_SEC) == 0) {
1150 			btf_data = elf_getdata(scn, 0);
1151 			if (!btf_data) {
1152 				pr_warn("failed to get section(%d, %s) data from %s\n",
1153 					idx, name, path);
1154 				goto done;
1155 			}
1156 			continue;
1157 		} else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
1158 			btf_ext_data = elf_getdata(scn, 0);
1159 			if (!btf_ext_data) {
1160 				pr_warn("failed to get section(%d, %s) data from %s\n",
1161 					idx, name, path);
1162 				goto done;
1163 			}
1164 			continue;
1165 		}
1166 	}
1167 
1168 	if (!btf_data) {
1169 		pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path);
1170 		err = -ENODATA;
1171 		goto done;
1172 	}
1173 	btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf);
1174 	err = libbpf_get_error(btf);
1175 	if (err)
1176 		goto done;
1177 
1178 	switch (gelf_getclass(elf)) {
1179 	case ELFCLASS32:
1180 		btf__set_pointer_size(btf, 4);
1181 		break;
1182 	case ELFCLASS64:
1183 		btf__set_pointer_size(btf, 8);
1184 		break;
1185 	default:
1186 		pr_warn("failed to get ELF class (bitness) for %s\n", path);
1187 		break;
1188 	}
1189 
1190 	if (btf_ext && btf_ext_data) {
1191 		*btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
1192 		err = libbpf_get_error(*btf_ext);
1193 		if (err)
1194 			goto done;
1195 	} else if (btf_ext) {
1196 		*btf_ext = NULL;
1197 	}
1198 done:
1199 	if (elf)
1200 		elf_end(elf);
1201 	close(fd);
1202 
1203 	if (!err)
1204 		return btf;
1205 
1206 	if (btf_ext)
1207 		btf_ext__free(*btf_ext);
1208 	btf__free(btf);
1209 
1210 	return ERR_PTR(err);
1211 }
1212 
1213 struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
1214 {
1215 	return libbpf_ptr(btf_parse_elf(path, NULL, btf_ext));
1216 }
1217 
1218 struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf)
1219 {
1220 	return libbpf_ptr(btf_parse_elf(path, base_btf, NULL));
1221 }
1222 
/* Parse a raw (non-ELF) BTF file at *path*. A quick magic check is done
 * first: -EPROTO is returned when the file doesn't start with BTF magic
 * (in either endianness), which callers use as a "not raw BTF, try ELF"
 * signal. On success the whole file is read into a temporary buffer and
 * handed to btf_new(), which makes its own copy.
 */
static struct btf *btf_parse_raw(const char *path, struct btf *base_btf)
{
	struct btf *btf = NULL;
	void *data = NULL;
	FILE *f = NULL;
	__u16 magic;
	int err = 0;
	long sz;

	f = fopen(path, "rbe");
	if (!f) {
		err = -errno;
		goto err_out;
	}

	/* check BTF magic */
	if (fread(&magic, 1, sizeof(magic), f) < sizeof(magic)) {
		err = -EIO;
		goto err_out;
	}
	if (magic != BTF_MAGIC && magic != bswap_16(BTF_MAGIC)) {
		/* definitely not a raw BTF */
		err = -EPROTO;
		goto err_out;
	}

	/* get file size */
	if (fseek(f, 0, SEEK_END)) {
		err = -errno;
		goto err_out;
	}
	sz = ftell(f);
	if (sz < 0) {
		err = -errno;
		goto err_out;
	}
	/* rewind to the start */
	if (fseek(f, 0, SEEK_SET)) {
		err = -errno;
		goto err_out;
	}

	/* pre-alloc memory and read all of BTF data */
	data = malloc(sz);
	if (!data) {
		err = -ENOMEM;
		goto err_out;
	}
	if (fread(data, 1, sz, f) < sz) {
		err = -EIO;
		goto err_out;
	}

	/* finally parse BTF data */
	btf = btf_new(data, sz, base_btf);

err_out:
	/* btf_new() made its own copy, so the temp buffer is always freed;
	 * on success err == 0 and btf (possibly ERR_PTR from btf_new) is
	 * returned as-is for the caller to sanitize
	 */
	free(data);
	if (f)
		fclose(f);
	return err ? ERR_PTR(err) : btf;
}
1285 
1286 struct btf *btf__parse_raw(const char *path)
1287 {
1288 	return libbpf_ptr(btf_parse_raw(path, NULL));
1289 }
1290 
/* Public API: parse split BTF from a raw BTF file, on top of *base_btf* */
struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf)
{
	struct btf *res = btf_parse_raw(path, base_btf);

	return libbpf_ptr(res);
}
1295 
1296 static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext)
1297 {
1298 	struct btf *btf;
1299 	int err;
1300 
1301 	if (btf_ext)
1302 		*btf_ext = NULL;
1303 
1304 	btf = btf_parse_raw(path, base_btf);
1305 	err = libbpf_get_error(btf);
1306 	if (!err)
1307 		return btf;
1308 	if (err != -EPROTO)
1309 		return ERR_PTR(err);
1310 	return btf_parse_elf(path, base_btf, btf_ext);
1311 }
1312 
1313 struct btf *btf__parse(const char *path, struct btf_ext **btf_ext)
1314 {
1315 	return libbpf_ptr(btf_parse(path, NULL, btf_ext));
1316 }
1317 
1318 struct btf *btf__parse_split(const char *path, struct btf *base_btf)
1319 {
1320 	return libbpf_ptr(btf_parse(path, base_btf, NULL));
1321 }
1322 
1323 static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);
1324 
/* Load BTF into the kernel via BPF_BTF_LOAD. *log_buf*/*log_sz*/*log_level*
 * control verifier log capture; if *token_fd* is non-zero it is passed along
 * with BPF_F_TOKEN_FD. On success the kernel-assigned FD is stored in
 * btf->fd. Returns 0 on success, negative error (with errno set) otherwise.
 */
int btf_load_into_kernel(struct btf *btf,
			 char *log_buf, size_t log_sz, __u32 log_level,
			 int token_fd)
{
	LIBBPF_OPTS(bpf_btf_load_opts, opts);
	__u32 buf_sz = 0, raw_size;
	char *buf = NULL, *tmp;
	void *raw_data;
	int err = 0;

	/* refuse double-load and log_sz without a buffer to put the log in */
	if (btf->fd >= 0)
		return libbpf_err(-EEXIST);
	if (log_sz && !log_buf)
		return libbpf_err(-EINVAL);

	/* cache native raw data representation */
	raw_data = btf_get_raw_data(btf, &raw_size, false);
	if (!raw_data) {
		err = -ENOMEM;
		goto done;
	}
	btf->raw_size = raw_size;
	btf->raw_data = raw_data;

retry_load:
	/* if log_level is 0, we won't provide log_buf/log_size to the kernel,
	 * initially. Only if BTF loading fails, we bump log_level to 1 and
	 * retry, using either auto-allocated or custom log_buf. This way
	 * non-NULL custom log_buf provides a buffer just in case, but hopes
	 * for successful load and no need for log_buf.
	 */
	if (log_level) {
		/* if caller didn't provide custom log_buf, we'll keep
		 * allocating our own progressively bigger buffers for BTF
		 * verification log
		 */
		if (!log_buf) {
			buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2);
			tmp = realloc(buf, buf_sz);
			if (!tmp) {
				err = -ENOMEM;
				goto done;
			}
			buf = tmp;
			buf[0] = '\0';
		}

		opts.log_buf = log_buf ? log_buf : buf;
		opts.log_size = log_buf ? log_sz : buf_sz;
		opts.log_level = log_level;
	}

	opts.token_fd = token_fd;
	if (token_fd)
		opts.btf_flags |= BPF_F_TOKEN_FD;

	btf->fd = bpf_btf_load(raw_data, raw_size, &opts);
	if (btf->fd < 0) {
		/* time to turn on verbose mode and try again */
		if (log_level == 0) {
			log_level = 1;
			goto retry_load;
		}
		/* only retry if caller didn't provide custom log_buf, but
		 * make sure we can never overflow buf_sz
		 */
		if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2)
			goto retry_load;

		err = -errno;
		pr_warn("BTF loading error: %d\n", err);
		/* don't print out contents of custom log_buf */
		if (!log_buf && buf[0])
			pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf);
	}

done:
	/* buf is only our internally-allocated log buffer, never caller's */
	free(buf);
	return libbpf_err(err);
}
1405 
/* Public API: load BTF into the kernel with defaults — no log buffer,
 * log_level 0 (quiet unless the load fails), and no BPF token.
 */
int btf__load_into_kernel(struct btf *btf)
{
	return btf_load_into_kernel(btf, NULL, 0, 0, 0);
}
1410 
/* Return kernel FD associated with this BTF (-1 if not loaded yet) */
int btf__fd(const struct btf *btf)
{
	return btf->fd;
}
1415 
/* Override kernel FD associated with this BTF; caller manages fd lifetime */
void btf__set_fd(struct btf *btf, int fd)
{
	btf->fd = fd;
}
1420 
1421 static const void *btf_strs_data(const struct btf *btf)
1422 {
1423 	return btf->strs_data ? btf->strs_data : strset__data(btf->strs_set);
1424 }
1425 
/* Materialize a contiguous raw BTF image (header + types + strings) in the
 * requested endianness. Returns a cached buffer, if one exists; otherwise
 * allocates a fresh one (which the caller is expected to stash back into
 * btf, see btf__raw_data()). Returns NULL on allocation failure or if
 * byte-swapping of type data fails.
 */
static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian)
{
	struct btf_header *hdr = btf->hdr;
	struct btf_type *t;
	void *data, *p;
	__u32 data_sz;
	int i;

	/* fast path: return previously cached representation */
	data = swap_endian ? btf->raw_data_swapped : btf->raw_data;
	if (data) {
		*size = btf->raw_size;
		return data;
	}

	data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len;
	data = calloc(1, data_sz);
	if (!data)
		return NULL;
	p = data;

	memcpy(p, hdr, hdr->hdr_len);
	if (swap_endian)
		btf_bswap_hdr(p);
	p += hdr->hdr_len;

	memcpy(p, btf->types_data, hdr->type_len);
	if (swap_endian) {
		for (i = 0; i < btf->nr_types; i++) {
			t = p + btf->type_offs[i];
			/* btf_bswap_type_rest() relies on native t->info, so
			 * we swap base type info after we swapped all the
			 * additional information
			 */
			if (btf_bswap_type_rest(t))
				goto err_out;
			btf_bswap_type_base(t);
		}
	}
	p += hdr->type_len;

	/* strings need no byte-swapping */
	memcpy(p, btf_strs_data(btf), hdr->str_len);
	p += hdr->str_len;

	*size = data_sz;
	return data;
err_out:
	free(data);
	return NULL;
}
1475 
/* Public accessor for raw BTF data in btf's target endianness. The
 * returned buffer is cached inside btf (in raw_data or raw_data_swapped,
 * depending on endianness), so the caller must NOT free it; it stays
 * valid until BTF is modified or freed.
 */
const void *btf__raw_data(const struct btf *btf_ro, __u32 *size)
{
	/* API is const-correct externally, but we cache into btf internally */
	struct btf *btf = (struct btf *)btf_ro;
	__u32 data_sz;
	void *data;

	data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian);
	if (!data)
		return errno = ENOMEM, NULL;

	btf->raw_size = data_sz;
	if (btf->swapped_endian)
		btf->raw_data_swapped = data;
	else
		btf->raw_data = data;
	*size = data_sz;
	return data;
}
1494 
1495 __attribute__((alias("btf__raw_data")))
1496 const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
1497 
1498 const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
1499 {
1500 	if (offset < btf->start_str_off)
1501 		return btf__str_by_offset(btf->base_btf, offset);
1502 	else if (offset - btf->start_str_off < btf->hdr->str_len)
1503 		return btf_strs_data(btf) + (offset - btf->start_str_off);
1504 	else
1505 		return errno = EINVAL, NULL;
1506 }
1507 
/* Equivalent to btf__str_by_offset(); kept as a separate public API name */
const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
{
	return btf__str_by_offset(btf, offset);
}
1512 
/* Fetch BTF object's raw data from the kernel by its FD and parse it into
 * a new struct btf (split on *base_btf*, if provided). Returns new BTF
 * object or ERR_PTR (callers sanitize with libbpf_ptr()).
 */
struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
{
	struct bpf_btf_info btf_info;
	__u32 len = sizeof(btf_info);
	__u32 last_size;
	struct btf *btf;
	void *ptr;
	int err;

	/* we won't know btf_size until we call bpf_btf_get_info_by_fd(). so
	 * let's start with a sane default - 4KiB here - and resize it only if
	 * bpf_btf_get_info_by_fd() needs a bigger buffer.
	 */
	last_size = 4096;
	ptr = malloc(last_size);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	memset(&btf_info, 0, sizeof(btf_info));
	btf_info.btf = ptr_to_u64(ptr);
	btf_info.btf_size = last_size;
	err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);

	if (!err && btf_info.btf_size > last_size) {
		void *temp_ptr;

		/* kernel reported a bigger BTF; grow the buffer and retry once */
		last_size = btf_info.btf_size;
		temp_ptr = realloc(ptr, last_size);
		if (!temp_ptr) {
			btf = ERR_PTR(-ENOMEM);
			goto exit_free;
		}
		ptr = temp_ptr;

		len = sizeof(btf_info);
		memset(&btf_info, 0, sizeof(btf_info));
		btf_info.btf = ptr_to_u64(ptr);
		btf_info.btf_size = last_size;

		err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
	}

	/* still-too-small here means BTF grew between the two calls */
	if (err || btf_info.btf_size > last_size) {
		btf = err ? ERR_PTR(-errno) : ERR_PTR(-E2BIG);
		goto exit_free;
	}

	btf = btf_new(ptr, btf_info.btf_size, base_btf);

exit_free:
	/* btf_new() copies the data, so the temporary buffer is always freed */
	free(ptr);
	return btf;
}
1566 
1567 struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
1568 {
1569 	struct btf *btf;
1570 	int btf_fd;
1571 
1572 	btf_fd = bpf_btf_get_fd_by_id(id);
1573 	if (btf_fd < 0)
1574 		return libbpf_err_ptr(-errno);
1575 
1576 	btf = btf_get_from_fd(btf_fd, base_btf);
1577 	close(btf_fd);
1578 
1579 	return libbpf_ptr(btf);
1580 }
1581 
/* Public API: fetch standalone (non-split) BTF from the kernel by ID */
struct btf *btf__load_from_kernel_by_id(__u32 id)
{
	return btf__load_from_kernel_by_id_split(id, NULL);
}
1586 
1587 static void btf_invalidate_raw_data(struct btf *btf)
1588 {
1589 	if (btf->raw_data) {
1590 		free(btf->raw_data);
1591 		btf->raw_data = NULL;
1592 	}
1593 	if (btf->raw_data_swapped) {
1594 		free(btf->raw_data_swapped);
1595 		btf->raw_data_swapped = NULL;
1596 	}
1597 }
1598 
/* Ensure BTF is ready to be modified (by splitting into a three memory
 * regions for header, types, and strings). Also invalidate cached
 * raw_data, if any. Returns 0 on success, negative error on failure,
 * leaving btf unchanged on failure.
 */
static int btf_ensure_modifiable(struct btf *btf)
{
	void *hdr, *types;
	struct strset *set = NULL;
	int err = -ENOMEM;

	if (btf_is_modifiable(btf)) {
		/* any BTF modification invalidates raw_data */
		btf_invalidate_raw_data(btf);
		return 0;
	}

	/* split raw data into three memory regions */
	hdr = malloc(btf->hdr->hdr_len);
	types = malloc(btf->hdr->type_len);
	if (!hdr || !types)
		goto err_out;

	memcpy(hdr, btf->hdr, btf->hdr->hdr_len);
	memcpy(types, btf->types_data, btf->hdr->type_len);

	/* build lookup index for all strings */
	set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr->str_len);
	if (IS_ERR(set)) {
		err = PTR_ERR(set);
		goto err_out;
	}

	/* only when everything was successful, update internal state */
	btf->hdr = hdr;
	btf->types_data = types;
	btf->types_data_cap = btf->hdr->type_len;
	btf->strs_data = NULL; /* strings now live in strs_set */
	btf->strs_set = set;
	/* if BTF was created from scratch, all strings are guaranteed to be
	 * unique and deduplicated
	 */
	if (btf->hdr->str_len == 0)
		btf->strs_deduped = true;
	/* ditto if the only string is the mandatory empty string */
	if (!btf->base_btf && btf->hdr->str_len == 1)
		btf->strs_deduped = true;

	/* invalidate raw_data representation */
	btf_invalidate_raw_data(btf);

	return 0;

err_out:
	strset__free(set);
	free(hdr);
	free(types);
	return err;
}
1656 
/* Find an offset in BTF string section that corresponds to a given string *s*.
 * For split BTF, the base BTF's string section is searched first.
 * Returns:
 *   - >0 offset into string section, if string is found;
 *   - -ENOENT, if string is not in the string section;
 *   - <0, on any other error.
 */
int btf__find_str(struct btf *btf, const char *s)
{
	int off;

	if (btf->base_btf) {
		off = btf__find_str(btf->base_btf, s);
		/* either found, or failed with a real error — done either way */
		if (off != -ENOENT)
			return off;
	}

	/* BTF needs to be in a modifiable state to build string lookup index */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	off = strset__find_str(btf->strs_set, s);
	if (off < 0)
		return libbpf_err(off);

	/* strset offsets are relative to this BTF's own string section */
	return btf->start_str_off + off;
}
1683 
/* Add a string s to the BTF string section. If the string already exists
 * (in base BTF or in this BTF's strset), its existing offset is returned
 * instead of adding a duplicate.
 * Returns:
 *   - > 0 offset into string section, on success;
 *   - < 0, on error.
 */
int btf__add_str(struct btf *btf, const char *s)
{
	int off;

	if (btf->base_btf) {
		/* base BTF is immutable, so only *find* there, never add */
		off = btf__find_str(btf->base_btf, s);
		if (off != -ENOENT)
			return off;
	}

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	/* strset__add_str() dedups: returns existing offset if already there */
	off = strset__add_str(btf->strs_set, s);
	if (off < 0)
		return libbpf_err(off);

	btf->hdr->str_len = strset__data_size(btf->strs_set);

	return btf->start_str_off + off;
}
1710 
/* Reserve add_sz bytes at the end of the types data buffer (growing it if
 * necessary) and return a pointer to where new type data should be written;
 * NULL on allocation failure. Reserved space only becomes "visible" once
 * btf_commit_type() updates header/type count.
 */
static void *btf_add_type_mem(struct btf *btf, size_t add_sz)
{
	return libbpf_add_mem(&btf->types_data, &btf->types_data_cap, 1,
			      btf->hdr->type_len, UINT_MAX, add_sz);
}
1716 
/* Bump type's vlen (e.g., member/param count) by one, keeping kind and kflag */
static void btf_type_inc_vlen(struct btf_type *t)
{
	t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t));
}
1721 
/* Finalize a type staged via btf_add_type_mem(): record its offset in the
 * type index, grow type section size (string section start shifts by the
 * same amount), and bump type count.
 * Returns ID of the newly committed type, or negative error.
 */
static int btf_commit_type(struct btf *btf, int data_sz)
{
	int err;

	err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
	if (err)
		return libbpf_err(err);

	btf->hdr->type_len += data_sz;
	btf->hdr->str_off += data_sz;
	btf->nr_types++;
	return btf->start_id + btf->nr_types - 1;
}
1735 
/* Context for copying types (and the strings they reference) between BTFs */
struct btf_pipe {
	const struct btf *src;	/* BTF types/strings are copied from */
	struct btf *dst;	/* BTF types/strings are copied into */
	struct hashmap *str_off_map; /* map string offsets from src to dst */
};
1741 
/* Copy the string referenced by *str_off from p->src into p->dst and
 * update *str_off in place to the destination offset. An optional
 * p->str_off_map memoizes src->dst offset mappings to avoid repeated
 * string adds/lookups. Returns 0 on success, negative error otherwise.
 */
static int btf_rewrite_str(struct btf_pipe *p, __u32 *str_off)
{
	long mapped_off;
	int off, err;

	if (!*str_off) /* nothing to do for empty strings */
		return 0;

	if (p->str_off_map &&
	    hashmap__find(p->str_off_map, *str_off, &mapped_off)) {
		*str_off = mapped_off;
		return 0;
	}

	/* btf__add_str() dedups, so this is safe for repeated strings too */
	off = btf__add_str(p->dst, btf__str_by_offset(p->src, *str_off));
	if (off < 0)
		return off;

	/* Remember string mapping from src to dst.  It avoids
	 * performing expensive string comparisons.
	 */
	if (p->str_off_map) {
		err = hashmap__append(p->str_off_map, *str_off, off);
		if (err)
			return err;
	}

	*str_off = off;
	return 0;
}
1772 
/* Append a single type *src_type* from *src_btf* into *btf*. Strings
 * referenced by the type are copied over (and deduplicated); referenced
 * type IDs are copied as-is, so the caller is responsible for them being
 * meaningful in the target BTF.
 * Returns new type's ID on success, negative error otherwise.
 */
int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
{
	struct btf_pipe p = { .src = src_btf, .dst = btf };
	struct btf_field_iter it;
	struct btf_type *t;
	__u32 *str_off;
	int sz, err;

	sz = btf_type_size(src_type);
	if (sz < 0)
		return libbpf_err(sz);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	memcpy(t, src_type, sz);

	/* rewrite all string offsets inside the copied type to dst's offsets */
	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
	if (err)
		return libbpf_err(err);

	while ((str_off = btf_field_iter_next(&it))) {
		err = btf_rewrite_str(&p, str_off);
		if (err)
			return libbpf_err(err);
	}

	/* staged memory becomes visible only after a successful commit */
	return btf_commit_type(btf, sz);
}
1807 
1808 static size_t btf_dedup_identity_hash_fn(long key, void *ctx);
1809 static bool btf_dedup_equal_fn(long k1, long k2, void *ctx);
1810 
/* Append all types from *src_btf* into *btf*, remapping referenced type
 * IDs and copying/deduplicating all referenced strings. Changes are
 * "committed" atomically at the end: on error, btf's visible state is
 * rolled back. Returns the type ID of the first appended type, or
 * negative error.
 */
int btf__add_btf(struct btf *btf, const struct btf *src_btf)
{
	struct btf_pipe p = { .src = src_btf, .dst = btf };
	int data_sz, sz, cnt, i, err, old_strs_len;
	__u32 *off;
	void *t;

	/* appending split BTF isn't supported yet */
	if (src_btf->base_btf)
		return libbpf_err(-ENOTSUP);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	/* remember original strings section size if we have to roll back
	 * partial strings section changes
	 */
	old_strs_len = btf->hdr->str_len;

	data_sz = src_btf->hdr->type_len;
	cnt = btf__type_cnt(src_btf) - 1;

	/* pre-allocate enough memory for new types */
	t = btf_add_type_mem(btf, data_sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* pre-allocate enough memory for type offset index for new types */
	off = btf_add_type_offs_mem(btf, cnt);
	if (!off)
		return libbpf_err(-ENOMEM);

	/* Map the string offsets from src_btf to the offsets from btf to improve performance */
	p.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
	if (IS_ERR(p.str_off_map))
		return libbpf_err(-ENOMEM);

	/* bulk copy types data for all types from src_btf */
	memcpy(t, src_btf->types_data, data_sz);

	for (i = 0; i < cnt; i++) {
		struct btf_field_iter it;
		__u32 *type_id, *str_off;

		sz = btf_type_size(t);
		if (sz < 0) {
			/* unlikely, has to be corrupted src_btf */
			err = sz;
			goto err_out;
		}

		/* fill out type ID to type offset mapping for lookups by type ID */
		*off = t - btf->types_data;

		/* add, dedup, and remap strings referenced by this BTF type */
		err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
		if (err)
			goto err_out;
		while ((str_off = btf_field_iter_next(&it))) {
			err = btf_rewrite_str(&p, str_off);
			if (err)
				goto err_out;
		}

		/* remap all type IDs referenced from this BTF type */
		err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
		if (err)
			goto err_out;

		while ((type_id = btf_field_iter_next(&it))) {
			if (!*type_id) /* nothing to do for VOID references */
				continue;

			/* we haven't updated btf's type count yet, so
			 * btf->start_id + btf->nr_types - 1 is the type ID offset we should
			 * add to all newly added BTF types
			 */
			*type_id += btf->start_id + btf->nr_types - 1;
		}

		/* go to next type data and type offset index entry */
		t += sz;
		off++;
	}

	/* Up until now any of the copied type data was effectively invisible,
	 * so if we exited early before this point due to error, BTF would be
	 * effectively unmodified. There would be extra internal memory
	 * pre-allocated, but it would not be available for querying.  But now
	 * that we've copied and rewritten all the data successfully, we can
	 * update type count and various internal offsets and sizes to
	 * "commit" the changes and made them visible to the outside world.
	 */
	btf->hdr->type_len += data_sz;
	btf->hdr->str_off += data_sz;
	btf->nr_types += cnt;

	hashmap__free(p.str_off_map);

	/* return type ID of the first added BTF type */
	return btf->start_id + btf->nr_types - cnt;
err_out:
	/* zero out preallocated memory as if it was just allocated with
	 * libbpf_add_mem()
	 */
	memset(btf->types_data + btf->hdr->type_len, 0, data_sz);
	/* NOTE(review): after btf_ensure_modifiable(), strs_data is NULL and
	 * strings live in strs_set, so this memset appears to rely on no
	 * string bytes having been appended (str_len == old_strs_len, i.e.
	 * zero length) on this error path — verify
	 */
	memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len);

	/* and now restore original strings section size; types data size
	 * wasn't modified, so doesn't need restoring, see big comment above
	 */
	btf->hdr->str_len = old_strs_len;

	hashmap__free(p.str_off_map);

	return libbpf_err(err);
}
1929 
1930 /*
1931  * Append new BTF_KIND_INT type with:
1932  *   - *name* - non-empty, non-NULL type name;
1933  *   - *sz* - power-of-2 (1, 2, 4, ..) size of the type, in bytes;
1934  *   - encoding is a combination of BTF_INT_SIGNED, BTF_INT_CHAR, BTF_INT_BOOL.
1935  * Returns:
1936  *   - >0, type ID of newly added BTF type;
1937  *   - <0, on error.
1938  */
1939 int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding)
1940 {
1941 	struct btf_type *t;
1942 	int sz, name_off;
1943 
1944 	/* non-empty name */
1945 	if (!name || !name[0])
1946 		return libbpf_err(-EINVAL);
1947 	/* byte_sz must be power of 2 */
1948 	if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16)
1949 		return libbpf_err(-EINVAL);
1950 	if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL))
1951 		return libbpf_err(-EINVAL);
1952 
1953 	/* deconstruct BTF, if necessary, and invalidate raw_data */
1954 	if (btf_ensure_modifiable(btf))
1955 		return libbpf_err(-ENOMEM);
1956 
1957 	sz = sizeof(struct btf_type) + sizeof(int);
1958 	t = btf_add_type_mem(btf, sz);
1959 	if (!t)
1960 		return libbpf_err(-ENOMEM);
1961 
1962 	/* if something goes wrong later, we might end up with an extra string,
1963 	 * but that shouldn't be a problem, because BTF can't be constructed
1964 	 * completely anyway and will most probably be just discarded
1965 	 */
1966 	name_off = btf__add_str(btf, name);
1967 	if (name_off < 0)
1968 		return name_off;
1969 
1970 	t->name_off = name_off;
1971 	t->info = btf_type_info(BTF_KIND_INT, 0, 0);
1972 	t->size = byte_sz;
1973 	/* set INT info, we don't allow setting legacy bit offset/size */
1974 	*(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8);
1975 
1976 	return btf_commit_type(btf, sz);
1977 }
1978 
1979 /*
1980  * Append new BTF_KIND_FLOAT type with:
1981  *   - *name* - non-empty, non-NULL type name;
1982  *   - *sz* - size of the type, in bytes;
1983  * Returns:
1984  *   - >0, type ID of newly added BTF type;
1985  *   - <0, on error.
1986  */
1987 int btf__add_float(struct btf *btf, const char *name, size_t byte_sz)
1988 {
1989 	struct btf_type *t;
1990 	int sz, name_off;
1991 
1992 	/* non-empty name */
1993 	if (!name || !name[0])
1994 		return libbpf_err(-EINVAL);
1995 
1996 	/* byte_sz must be one of the explicitly allowed values */
1997 	if (byte_sz != 2 && byte_sz != 4 && byte_sz != 8 && byte_sz != 12 &&
1998 	    byte_sz != 16)
1999 		return libbpf_err(-EINVAL);
2000 
2001 	if (btf_ensure_modifiable(btf))
2002 		return libbpf_err(-ENOMEM);
2003 
2004 	sz = sizeof(struct btf_type);
2005 	t = btf_add_type_mem(btf, sz);
2006 	if (!t)
2007 		return libbpf_err(-ENOMEM);
2008 
2009 	name_off = btf__add_str(btf, name);
2010 	if (name_off < 0)
2011 		return name_off;
2012 
2013 	t->name_off = name_off;
2014 	t->info = btf_type_info(BTF_KIND_FLOAT, 0, 0);
2015 	t->size = byte_sz;
2016 
2017 	return btf_commit_type(btf, sz);
2018 }
2019 
2020 /* it's completely legal to append BTF types with type IDs pointing forward to
2021  * types that haven't been appended yet, so we only make sure that id looks
2022  * sane, we can't guarantee that ID will always be valid
2023  */
2024 static int validate_type_id(int id)
2025 {
2026 	if (id < 0 || id > BTF_MAX_NR_TYPES)
2027 		return -EINVAL;
2028 	return 0;
2029 }
2030 
/* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT:
 * all of these are a bare btf_type referencing *ref_type_id*; *name* is
 * optional (used e.g. for typedefs) and may be NULL or empty.
 * Returns new type ID on success, negative error otherwise.
 */
static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id)
{
	struct btf_type *t;
	int sz, name_off = 0;

	if (validate_type_id(ref_type_id))
		return libbpf_err(-EINVAL);

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* name is optional for reference kinds; 0 means anonymous */
	if (name && name[0]) {
		name_off = btf__add_str(btf, name);
		if (name_off < 0)
			return name_off;
	}

	t->name_off = name_off;
	t->info = btf_type_info(kind, 0, 0);
	t->type = ref_type_id;

	return btf_commit_type(btf, sz);
}
2060 
2061 /*
2062  * Append new BTF_KIND_PTR type with:
2063  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2064  * Returns:
2065  *   - >0, type ID of newly added BTF type;
2066  *   - <0, on error.
2067  */
2068 int btf__add_ptr(struct btf *btf, int ref_type_id)
2069 {
2070 	return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id);
2071 }
2072 
2073 /*
2074  * Append new BTF_KIND_ARRAY type with:
2075  *   - *index_type_id* - type ID of the type describing array index;
2076  *   - *elem_type_id* - type ID of the type describing array element;
2077  *   - *nr_elems* - the size of the array;
2078  * Returns:
2079  *   - >0, type ID of newly added BTF type;
2080  *   - <0, on error.
2081  */
2082 int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems)
2083 {
2084 	struct btf_type *t;
2085 	struct btf_array *a;
2086 	int sz;
2087 
2088 	if (validate_type_id(index_type_id) || validate_type_id(elem_type_id))
2089 		return libbpf_err(-EINVAL);
2090 
2091 	if (btf_ensure_modifiable(btf))
2092 		return libbpf_err(-ENOMEM);
2093 
2094 	sz = sizeof(struct btf_type) + sizeof(struct btf_array);
2095 	t = btf_add_type_mem(btf, sz);
2096 	if (!t)
2097 		return libbpf_err(-ENOMEM);
2098 
2099 	t->name_off = 0;
2100 	t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0);
2101 	t->size = 0;
2102 
2103 	a = btf_array(t);
2104 	a->type = elem_type_id;
2105 	a->index_type = index_type_id;
2106 	a->nelems = nr_elems;
2107 
2108 	return btf_commit_type(btf, sz);
2109 }
2110 
/* generic STRUCT/UNION append function; *name* may be NULL/empty for
 * anonymous types. Returns new type ID on success, negative error otherwise.
 */
static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz)
{
	struct btf_type *t;
	int sz, name_off = 0;

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	if (name && name[0]) {
		name_off = btf__add_str(btf, name);
		if (name_off < 0)
			return name_off;
	}

	/* start out with vlen=0 and no kflag; this will be adjusted when
	 * adding each member
	 */
	t->name_off = name_off;
	t->info = btf_type_info(kind, 0, 0);
	t->size = bytes_sz;

	return btf_commit_type(btf, sz);
}
2140 
2141 /*
2142  * Append new BTF_KIND_STRUCT type with:
2143  *   - *name* - name of the struct, can be NULL or empty for anonymous structs;
2144  *   - *byte_sz* - size of the struct, in bytes;
2145  *
2146  * Struct initially has no fields in it. Fields can be added by
2147  * btf__add_field() right after btf__add_struct() succeeds.
2148  *
2149  * Returns:
2150  *   - >0, type ID of newly added BTF type;
2151  *   - <0, on error.
2152  */
2153 int btf__add_struct(struct btf *btf, const char *name, __u32 byte_sz)
2154 {
2155 	return btf_add_composite(btf, BTF_KIND_STRUCT, name, byte_sz);
2156 }
2157 
2158 /*
2159  * Append new BTF_KIND_UNION type with:
2160  *   - *name* - name of the union, can be NULL or empty for anonymous union;
2161  *   - *byte_sz* - size of the union, in bytes;
2162  *
2163  * Union initially has no fields in it. Fields can be added by
2164  * btf__add_field() right after btf__add_union() succeeds. All fields
2165  * should have *bit_offset* of 0.
2166  *
2167  * Returns:
2168  *   - >0, type ID of newly added BTF type;
2169  *   - <0, on error.
2170  */
2171 int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz)
2172 {
2173 	return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz);
2174 }
2175 
/* Return the most recently added type; callers must ensure at least one
 * type was added to this btf (nr_types > 0) before calling.
 */
static struct btf_type *btf_last_type(struct btf *btf)
{
	return btf_type_by_id(btf, btf__type_cnt(btf) - 1);
}
2180 
2181 /*
2182  * Append new field for the current STRUCT/UNION type with:
2183  *   - *name* - name of the field, can be NULL or empty for anonymous field;
2184  *   - *type_id* - type ID for the type describing field type;
2185  *   - *bit_offset* - bit offset of the start of the field within struct/union;
2186  *   - *bit_size* - bit size of a bitfield, 0 for non-bitfield fields;
2187  * Returns:
2188  *   -  0, on success;
2189  *   - <0, on error.
2190  */
2191 int btf__add_field(struct btf *btf, const char *name, int type_id,
2192 		   __u32 bit_offset, __u32 bit_size)
2193 {
2194 	struct btf_type *t;
2195 	struct btf_member *m;
2196 	bool is_bitfield;
2197 	int sz, name_off = 0;
2198 
2199 	/* last type should be union/struct */
2200 	if (btf->nr_types == 0)
2201 		return libbpf_err(-EINVAL);
2202 	t = btf_last_type(btf);
2203 	if (!btf_is_composite(t))
2204 		return libbpf_err(-EINVAL);
2205 
2206 	if (validate_type_id(type_id))
2207 		return libbpf_err(-EINVAL);
2208 	/* best-effort bit field offset/size enforcement */
2209 	is_bitfield = bit_size || (bit_offset % 8 != 0);
2210 	if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff))
2211 		return libbpf_err(-EINVAL);
2212 
2213 	/* only offset 0 is allowed for unions */
2214 	if (btf_is_union(t) && bit_offset)
2215 		return libbpf_err(-EINVAL);
2216 
2217 	/* decompose and invalidate raw data */
2218 	if (btf_ensure_modifiable(btf))
2219 		return libbpf_err(-ENOMEM);
2220 
2221 	sz = sizeof(struct btf_member);
2222 	m = btf_add_type_mem(btf, sz);
2223 	if (!m)
2224 		return libbpf_err(-ENOMEM);
2225 
2226 	if (name && name[0]) {
2227 		name_off = btf__add_str(btf, name);
2228 		if (name_off < 0)
2229 			return name_off;
2230 	}
2231 
2232 	m->name_off = name_off;
2233 	m->type = type_id;
2234 	m->offset = bit_offset | (bit_size << 24);
2235 
2236 	/* btf_add_type_mem can invalidate t pointer */
2237 	t = btf_last_type(btf);
2238 	/* update parent type's vlen and kflag */
2239 	t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t));
2240 
2241 	btf->hdr->type_len += sz;
2242 	btf->hdr->str_off += sz;
2243 	return 0;
2244 }
2245 
/*
 * Common implementation for btf__add_enum() and btf__add_enum64(): append
 * a new enum type of given *kind* (BTF_KIND_ENUM or BTF_KIND_ENUM64) with
 * no enumerators, recording *is_signed* in the type's kflag.
 * Returns type ID of the new type, or <0 on error.
 */
static int btf_add_enum_common(struct btf *btf, const char *name, __u32 byte_sz,
			       bool is_signed, __u8 kind)
{
	struct btf_type *t;
	int sz, name_off = 0;

	/* byte_sz must be a non-zero power of 2, at most 8 */
	if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8)
		return libbpf_err(-EINVAL);

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	if (name && name[0]) {
		name_off = btf__add_str(btf, name);
		if (name_off < 0)
			return name_off;
	}

	/* start out with vlen=0; it will be adjusted when adding enum values */
	t->name_off = name_off;
	t->info = btf_type_info(kind, 0, is_signed);
	t->size = byte_sz;

	return btf_commit_type(btf, sz);
}
2277 
2278 /*
2279  * Append new BTF_KIND_ENUM type with:
2280  *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
2281  *   - *byte_sz* - size of the enum, in bytes.
2282  *
2283  * Enum initially has no enum values in it (and corresponds to enum forward
2284  * declaration). Enumerator values can be added by btf__add_enum_value()
2285  * immediately after btf__add_enum() succeeds.
2286  *
2287  * Returns:
2288  *   - >0, type ID of newly added BTF type;
2289  *   - <0, on error.
2290  */
2291 int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
2292 {
2293 	/*
2294 	 * set the signedness to be unsigned, it will change to signed
2295 	 * if any later enumerator is negative.
2296 	 */
2297 	return btf_add_enum_common(btf, name, byte_sz, false, BTF_KIND_ENUM);
2298 }
2299 
/*
 * Append new enum value for the current ENUM type with:
 *   - *name* - name of the enumerator value, can't be NULL or empty;
 *   - *value* - integer value corresponding to enum value *name*;
 * Returns:
 *   -  0, on success;
 *   - <0, on error.
 */
int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
{
	struct btf_type *t;
	struct btf_enum *v;
	int sz, name_off;

	/* last type should be BTF_KIND_ENUM */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_enum(t))
		return libbpf_err(-EINVAL);

	/* non-empty name */
	if (!name || !name[0])
		return libbpf_err(-EINVAL);
	/* value must be representable in 32 bits, either as s32 or u32;
	 * wider values need BTF_KIND_ENUM64 (see btf__add_enum64_value())
	 */
	if (value < INT_MIN || value > UINT_MAX)
		return libbpf_err(-E2BIG);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_enum);
	v = btf_add_type_mem(btf, sz);
	if (!v)
		return libbpf_err(-ENOMEM);

	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	v->name_off = name_off;
	v->val = value;

	/* update parent type's vlen; re-fetch t, as btf_add_type_mem() can
	 * invalidate previously obtained type pointers
	 */
	t = btf_last_type(btf);
	btf_type_inc_vlen(t);

	/* if negative value, set signedness to signed */
	if (value < 0)
		t->info = btf_type_info(btf_kind(t), btf_vlen(t), true);

	btf->hdr->type_len += sz;
	btf->hdr->str_off += sz;
	return 0;
}
2355 
2356 /*
2357  * Append new BTF_KIND_ENUM64 type with:
2358  *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
2359  *   - *byte_sz* - size of the enum, in bytes.
2360  *   - *is_signed* - whether the enum values are signed or not;
2361  *
2362  * Enum initially has no enum values in it (and corresponds to enum forward
2363  * declaration). Enumerator values can be added by btf__add_enum64_value()
2364  * immediately after btf__add_enum64() succeeds.
2365  *
2366  * Returns:
2367  *   - >0, type ID of newly added BTF type;
2368  *   - <0, on error.
2369  */
2370 int btf__add_enum64(struct btf *btf, const char *name, __u32 byte_sz,
2371 		    bool is_signed)
2372 {
2373 	return btf_add_enum_common(btf, name, byte_sz, is_signed,
2374 				   BTF_KIND_ENUM64);
2375 }
2376 
/*
 * Append new enum value for the current ENUM64 type with:
 *   - *name* - name of the enumerator value, can't be NULL or empty;
 *   - *value* - integer value corresponding to enum value *name*;
 * Returns:
 *   -  0, on success;
 *   - <0, on error.
 */
int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value)
{
	struct btf_enum64 *v;
	struct btf_type *t;
	int sz, name_off;

	/* last type should be BTF_KIND_ENUM64 */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_enum64(t))
		return libbpf_err(-EINVAL);

	/* non-empty name */
	if (!name || !name[0])
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_enum64);
	v = btf_add_type_mem(btf, sz);
	if (!v)
		return libbpf_err(-ENOMEM);

	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	v->name_off = name_off;
	/* 64-bit value is stored as two 32-bit halves */
	v->val_lo32 = (__u32)value;
	v->val_hi32 = value >> 32;

	/* update parent type's vlen; re-fetch t, as btf_add_type_mem() can
	 * invalidate previously obtained type pointers
	 */
	t = btf_last_type(btf);
	btf_type_inc_vlen(t);

	btf->hdr->type_len += sz;
	btf->hdr->str_off += sz;
	return 0;
}
2427 
2428 /*
2429  * Append new BTF_KIND_FWD type with:
2430  *   - *name*, non-empty/non-NULL name;
2431  *   - *fwd_kind*, kind of forward declaration, one of BTF_FWD_STRUCT,
2432  *     BTF_FWD_UNION, or BTF_FWD_ENUM;
2433  * Returns:
2434  *   - >0, type ID of newly added BTF type;
2435  *   - <0, on error.
2436  */
2437 int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind)
2438 {
2439 	if (!name || !name[0])
2440 		return libbpf_err(-EINVAL);
2441 
2442 	switch (fwd_kind) {
2443 	case BTF_FWD_STRUCT:
2444 	case BTF_FWD_UNION: {
2445 		struct btf_type *t;
2446 		int id;
2447 
2448 		id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0);
2449 		if (id <= 0)
2450 			return id;
2451 		t = btf_type_by_id(btf, id);
2452 		t->info = btf_type_info(BTF_KIND_FWD, 0, fwd_kind == BTF_FWD_UNION);
2453 		return id;
2454 	}
2455 	case BTF_FWD_ENUM:
2456 		/* enum forward in BTF currently is just an enum with no enum
2457 		 * values; we also assume a standard 4-byte size for it
2458 		 */
2459 		return btf__add_enum(btf, name, sizeof(int));
2460 	default:
2461 		return libbpf_err(-EINVAL);
2462 	}
2463 }
2464 
2465 /*
2466  * Append new BTF_KING_TYPEDEF type with:
2467  *   - *name*, non-empty/non-NULL name;
2468  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2469  * Returns:
2470  *   - >0, type ID of newly added BTF type;
2471  *   - <0, on error.
2472  */
2473 int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id)
2474 {
2475 	if (!name || !name[0])
2476 		return libbpf_err(-EINVAL);
2477 
2478 	return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id);
2479 }
2480 
2481 /*
2482  * Append new BTF_KIND_VOLATILE type with:
2483  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2484  * Returns:
2485  *   - >0, type ID of newly added BTF type;
2486  *   - <0, on error.
2487  */
2488 int btf__add_volatile(struct btf *btf, int ref_type_id)
2489 {
2490 	return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id);
2491 }
2492 
2493 /*
2494  * Append new BTF_KIND_CONST type with:
2495  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2496  * Returns:
2497  *   - >0, type ID of newly added BTF type;
2498  *   - <0, on error.
2499  */
2500 int btf__add_const(struct btf *btf, int ref_type_id)
2501 {
2502 	return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id);
2503 }
2504 
2505 /*
2506  * Append new BTF_KIND_RESTRICT type with:
2507  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2508  * Returns:
2509  *   - >0, type ID of newly added BTF type;
2510  *   - <0, on error.
2511  */
2512 int btf__add_restrict(struct btf *btf, int ref_type_id)
2513 {
2514 	return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id);
2515 }
2516 
2517 /*
2518  * Append new BTF_KIND_TYPE_TAG type with:
2519  *   - *value*, non-empty/non-NULL tag value;
2520  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2521  * Returns:
2522  *   - >0, type ID of newly added BTF type;
2523  *   - <0, on error.
2524  */
2525 int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
2526 {
2527 	if (!value || !value[0])
2528 		return libbpf_err(-EINVAL);
2529 
2530 	return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id);
2531 }
2532 
2533 /*
2534  * Append new BTF_KIND_FUNC type with:
2535  *   - *name*, non-empty/non-NULL name;
2536  *   - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet;
2537  * Returns:
2538  *   - >0, type ID of newly added BTF type;
2539  *   - <0, on error.
2540  */
2541 int btf__add_func(struct btf *btf, const char *name,
2542 		  enum btf_func_linkage linkage, int proto_type_id)
2543 {
2544 	int id;
2545 
2546 	if (!name || !name[0])
2547 		return libbpf_err(-EINVAL);
2548 	if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL &&
2549 	    linkage != BTF_FUNC_EXTERN)
2550 		return libbpf_err(-EINVAL);
2551 
2552 	id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id);
2553 	if (id > 0) {
2554 		struct btf_type *t = btf_type_by_id(btf, id);
2555 
2556 		t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0);
2557 	}
2558 	return libbpf_err(id);
2559 }
2560 
/*
 * Append new BTF_KIND_FUNC_PROTO with:
 *   - *ret_type_id* - type ID for return result of a function.
 *
 * Function prototype initially has no arguments, but they can be added by
 * btf__add_func_param() one by one, immediately after
 * btf__add_func_proto() succeeded.
 *
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_func_proto(struct btf *btf, int ret_type_id)
{
	struct btf_type *t;
	int sz;

	/* ret_type_id is allowed to reference a not-yet-added type */
	if (validate_type_id(ret_type_id))
		return libbpf_err(-EINVAL);

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* start out with vlen=0; it will be adjusted as parameters are
	 * added with btf__add_func_param(), if any
	 */
	t->name_off = 0;
	t->info = btf_type_info(BTF_KIND_FUNC_PROTO, 0, 0);
	t->type = ret_type_id;

	return btf_commit_type(btf, sz);
}
2598 
/*
 * Append new function parameter for current FUNC_PROTO type with:
 *   - *name* - parameter name, can be NULL or empty;
 *   - *type_id* - type ID describing the type of the parameter.
 * Returns:
 *   -  0, on success;
 *   - <0, on error.
 */
int btf__add_func_param(struct btf *btf, const char *name, int type_id)
{
	struct btf_type *t;
	struct btf_param *p;
	int sz, name_off = 0;

	if (validate_type_id(type_id))
		return libbpf_err(-EINVAL);

	/* last type should be BTF_KIND_FUNC_PROTO */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_func_proto(t))
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_param);
	p = btf_add_type_mem(btf, sz);
	if (!p)
		return libbpf_err(-ENOMEM);

	/* NULL/empty name means anonymous parameter (name_off stays 0) */
	if (name && name[0]) {
		name_off = btf__add_str(btf, name);
		if (name_off < 0)
			return name_off;
	}

	p->name_off = name_off;
	p->type = type_id;

	/* update parent type's vlen; re-fetch t, as btf_add_type_mem() can
	 * invalidate previously obtained type pointers
	 */
	t = btf_last_type(btf);
	btf_type_inc_vlen(t);

	btf->hdr->type_len += sz;
	btf->hdr->str_off += sz;
	return 0;
}
2649 
/*
 * Append new BTF_KIND_VAR type with:
 *   - *name* - non-empty/non-NULL name;
 *   - *linkage* - variable linkage, one of BTF_VAR_STATIC,
 *     BTF_VAR_GLOBAL_ALLOCATED, or BTF_VAR_GLOBAL_EXTERN;
 *   - *type_id* - type ID of the type describing the type of the variable.
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id)
{
	struct btf_type *t;
	struct btf_var *v;
	int sz, name_off;

	/* non-empty name */
	if (!name || !name[0])
		return libbpf_err(-EINVAL);
	if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED &&
	    linkage != BTF_VAR_GLOBAL_EXTERN)
		return libbpf_err(-EINVAL);
	if (validate_type_id(type_id))
		return libbpf_err(-EINVAL);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	/* VAR is a btf_type immediately followed by a btf_var record */
	sz = sizeof(struct btf_type) + sizeof(struct btf_var);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	t->name_off = name_off;
	t->info = btf_type_info(BTF_KIND_VAR, 0, 0);
	t->type = type_id;

	/* linkage lives in the trailing btf_var record */
	v = btf_var(t);
	v->linkage = linkage;

	return btf_commit_type(btf, sz);
}
2697 
/*
 * Append new BTF_KIND_DATASEC type with:
 *   - *name* - non-empty/non-NULL name;
 *   - *byte_sz* - data section size, in bytes.
 *
 * Data section is initially empty. Variables info can be added with
 * btf__add_datasec_var_info() calls, after btf__add_datasec() succeeds.
 *
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz)
{
	struct btf_type *t;
	int sz, name_off;

	/* non-empty name */
	if (!name || !name[0])
		return libbpf_err(-EINVAL);

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	/* start with vlen=0, which will be updated as var_secinfos are added */
	t->name_off = name_off;
	t->info = btf_type_info(BTF_KIND_DATASEC, 0, 0);
	t->size = byte_sz;

	return btf_commit_type(btf, sz);
}
2738 
/*
 * Append new data section variable information entry for current DATASEC type:
 *   - *var_type_id* - type ID, describing type of the variable;
 *   - *offset* - variable offset within data section, in bytes;
 *   - *byte_sz* - variable size, in bytes.
 *
 * Returns:
 *   -  0, on success;
 *   - <0, on error.
 */
int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz)
{
	struct btf_type *t;
	struct btf_var_secinfo *v;
	int sz;

	/* last type should be BTF_KIND_DATASEC */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_datasec(t))
		return libbpf_err(-EINVAL);

	if (validate_type_id(var_type_id))
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_var_secinfo);
	v = btf_add_type_mem(btf, sz);
	if (!v)
		return libbpf_err(-ENOMEM);

	v->type = var_type_id;
	v->offset = offset;
	v->size = byte_sz;

	/* update parent type's vlen; re-fetch t, as btf_add_type_mem() can
	 * invalidate previously obtained type pointers
	 */
	t = btf_last_type(btf);
	btf_type_inc_vlen(t);

	btf->hdr->type_len += sz;
	btf->hdr->str_off += sz;
	return 0;
}
2786 
/*
 * Append new BTF_KIND_DECL_TAG type with:
 *   - *value* - non-empty/non-NULL string;
 *   - *ref_type_id* - referenced type ID, it might not exist yet;
 *   - *component_idx* - -1 for tagging reference type, otherwise struct/union
 *     member or function argument index;
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
		 int component_idx)
{
	struct btf_type *t;
	int sz, value_off;

	if (!value || !value[0] || component_idx < -1)
		return libbpf_err(-EINVAL);

	if (validate_type_id(ref_type_id))
		return libbpf_err(-EINVAL);

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	/* DECL_TAG is a btf_type immediately followed by a btf_decl_tag */
	sz = sizeof(struct btf_type) + sizeof(struct btf_decl_tag);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	value_off = btf__add_str(btf, value);
	if (value_off < 0)
		return value_off;

	t->name_off = value_off;
	t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, false);
	t->type = ref_type_id;
	/* -1 tags the referenced type itself; >= 0 tags a member/param */
	btf_decl_tag(t)->component_idx = component_idx;

	return btf_commit_type(btf, sz);
}
2828 
/* Parameters describing one record-based section (func_info, line_info,
 * or core_relo) of .BTF.ext that btf_ext_setup_info() should parse.
 */
struct btf_ext_sec_setup_param {
	__u32 off;			/* section offset within .BTF.ext data (past the header) */
	__u32 len;			/* section length, in bytes */
	__u32 min_rec_size;		/* minimal acceptable per-record size */
	struct btf_ext_info *ext_info;	/* destination for parsed section info */
	const char *desc;		/* section name, for log messages */
};
2836 
2837 static int btf_ext_setup_info(struct btf_ext *btf_ext,
2838 			      struct btf_ext_sec_setup_param *ext_sec)
2839 {
2840 	const struct btf_ext_info_sec *sinfo;
2841 	struct btf_ext_info *ext_info;
2842 	__u32 info_left, record_size;
2843 	size_t sec_cnt = 0;
2844 	/* The start of the info sec (including the __u32 record_size). */
2845 	void *info;
2846 
2847 	if (ext_sec->len == 0)
2848 		return 0;
2849 
2850 	if (ext_sec->off & 0x03) {
2851 		pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
2852 		     ext_sec->desc);
2853 		return -EINVAL;
2854 	}
2855 
2856 	info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
2857 	info_left = ext_sec->len;
2858 
2859 	if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
2860 		pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
2861 			 ext_sec->desc, ext_sec->off, ext_sec->len);
2862 		return -EINVAL;
2863 	}
2864 
2865 	/* At least a record size */
2866 	if (info_left < sizeof(__u32)) {
2867 		pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
2868 		return -EINVAL;
2869 	}
2870 
2871 	/* The record size needs to meet the minimum standard */
2872 	record_size = *(__u32 *)info;
2873 	if (record_size < ext_sec->min_rec_size ||
2874 	    record_size & 0x03) {
2875 		pr_debug("%s section in .BTF.ext has invalid record size %u\n",
2876 			 ext_sec->desc, record_size);
2877 		return -EINVAL;
2878 	}
2879 
2880 	sinfo = info + sizeof(__u32);
2881 	info_left -= sizeof(__u32);
2882 
2883 	/* If no records, return failure now so .BTF.ext won't be used. */
2884 	if (!info_left) {
2885 		pr_debug("%s section in .BTF.ext has no records", ext_sec->desc);
2886 		return -EINVAL;
2887 	}
2888 
2889 	while (info_left) {
2890 		unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
2891 		__u64 total_record_size;
2892 		__u32 num_records;
2893 
2894 		if (info_left < sec_hdrlen) {
2895 			pr_debug("%s section header is not found in .BTF.ext\n",
2896 			     ext_sec->desc);
2897 			return -EINVAL;
2898 		}
2899 
2900 		num_records = sinfo->num_info;
2901 		if (num_records == 0) {
2902 			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
2903 			     ext_sec->desc);
2904 			return -EINVAL;
2905 		}
2906 
2907 		total_record_size = sec_hdrlen + (__u64)num_records * record_size;
2908 		if (info_left < total_record_size) {
2909 			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
2910 			     ext_sec->desc);
2911 			return -EINVAL;
2912 		}
2913 
2914 		info_left -= total_record_size;
2915 		sinfo = (void *)sinfo + total_record_size;
2916 		sec_cnt++;
2917 	}
2918 
2919 	ext_info = ext_sec->ext_info;
2920 	ext_info->len = ext_sec->len - sizeof(__u32);
2921 	ext_info->rec_size = record_size;
2922 	ext_info->info = info + sizeof(__u32);
2923 	ext_info->sec_cnt = sec_cnt;
2924 
2925 	return 0;
2926 }
2927 
2928 static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
2929 {
2930 	struct btf_ext_sec_setup_param param = {
2931 		.off = btf_ext->hdr->func_info_off,
2932 		.len = btf_ext->hdr->func_info_len,
2933 		.min_rec_size = sizeof(struct bpf_func_info_min),
2934 		.ext_info = &btf_ext->func_info,
2935 		.desc = "func_info"
2936 	};
2937 
2938 	return btf_ext_setup_info(btf_ext, &param);
2939 }
2940 
2941 static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
2942 {
2943 	struct btf_ext_sec_setup_param param = {
2944 		.off = btf_ext->hdr->line_info_off,
2945 		.len = btf_ext->hdr->line_info_len,
2946 		.min_rec_size = sizeof(struct bpf_line_info_min),
2947 		.ext_info = &btf_ext->line_info,
2948 		.desc = "line_info",
2949 	};
2950 
2951 	return btf_ext_setup_info(btf_ext, &param);
2952 }
2953 
2954 static int btf_ext_setup_core_relos(struct btf_ext *btf_ext)
2955 {
2956 	struct btf_ext_sec_setup_param param = {
2957 		.off = btf_ext->hdr->core_relo_off,
2958 		.len = btf_ext->hdr->core_relo_len,
2959 		.min_rec_size = sizeof(struct bpf_core_relo),
2960 		.ext_info = &btf_ext->core_relo_info,
2961 		.desc = "core_relo",
2962 	};
2963 
2964 	return btf_ext_setup_info(btf_ext, &param);
2965 }
2966 
2967 static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
2968 {
2969 	const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
2970 
2971 	if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
2972 	    data_size < hdr->hdr_len) {
2973 		pr_debug("BTF.ext header not found");
2974 		return -EINVAL;
2975 	}
2976 
2977 	if (hdr->magic == bswap_16(BTF_MAGIC)) {
2978 		pr_warn("BTF.ext in non-native endianness is not supported\n");
2979 		return -ENOTSUP;
2980 	} else if (hdr->magic != BTF_MAGIC) {
2981 		pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
2982 		return -EINVAL;
2983 	}
2984 
2985 	if (hdr->version != BTF_VERSION) {
2986 		pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
2987 		return -ENOTSUP;
2988 	}
2989 
2990 	if (hdr->flags) {
2991 		pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
2992 		return -ENOTSUP;
2993 	}
2994 
2995 	if (data_size == hdr->hdr_len) {
2996 		pr_debug("BTF.ext has no data\n");
2997 		return -EINVAL;
2998 	}
2999 
3000 	return 0;
3001 }
3002 
3003 void btf_ext__free(struct btf_ext *btf_ext)
3004 {
3005 	if (IS_ERR_OR_NULL(btf_ext))
3006 		return;
3007 	free(btf_ext->func_info.sec_idxs);
3008 	free(btf_ext->line_info.sec_idxs);
3009 	free(btf_ext->core_relo_info.sec_idxs);
3010 	free(btf_ext->data);
3011 	free(btf_ext);
3012 }
3013 
/*
 * Parse raw .BTF.ext *data* of given *size* into a new struct btf_ext.
 * Data is copied, so the caller's buffer can be freed after the call.
 * Returns new btf_ext on success, or an ERR_PTR-encoded error (via
 * libbpf_err_ptr()) on failure.
 */
struct btf_ext *btf_ext__new(const __u8 *data, __u32 size)
{
	struct btf_ext *btf_ext;
	int err;

	btf_ext = calloc(1, sizeof(struct btf_ext));
	if (!btf_ext)
		return libbpf_err_ptr(-ENOMEM);

	/* keep a private copy of the raw .BTF.ext data */
	btf_ext->data_size = size;
	btf_ext->data = malloc(size);
	if (!btf_ext->data) {
		err = -ENOMEM;
		goto done;
	}
	memcpy(btf_ext->data, data, size);

	err = btf_ext_parse_hdr(btf_ext->data, size);
	if (err)
		goto done;

	/* header must at least cover func_info and line_info fields */
	if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
		err = -EINVAL;
		goto done;
	}

	err = btf_ext_setup_func_info(btf_ext);
	if (err)
		goto done;

	err = btf_ext_setup_line_info(btf_ext);
	if (err)
		goto done;

	/* err == 0 here; core_relo section is optional for older headers */
	if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
		goto done; /* skip core relos parsing */

	err = btf_ext_setup_core_relos(btf_ext);
	if (err)
		goto done;

done:
	/* single cleanup point: free everything on any error */
	if (err) {
		btf_ext__free(btf_ext);
		return libbpf_err_ptr(err);
	}

	return btf_ext;
}
3063 
3064 const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size)
3065 {
3066 	*size = btf_ext->data_size;
3067 	return btf_ext->data;
3068 }
3069 
3070 __attribute__((alias("btf_ext__raw_data")))
3071 const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size);
3072 
3073 
3074 struct btf_dedup;
3075 
3076 static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
3077 static void btf_dedup_free(struct btf_dedup *d);
3078 static int btf_dedup_prep(struct btf_dedup *d);
3079 static int btf_dedup_strings(struct btf_dedup *d);
3080 static int btf_dedup_prim_types(struct btf_dedup *d);
3081 static int btf_dedup_struct_types(struct btf_dedup *d);
3082 static int btf_dedup_ref_types(struct btf_dedup *d);
3083 static int btf_dedup_resolve_fwds(struct btf_dedup *d);
3084 static int btf_dedup_compact_types(struct btf_dedup *d);
3085 static int btf_dedup_remap_types(struct btf_dedup *d);
3086 
3087 /*
3088  * Deduplicate BTF types and strings.
3089  *
3090  * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF
3091  * section with all BTF type descriptors and string data. It overwrites that
3092  * memory in-place with deduplicated types and strings without any loss of
3093  * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section
3094  * is provided, all the strings referenced from .BTF.ext section are honored
3095  * and updated to point to the right offsets after deduplication.
3096  *
3097  * If function returns with error, type/string data might be garbled and should
3098  * be discarded.
3099  *
3100  * More verbose and detailed description of both problem btf_dedup is solving,
3101  * as well as solution could be found at:
3102  * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
3103  *
3104  * Problem description and justification
3105  * =====================================
3106  *
3107  * BTF type information is typically emitted either as a result of conversion
3108  * from DWARF to BTF or directly by compiler. In both cases, each compilation
3109  * unit contains information about a subset of all the types that are used
3110  * in an application. These subsets are frequently overlapping and contain a lot
3111  * of duplicated information when later concatenated together into a single
3112  * binary. This algorithm ensures that each unique type is represented by single
3113  * BTF type descriptor, greatly reducing resulting size of BTF data.
3114  *
3115  * Compilation unit isolation and subsequent duplication of data is not the only
3116  * problem. The same type hierarchy (e.g., struct and all the type that struct
3117  * references) in different compilation units can be represented in BTF to
3118  * various degrees of completeness (or, rather, incompleteness) due to
3119  * struct/union forward declarations.
3120  *
3121  * Let's take a look at an example, that we'll use to better understand the
3122  * problem (and solution). Suppose we have two compilation units, each using
3123  * same `struct S`, but each of them having incomplete type information about
3124  * struct's fields:
3125  *
3126  * // CU #1:
3127  * struct S;
3128  * struct A {
3129  *	int a;
3130  *	struct A* self;
3131  *	struct S* parent;
3132  * };
3133  * struct B;
3134  * struct S {
3135  *	struct A* a_ptr;
3136  *	struct B* b_ptr;
3137  * };
3138  *
3139  * // CU #2:
3140  * struct S;
3141  * struct A;
3142  * struct B {
3143  *	int b;
3144  *	struct B* self;
3145  *	struct S* parent;
3146  * };
3147  * struct S {
3148  *	struct A* a_ptr;
3149  *	struct B* b_ptr;
3150  * };
3151  *
3152  * In case of CU #1, BTF data will know only that `struct B` exist (but no
3153  * more), but will know the complete type information about `struct A`. While
3154  * for CU #2, it will know full type information about `struct B`, but will
3155  * only know about forward declaration of `struct A` (in BTF terms, it will
3156  * have `BTF_KIND_FWD` type descriptor with name `B`).
3157  *
3158  * This compilation unit isolation means that it's possible that there is no
3159  * single CU with complete type information describing structs `S`, `A`, and
3160  * `B`. Also, we might get tons of duplicated and redundant type information.
3161  *
3162  * Additional complication we need to keep in mind comes from the fact that
3163  * types, in general, can form graphs containing cycles, not just DAGs.
3164  *
3165  * While algorithm does deduplication, it also merges and resolves type
 * information (unless disabled through `struct btf_dedup_opts`), whenever possible.
3167  * E.g., in the example above with two compilation units having partial type
3168  * information for structs `A` and `B`, the output of algorithm will emit
3169  * a single copy of each BTF type that describes structs `A`, `B`, and `S`
3170  * (as well as type information for `int` and pointers), as if they were defined
3171  * in a single compilation unit as:
3172  *
3173  * struct A {
3174  *	int a;
3175  *	struct A* self;
3176  *	struct S* parent;
3177  * };
3178  * struct B {
3179  *	int b;
3180  *	struct B* self;
3181  *	struct S* parent;
3182  * };
3183  * struct S {
3184  *	struct A* a_ptr;
3185  *	struct B* b_ptr;
3186  * };
3187  *
3188  * Algorithm summary
3189  * =================
3190  *
3191  * Algorithm completes its work in 7 separate passes:
3192  *
3193  * 1. Strings deduplication.
3194  * 2. Primitive types deduplication (int, enum, fwd).
3195  * 3. Struct/union types deduplication.
3196  * 4. Resolve unambiguous forward declarations.
3197  * 5. Reference types deduplication (pointers, typedefs, arrays, funcs, func
3198  *    protos, and const/volatile/restrict modifiers).
3199  * 6. Types compaction.
3200  * 7. Types remapping.
3201  *
3202  * Algorithm determines canonical type descriptor, which is a single
3203  * representative type for each truly unique type. This canonical type is the
3204  * one that will go into final deduplicated BTF type information. For
3205  * struct/unions, it is also the type that algorithm will merge additional type
3206  * information into (while resolving FWDs), as it discovers it from data in
3207  * other CUs. Each input BTF type eventually gets either mapped to itself, if
3208  * that type is canonical, or to some other type, if that type is equivalent
3209  * and was chosen as canonical representative. This mapping is stored in
3210  * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that
3211  * FWD type got resolved to.
3212  *
3213  * To facilitate fast discovery of canonical types, we also maintain canonical
3214  * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash
3215  * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types
3216  * that match that signature. With sufficiently good choice of type signature
3217  * hashing function, we can limit number of canonical types for each unique type
3218  * signature to a very small number, allowing to find canonical type for any
3219  * duplicated type very quickly.
3220  *
3221  * Struct/union deduplication is the most critical part and algorithm for
3222  * deduplicating structs/unions is described in greater details in comments for
3223  * `btf_dedup_is_equiv` function.
3224  */
3225 int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
3226 {
3227 	struct btf_dedup *d;
3228 	int err;
3229 
3230 	if (!OPTS_VALID(opts, btf_dedup_opts))
3231 		return libbpf_err(-EINVAL);
3232 
3233 	d = btf_dedup_new(btf, opts);
3234 	if (IS_ERR(d)) {
3235 		pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
3236 		return libbpf_err(-EINVAL);
3237 	}
3238 
3239 	if (btf_ensure_modifiable(btf)) {
3240 		err = -ENOMEM;
3241 		goto done;
3242 	}
3243 
3244 	err = btf_dedup_prep(d);
3245 	if (err) {
3246 		pr_debug("btf_dedup_prep failed:%d\n", err);
3247 		goto done;
3248 	}
3249 	err = btf_dedup_strings(d);
3250 	if (err < 0) {
3251 		pr_debug("btf_dedup_strings failed:%d\n", err);
3252 		goto done;
3253 	}
3254 	err = btf_dedup_prim_types(d);
3255 	if (err < 0) {
3256 		pr_debug("btf_dedup_prim_types failed:%d\n", err);
3257 		goto done;
3258 	}
3259 	err = btf_dedup_struct_types(d);
3260 	if (err < 0) {
3261 		pr_debug("btf_dedup_struct_types failed:%d\n", err);
3262 		goto done;
3263 	}
3264 	err = btf_dedup_resolve_fwds(d);
3265 	if (err < 0) {
3266 		pr_debug("btf_dedup_resolve_fwds failed:%d\n", err);
3267 		goto done;
3268 	}
3269 	err = btf_dedup_ref_types(d);
3270 	if (err < 0) {
3271 		pr_debug("btf_dedup_ref_types failed:%d\n", err);
3272 		goto done;
3273 	}
3274 	err = btf_dedup_compact_types(d);
3275 	if (err < 0) {
3276 		pr_debug("btf_dedup_compact_types failed:%d\n", err);
3277 		goto done;
3278 	}
3279 	err = btf_dedup_remap_types(d);
3280 	if (err < 0) {
3281 		pr_debug("btf_dedup_remap_types failed:%d\n", err);
3282 		goto done;
3283 	}
3284 
3285 done:
3286 	btf_dedup_free(d);
3287 	return libbpf_err(err);
3288 }
3289 
3290 #define BTF_UNPROCESSED_ID ((__u32)-1)
3291 #define BTF_IN_PROGRESS_ID ((__u32)-2)
3292 
struct btf_dedup {
	/* .BTF section to be deduped in-place */
	struct btf *btf;
	/*
	 * Optional .BTF.ext section. When provided, any strings referenced
	 * from it will be taken into account when deduping strings
	 */
	struct btf_ext *btf_ext;
	/*
	 * This is a map from any type's signature hash to a list of possible
	 * canonical representative type candidates. Hash collisions are
	 * ignored, so even types of various kinds can share same list of
	 * candidates, which is fine because we rely on subsequent
	 * btf_xxx_equal() checks to authoritatively verify type equality.
	 */
	struct hashmap *dedup_table;
	/* Canonical types map */
	__u32 *map;
	/* Hypothetical mapping, used during type graph equivalence checks */
	__u32 *hypot_map;
	/* list of type IDs touched in hypot_map, enabling cheap reset of only
	 * those entries between equivalence checks
	 */
	__u32 *hypot_list;
	/* number of IDs currently recorded in hypot_list */
	size_t hypot_cnt;
	/* allocated capacity of hypot_list */
	size_t hypot_cap;
	/* Whether hypothetical mapping, if successful, would need to adjust
	 * already canonicalized types (due to a new forward declaration to
	 * concrete type resolution). In such case, during split BTF dedup
	 * candidate type would still be considered as different, because base
	 * BTF is considered to be immutable.
	 */
	bool hypot_adjust_canon;
	/* Various option modifying behavior of algorithm */
	struct btf_dedup_opts opts;
	/* temporary strings deduplication state */
	struct strset *strs_set;
};
3328 
/* Fold the next value into accumulated hash h (classic 31-multiplier mix) */
static long hash_combine(long h, long value)
{
	long mixed = h;

	mixed *= 31;
	mixed += value;
	return mixed;
}
3333 
/* Iterate all dedup table candidate entries stored under given signature hash */
#define for_each_dedup_cand(d, node, hash) \
	hashmap__for_each_key_entry(d->dedup_table, node, hash)
3336 
3337 static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
3338 {
3339 	return hashmap__append(d->dedup_table, hash, type_id);
3340 }
3341 
3342 static int btf_dedup_hypot_map_add(struct btf_dedup *d,
3343 				   __u32 from_id, __u32 to_id)
3344 {
3345 	if (d->hypot_cnt == d->hypot_cap) {
3346 		__u32 *new_list;
3347 
3348 		d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
3349 		new_list = libbpf_reallocarray(d->hypot_list, d->hypot_cap, sizeof(__u32));
3350 		if (!new_list)
3351 			return -ENOMEM;
3352 		d->hypot_list = new_list;
3353 	}
3354 	d->hypot_list[d->hypot_cnt++] = from_id;
3355 	d->hypot_map[from_id] = to_id;
3356 	return 0;
3357 }
3358 
3359 static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
3360 {
3361 	int i;
3362 
3363 	for (i = 0; i < d->hypot_cnt; i++)
3364 		d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
3365 	d->hypot_cnt = 0;
3366 	d->hypot_adjust_canon = false;
3367 }
3368 
3369 static void btf_dedup_free(struct btf_dedup *d)
3370 {
3371 	hashmap__free(d->dedup_table);
3372 	d->dedup_table = NULL;
3373 
3374 	free(d->map);
3375 	d->map = NULL;
3376 
3377 	free(d->hypot_map);
3378 	d->hypot_map = NULL;
3379 
3380 	free(d->hypot_list);
3381 	d->hypot_list = NULL;
3382 
3383 	free(d);
3384 }
3385 
/* Keys stored in the dedup table already are signature hashes: use verbatim */
static size_t btf_dedup_identity_hash_fn(long key, void *ctx)
{
	return (size_t)key;
}
3390 
/* Degenerate hash forcing every key into one bucket (collision stress mode) */
static size_t btf_dedup_collision_hash_fn(long key, void *ctx)
{
	size_t forced_bucket = 0;

	return forced_bucket;
}
3395 
/* Hashmap key comparator: keys are plain longs, compare directly */
static bool btf_dedup_equal_fn(long k1, long k2, void *ctx)
{
	if (k1 != k2)
		return false;
	return true;
}
3400 
/* Allocate and initialize dedup state for given BTF (plus optional .BTF.ext
 * passed via opts). Returns ERR_PTR()-encoded error on failure; on any
 * partial-init failure all acquired resources are released via
 * btf_dedup_free(), which relies on calloc() having zeroed the struct.
 */
static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts)
{
	struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
	hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
	int i, err = 0, type_cnt;

	if (!d)
		return ERR_PTR(-ENOMEM);

	/* forced collisions put all candidates on one list; testing aid */
	if (OPTS_GET(opts, force_collisions, false))
		hash_fn = btf_dedup_collision_hash_fn;

	d->btf = btf;
	d->btf_ext = OPTS_GET(opts, btf_ext, NULL);

	d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
	if (IS_ERR(d->dedup_table)) {
		err = PTR_ERR(d->dedup_table);
		/* NULL out so btf_dedup_free() doesn't free an ERR_PTR */
		d->dedup_table = NULL;
		goto done;
	}

	type_cnt = btf__type_cnt(btf);
	d->map = malloc(sizeof(__u32) * type_cnt);
	if (!d->map) {
		err = -ENOMEM;
		goto done;
	}
	/* special BTF "void" type is made canonical immediately */
	d->map[0] = 0;
	for (i = 1; i < type_cnt; i++) {
		struct btf_type *t = btf_type_by_id(d->btf, i);

		/* VAR and DATASEC are never deduped and are self-canonical */
		if (btf_is_var(t) || btf_is_datasec(t))
			d->map[i] = i;
		else
			d->map[i] = BTF_UNPROCESSED_ID;
	}

	d->hypot_map = malloc(sizeof(__u32) * type_cnt);
	if (!d->hypot_map) {
		err = -ENOMEM;
		goto done;
	}
	for (i = 0; i < type_cnt; i++)
		d->hypot_map[i] = BTF_UNPROCESSED_ID;

done:
	if (err) {
		btf_dedup_free(d);
		return ERR_PTR(err);
	}

	return d;
}
3457 
3458 /*
3459  * Iterate over all possible places in .BTF and .BTF.ext that can reference
3460  * string and pass pointer to it to a provided callback `fn`.
3461  */
3462 static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *ctx)
3463 {
3464 	int i, r;
3465 
3466 	for (i = 0; i < d->btf->nr_types; i++) {
3467 		struct btf_field_iter it;
3468 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
3469 		__u32 *str_off;
3470 
3471 		r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
3472 		if (r)
3473 			return r;
3474 
3475 		while ((str_off = btf_field_iter_next(&it))) {
3476 			r = fn(str_off, ctx);
3477 			if (r)
3478 				return r;
3479 		}
3480 	}
3481 
3482 	if (!d->btf_ext)
3483 		return 0;
3484 
3485 	r = btf_ext_visit_str_offs(d->btf_ext, fn, ctx);
3486 	if (r)
3487 		return r;
3488 
3489 	return 0;
3490 }
3491 
3492 static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx)
3493 {
3494 	struct btf_dedup *d = ctx;
3495 	__u32 str_off = *str_off_ptr;
3496 	const char *s;
3497 	int off, err;
3498 
3499 	/* don't touch empty string or string in main BTF */
3500 	if (str_off == 0 || str_off < d->btf->start_str_off)
3501 		return 0;
3502 
3503 	s = btf__str_by_offset(d->btf, str_off);
3504 	if (d->btf->base_btf) {
3505 		err = btf__find_str(d->btf->base_btf, s);
3506 		if (err >= 0) {
3507 			*str_off_ptr = err;
3508 			return 0;
3509 		}
3510 		if (err != -ENOENT)
3511 			return err;
3512 	}
3513 
3514 	off = strset__add_str(d->strs_set, s);
3515 	if (off < 0)
3516 		return off;
3517 
3518 	*str_off_ptr = d->btf->start_str_off + off;
3519 	return 0;
3520 }
3521 
3522 /*
3523  * Dedup string and filter out those that are not referenced from either .BTF
3524  * or .BTF.ext (if provided) sections.
3525  *
3526  * This is done by building index of all strings in BTF's string section,
3527  * then iterating over all entities that can reference strings (e.g., type
3528  * names, struct field names, .BTF.ext line info, etc) and marking corresponding
3529  * strings as used. After that all used strings are deduped and compacted into
3530  * sequential blob of memory and new offsets are calculated. Then all the string
3531  * references are iterated again and rewritten using new offsets.
3532  */
3533 static int btf_dedup_strings(struct btf_dedup *d)
3534 {
3535 	int err;
3536 
3537 	if (d->btf->strs_deduped)
3538 		return 0;
3539 
3540 	d->strs_set = strset__new(BTF_MAX_STR_OFFSET, NULL, 0);
3541 	if (IS_ERR(d->strs_set)) {
3542 		err = PTR_ERR(d->strs_set);
3543 		goto err_out;
3544 	}
3545 
3546 	if (!d->btf->base_btf) {
3547 		/* insert empty string; we won't be looking it up during strings
3548 		 * dedup, but it's good to have it for generic BTF string lookups
3549 		 */
3550 		err = strset__add_str(d->strs_set, "");
3551 		if (err < 0)
3552 			goto err_out;
3553 	}
3554 
3555 	/* remap string offsets */
3556 	err = btf_for_each_str_off(d, strs_dedup_remap_str_off, d);
3557 	if (err)
3558 		goto err_out;
3559 
3560 	/* replace BTF string data and hash with deduped ones */
3561 	strset__free(d->btf->strs_set);
3562 	d->btf->hdr->str_len = strset__data_size(d->strs_set);
3563 	d->btf->strs_set = d->strs_set;
3564 	d->strs_set = NULL;
3565 	d->btf->strs_deduped = true;
3566 	return 0;
3567 
3568 err_out:
3569 	strset__free(d->strs_set);
3570 	d->strs_set = NULL;
3571 
3572 	return err;
3573 }
3574 
/* Hash the common btf_type header fields: name, info, and size/type word */
static long btf_hash_common(struct btf_type *t)
{
	long h = hash_combine(0, t->name_off);

	h = hash_combine(h, t->info);
	return hash_combine(h, t->size);
}
3584 
/* Compare the common btf_type header fields for exact equality */
static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
{
	if (t1->name_off != t2->name_off)
		return false;
	if (t1->info != t2->info)
		return false;
	return t1->size == t2->size;
}
3591 
/* Calculate type signature hash of INT or DECL_TAG. */
static long btf_hash_int_decl_tag(struct btf_type *t)
{
	/* both INT and DECL_TAG carry one extra __u32 right after the header */
	__u32 extra = *(__u32 *)(t + 1);

	return hash_combine(btf_hash_common(t), extra);
}
3602 
/* Check structural equality of two INTs or DECL_TAGs. */
static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2)
{
	if (!btf_equal_common(t1, t2))
		return false;
	/* compare trailing __u32 payload (int encoding or tag component index) */
	return *(__u32 *)(t1 + 1) == *(__u32 *)(t2 + 1);
}
3614 
/* Calculate type signature hash of ENUM/ENUM64. */
static long btf_hash_enum(struct btf_type *t)
{
	/* don't hash vlen, enum members and size to support enum fwd resolving */
	return hash_combine(0, t->name_off);
}
3624 
/* Compare member lists of two ENUMs (names and 32-bit values) */
static bool btf_equal_enum_members(struct btf_type *t1, struct btf_type *t2)
{
	const struct btf_enum *m1 = btf_enum(t1);
	const struct btf_enum *m2 = btf_enum(t2);
	__u16 n = btf_vlen(t1);
	int i;

	for (i = 0; i < n; i++) {
		if (m1[i].name_off != m2[i].name_off)
			return false;
		if (m1[i].val != m2[i].val)
			return false;
	}
	return true;
}
3642 
3643 static bool btf_equal_enum64_members(struct btf_type *t1, struct btf_type *t2)
3644 {
3645 	const struct btf_enum64 *m1, *m2;
3646 	__u16 vlen;
3647 	int i;
3648 
3649 	vlen = btf_vlen(t1);
3650 	m1 = btf_enum64(t1);
3651 	m2 = btf_enum64(t2);
3652 	for (i = 0; i < vlen; i++) {
3653 		if (m1->name_off != m2->name_off || m1->val_lo32 != m2->val_lo32 ||
3654 		    m1->val_hi32 != m2->val_hi32)
3655 			return false;
3656 		m1++;
3657 		m2++;
3658 	}
3659 	return true;
3660 }
3661 
/* Check structural equality of two ENUMs or ENUM64s. */
static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
{
	if (!btf_equal_common(t1, t2))
		return false;

	/* t1 & t2 kinds are identical because of btf_equal_common */
	return btf_kind(t1) == BTF_KIND_ENUM ? btf_equal_enum_members(t1, t2)
					     : btf_equal_enum64_members(t1, t2);
}
3674 
/* Enum forward declarations are (ENUM|ENUM64) types with zero members */
static inline bool btf_is_enum_fwd(struct btf_type *t)
{
	if (!btf_is_any_enum(t))
		return false;
	return btf_vlen(t) == 0;
}
3679 
/* Compatibility check allowing enum fwd declarations to match full enums */
static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
{
	bool fwd1 = btf_is_enum_fwd(t1);
	bool fwd2 = btf_is_enum_fwd(t2);

	if (!fwd1 && !fwd2)
		return btf_equal_enum(t1, t2);
	/* At this point either t1 or t2 or both are forward declarations, thus:
	 * - skip comparing vlen because it is zero for forward declarations;
	 * - skip comparing size to allow enum forward declarations
	 *   to be compatible with enum64 full declarations;
	 * - skip comparing kind for the same reason.
	 */
	return btf_is_any_enum(t1) && btf_is_any_enum(t2) &&
	       t1->name_off == t2->name_off;
}
3693 
3694 /*
3695  * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
3696  * as referenced type IDs equivalence is established separately during type
3697  * graph equivalence check algorithm.
3698  */
3699 static long btf_hash_struct(struct btf_type *t)
3700 {
3701 	const struct btf_member *member = btf_members(t);
3702 	__u32 vlen = btf_vlen(t);
3703 	long h = btf_hash_common(t);
3704 	int i;
3705 
3706 	for (i = 0; i < vlen; i++) {
3707 		h = hash_combine(h, member->name_off);
3708 		h = hash_combine(h, member->offset);
3709 		/* no hashing of referenced type ID, it can be unresolved yet */
3710 		member++;
3711 	}
3712 	return h;
3713 }
3714 
3715 /*
3716  * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced
3717  * type IDs. This check is performed during type graph equivalence check and
3718  * referenced types equivalence is checked separately.
3719  */
3720 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
3721 {
3722 	const struct btf_member *m1, *m2;
3723 	__u16 vlen;
3724 	int i;
3725 
3726 	if (!btf_equal_common(t1, t2))
3727 		return false;
3728 
3729 	vlen = btf_vlen(t1);
3730 	m1 = btf_members(t1);
3731 	m2 = btf_members(t2);
3732 	for (i = 0; i < vlen; i++) {
3733 		if (m1->name_off != m2->name_off || m1->offset != m2->offset)
3734 			return false;
3735 		m1++;
3736 		m2++;
3737 	}
3738 	return true;
3739 }
3740 
3741 /*
3742  * Calculate type signature hash of ARRAY, including referenced type IDs,
3743  * under assumption that they were already resolved to canonical type IDs and
3744  * are not going to change.
3745  */
3746 static long btf_hash_array(struct btf_type *t)
3747 {
3748 	const struct btf_array *info = btf_array(t);
3749 	long h = btf_hash_common(t);
3750 
3751 	h = hash_combine(h, info->type);
3752 	h = hash_combine(h, info->index_type);
3753 	h = hash_combine(h, info->nelems);
3754 	return h;
3755 }
3756 
3757 /*
3758  * Check exact equality of two ARRAYs, taking into account referenced
3759  * type IDs, under assumption that they were already resolved to canonical
3760  * type IDs and are not going to change.
3761  * This function is called during reference types deduplication to compare
3762  * ARRAY to potential canonical representative.
3763  */
3764 static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
3765 {
3766 	const struct btf_array *info1, *info2;
3767 
3768 	if (!btf_equal_common(t1, t2))
3769 		return false;
3770 
3771 	info1 = btf_array(t1);
3772 	info2 = btf_array(t2);
3773 	return info1->type == info2->type &&
3774 	       info1->index_type == info2->index_type &&
3775 	       info1->nelems == info2->nelems;
3776 }
3777 
3778 /*
3779  * Check structural compatibility of two ARRAYs, ignoring referenced type
3780  * IDs. This check is performed during type graph equivalence check and
3781  * referenced types equivalence is checked separately.
3782  */
3783 static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
3784 {
3785 	if (!btf_equal_common(t1, t2))
3786 		return false;
3787 
3788 	return btf_array(t1)->nelems == btf_array(t2)->nelems;
3789 }
3790 
3791 /*
3792  * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
3793  * under assumption that they were already resolved to canonical type IDs and
3794  * are not going to change.
3795  */
3796 static long btf_hash_fnproto(struct btf_type *t)
3797 {
3798 	const struct btf_param *member = btf_params(t);
3799 	__u16 vlen = btf_vlen(t);
3800 	long h = btf_hash_common(t);
3801 	int i;
3802 
3803 	for (i = 0; i < vlen; i++) {
3804 		h = hash_combine(h, member->name_off);
3805 		h = hash_combine(h, member->type);
3806 		member++;
3807 	}
3808 	return h;
3809 }
3810 
3811 /*
3812  * Check exact equality of two FUNC_PROTOs, taking into account referenced
3813  * type IDs, under assumption that they were already resolved to canonical
3814  * type IDs and are not going to change.
3815  * This function is called during reference types deduplication to compare
3816  * FUNC_PROTO to potential canonical representative.
3817  */
3818 static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
3819 {
3820 	const struct btf_param *m1, *m2;
3821 	__u16 vlen;
3822 	int i;
3823 
3824 	if (!btf_equal_common(t1, t2))
3825 		return false;
3826 
3827 	vlen = btf_vlen(t1);
3828 	m1 = btf_params(t1);
3829 	m2 = btf_params(t2);
3830 	for (i = 0; i < vlen; i++) {
3831 		if (m1->name_off != m2->name_off || m1->type != m2->type)
3832 			return false;
3833 		m1++;
3834 		m2++;
3835 	}
3836 	return true;
3837 }
3838 
3839 /*
3840  * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
3841  * IDs. This check is performed during type graph equivalence check and
3842  * referenced types equivalence is checked separately.
3843  */
3844 static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
3845 {
3846 	const struct btf_param *m1, *m2;
3847 	__u16 vlen;
3848 	int i;
3849 
3850 	/* skip return type ID */
3851 	if (t1->name_off != t2->name_off || t1->info != t2->info)
3852 		return false;
3853 
3854 	vlen = btf_vlen(t1);
3855 	m1 = btf_params(t1);
3856 	m2 = btf_params(t2);
3857 	for (i = 0; i < vlen; i++) {
3858 		if (m1->name_off != m2->name_off)
3859 			return false;
3860 		m1++;
3861 		m2++;
3862 	}
3863 	return true;
3864 }
3865 
3866 /* Prepare split BTF for deduplication by calculating hashes of base BTF's
3867  * types and initializing the rest of the state (canonical type mapping) for
3868  * the fixed base BTF part.
3869  */
static int btf_dedup_prep(struct btf_dedup *d)
{
	struct btf_type *t;
	int type_id;
	long h;

	/* only split BTF needs prep; plain BTF has no fixed base part */
	if (!d->btf->base_btf)
		return 0;

	for (type_id = 1; type_id < d->btf->start_id; type_id++) {
		t = btf_type_by_id(d->btf, type_id);

		/* all base BTF types are self-canonical by definition */
		d->map[type_id] = type_id;

		switch (btf_kind(t)) {
		case BTF_KIND_VAR:
		case BTF_KIND_DATASEC:
			/* VAR and DATASEC are never hash/deduplicated */
			continue;
		case BTF_KIND_CONST:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_PTR:
		case BTF_KIND_FWD:
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_FUNC:
		case BTF_KIND_FLOAT:
		case BTF_KIND_TYPE_TAG:
			/* kinds fully described by common header fields */
			h = btf_hash_common(t);
			break;
		case BTF_KIND_INT:
		case BTF_KIND_DECL_TAG:
			/* kinds with one extra __u32 after the header */
			h = btf_hash_int_decl_tag(t);
			break;
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
			h = btf_hash_enum(t);
			break;
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			h = btf_hash_struct(t);
			break;
		case BTF_KIND_ARRAY:
			h = btf_hash_array(t);
			break;
		case BTF_KIND_FUNC_PROTO:
			h = btf_hash_fnproto(t);
			break;
		default:
			pr_debug("unknown kind %d for type [%d]\n", btf_kind(t), type_id);
			return -EINVAL;
		}
		/* make base BTF types discoverable as canonical candidates
		 * for types being deduplicated in the split BTF part
		 */
		if (btf_dedup_table_add(d, h, type_id))
			return -ENOMEM;
	}

	return 0;
}
3929 
3930 /*
3931  * Deduplicate primitive types, that can't reference other types, by calculating
3932  * their type signature hash and comparing them with any possible canonical
3933  * candidate. If no canonical candidate matches, type itself is marked as
3934  * canonical and is added into `btf_dedup->dedup_table` as another candidate.
3935  */
static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
{
	struct btf_type *t = btf_type_by_id(d->btf, type_id);
	struct hashmap_entry *hash_entry;
	struct btf_type *cand;
	/* if we don't find equivalent type, then we are canonical */
	__u32 new_id = type_id;
	__u32 cand_id;
	long h;

	switch (btf_kind(t)) {
	/* non-primitive kinds are handled by later dedup passes; VAR/DATASEC
	 * were already marked self-canonical in btf_dedup_new()
	 */
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_ARRAY:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_FUNC:
	case BTF_KIND_FUNC_PROTO:
	case BTF_KIND_VAR:
	case BTF_KIND_DATASEC:
	case BTF_KIND_DECL_TAG:
	case BTF_KIND_TYPE_TAG:
		return 0;

	case BTF_KIND_INT:
		h = btf_hash_int_decl_tag(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_int_tag(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		h = btf_hash_enum(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_enum(t, cand)) {
				new_id = cand_id;
				break;
			}
			/* not identical, but possibly a fwd <-> full enum pair */
			if (btf_compat_enum(t, cand)) {
				if (btf_is_enum_fwd(t)) {
					/* resolve fwd to full enum */
					new_id = cand_id;
					break;
				}
				/* resolve canonical enum fwd to full enum */
				d->map[cand_id] = type_id;
			}
		}
		break;

	case BTF_KIND_FWD:
	case BTF_KIND_FLOAT:
		h = btf_hash_common(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_common(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	default:
		return -EINVAL;
	}

	d->map[type_id] = new_id;
	/* no match found: register this type as a new canonical candidate */
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return 0;
}
4020 
4021 static int btf_dedup_prim_types(struct btf_dedup *d)
4022 {
4023 	int i, err;
4024 
4025 	for (i = 0; i < d->btf->nr_types; i++) {
4026 		err = btf_dedup_prim_type(d, d->btf->start_id + i);
4027 		if (err)
4028 			return err;
4029 	}
4030 	return 0;
4031 }
4032 
4033 /*
4034  * Check whether type is already mapped into canonical one (could be to itself).
4035  */
4036 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
4037 {
4038 	return d->map[type_id] <= BTF_MAX_NR_TYPES;
4039 }
4040 
4041 /*
4042  * Resolve type ID into its canonical type ID, if any; otherwise return original
4043  * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
4044  * STRUCT/UNION link and resolve it into canonical type ID as well.
4045  */
4046 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
4047 {
4048 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
4049 		type_id = d->map[type_id];
4050 	return type_id;
4051 }
4052 
4053 /*
4054  * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
4055  * type ID.
4056  */
4057 static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
4058 {
4059 	__u32 orig_type_id = type_id;
4060 
4061 	if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
4062 		return type_id;
4063 
4064 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
4065 		type_id = d->map[type_id];
4066 
4067 	if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
4068 		return type_id;
4069 
4070 	return orig_type_id;
4071 }
4072 
4073 
/* Kind a FWD declares: kflag set means union fwd, clear means struct fwd */
static inline __u16 btf_fwd_kind(struct btf_type *t)
{
	if (btf_kflag(t))
		return BTF_KIND_UNION;
	return BTF_KIND_STRUCT;
}
4078 
4079 /* Check if given two types are identical ARRAY definitions */
4080 static bool btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
4081 {
4082 	struct btf_type *t1, *t2;
4083 
4084 	t1 = btf_type_by_id(d->btf, id1);
4085 	t2 = btf_type_by_id(d->btf, id2);
4086 	if (!btf_is_array(t1) || !btf_is_array(t2))
4087 		return false;
4088 
4089 	return btf_equal_array(t1, t2);
4090 }
4091 
4092 /* Check if given two types are identical STRUCT/UNION definitions */
4093 static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id2)
4094 {
4095 	const struct btf_member *m1, *m2;
4096 	struct btf_type *t1, *t2;
4097 	int n, i;
4098 
4099 	t1 = btf_type_by_id(d->btf, id1);
4100 	t2 = btf_type_by_id(d->btf, id2);
4101 
4102 	if (!btf_is_composite(t1) || btf_kind(t1) != btf_kind(t2))
4103 		return false;
4104 
4105 	if (!btf_shallow_equal_struct(t1, t2))
4106 		return false;
4107 
4108 	m1 = btf_members(t1);
4109 	m2 = btf_members(t2);
4110 	for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
4111 		if (m1->type != m2->type &&
4112 		    !btf_dedup_identical_arrays(d, m1->type, m2->type) &&
4113 		    !btf_dedup_identical_structs(d, m1->type, m2->type))
4114 			return false;
4115 	}
4116 	return true;
4117 }
4118 
4119 /*
4120  * Check equivalence of BTF type graph formed by candidate struct/union (we'll
4121  * call it "candidate graph" in this description for brevity) to a type graph
4122  * formed by (potential) canonical struct/union ("canonical graph" for brevity
4123  * here, though keep in mind that not all types in canonical graph are
4124  * necessarily canonical representatives themselves, some of them might be
4125  * duplicates or its uniqueness might not have been established yet).
4126  * Returns:
4127  *  - >0, if type graphs are equivalent;
4128  *  -  0, if not equivalent;
4129  *  - <0, on error.
4130  *
4131  * Algorithm performs side-by-side DFS traversal of both type graphs and checks
4132  * equivalence of BTF types at each step. If at any point BTF types in candidate
4133  * and canonical graphs are not compatible structurally, whole graphs are
4134  * incompatible. If types are structurally equivalent (i.e., all information
4135  * except referenced type IDs is exactly the same), a mapping from `canon_id` to
 * a `cand_id` is recorded in hypothetical mapping (`btf_dedup->hypot_map`).
4137  * If a type references other types, then those referenced types are checked
4138  * for equivalence recursively.
4139  *
4140  * During DFS traversal, if we find that for current `canon_id` type we
4141  * already have some mapping in hypothetical map, we check for two possible
4142  * situations:
4143  *   - `canon_id` is mapped to exactly the same type as `cand_id`. This will
4144  *     happen when type graphs have cycles. In this case we assume those two
4145  *     types are equivalent.
4146  *   - `canon_id` is mapped to different type. This is contradiction in our
4147  *     hypothetical mapping, because same graph in canonical graph corresponds
4148  *     to two different types in candidate graph, which for equivalent type
4149  *     graphs shouldn't happen. This condition terminates equivalence check
4150  *     with negative result.
4151  *
4152  * If type graphs traversal exhausts types to check and find no contradiction,
4153  * then type graphs are equivalent.
4154  *
4155  * When checking types for equivalence, there is one special case: FWD types.
4156  * If FWD type resolution is allowed and one of the types (either from canonical
4157  * or candidate graph) is FWD and other is STRUCT/UNION (depending on FWD's kind
4158  * flag) and their names match, hypothetical mapping is updated to point from
4159  * FWD to STRUCT/UNION. If graphs will be determined as equivalent successfully,
4160  * this mapping will be used to record FWD -> STRUCT/UNION mapping permanently.
4161  *
4162  * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
4163  * if there are two exactly named (or anonymous) structs/unions that are
4164  * compatible structurally, one of which has FWD field, while other is concrete
4165  * STRUCT/UNION, but according to C sources they are different structs/unions
4166  * that are referencing different types with the same name. This is extremely
4167  * unlikely to happen, but btf_dedup API allows to disable FWD resolution if
4168  * this logic is causing problems.
4169  *
4170  * Doing FWD resolution means that both candidate and/or canonical graphs can
 * consist of portions of the graph that come from multiple compilation units.
4172  * This is due to the fact that types within single compilation unit are always
4173  * deduplicated and FWDs are already resolved, if referenced struct/union
 * definition is available. So, if we had unresolved FWD and found corresponding
4175  * STRUCT/UNION, they will be from different compilation units. This
4176  * consequently means that when we "link" FWD to corresponding STRUCT/UNION,
4177  * type graph will likely have at least two different BTF types that describe
4178  * same type (e.g., most probably there will be two different BTF types for the
4179  * same 'int' primitive type) and could even have "overlapping" parts of type
4180  * graph that describe same subset of types.
4181  *
4182  * This in turn means that our assumption that each type in canonical graph
4183  * must correspond to exactly one type in candidate graph might not hold
4184  * anymore and will make it harder to detect contradictions using hypothetical
4185  * map. To handle this problem, we allow to follow FWD -> STRUCT/UNION
4186  * resolution only in canonical graph. FWDs in candidate graphs are never
4187  * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs
4188  * that can occur:
4189  *   - Both types in canonical and candidate graphs are FWDs. If they are
4190  *     structurally equivalent, then they can either be both resolved to the
4191  *     same STRUCT/UNION or not resolved at all. In both cases they are
4192  *     equivalent and there is no need to resolve FWD on candidate side.
4193  *   - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
4194  *     so nothing to resolve as well, algorithm will check equivalence anyway.
4195  *   - Type in canonical graph is FWD, while type in candidate is concrete
4196  *     STRUCT/UNION. In this case candidate graph comes from single compilation
4197  *     unit, so there is exactly one BTF type for each unique C type. After
4198  *     resolving FWD into STRUCT/UNION, there might be more than one BTF type
4199  *     in canonical graph mapping to single BTF type in candidate graph, but
4200  *     because hypothetical mapping maps from canonical to candidate types, it's
4201  *     alright, and we still maintain the property of having single `canon_id`
4202  *     mapping to single `cand_id` (there could be two different `canon_id`
4203  *     mapped to the same `cand_id`, but it's not contradictory).
4204  *   - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
4205  *     graph is FWD. In this case we are just going to check compatibility of
4206  *     STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
4207  *     assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
4208  *     a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
4209  *     turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
4210  *     canonical graph.
4211  */
/* Check type graph equivalence of candidate type (cand_id) against canonical
 * type (canon_id), per the algorithm described in the big comment above.
 * Returns 1 if graphs are equivalent, 0 if not, negative error code on error.
 * As a side effect, records hypothetical canon_id -> cand_id mappings in
 * d->hypot_map and may set d->hypot_adjust_canon (split BTF case).
 */
static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
			      __u32 canon_id)
{
	struct btf_type *cand_type;
	struct btf_type *canon_type;
	__u32 hypot_type_id;
	__u16 cand_kind;
	__u16 canon_kind;
	int i, eq;

	/* if both resolve to the same canonical, they must be equivalent */
	if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
		return 1;

	/* follow FWD -> STRUCT/UNION resolution, allowed on canonical side only */
	canon_id = resolve_fwd_id(d, canon_id);

	hypot_type_id = d->hypot_map[canon_id];
	if (hypot_type_id <= BTF_MAX_NR_TYPES) {
		/* canon_id was already hypothetically mapped during this
		 * equivalence check; it must agree with cand_id, otherwise
		 * the two graphs contradict each other
		 */
		if (hypot_type_id == cand_id)
			return 1;
		/* In some cases compiler will generate different DWARF types
		 * for *identical* array type definitions and use them for
		 * different fields within the *same* struct. This breaks type
		 * equivalence check, which makes an assumption that candidate
		 * types sub-graph has a consistent and deduped-by-compiler
		 * types within a single CU. So work around that by explicitly
		 * allowing identical array types here.
		 */
		if (btf_dedup_identical_arrays(d, hypot_type_id, cand_id))
			return 1;
		/* It turns out that similar situation can happen with
		 * struct/union sometimes, sigh... Handle the case where
		 * structs/unions are exactly the same, down to the referenced
		 * type IDs. Anything more complicated (e.g., if referenced
		 * types are different, but equivalent) is *way more*
		 * complicated and requires a many-to-many equivalence mapping.
		 */
		if (btf_dedup_identical_structs(d, hypot_type_id, cand_id))
			return 1;
		return 0;
	}

	/* first time we see canon_id in this check: record hypothesis */
	if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
		return -ENOMEM;

	cand_type = btf_type_by_id(d->btf, cand_id);
	canon_type = btf_type_by_id(d->btf, canon_id);
	cand_kind = btf_kind(cand_type);
	canon_kind = btf_kind(canon_type);

	/* names must match exactly (both can be anonymous, i.e., off 0) */
	if (cand_type->name_off != canon_type->name_off)
		return 0;

	/* FWD <--> STRUCT/UNION equivalence check, if enabled */
	if ((cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
	    && cand_kind != canon_kind) {
		__u16 real_kind;
		__u16 fwd_kind;

		if (cand_kind == BTF_KIND_FWD) {
			real_kind = canon_kind;
			fwd_kind = btf_fwd_kind(cand_type);
		} else {
			real_kind = cand_kind;
			fwd_kind = btf_fwd_kind(canon_type);
			/* we'd need to resolve base FWD to STRUCT/UNION */
			if (fwd_kind == real_kind && canon_id < d->btf->start_id)
				d->hypot_adjust_canon = true;
		}
		/* FWD is compatible iff its kind (struct vs union) matches */
		return fwd_kind == real_kind;
	}

	if (cand_kind != canon_kind)
		return 0;

	switch (cand_kind) {
	case BTF_KIND_INT:
		return btf_equal_int_tag(cand_type, canon_type);

	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		return btf_compat_enum(cand_type, canon_type);

	case BTF_KIND_FWD:
	case BTF_KIND_FLOAT:
		return btf_equal_common(cand_type, canon_type);

	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_TYPE_TAG:
		/* reference types: compare info, then recurse into referenced type */
		if (cand_type->info != canon_type->info)
			return 0;
		return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);

	case BTF_KIND_ARRAY: {
		const struct btf_array *cand_arr, *canon_arr;

		if (!btf_compat_array(cand_type, canon_type))
			return 0;
		cand_arr = btf_array(cand_type);
		canon_arr = btf_array(canon_type);
		/* recurse into both index and element types */
		eq = btf_dedup_is_equiv(d, cand_arr->index_type, canon_arr->index_type);
		if (eq <= 0)
			return eq;
		return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
	}

	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *cand_m, *canon_m;
		__u16 vlen;

		if (!btf_shallow_equal_struct(cand_type, canon_type))
			return 0;
		/* recurse into every member's type */
		vlen = btf_vlen(cand_type);
		cand_m = btf_members(cand_type);
		canon_m = btf_members(canon_type);
		for (i = 0; i < vlen; i++) {
			eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
			if (eq <= 0)
				return eq;
			cand_m++;
			canon_m++;
		}

		return 1;
	}

	case BTF_KIND_FUNC_PROTO: {
		const struct btf_param *cand_p, *canon_p;
		__u16 vlen;

		if (!btf_compat_fnproto(cand_type, canon_type))
			return 0;
		/* recurse into return type, then into every parameter's type */
		eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
		if (eq <= 0)
			return eq;
		vlen = btf_vlen(cand_type);
		cand_p = btf_params(cand_type);
		canon_p = btf_params(canon_type);
		for (i = 0; i < vlen; i++) {
			eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
			if (eq <= 0)
				return eq;
			cand_p++;
			canon_p++;
		}
		return 1;
	}

	default:
		return -EINVAL;
	}
	return 0;
}
4371 
4372 /*
4373  * Use hypothetical mapping, produced by successful type graph equivalence
4374  * check, to augment existing struct/union canonical mapping, where possible.
4375  *
4376  * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
4377  * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional:
4378  * it doesn't matter if FWD type was part of canonical graph or candidate one,
4379  * we are recording the mapping anyway. As opposed to carefulness required
4380  * for struct/union correspondence mapping (described below), for FWD resolution
4381  * it's not important, as by the time that FWD type (reference type) will be
4382  * deduplicated all structs/unions will be deduped already anyway.
4383  *
4384  * Recording STRUCT/UNION mapping is purely a performance optimization and is
4385  * not required for correctness. It needs to be done carefully to ensure that
4386  * struct/union from candidate's type graph is not mapped into corresponding
4387  * struct/union from canonical type graph that itself hasn't been resolved into
4388  * canonical representative. The only guarantee we have is that canonical
4389  * struct/union was determined as canonical and that won't change. But any
4390  * types referenced through that struct/union fields could have been not yet
4391  * resolved, so in case like that it's too early to establish any kind of
4392  * correspondence between structs/unions.
4393  *
 * No canonical correspondence is derived for primitive types (they are
 * already fully deduplicated anyway) or reference types (they rely on
4396  * stability of struct/union canonical relationship for equivalence checks).
4397  */
/* Merge d->hypot_map (built by a successful btf_dedup_is_equiv() run) into
 * the permanent d->map: resolve FWDs and opportunistically map structs/unions
 * to their canonical representatives, per the big comment above.
 */
static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
{
	__u32 canon_type_id, targ_type_id;
	__u16 t_kind, c_kind;
	__u32 t_id, c_id;
	int i;

	for (i = 0; i < d->hypot_cnt; i++) {
		canon_type_id = d->hypot_list[i];
		targ_type_id = d->hypot_map[canon_type_id];
		t_id = resolve_type_id(d, targ_type_id);
		c_id = resolve_type_id(d, canon_type_id);
		t_kind = btf_kind(btf__type_by_id(d->btf, t_id));
		c_kind = btf_kind(btf__type_by_id(d->btf, c_id));
		/*
		 * Resolve FWD into STRUCT/UNION.
		 * It's ok to resolve FWD into STRUCT/UNION that's not yet
		 * mapped to canonical representative (as opposed to
		 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
		 * eventually that struct is going to be mapped and all resolved
		 * FWDs will automatically resolve to correct canonical
		 * representative. This will happen before ref type deduping,
		 * which critically depends on stability of these mappings. This
		 * stability is not a requirement for STRUCT/UNION equivalence
		 * checks, though.
		 */

		/* if it's the split BTF case, we still need to point base FWD
		 * to STRUCT/UNION in a split BTF, because FWDs from split BTF
		 * will be resolved against base FWD. If we don't point base
		 * canonical FWD to the resolved STRUCT/UNION, then all the
		 * FWDs in split BTF won't be correctly resolved to a proper
		 * STRUCT/UNION.
		 */
		if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
			d->map[c_id] = t_id;

		/* if graph equivalence determined that we'd need to adjust
		 * base canonical types, then we need to only point base FWDs
		 * to STRUCTs/UNIONs and do no more modifications. For all
		 * other purposes the type graphs were not equivalent.
		 */
		if (d->hypot_adjust_canon)
			continue;

		/* candidate-side FWD resolves to canonical STRUCT/UNION */
		if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
			d->map[t_id] = c_id;

		if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
		    c_kind != BTF_KIND_FWD &&
		    is_type_mapped(d, c_id) &&
		    !is_type_mapped(d, t_id)) {
			/*
			 * as a perf optimization, we can map struct/union
			 * that's part of type graph we just verified for
			 * equivalence. We can do that for struct/union that has
			 * canonical representative only, though.
			 */
			d->map[t_id] = c_id;
		}
	}
}
4460 
4461 /*
4462  * Deduplicate struct/union types.
4463  *
4464  * For each struct/union type its type signature hash is calculated, taking
4465  * into account type's name, size, number, order and names of fields, but
4466  * ignoring type ID's referenced from fields, because they might not be deduped
4467  * completely until after reference types deduplication phase. This type hash
4468  * is used to iterate over all potential canonical types, sharing same hash.
4469  * For each canonical candidate we check whether type graphs that they form
4470  * (through referenced types in fields and so on) are equivalent using algorithm
4471  * implemented in `btf_dedup_is_equiv`. If such equivalence is found and
4472  * BTF_KIND_FWD resolution is allowed, then hypothetical mapping
4473  * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence
4474  * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to
4475  * potentially map other structs/unions to their canonical representatives,
4476  * if such relationship hasn't yet been established. This speeds up algorithm
4477  * by eliminating some of the duplicate work.
4478  *
4479  * If no matching canonical representative was found, struct/union is marked
4480  * as canonical for itself and is added into btf_dedup->dedup_table hash map
4481  * for further look ups.
4482  */
/* Deduplicate a single struct/union type identified by type_id, per the
 * algorithm described in the big comment above. Non-struct/union types are
 * ignored. Returns 0 on success, negative error code on error.
 */
static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
{
	struct btf_type *cand_type, *t;
	struct hashmap_entry *hash_entry;
	/* if we don't find equivalent type, then we are canonical */
	__u32 new_id = type_id;
	__u16 kind;
	long h;

	/* already deduped or is in process of deduping (loop detected) */
	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
		return 0;

	t = btf_type_by_id(d->btf, type_id);
	kind = btf_kind(t);

	if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
		return 0;

	/* iterate over all candidates sharing the same shallow-struct hash */
	h = btf_hash_struct(t);
	for_each_dedup_cand(d, hash_entry, h) {
		__u32 cand_id = hash_entry->value;
		int eq;

		/*
		 * Even though btf_dedup_is_equiv() checks for
		 * btf_shallow_equal_struct() internally when checking two
		 * structs (unions) for equivalence, we need to guard here
		 * from picking matching FWD type as a dedup candidate.
		 * This can happen due to hash collision. In such case just
		 * relying on btf_dedup_is_equiv() would lead to potentially
		 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
		 * FWD and compatible STRUCT/UNION are considered equivalent.
		 */
		cand_type = btf_type_by_id(d->btf, cand_id);
		if (!btf_shallow_equal_struct(t, cand_type))
			continue;

		/* run full type graph equivalence check against candidate */
		btf_dedup_clear_hypot_map(d);
		eq = btf_dedup_is_equiv(d, type_id, cand_id);
		if (eq < 0)
			return eq;
		if (!eq)
			continue;
		/* commit hypothetical FWD/struct mappings from the check */
		btf_dedup_merge_hypot_map(d);
		if (d->hypot_adjust_canon) /* not really equivalent */
			continue;
		new_id = cand_id;
		break;
	}

	d->map[type_id] = new_id;
	/* register self as a new canonical candidate for this hash */
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return 0;
}
4540 
4541 static int btf_dedup_struct_types(struct btf_dedup *d)
4542 {
4543 	int i, err;
4544 
4545 	for (i = 0; i < d->btf->nr_types; i++) {
4546 		err = btf_dedup_struct_type(d, d->btf->start_id + i);
4547 		if (err)
4548 			return err;
4549 	}
4550 	return 0;
4551 }
4552 
4553 /*
4554  * Deduplicate reference type.
4555  *
4556  * Once all primitive and struct/union types got deduplicated, we can easily
4557  * deduplicate all other (reference) BTF types. This is done in two steps:
4558  *
4559  * 1. Resolve all referenced type IDs into their canonical type IDs. This
4560  * resolution can be done either immediately for primitive or struct/union types
4561  * (because they were deduped in previous two phases) or recursively for
4562  * reference types. Recursion will always terminate at either primitive or
4563  * struct/union type, at which point we can "unwind" chain of reference types
4564  * one by one. There is no danger of encountering cycles because in C type
4565  * system the only way to form type cycle is through struct/union, so any chain
4566  * of reference types, even those taking part in a type cycle, will inevitably
4567  * reach struct/union at some point.
4568  *
4569  * 2. Once all referenced type IDs are resolved into canonical ones, BTF type
4570  * becomes "stable", in the sense that no further deduplication will cause
4571  * any changes to it. With that, it's now possible to calculate type's signature
4572  * hash (this time taking into account referenced type IDs) and loop over all
4573  * potential canonical representatives. If no match was found, current type
4574  * will become canonical representative of itself and will be added into
4575  * btf_dedup->dedup_table as another possible canonical representative.
4576  */
/* Deduplicate a single reference type identified by type_id, per the two-step
 * algorithm described in the big comment above: first canonicalize all
 * referenced type IDs (recursively), then look up an identical canonical
 * candidate by hash. Returns the deduped type ID (>0) on success, -ELOOP if
 * a reference-type cycle is detected, or another negative error code.
 */
static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
{
	struct hashmap_entry *hash_entry;
	/* if we don't find equivalent type, then we are representative type */
	__u32 new_id = type_id, cand_id;
	struct btf_type *t, *cand;
	int ref_type_id;
	long h;

	/* a cycle through reference types only is malformed BTF */
	if (d->map[type_id] == BTF_IN_PROGRESS_ID)
		return -ELOOP;
	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
		return resolve_type_id(d, type_id);

	t = btf_type_by_id(d->btf, type_id);
	/* mark as in-progress to detect cycles during recursion */
	d->map[type_id] = BTF_IN_PROGRESS_ID;

	switch (btf_kind(t)) {
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_TYPE_TAG:
		/* canonicalize referenced type first */
		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		h = btf_hash_common(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_common(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	case BTF_KIND_DECL_TAG:
		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		h = btf_hash_int_decl_tag(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_int_tag(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	case BTF_KIND_ARRAY: {
		struct btf_array *info = btf_array(t);

		/* canonicalize both element and index types */
		ref_type_id = btf_dedup_ref_type(d, info->type);
		if (ref_type_id < 0)
			return ref_type_id;
		info->type = ref_type_id;

		ref_type_id = btf_dedup_ref_type(d, info->index_type);
		if (ref_type_id < 0)
			return ref_type_id;
		info->index_type = ref_type_id;

		h = btf_hash_array(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_array(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;
	}

	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *param;
		__u16 vlen;
		int i;

		/* canonicalize return type, then all parameter types */
		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		vlen = btf_vlen(t);
		param = btf_params(t);
		for (i = 0; i < vlen; i++) {
			ref_type_id = btf_dedup_ref_type(d, param->type);
			if (ref_type_id < 0)
				return ref_type_id;
			param->type = ref_type_id;
			param++;
		}

		h = btf_hash_fnproto(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_fnproto(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;
	}

	default:
		return -EINVAL;
	}

	d->map[type_id] = new_id;
	/* register self as a new canonical candidate for this hash */
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return new_id;
}
4702 
4703 static int btf_dedup_ref_types(struct btf_dedup *d)
4704 {
4705 	int i, err;
4706 
4707 	for (i = 0; i < d->btf->nr_types; i++) {
4708 		err = btf_dedup_ref_type(d, d->btf->start_id + i);
4709 		if (err < 0)
4710 			return err;
4711 	}
4712 	/* we won't need d->dedup_table anymore */
4713 	hashmap__free(d->dedup_table);
4714 	d->dedup_table = NULL;
4715 	return 0;
4716 }
4717 
4718 /*
4719  * Collect a map from type names to type ids for all canonical structs
4720  * and unions. If the same name is shared by several canonical types
4721  * use a special value 0 to indicate this fact.
4722  */
4723 static int btf_dedup_fill_unique_names_map(struct btf_dedup *d, struct hashmap *names_map)
4724 {
4725 	__u32 nr_types = btf__type_cnt(d->btf);
4726 	struct btf_type *t;
4727 	__u32 type_id;
4728 	__u16 kind;
4729 	int err;
4730 
4731 	/*
4732 	 * Iterate over base and split module ids in order to get all
4733 	 * available structs in the map.
4734 	 */
4735 	for (type_id = 1; type_id < nr_types; ++type_id) {
4736 		t = btf_type_by_id(d->btf, type_id);
4737 		kind = btf_kind(t);
4738 
4739 		if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
4740 			continue;
4741 
4742 		/* Skip non-canonical types */
4743 		if (type_id != d->map[type_id])
4744 			continue;
4745 
4746 		err = hashmap__add(names_map, t->name_off, type_id);
4747 		if (err == -EEXIST)
4748 			err = hashmap__set(names_map, t->name_off, 0, NULL, NULL);
4749 
4750 		if (err)
4751 			return err;
4752 	}
4753 
4754 	return 0;
4755 }
4756 
4757 static int btf_dedup_resolve_fwd(struct btf_dedup *d, struct hashmap *names_map, __u32 type_id)
4758 {
4759 	struct btf_type *t = btf_type_by_id(d->btf, type_id);
4760 	enum btf_fwd_kind fwd_kind = btf_kflag(t);
4761 	__u16 cand_kind, kind = btf_kind(t);
4762 	struct btf_type *cand_t;
4763 	uintptr_t cand_id;
4764 
4765 	if (kind != BTF_KIND_FWD)
4766 		return 0;
4767 
4768 	/* Skip if this FWD already has a mapping */
4769 	if (type_id != d->map[type_id])
4770 		return 0;
4771 
4772 	if (!hashmap__find(names_map, t->name_off, &cand_id))
4773 		return 0;
4774 
4775 	/* Zero is a special value indicating that name is not unique */
4776 	if (!cand_id)
4777 		return 0;
4778 
4779 	cand_t = btf_type_by_id(d->btf, cand_id);
4780 	cand_kind = btf_kind(cand_t);
4781 	if ((cand_kind == BTF_KIND_STRUCT && fwd_kind != BTF_FWD_STRUCT) ||
4782 	    (cand_kind == BTF_KIND_UNION && fwd_kind != BTF_FWD_UNION))
4783 		return 0;
4784 
4785 	d->map[type_id] = cand_id;
4786 
4787 	return 0;
4788 }
4789 
4790 /*
4791  * Resolve unambiguous forward declarations.
4792  *
4793  * The lion's share of all FWD declarations is resolved during
4794  * `btf_dedup_struct_types` phase when different type graphs are
4795  * compared against each other. However, if in some compilation unit a
4796  * FWD declaration is not a part of a type graph compared against
4797  * another type graph that declaration's canonical type would not be
4798  * changed. Example:
4799  *
4800  * CU #1:
4801  *
4802  * struct foo;
4803  * struct foo *some_global;
4804  *
4805  * CU #2:
4806  *
4807  * struct foo { int u; };
4808  * struct foo *another_global;
4809  *
4810  * After `btf_dedup_struct_types` the BTF looks as follows:
4811  *
4812  * [1] STRUCT 'foo' size=4 vlen=1 ...
4813  * [2] INT 'int' size=4 ...
4814  * [3] PTR '(anon)' type_id=1
4815  * [4] FWD 'foo' fwd_kind=struct
4816  * [5] PTR '(anon)' type_id=4
4817  *
 * This pass assumes that such FWD declarations should be mapped to
 * structs or unions with an identical name, provided the name is not
 * ambiguous.
4821  */
4822 static int btf_dedup_resolve_fwds(struct btf_dedup *d)
4823 {
4824 	int i, err;
4825 	struct hashmap *names_map;
4826 
4827 	names_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
4828 	if (IS_ERR(names_map))
4829 		return PTR_ERR(names_map);
4830 
4831 	err = btf_dedup_fill_unique_names_map(d, names_map);
4832 	if (err < 0)
4833 		goto exit;
4834 
4835 	for (i = 0; i < d->btf->nr_types; i++) {
4836 		err = btf_dedup_resolve_fwd(d, names_map, d->btf->start_id + i);
4837 		if (err < 0)
4838 			break;
4839 	}
4840 
4841 exit:
4842 	hashmap__free(names_map);
4843 	return err;
4844 }
4845 
4846 /*
4847  * Compact types.
4848  *
4849  * After we established for each type its corresponding canonical representative
4850  * type, we now can eliminate types that are not canonical and leave only
 * canonical ones laid out sequentially in memory by copying them over
4852  * duplicates. During compaction btf_dedup->hypot_map array is reused to store
4853  * a map from original type ID to a new compacted type ID, which will be used
4854  * during next phase to "fix up" type IDs, referenced from struct/union and
4855  * reference types.
4856  */
/* Compact canonical types to the front of the types data buffer, dropping
 * duplicates, and update BTF header/index accordingly (see comment above).
 * Reuses d->hypot_map as the old-ID -> compacted-ID remapping table.
 * Returns 0 on success, negative error code on error.
 */
static int btf_dedup_compact_types(struct btf_dedup *d)
{
	__u32 *new_offs;
	__u32 next_type_id = d->btf->start_id;
	const struct btf_type *t;
	void *p;
	int i, id, len;

	/* we are going to reuse hypot_map to store compaction remapping */
	d->hypot_map[0] = 0;
	/* base BTF types are not renumbered */
	for (id = 1; id < d->btf->start_id; id++)
		d->hypot_map[id] = id;
	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++)
		d->hypot_map[id] = BTF_UNPROCESSED_ID;

	/* write cursor: canonical types are packed to the front in place */
	p = d->btf->types_data;

	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) {
		/* only canonical (self-mapped) types are kept */
		if (d->map[id] != id)
			continue;

		t = btf__type_by_id(d->btf, id);
		len = btf_type_size(t);
		if (len < 0)
			return len;

		/* regions may overlap as we shift types left, hence memmove */
		memmove(p, t, len);
		d->hypot_map[id] = next_type_id;
		d->btf->type_offs[next_type_id - d->btf->start_id] = p - d->btf->types_data;
		p += len;
		next_type_id++;
	}

	/* shrink struct btf's internal types index and update btf_header */
	d->btf->nr_types = next_type_id - d->btf->start_id;
	d->btf->type_offs_cap = d->btf->nr_types;
	d->btf->hdr->type_len = p - d->btf->types_data;
	new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap,
				       sizeof(*new_offs));
	if (d->btf->type_offs_cap && !new_offs)
		return -ENOMEM;
	d->btf->type_offs = new_offs;
	/* strings section directly follows the (now shorter) types section */
	d->btf->hdr->str_off = d->btf->hdr->type_len;
	d->btf->raw_size = d->btf->hdr->hdr_len + d->btf->hdr->type_len + d->btf->hdr->str_len;
	return 0;
}
4904 
4905 /*
4906  * Figure out final (deduplicated and compacted) type ID for provided original
4907  * `type_id` by first resolving it into corresponding canonical type ID and
4908  * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
4909  * which is populated during compaction phase.
4910  */
4911 static int btf_dedup_remap_type_id(__u32 *type_id, void *ctx)
4912 {
4913 	struct btf_dedup *d = ctx;
4914 	__u32 resolved_type_id, new_type_id;
4915 
4916 	resolved_type_id = resolve_type_id(d, *type_id);
4917 	new_type_id = d->hypot_map[resolved_type_id];
4918 	if (new_type_id > BTF_MAX_NR_TYPES)
4919 		return -EINVAL;
4920 
4921 	*type_id = new_type_id;
4922 	return 0;
4923 }
4924 
4925 /*
4926  * Remap referenced type IDs into deduped type IDs.
4927  *
4928  * After BTF types are deduplicated and compacted, their final type IDs may
4929  * differ from original ones. The map from original to a corresponding
4930  * deduped type ID is stored in btf_dedup->hypot_map and is populated during
4931  * compaction phase. During remapping phase we are rewriting all type IDs
4932  * referenced from any BTF type (e.g., struct fields, func proto args, etc) to
4933  * their final deduped type IDs.
4934  */
4935 static int btf_dedup_remap_types(struct btf_dedup *d)
4936 {
4937 	int i, r;
4938 
4939 	for (i = 0; i < d->btf->nr_types; i++) {
4940 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
4941 		struct btf_field_iter it;
4942 		__u32 *type_id;
4943 
4944 		r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
4945 		if (r)
4946 			return r;
4947 
4948 		while ((type_id = btf_field_iter_next(&it))) {
4949 			__u32 resolved_id, new_id;
4950 
4951 			resolved_id = resolve_type_id(d, *type_id);
4952 			new_id = d->hypot_map[resolved_id];
4953 			if (new_id > BTF_MAX_NR_TYPES)
4954 				return -EINVAL;
4955 
4956 			*type_id = new_id;
4957 		}
4958 	}
4959 
4960 	if (!d->btf_ext)
4961 		return 0;
4962 
4963 	r = btf_ext_visit_type_ids(d->btf_ext, btf_dedup_remap_type_id, d);
4964 	if (r)
4965 		return r;
4966 
4967 	return 0;
4968 }
4969 
4970 /*
4971  * Probe few well-known locations for vmlinux kernel image and try to load BTF
4972  * data out of it to use for target BTF.
4973  */
4974 struct btf *btf__load_vmlinux_btf(void)
4975 {
4976 	const char *sysfs_btf_path = "/sys/kernel/btf/vmlinux";
4977 	/* fall back locations, trying to find vmlinux on disk */
4978 	const char *locations[] = {
4979 		"/boot/vmlinux-%1$s",
4980 		"/lib/modules/%1$s/vmlinux-%1$s",
4981 		"/lib/modules/%1$s/build/vmlinux",
4982 		"/usr/lib/modules/%1$s/kernel/vmlinux",
4983 		"/usr/lib/debug/boot/vmlinux-%1$s",
4984 		"/usr/lib/debug/boot/vmlinux-%1$s.debug",
4985 		"/usr/lib/debug/lib/modules/%1$s/vmlinux",
4986 	};
4987 	char path[PATH_MAX + 1];
4988 	struct utsname buf;
4989 	struct btf *btf;
4990 	int i, err;
4991 
4992 	/* is canonical sysfs location accessible? */
4993 	if (faccessat(AT_FDCWD, sysfs_btf_path, F_OK, AT_EACCESS) < 0) {
4994 		pr_warn("kernel BTF is missing at '%s', was CONFIG_DEBUG_INFO_BTF enabled?\n",
4995 			sysfs_btf_path);
4996 	} else {
4997 		btf = btf__parse(sysfs_btf_path, NULL);
4998 		if (!btf) {
4999 			err = -errno;
5000 			pr_warn("failed to read kernel BTF from '%s': %d\n", sysfs_btf_path, err);
5001 			return libbpf_err_ptr(err);
5002 		}
5003 		pr_debug("loaded kernel BTF from '%s'\n", sysfs_btf_path);
5004 		return btf;
5005 	}
5006 
5007 	/* try fallback locations */
5008 	uname(&buf);
5009 	for (i = 0; i < ARRAY_SIZE(locations); i++) {
5010 		snprintf(path, PATH_MAX, locations[i], buf.release);
5011 
5012 		if (faccessat(AT_FDCWD, path, R_OK, AT_EACCESS))
5013 			continue;
5014 
5015 		btf = btf__parse(path, NULL);
5016 		err = libbpf_get_error(btf);
5017 		pr_debug("loading kernel BTF '%s': %d\n", path, err);
5018 		if (err)
5019 			continue;
5020 
5021 		return btf;
5022 	}
5023 
5024 	pr_warn("failed to find valid kernel BTF\n");
5025 	return libbpf_err_ptr(-ESRCH);
5026 }
5027 
5028 struct btf *libbpf_find_kernel_btf(void) __attribute__((alias("btf__load_vmlinux_btf")));
5029 
/* Load kernel module's BTF from its canonical sysfs location, split on top
 * of the provided vmlinux BTF (may be NULL for self-contained parsing).
 */
struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf)
{
	char sysfs_path[80];

	snprintf(sysfs_path, sizeof(sysfs_path), "/sys/kernel/btf/%s", module_name);
	return btf__parse_split(sysfs_path, vmlinux_btf);
}
5037 
/* Initialize @it to enumerate all fields of BTF type @t that hold either a
 * type ID (BTF_FIELD_ITER_IDS) or a string offset (BTF_FIELD_ITER_STRS).
 * The iterator is driven by a btf_field_desc recording byte offsets of such
 * fields in the fixed struct btf_type part (t_off_cnt/t_offs) and in each of
 * the type's trailing per-member records (m_sz stride, m_off_cnt/m_offs).
 * Returns 0 on success, -EINVAL for an unrecognized BTF kind or iter kind.
 */
int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, enum btf_field_iter_kind iter_kind)
{
	it->p = NULL;
	it->m_idx = -1;
	it->off_idx = 0;
	it->vlen = 0;

	switch (iter_kind) {
	case BTF_FIELD_ITER_IDS:
		switch (btf_kind(t)) {
		case BTF_KIND_UNKN:
		case BTF_KIND_INT:
		case BTF_KIND_FLOAT:
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
			/* these kinds reference no other type IDs at all */
			it->desc = (struct btf_field_desc) {};
			break;
		case BTF_KIND_FWD:
		case BTF_KIND_CONST:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_PTR:
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_FUNC:
		case BTF_KIND_VAR:
		case BTF_KIND_DECL_TAG:
		case BTF_KIND_TYPE_TAG:
			/* single referenced type ID in btf_type's own 'type' field */
			it->desc = (struct btf_field_desc) { 1, {offsetof(struct btf_type, type)} };
			break;
		case BTF_KIND_ARRAY:
			/* element type and index type live in the trailing btf_array */
			it->desc = (struct btf_field_desc) {
				2, {sizeof(struct btf_type) + offsetof(struct btf_array, type),
				    sizeof(struct btf_type) + offsetof(struct btf_array, index_type)}
			};
			break;
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			/* no IDs in btf_type itself; one 'type' per btf_member record */
			it->desc = (struct btf_field_desc) {
				0, {},
				sizeof(struct btf_member),
				1, {offsetof(struct btf_member, type)}
			};
			break;
		case BTF_KIND_FUNC_PROTO:
			/* return type in btf_type, plus one 'type' per btf_param */
			it->desc = (struct btf_field_desc) {
				1, {offsetof(struct btf_type, type)},
				sizeof(struct btf_param),
				1, {offsetof(struct btf_param, type)}
			};
			break;
		case BTF_KIND_DATASEC:
			/* one referenced 'type' per btf_var_secinfo record */
			it->desc = (struct btf_field_desc) {
				0, {},
				sizeof(struct btf_var_secinfo),
				1, {offsetof(struct btf_var_secinfo, type)}
			};
			break;
		default:
			return -EINVAL;
		}
		break;
	case BTF_FIELD_ITER_STRS:
		switch (btf_kind(t)) {
		case BTF_KIND_UNKN:
			/* void type carries no strings */
			it->desc = (struct btf_field_desc) {};
			break;
		case BTF_KIND_INT:
		case BTF_KIND_FLOAT:
		case BTF_KIND_FWD:
		case BTF_KIND_ARRAY:
		case BTF_KIND_CONST:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_PTR:
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_FUNC:
		case BTF_KIND_VAR:
		case BTF_KIND_DECL_TAG:
		case BTF_KIND_TYPE_TAG:
		case BTF_KIND_DATASEC:
			/* only the type's own name; member records (if any) have none */
			it->desc = (struct btf_field_desc) {
				1, {offsetof(struct btf_type, name_off)}
			};
			break;
		case BTF_KIND_ENUM:
			/* type name plus one name per btf_enum value */
			it->desc = (struct btf_field_desc) {
				1, {offsetof(struct btf_type, name_off)},
				sizeof(struct btf_enum),
				1, {offsetof(struct btf_enum, name_off)}
			};
			break;
		case BTF_KIND_ENUM64:
			/* type name plus one name per btf_enum64 value */
			it->desc = (struct btf_field_desc) {
				1, {offsetof(struct btf_type, name_off)},
				sizeof(struct btf_enum64),
				1, {offsetof(struct btf_enum64, name_off)}
			};
			break;
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			/* type name plus one name per btf_member */
			it->desc = (struct btf_field_desc) {
				1, {offsetof(struct btf_type, name_off)},
				sizeof(struct btf_member),
				1, {offsetof(struct btf_member, name_off)}
			};
			break;
		case BTF_KIND_FUNC_PROTO:
			/* type name plus one name per btf_param */
			it->desc = (struct btf_field_desc) {
				1, {offsetof(struct btf_type, name_off)},
				sizeof(struct btf_param),
				1, {offsetof(struct btf_param, name_off)}
			};
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	/* per-member iteration needs the actual member count from the type */
	if (it->desc.m_sz)
		it->vlen = btf_vlen(t);

	it->p = t;
	return 0;
}
5165 
/* Return a pointer to the next type-ID or string-offset field of the type
 * that @it was initialized with, or NULL once all fields were visited.
 * The returned __u32 may be overwritten in place by the caller (this is how
 * type IDs and string offsets get remapped during dedup/relocation).
 */
__u32 *btf_field_iter_next(struct btf_field_iter *it)
{
	if (!it->p) /* iteration already finished */
		return NULL;

	if (it->m_idx < 0) {
		/* still emitting fields of the fixed struct btf_type part */
		if (it->off_idx < it->desc.t_off_cnt)
			return it->p + it->desc.t_offs[it->off_idx++];
		/* move to per-member iteration */
		it->m_idx = 0;
		it->p += sizeof(struct btf_type);
		it->off_idx = 0;
	}

	/* if type doesn't have members, stop */
	if (it->desc.m_sz == 0) {
		it->p = NULL;
		return NULL;
	}

	if (it->off_idx >= it->desc.m_off_cnt) {
		/* exhausted this member's fields, go to the next member */
		it->m_idx++;
		it->p += it->desc.m_sz;
		it->off_idx = 0;
	}

	if (it->m_idx < it->vlen)
		return it->p + it->desc.m_offs[it->off_idx++];

	/* walked past the last member record; mark iteration done */
	it->p = NULL;
	return NULL;
}
5199 
5200 int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
5201 {
5202 	const struct btf_ext_info *seg;
5203 	struct btf_ext_info_sec *sec;
5204 	int i, err;
5205 
5206 	seg = &btf_ext->func_info;
5207 	for_each_btf_ext_sec(seg, sec) {
5208 		struct bpf_func_info_min *rec;
5209 
5210 		for_each_btf_ext_rec(seg, sec, i, rec) {
5211 			err = visit(&rec->type_id, ctx);
5212 			if (err < 0)
5213 				return err;
5214 		}
5215 	}
5216 
5217 	seg = &btf_ext->core_relo_info;
5218 	for_each_btf_ext_sec(seg, sec) {
5219 		struct bpf_core_relo *rec;
5220 
5221 		for_each_btf_ext_rec(seg, sec, i, rec) {
5222 			err = visit(&rec->type_id, ctx);
5223 			if (err < 0)
5224 				return err;
5225 		}
5226 	}
5227 
5228 	return 0;
5229 }
5230 
5231 int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx)
5232 {
5233 	const struct btf_ext_info *seg;
5234 	struct btf_ext_info_sec *sec;
5235 	int i, err;
5236 
5237 	seg = &btf_ext->func_info;
5238 	for_each_btf_ext_sec(seg, sec) {
5239 		err = visit(&sec->sec_name_off, ctx);
5240 		if (err)
5241 			return err;
5242 	}
5243 
5244 	seg = &btf_ext->line_info;
5245 	for_each_btf_ext_sec(seg, sec) {
5246 		struct bpf_line_info_min *rec;
5247 
5248 		err = visit(&sec->sec_name_off, ctx);
5249 		if (err)
5250 			return err;
5251 
5252 		for_each_btf_ext_rec(seg, sec, i, rec) {
5253 			err = visit(&rec->file_name_off, ctx);
5254 			if (err)
5255 				return err;
5256 			err = visit(&rec->line_off, ctx);
5257 			if (err)
5258 				return err;
5259 		}
5260 	}
5261 
5262 	seg = &btf_ext->core_relo_info;
5263 	for_each_btf_ext_sec(seg, sec) {
5264 		struct bpf_core_relo *rec;
5265 
5266 		err = visit(&sec->sec_name_off, ctx);
5267 		if (err)
5268 			return err;
5269 
5270 		for_each_btf_ext_rec(seg, sec, i, rec) {
5271 			err = visit(&rec->access_str_off, ctx);
5272 			if (err)
5273 				return err;
5274 		}
5275 	}
5276 
5277 	return 0;
5278 }
5279