xref: /linux/tools/lib/bpf/btf.c (revision c01044cc819160323f3ca4acd44fca487c4432e6)
1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 /* Copyright (c) 2018 Facebook */
3 
4 #include <endian.h>
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <string.h>
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <errno.h>
11 #include <sys/utsname.h>
12 #include <sys/param.h>
13 #include <sys/stat.h>
14 #include <linux/kernel.h>
15 #include <linux/err.h>
16 #include <linux/btf.h>
17 #include <gelf.h>
18 #include "btf.h"
19 #include "bpf.h"
20 #include "libbpf.h"
21 #include "libbpf_internal.h"
22 #include "hashmap.h"
23 
24 /* make sure libbpf doesn't use kernel-only integer typedefs */
25 #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
26 
27 #define BTF_MAX_NR_TYPES 0x7fffffffU
28 #define BTF_MAX_STR_OFFSET 0x7fffffffU
29 
30 static struct btf_type btf_void;
31 
32 struct btf {
33 	union {
34 		struct btf_header *hdr;
35 		void *data;
36 	};
37 	struct btf_type **types;
38 	const char *strings;
39 	void *nohdr_data;
40 	__u32 nr_types;
41 	__u32 types_size;
42 	__u32 data_size;
43 	int fd;
44 	int ptr_sz;
45 };
46 
47 static inline __u64 ptr_to_u64(const void *ptr)
48 {
49 	return (__u64) (unsigned long) ptr;
50 }
51 
52 static int btf_add_type(struct btf *btf, struct btf_type *t)
53 {
54 	if (btf->types_size - btf->nr_types < 2) {
55 		struct btf_type **new_types;
56 		__u32 expand_by, new_size;
57 
58 		if (btf->types_size == BTF_MAX_NR_TYPES)
59 			return -E2BIG;
60 
61 		expand_by = max(btf->types_size >> 2, 16U);
62 		new_size = min(BTF_MAX_NR_TYPES, btf->types_size + expand_by);
63 
64 		new_types = realloc(btf->types, sizeof(*new_types) * new_size);
65 		if (!new_types)
66 			return -ENOMEM;
67 
68 		if (btf->nr_types == 0)
69 			new_types[0] = &btf_void;
70 
71 		btf->types = new_types;
72 		btf->types_size = new_size;
73 	}
74 
75 	btf->types[++(btf->nr_types)] = t;
76 
77 	return 0;
78 }
79 
80 static int btf_parse_hdr(struct btf *btf)
81 {
82 	const struct btf_header *hdr = btf->hdr;
83 	__u32 meta_left;
84 
85 	if (btf->data_size < sizeof(struct btf_header)) {
86 		pr_debug("BTF header not found\n");
87 		return -EINVAL;
88 	}
89 
90 	if (hdr->magic != BTF_MAGIC) {
91 		pr_debug("Invalid BTF magic:%x\n", hdr->magic);
92 		return -EINVAL;
93 	}
94 
95 	if (hdr->version != BTF_VERSION) {
96 		pr_debug("Unsupported BTF version:%u\n", hdr->version);
97 		return -ENOTSUP;
98 	}
99 
100 	if (hdr->flags) {
101 		pr_debug("Unsupported BTF flags:%x\n", hdr->flags);
102 		return -ENOTSUP;
103 	}
104 
105 	meta_left = btf->data_size - sizeof(*hdr);
106 	if (!meta_left) {
107 		pr_debug("BTF has no data\n");
108 		return -EINVAL;
109 	}
110 
111 	if (meta_left < hdr->type_off) {
112 		pr_debug("Invalid BTF type section offset:%u\n", hdr->type_off);
113 		return -EINVAL;
114 	}
115 
116 	if (meta_left < hdr->str_off) {
117 		pr_debug("Invalid BTF string section offset:%u\n", hdr->str_off);
118 		return -EINVAL;
119 	}
120 
121 	if (hdr->type_off >= hdr->str_off) {
122 		pr_debug("BTF type section offset >= string section offset. No type?\n");
123 		return -EINVAL;
124 	}
125 
126 	if (hdr->type_off & 0x03) {
127 		pr_debug("BTF type section is not aligned to 4 bytes\n");
128 		return -EINVAL;
129 	}
130 
131 	btf->nohdr_data = btf->hdr + 1;
132 
133 	return 0;
134 }
135 
136 static int btf_parse_str_sec(struct btf *btf)
137 {
138 	const struct btf_header *hdr = btf->hdr;
139 	const char *start = btf->nohdr_data + hdr->str_off;
140 	const char *end = start + btf->hdr->str_len;
141 
142 	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET ||
143 	    start[0] || end[-1]) {
144 		pr_debug("Invalid BTF string section\n");
145 		return -EINVAL;
146 	}
147 
148 	btf->strings = start;
149 
150 	return 0;
151 }
152 
153 static int btf_type_size(struct btf_type *t)
154 {
155 	int base_size = sizeof(struct btf_type);
156 	__u16 vlen = btf_vlen(t);
157 
158 	switch (btf_kind(t)) {
159 	case BTF_KIND_FWD:
160 	case BTF_KIND_CONST:
161 	case BTF_KIND_VOLATILE:
162 	case BTF_KIND_RESTRICT:
163 	case BTF_KIND_PTR:
164 	case BTF_KIND_TYPEDEF:
165 	case BTF_KIND_FUNC:
166 		return base_size;
167 	case BTF_KIND_INT:
168 		return base_size + sizeof(__u32);
169 	case BTF_KIND_ENUM:
170 		return base_size + vlen * sizeof(struct btf_enum);
171 	case BTF_KIND_ARRAY:
172 		return base_size + sizeof(struct btf_array);
173 	case BTF_KIND_STRUCT:
174 	case BTF_KIND_UNION:
175 		return base_size + vlen * sizeof(struct btf_member);
176 	case BTF_KIND_FUNC_PROTO:
177 		return base_size + vlen * sizeof(struct btf_param);
178 	case BTF_KIND_VAR:
179 		return base_size + sizeof(struct btf_var);
180 	case BTF_KIND_DATASEC:
181 		return base_size + vlen * sizeof(struct btf_var_secinfo);
182 	default:
183 		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
184 		return -EINVAL;
185 	}
186 }
187 
188 static int btf_parse_type_sec(struct btf *btf)
189 {
190 	struct btf_header *hdr = btf->hdr;
191 	void *nohdr_data = btf->nohdr_data;
192 	void *next_type = nohdr_data + hdr->type_off;
193 	void *end_type = nohdr_data + hdr->str_off;
194 
195 	while (next_type < end_type) {
196 		struct btf_type *t = next_type;
197 		int type_size;
198 		int err;
199 
200 		type_size = btf_type_size(t);
201 		if (type_size < 0)
202 			return type_size;
203 		next_type += type_size;
204 		err = btf_add_type(btf, t);
205 		if (err)
206 			return err;
207 	}
208 
209 	return 0;
210 }
211 
212 __u32 btf__get_nr_types(const struct btf *btf)
213 {
214 	return btf->nr_types;
215 }
216 
217 const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
218 {
219 	if (type_id > btf->nr_types)
220 		return NULL;
221 
222 	return btf->types[type_id];
223 }
224 
225 static int determine_ptr_size(const struct btf *btf)
226 {
227 	const struct btf_type *t;
228 	const char *name;
229 	int i;
230 
231 	for (i = 1; i <= btf->nr_types; i++) {
232 		t = btf__type_by_id(btf, i);
233 		if (!btf_is_int(t))
234 			continue;
235 
236 		name = btf__name_by_offset(btf, t->name_off);
237 		if (!name)
238 			continue;
239 
240 		if (strcmp(name, "long int") == 0 ||
241 		    strcmp(name, "long unsigned int") == 0) {
242 			if (t->size != 4 && t->size != 8)
243 				continue;
244 			return t->size;
245 		}
246 	}
247 
248 	return -1;
249 }
250 
251 static size_t btf_ptr_sz(const struct btf *btf)
252 {
253 	if (!btf->ptr_sz)
254 		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
255 	return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
256 }
257 
258 /* Return the pointer size this BTF instance assumes. The size is
259  * heuristically determined by looking for a 'long int' or 'long unsigned
260  * int' integer type and recording its size in bytes. If BTF type info
261  * doesn't contain any such type, this function returns 0. In that case,
262  * internal uses fall back to the native architecture's pointer size (4 or
263  * 8, depending on the architecture libbpf was compiled for). The guessed
264  * value can be overridden using the btf__set_pointer_size() API.
265  */
266 size_t btf__pointer_size(const struct btf *btf)
267 {
268 	if (!btf->ptr_sz)
269 		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
270 
271 	if (btf->ptr_sz < 0)
272 		/* not enough BTF type info to guess */
273 		return 0;
274 
275 	return btf->ptr_sz;
276 }
277 
278 /* Override or set pointer size in bytes. Only values of 4 and 8 are
279  * supported.
280  */
281 int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
282 {
283 	if (ptr_sz != 4 && ptr_sz != 8)
284 		return -EINVAL;
285 	btf->ptr_sz = ptr_sz;
286 	return 0;
287 }
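
/*
 * Example usage (an illustrative sketch, not part of the original source):
 * combining the two APIs above so that a definite pointer size is always
 * set before doing offset/size calculations. Falling back to
 * sizeof(void *) is this example's assumption, not something the APIs
 * mandate.
 *
 *	size_t ptr_sz = btf__pointer_size(btf);
 *
 *	if (ptr_sz == 0) {
 *		// BTF had no 'long' type to guess from, pick explicitly
 *		err = btf__set_pointer_size(btf, sizeof(void *));
 *		if (err)
 *			return err;
 *	}
 */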
288 
289 static bool btf_type_is_void(const struct btf_type *t)
290 {
291 	return t == &btf_void || btf_is_fwd(t);
292 }
293 
294 static bool btf_type_is_void_or_null(const struct btf_type *t)
295 {
296 	return !t || btf_type_is_void(t);
297 }
298 
299 #define MAX_RESOLVE_DEPTH 32
300 
301 __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
302 {
303 	const struct btf_array *array;
304 	const struct btf_type *t;
305 	__u32 nelems = 1;
306 	__s64 size = -1;
307 	int i;
308 
309 	t = btf__type_by_id(btf, type_id);
310 	for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t);
311 	     i++) {
312 		switch (btf_kind(t)) {
313 		case BTF_KIND_INT:
314 		case BTF_KIND_STRUCT:
315 		case BTF_KIND_UNION:
316 		case BTF_KIND_ENUM:
317 		case BTF_KIND_DATASEC:
318 			size = t->size;
319 			goto done;
320 		case BTF_KIND_PTR:
321 			size = btf_ptr_sz(btf);
322 			goto done;
323 		case BTF_KIND_TYPEDEF:
324 		case BTF_KIND_VOLATILE:
325 		case BTF_KIND_CONST:
326 		case BTF_KIND_RESTRICT:
327 		case BTF_KIND_VAR:
328 			type_id = t->type;
329 			break;
330 		case BTF_KIND_ARRAY:
331 			array = btf_array(t);
332 			if (nelems && array->nelems > UINT32_MAX / nelems)
333 				return -E2BIG;
334 			nelems *= array->nelems;
335 			type_id = array->type;
336 			break;
337 		default:
338 			return -EINVAL;
339 		}
340 
341 		t = btf__type_by_id(btf, type_id);
342 	}
343 
344 done:
345 	if (size < 0)
346 		return -EINVAL;
347 	if (nelems && size > UINT32_MAX / nelems)
348 		return -E2BIG;
349 
350 	return nelems * size;
351 }
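
/*
 * Worked example (illustrative, not part of the original source): for a
 * declaration like
 *
 *	const volatile int matrix[4][5];
 *
 * the loop above walks ARRAY -> ARRAY -> CONST -> VOLATILE -> INT,
 * accumulating nelems = 4 * 5 = 20 across the two ARRAY levels and
 * stopping at the INT with size 4, so btf__resolve_size() returns
 * 20 * 4 = 80 bytes.
 */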
352 
353 int btf__align_of(const struct btf *btf, __u32 id)
354 {
355 	const struct btf_type *t = btf__type_by_id(btf, id);
356 	__u16 kind = btf_kind(t);
357 
358 	switch (kind) {
359 	case BTF_KIND_INT:
360 	case BTF_KIND_ENUM:
361 		return min(btf_ptr_sz(btf), (size_t)t->size);
362 	case BTF_KIND_PTR:
363 		return btf_ptr_sz(btf);
364 	case BTF_KIND_TYPEDEF:
365 	case BTF_KIND_VOLATILE:
366 	case BTF_KIND_CONST:
367 	case BTF_KIND_RESTRICT:
368 		return btf__align_of(btf, t->type);
369 	case BTF_KIND_ARRAY:
370 		return btf__align_of(btf, btf_array(t)->type);
371 	case BTF_KIND_STRUCT:
372 	case BTF_KIND_UNION: {
373 		const struct btf_member *m = btf_members(t);
374 		__u16 vlen = btf_vlen(t);
375 		int i, max_align = 1, align;
376 
377 		for (i = 0; i < vlen; i++, m++) {
378 			align = btf__align_of(btf, m->type);
379 			if (align <= 0)
380 				return align;
381 			max_align = max(max_align, align);
382 		}
383 
384 		return max_align;
385 	}
386 	default:
387 		pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
388 		return 0;
389 	}
390 }
391 
392 int btf__resolve_type(const struct btf *btf, __u32 type_id)
393 {
394 	const struct btf_type *t;
395 	int depth = 0;
396 
397 	t = btf__type_by_id(btf, type_id);
398 	while (depth < MAX_RESOLVE_DEPTH &&
399 	       !btf_type_is_void_or_null(t) &&
400 	       (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
401 		type_id = t->type;
402 		t = btf__type_by_id(btf, type_id);
403 		depth++;
404 	}
405 
406 	if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
407 		return -EINVAL;
408 
409 	return type_id;
410 }
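
/*
 * Example (illustrative, not part of the original source): given
 *
 *	typedef const volatile unsigned int uint_cv_t;
 *
 * calling btf__resolve_type() on the TYPEDEF's ID peels TYPEDEF, CONST
 * and VOLATILE and returns the ID of the underlying 'unsigned int' INT
 * type.
 */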
411 
412 __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
413 {
414 	__u32 i;
415 
416 	if (!strcmp(type_name, "void"))
417 		return 0;
418 
419 	for (i = 1; i <= btf->nr_types; i++) {
420 		const struct btf_type *t = btf->types[i];
421 		const char *name = btf__name_by_offset(btf, t->name_off);
422 
423 		if (name && !strcmp(type_name, name))
424 			return i;
425 	}
426 
427 	return -ENOENT;
428 }
429 
430 __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
431 			     __u32 kind)
432 {
433 	__u32 i;
434 
435 	if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
436 		return 0;
437 
438 	for (i = 1; i <= btf->nr_types; i++) {
439 		const struct btf_type *t = btf->types[i];
440 		const char *name;
441 
442 		if (btf_kind(t) != kind)
443 			continue;
444 		name = btf__name_by_offset(btf, t->name_off);
445 		if (name && !strcmp(type_name, name))
446 			return i;
447 	}
448 
449 	return -ENOENT;
450 }
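
/*
 * Example usage (an illustrative sketch, not part of the original
 * source): looking up a specific kernel struct in vmlinux BTF;
 * 'task_struct' is just an example name.
 *
 *	__s32 id = btf__find_by_name_kind(btf, "task_struct",
 *					  BTF_KIND_STRUCT);
 *
 *	if (id < 0)
 *		return id;	// -ENOENT if no such struct exists
 *	t = btf__type_by_id(btf, id);
 */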
451 
452 void btf__free(struct btf *btf)
453 {
454 	if (IS_ERR_OR_NULL(btf))
455 		return;
456 
457 	if (btf->fd >= 0)
458 		close(btf->fd);
459 
460 	free(btf->data);
461 	free(btf->types);
462 	free(btf);
463 }
464 
465 struct btf *btf__new(const void *data, __u32 size)
466 {
467 	struct btf *btf;
468 	int err;
469 
470 	btf = calloc(1, sizeof(struct btf));
471 	if (!btf)
472 		return ERR_PTR(-ENOMEM);
473 
474 	btf->fd = -1;
475 
476 	btf->data = malloc(size);
477 	if (!btf->data) {
478 		err = -ENOMEM;
479 		goto done;
480 	}
481 
482 	memcpy(btf->data, data, size);
483 	btf->data_size = size;
484 
485 	err = btf_parse_hdr(btf);
486 	if (err)
487 		goto done;
488 
489 	err = btf_parse_str_sec(btf);
490 	if (err)
491 		goto done;
492 
493 	err = btf_parse_type_sec(btf);
494 
495 done:
496 	if (err) {
497 		btf__free(btf);
498 		return ERR_PTR(err);
499 	}
500 
501 	return btf;
502 }
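
/*
 * Example usage (an illustrative sketch, not part of the original
 * source): btf__new() reports failure through the ERR_PTR convention
 * rather than by returning NULL, so callers must check with IS_ERR():
 *
 *	struct btf *btf = btf__new(raw_data, raw_size);
 *
 *	if (IS_ERR(btf))
 *		return PTR_ERR(btf);	// negative errno-style code
 *	// ... use btf ...
 *	btf__free(btf);
 */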
503 
504 static bool btf_check_endianness(const GElf_Ehdr *ehdr)
505 {
506 #if __BYTE_ORDER == __LITTLE_ENDIAN
507 	return ehdr->e_ident[EI_DATA] == ELFDATA2LSB;
508 #elif __BYTE_ORDER == __BIG_ENDIAN
509 	return ehdr->e_ident[EI_DATA] == ELFDATA2MSB;
510 #else
511 # error "Unrecognized __BYTE_ORDER"
512 #endif
513 }
514 
515 struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
516 {
517 	Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
518 	int err = 0, fd = -1, idx = 0;
519 	struct btf *btf = NULL;
520 	Elf_Scn *scn = NULL;
521 	Elf *elf = NULL;
522 	GElf_Ehdr ehdr;
523 
524 	if (elf_version(EV_CURRENT) == EV_NONE) {
525 		pr_warn("failed to init libelf for %s\n", path);
526 		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
527 	}
528 
529 	fd = open(path, O_RDONLY);
530 	if (fd < 0) {
531 		err = -errno;
532 		pr_warn("failed to open %s: %s\n", path, strerror(errno));
533 		return ERR_PTR(err);
534 	}
535 
536 	err = -LIBBPF_ERRNO__FORMAT;
537 
538 	elf = elf_begin(fd, ELF_C_READ, NULL);
539 	if (!elf) {
540 		pr_warn("failed to open %s as ELF file\n", path);
541 		goto done;
542 	}
543 	if (!gelf_getehdr(elf, &ehdr)) {
544 		pr_warn("failed to get EHDR from %s\n", path);
545 		goto done;
546 	}
547 	if (!btf_check_endianness(&ehdr)) {
548 		pr_warn("non-native ELF endianness is not supported\n");
549 		goto done;
550 	}
551 	if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) {
552 		pr_warn("failed to get e_shstrndx from %s\n", path);
553 		goto done;
554 	}
555 
556 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
557 		GElf_Shdr sh;
558 		char *name;
559 
560 		idx++;
561 		if (gelf_getshdr(scn, &sh) != &sh) {
562 			pr_warn("failed to get section(%d) header from %s\n",
563 				idx, path);
564 			goto done;
565 		}
566 		name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name);
567 		if (!name) {
568 			pr_warn("failed to get section(%d) name from %s\n",
569 				idx, path);
570 			goto done;
571 		}
572 		if (strcmp(name, BTF_ELF_SEC) == 0) {
573 			btf_data = elf_getdata(scn, 0);
574 			if (!btf_data) {
575 				pr_warn("failed to get section(%d, %s) data from %s\n",
576 					idx, name, path);
577 				goto done;
578 			}
579 			continue;
580 		} else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
581 			btf_ext_data = elf_getdata(scn, 0);
582 			if (!btf_ext_data) {
583 				pr_warn("failed to get section(%d, %s) data from %s\n",
584 					idx, name, path);
585 				goto done;
586 			}
587 			continue;
588 		}
589 	}
590 
591 	err = 0;
592 
593 	if (!btf_data) {
594 		err = -ENOENT;
595 		goto done;
596 	}
597 	btf = btf__new(btf_data->d_buf, btf_data->d_size);
598 	if (IS_ERR(btf))
599 		goto done;
600 
601 	switch (gelf_getclass(elf)) {
602 	case ELFCLASS32:
603 		btf__set_pointer_size(btf, 4);
604 		break;
605 	case ELFCLASS64:
606 		btf__set_pointer_size(btf, 8);
607 		break;
608 	default:
609 		pr_warn("failed to get ELF class (bitness) for %s\n", path);
610 		break;
611 	}
612 
613 	if (btf_ext && btf_ext_data) {
614 		*btf_ext = btf_ext__new(btf_ext_data->d_buf,
615 					btf_ext_data->d_size);
616 		if (IS_ERR(*btf_ext))
617 			goto done;
618 	} else if (btf_ext) {
619 		*btf_ext = NULL;
620 	}
621 done:
622 	if (elf)
623 		elf_end(elf);
624 	close(fd);
625 
626 	if (err)
627 		return ERR_PTR(err);
628 	/*
629 	 * btf is always parsed before btf_ext, so there is no need to clean
630 	 * up btf_ext if btf loading failed
631 	 */
632 	if (IS_ERR(btf))
633 		return btf;
634 	if (btf_ext && IS_ERR(*btf_ext)) {
635 		btf__free(btf);
636 		err = PTR_ERR(*btf_ext);
637 		return ERR_PTR(err);
638 	}
639 	return btf;
640 }
641 
642 struct btf *btf__parse_raw(const char *path)
643 {
644 	struct btf *btf = NULL;
645 	void *data = NULL;
646 	FILE *f = NULL;
647 	__u16 magic;
648 	int err = 0;
649 	long sz;
650 
651 	f = fopen(path, "rb");
652 	if (!f) {
653 		err = -errno;
654 		goto err_out;
655 	}
656 
657 	/* check BTF magic */
658 	if (fread(&magic, 1, sizeof(magic), f) < sizeof(magic)) {
659 		err = -EIO;
660 		goto err_out;
661 	}
662 	if (magic == __bswap_16(BTF_MAGIC)) {
663 		/* non-native endian raw BTF */
664 		pr_warn("non-native BTF endianness is not supported\n");
665 		err = -LIBBPF_ERRNO__ENDIAN;
666 		goto err_out;
667 	}
668 	if (magic != BTF_MAGIC) {
669 		/* definitely not a raw BTF */
670 		err = -EPROTO;
671 		goto err_out;
672 	}
673 
674 	/* get file size */
675 	if (fseek(f, 0, SEEK_END)) {
676 		err = -errno;
677 		goto err_out;
678 	}
679 	sz = ftell(f);
680 	if (sz < 0) {
681 		err = -errno;
682 		goto err_out;
683 	}
684 	/* rewind to the start */
685 	if (fseek(f, 0, SEEK_SET)) {
686 		err = -errno;
687 		goto err_out;
688 	}
689 
690 	/* pre-alloc memory and read all of BTF data */
691 	data = malloc(sz);
692 	if (!data) {
693 		err = -ENOMEM;
694 		goto err_out;
695 	}
696 	if (fread(data, 1, sz, f) < sz) {
697 		err = -EIO;
698 		goto err_out;
699 	}
700 
701 	/* finally parse BTF data */
702 	btf = btf__new(data, sz);
703 
704 err_out:
705 	free(data);
706 	if (f)
707 		fclose(f);
708 	return err ? ERR_PTR(err) : btf;
709 }
710 
711 struct btf *btf__parse(const char *path, struct btf_ext **btf_ext)
712 {
713 	struct btf *btf;
714 
715 	if (btf_ext)
716 		*btf_ext = NULL;
717 
718 	btf = btf__parse_raw(path);
719 	if (!IS_ERR(btf) || PTR_ERR(btf) != -EPROTO)
720 		return btf;
721 
722 	return btf__parse_elf(path, btf_ext);
723 }
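
/*
 * Example usage (an illustrative sketch, not part of the original
 * source): btf__parse() first probes the file as raw BTF and falls back
 * to ELF parsing only when the raw probe fails with -EPROTO (wrong
 * magic):
 *
 *	struct btf_ext *btf_ext;
 *	struct btf *btf = btf__parse("/sys/kernel/btf/vmlinux", &btf_ext);
 *
 *	if (IS_ERR(btf))
 *		return PTR_ERR(btf);
 *	// ... use btf and (possibly NULL) btf_ext ...
 *	btf_ext__free(btf_ext);
 *	btf__free(btf);
 */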
724 
725 static int compare_vsi_off(const void *_a, const void *_b)
726 {
727 	const struct btf_var_secinfo *a = _a;
728 	const struct btf_var_secinfo *b = _b;
729 
730 	return a->offset - b->offset;
731 }
732 
733 static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
734 			     struct btf_type *t)
735 {
736 	__u32 size = 0, off = 0, i, vars = btf_vlen(t);
737 	const char *name = btf__name_by_offset(btf, t->name_off);
738 	const struct btf_type *t_var;
739 	struct btf_var_secinfo *vsi;
740 	const struct btf_var *var;
741 	int ret;
742 
743 	if (!name) {
744 		pr_debug("No name found in string section for DATASEC kind.\n");
745 		return -ENOENT;
746 	}
747 
748 	/* .extern datasec size and var offsets were set correctly during
749 	 * extern collection step, so just skip straight to sorting variables
750 	 */
751 	if (t->size)
752 		goto sort_vars;
753 
754 	ret = bpf_object__section_size(obj, name, &size);
755 	if (ret || !size || (t->size && t->size != size)) {
756 		pr_debug("Invalid size for section %s: %u bytes\n", name, size);
757 		return -ENOENT;
758 	}
759 
760 	t->size = size;
761 
762 	for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
763 		t_var = btf__type_by_id(btf, vsi->type);
764 		if (!t_var || !btf_is_var(t_var)) {
765 			pr_debug("Non-VAR type seen in section %s\n", name);
766 			return -EINVAL;
767 		}
768 
769 		var = btf_var(t_var);
770 
771 		if (var->linkage == BTF_VAR_STATIC)
772 			continue;
773 
774 		name = btf__name_by_offset(btf, t_var->name_off);
775 		if (!name) {
776 			pr_debug("No name found in string section for VAR kind\n");
777 			return -ENOENT;
778 		}
779 
780 		ret = bpf_object__variable_offset(obj, name, &off);
781 		if (ret) {
782 			pr_debug("No offset found in symbol table for VAR %s\n",
783 				 name);
784 			return -ENOENT;
785 		}
786 
787 		vsi->offset = off;
788 	}
789 
790 sort_vars:
791 	qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
792 	return 0;
793 }
794 
795 int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
796 {
797 	int err = 0;
798 	__u32 i;
799 
800 	for (i = 1; i <= btf->nr_types; i++) {
801 		struct btf_type *t = btf->types[i];
802 
803 		/* Loader needs to fix up some of the things the compiler
804 		 * couldn't get its hands on while emitting BTF. These
805 		 * are section sizes and global variable offsets. We use
806 		 * the info from the ELF itself for this purpose.
807 		 */
808 		if (btf_is_datasec(t)) {
809 			err = btf_fixup_datasec(obj, btf, t);
810 			if (err)
811 				break;
812 		}
813 	}
814 
815 	return err;
816 }
817 
818 int btf__load(struct btf *btf)
819 {
820 	__u32 log_buf_size = 0;
821 	char *log_buf = NULL;
822 	int err = 0;
823 
824 	if (btf->fd >= 0)
825 		return -EEXIST;
826 
827 retry_load:
828 	if (log_buf_size) {
829 		log_buf = malloc(log_buf_size);
830 		if (!log_buf)
831 			return -ENOMEM;
832 
833 		*log_buf = 0;
834 	}
835 
836 	btf->fd = bpf_load_btf(btf->data, btf->data_size,
837 			       log_buf, log_buf_size, false);
838 	if (btf->fd < 0) {
839 		if (!log_buf || errno == ENOSPC) {
840 			log_buf_size = max((__u32)BPF_LOG_BUF_SIZE,
841 					   log_buf_size << 1);
842 			free(log_buf);
843 			goto retry_load;
844 		}
845 
846 		err = -errno;
847 		pr_warn("Error loading BTF: %s(%d)\n", strerror(errno), errno);
848 		if (*log_buf)
849 			pr_warn("%s\n", log_buf);
850 		goto done;
851 	}
852 
853 done:
854 	free(log_buf);
855 	return err;
856 }
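
/*
 * Example usage (an illustrative sketch, not part of the original
 * source): loading BTF into the kernel and retrieving the resulting fd,
 * which can then be referenced by map/program creation commands:
 *
 *	err = btf__load(btf);
 *	if (err)
 *		return err;	// verifier log was already pr_warn()'ed
 *	btf_fd = btf__fd(btf);
 */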
857 
858 int btf__fd(const struct btf *btf)
859 {
860 	return btf->fd;
861 }
862 
863 void btf__set_fd(struct btf *btf, int fd)
864 {
865 	btf->fd = fd;
866 }
867 
868 const void *btf__get_raw_data(const struct btf *btf, __u32 *size)
869 {
870 	*size = btf->data_size;
871 	return btf->data;
872 }
873 
874 const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
875 {
876 	if (offset < btf->hdr->str_len)
877 		return &btf->strings[offset];
878 	else
879 		return NULL;
880 }
881 
882 int btf__get_from_id(__u32 id, struct btf **btf)
883 {
884 	struct bpf_btf_info btf_info = { 0 };
885 	__u32 len = sizeof(btf_info);
886 	__u32 last_size;
887 	int btf_fd;
888 	void *ptr;
889 	int err;
890 
891 	err = 0;
892 	*btf = NULL;
893 	btf_fd = bpf_btf_get_fd_by_id(id);
894 	if (btf_fd < 0)
895 		return 0;
896 
897 	/* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so
898 	 * let's start with a sane default - 4KiB here - and resize it only if
899 	 * bpf_obj_get_info_by_fd() needs a bigger buffer.
900 	 */
901 	btf_info.btf_size = 4096;
902 	last_size = btf_info.btf_size;
903 	ptr = malloc(last_size);
904 	if (!ptr) {
905 		err = -ENOMEM;
906 		goto exit_free;
907 	}
908 
909 	memset(ptr, 0, last_size);
910 	btf_info.btf = ptr_to_u64(ptr);
911 	err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
912 
913 	if (!err && btf_info.btf_size > last_size) {
914 		void *temp_ptr;
915 
916 		last_size = btf_info.btf_size;
917 		temp_ptr = realloc(ptr, last_size);
918 		if (!temp_ptr) {
919 			err = -ENOMEM;
920 			goto exit_free;
921 		}
922 		ptr = temp_ptr;
923 		memset(ptr, 0, last_size);
924 		btf_info.btf = ptr_to_u64(ptr);
925 		err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
926 	}
927 
928 	if (err || btf_info.btf_size > last_size) {
929 		err = err ? -errno : -E2BIG;
930 		goto exit_free;
931 	}
932 
933 	*btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size);
934 	if (IS_ERR(*btf)) {
935 		err = PTR_ERR(*btf);
936 		*btf = NULL;
937 	}
938 
939 exit_free:
940 	close(btf_fd);
941 	free(ptr);
942 
943 	return err;
944 }
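
/*
 * Example usage (an illustrative sketch, not part of the original
 * source): fetching BTF of a loaded object by its kernel-assigned ID.
 * Note the unusual convention above: the function returns 0 with
 * *btf == NULL when the ID can't be turned into an fd, so both the
 * return value and the pointer need checking:
 *
 *	struct btf *btf;
 *	int err = btf__get_from_id(id, &btf);
 *
 *	if (err || !btf)
 *		return err ? err : -ENOENT;
 */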
945 
946 int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
947 			 __u32 expected_key_size, __u32 expected_value_size,
948 			 __u32 *key_type_id, __u32 *value_type_id)
949 {
950 	const struct btf_type *container_type;
951 	const struct btf_member *key, *value;
952 	const size_t max_name = 256;
953 	char container_name[max_name];
954 	__s64 key_size, value_size;
955 	__s32 container_id;
956 
957 	if (snprintf(container_name, max_name, "____btf_map_%s", map_name) >=
958 	    max_name) {
959 		pr_warn("map:%s length of '____btf_map_%s' is too long\n",
960 			map_name, map_name);
961 		return -EINVAL;
962 	}
963 
964 	container_id = btf__find_by_name(btf, container_name);
965 	if (container_id < 0) {
966 		pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
967 			 map_name, container_name);
968 		return container_id;
969 	}
970 
971 	container_type = btf__type_by_id(btf, container_id);
972 	if (!container_type) {
973 		pr_warn("map:%s cannot find BTF type for container_id:%u\n",
974 			map_name, container_id);
975 		return -EINVAL;
976 	}
977 
978 	if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) {
979 		pr_warn("map:%s container_name:%s is an invalid container struct\n",
980 			map_name, container_name);
981 		return -EINVAL;
982 	}
983 
984 	key = btf_members(container_type);
985 	value = key + 1;
986 
987 	key_size = btf__resolve_size(btf, key->type);
988 	if (key_size < 0) {
989 		pr_warn("map:%s invalid BTF key_type_size\n", map_name);
990 		return key_size;
991 	}
992 
993 	if (expected_key_size != key_size) {
994 		pr_warn("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
995 			map_name, (__u32)key_size, expected_key_size);
996 		return -EINVAL;
997 	}
998 
999 	value_size = btf__resolve_size(btf, value->type);
1000 	if (value_size < 0) {
1001 		pr_warn("map:%s invalid BTF value_type_size\n", map_name);
1002 		return value_size;
1003 	}
1004 
1005 	if (expected_value_size != value_size) {
1006 		pr_warn("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
1007 			map_name, (__u32)value_size, expected_value_size);
1008 		return -EINVAL;
1009 	}
1010 
1011 	*key_type_id = key->type;
1012 	*value_type_id = value->type;
1013 
1014 	return 0;
1015 }
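
/*
 * For reference (illustrative, not part of the original source): the
 * '____btf_map_<name>' container looked up above is emitted by the
 * BPF_ANNOTATE_KV_PAIR() macro from the BPF-side helper headers, which
 * boils down to a two-member struct pairing the key and value types,
 * roughly:
 *
 *	struct ____btf_map_my_map {
 *		struct my_key key;
 *		struct my_value value;
 *	};
 *
 * 'my_map', 'my_key' and 'my_value' are placeholder names.
 */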
1016 
1017 struct btf_ext_sec_setup_param {
1018 	__u32 off;
1019 	__u32 len;
1020 	__u32 min_rec_size;
1021 	struct btf_ext_info *ext_info;
1022 	const char *desc;
1023 };
1024 
1025 static int btf_ext_setup_info(struct btf_ext *btf_ext,
1026 			      struct btf_ext_sec_setup_param *ext_sec)
1027 {
1028 	const struct btf_ext_info_sec *sinfo;
1029 	struct btf_ext_info *ext_info;
1030 	__u32 info_left, record_size;
1031 	/* The start of the info sec (including the __u32 record_size). */
1032 	void *info;
1033 
1034 	if (ext_sec->len == 0)
1035 		return 0;
1036 
1037 	if (ext_sec->off & 0x03) {
1038 		pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
1039 		     ext_sec->desc);
1040 		return -EINVAL;
1041 	}
1042 
1043 	info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
1044 	info_left = ext_sec->len;
1045 
1046 	if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
1047 		pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
1048 			 ext_sec->desc, ext_sec->off, ext_sec->len);
1049 		return -EINVAL;
1050 	}
1051 
1052 	/* At least a record size */
1053 	if (info_left < sizeof(__u32)) {
1054 		pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
1055 		return -EINVAL;
1056 	}
1057 
1058 	/* The record size needs to meet the minimum standard */
1059 	record_size = *(__u32 *)info;
1060 	if (record_size < ext_sec->min_rec_size ||
1061 	    record_size & 0x03) {
1062 		pr_debug("%s section in .BTF.ext has invalid record size %u\n",
1063 			 ext_sec->desc, record_size);
1064 		return -EINVAL;
1065 	}
1066 
1067 	sinfo = info + sizeof(__u32);
1068 	info_left -= sizeof(__u32);
1069 
1070 	/* If no records, return failure now so .BTF.ext won't be used. */
1071 	if (!info_left) {
1072 		pr_debug("%s section in .BTF.ext has no records", ext_sec->desc);
1073 		return -EINVAL;
1074 	}
1075 
1076 	while (info_left) {
1077 		unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
1078 		__u64 total_record_size;
1079 		__u32 num_records;
1080 
1081 		if (info_left < sec_hdrlen) {
1082 			pr_debug("%s section header is not found in .BTF.ext\n",
1083 			     ext_sec->desc);
1084 			return -EINVAL;
1085 		}
1086 
1087 		num_records = sinfo->num_info;
1088 		if (num_records == 0) {
1089 			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
1090 			     ext_sec->desc);
1091 			return -EINVAL;
1092 		}
1093 
1094 		total_record_size = sec_hdrlen +
1095 				    (__u64)num_records * record_size;
1096 		if (info_left < total_record_size) {
1097 			pr_debug("%s section has incorrect total record size in .BTF.ext\n",
1098 			     ext_sec->desc);
1099 			return -EINVAL;
1100 		}
1101 
1102 		info_left -= total_record_size;
1103 		sinfo = (void *)sinfo + total_record_size;
1104 	}
1105 
1106 	ext_info = ext_sec->ext_info;
1107 	ext_info->len = ext_sec->len - sizeof(__u32);
1108 	ext_info->rec_size = record_size;
1109 	ext_info->info = info + sizeof(__u32);
1110 
1111 	return 0;
1112 }
1113 
1114 static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
1115 {
1116 	struct btf_ext_sec_setup_param param = {
1117 		.off = btf_ext->hdr->func_info_off,
1118 		.len = btf_ext->hdr->func_info_len,
1119 		.min_rec_size = sizeof(struct bpf_func_info_min),
1120 		.ext_info = &btf_ext->func_info,
1121 		.desc = "func_info"
1122 	};
1123 
1124 	return btf_ext_setup_info(btf_ext, &param);
1125 }
1126 
1127 static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
1128 {
1129 	struct btf_ext_sec_setup_param param = {
1130 		.off = btf_ext->hdr->line_info_off,
1131 		.len = btf_ext->hdr->line_info_len,
1132 		.min_rec_size = sizeof(struct bpf_line_info_min),
1133 		.ext_info = &btf_ext->line_info,
1134 		.desc = "line_info",
1135 	};
1136 
1137 	return btf_ext_setup_info(btf_ext, &param);
1138 }
1139 
1140 static int btf_ext_setup_field_reloc(struct btf_ext *btf_ext)
1141 {
1142 	struct btf_ext_sec_setup_param param = {
1143 		.off = btf_ext->hdr->field_reloc_off,
1144 		.len = btf_ext->hdr->field_reloc_len,
1145 		.min_rec_size = sizeof(struct bpf_field_reloc),
1146 		.ext_info = &btf_ext->field_reloc_info,
1147 		.desc = "field_reloc",
1148 	};
1149 
1150 	return btf_ext_setup_info(btf_ext, &param);
1151 }
1152 
1153 static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
1154 {
1155 	const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
1156 
1157 	if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
1158 	    data_size < hdr->hdr_len) {
1159 		pr_debug("BTF.ext header not found");
1160 		return -EINVAL;
1161 	}
1162 
1163 	if (hdr->magic != BTF_MAGIC) {
1164 		pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
1165 		return -EINVAL;
1166 	}
1167 
1168 	if (hdr->version != BTF_VERSION) {
1169 		pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
1170 		return -ENOTSUP;
1171 	}
1172 
1173 	if (hdr->flags) {
1174 		pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
1175 		return -ENOTSUP;
1176 	}
1177 
1178 	if (data_size == hdr->hdr_len) {
1179 		pr_debug("BTF.ext has no data\n");
1180 		return -EINVAL;
1181 	}
1182 
1183 	return 0;
1184 }
1185 
1186 void btf_ext__free(struct btf_ext *btf_ext)
1187 {
1188 	if (IS_ERR_OR_NULL(btf_ext))
1189 		return;
1190 	free(btf_ext->data);
1191 	free(btf_ext);
1192 }
1193 
1194 struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
1195 {
1196 	struct btf_ext *btf_ext;
1197 	int err;
1198 
1199 	err = btf_ext_parse_hdr(data, size);
1200 	if (err)
1201 		return ERR_PTR(err);
1202 
1203 	btf_ext = calloc(1, sizeof(struct btf_ext));
1204 	if (!btf_ext)
1205 		return ERR_PTR(-ENOMEM);
1206 
1207 	btf_ext->data_size = size;
1208 	btf_ext->data = malloc(size);
1209 	if (!btf_ext->data) {
1210 		err = -ENOMEM;
1211 		goto done;
1212 	}
1213 	memcpy(btf_ext->data, data, size);
1214 
1215 	if (btf_ext->hdr->hdr_len <
1216 	    offsetofend(struct btf_ext_header, line_info_len))
1217 		goto done;
1218 	err = btf_ext_setup_func_info(btf_ext);
1219 	if (err)
1220 		goto done;
1221 
1222 	err = btf_ext_setup_line_info(btf_ext);
1223 	if (err)
1224 		goto done;
1225 
1226 	if (btf_ext->hdr->hdr_len <
1227 	    offsetofend(struct btf_ext_header, field_reloc_len))
1228 		goto done;
1229 	err = btf_ext_setup_field_reloc(btf_ext);
1230 	if (err)
1231 		goto done;
1232 
1233 done:
1234 	if (err) {
1235 		btf_ext__free(btf_ext);
1236 		return ERR_PTR(err);
1237 	}
1238 
1239 	return btf_ext;
1240 }
1241 
1242 const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
1243 {
1244 	*size = btf_ext->data_size;
1245 	return btf_ext->data;
1246 }
1247 
1248 static int btf_ext_reloc_info(const struct btf *btf,
1249 			      const struct btf_ext_info *ext_info,
1250 			      const char *sec_name, __u32 insns_cnt,
1251 			      void **info, __u32 *cnt)
1252 {
1253 	__u32 sec_hdrlen = sizeof(struct btf_ext_info_sec);
1254 	__u32 i, record_size, existing_len, records_len;
1255 	struct btf_ext_info_sec *sinfo;
1256 	const char *info_sec_name;
1257 	__u64 remain_len;
1258 	void *data;
1259 
1260 	record_size = ext_info->rec_size;
1261 	sinfo = ext_info->info;
1262 	remain_len = ext_info->len;
1263 	while (remain_len > 0) {
1264 		records_len = sinfo->num_info * record_size;
1265 		info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off);
1266 		if (strcmp(info_sec_name, sec_name)) {
1267 			remain_len -= sec_hdrlen + records_len;
1268 			sinfo = (void *)sinfo + sec_hdrlen + records_len;
1269 			continue;
1270 		}
1271 
1272 		existing_len = (*cnt) * record_size;
1273 		data = realloc(*info, existing_len + records_len);
1274 		if (!data)
1275 			return -ENOMEM;
1276 
1277 		memcpy(data + existing_len, sinfo->data, records_len);
1278 		/* adjust insn_off only, the rest data will be passed
1279 		 * to the kernel.
1280 		 */
1281 		for (i = 0; i < sinfo->num_info; i++) {
1282 			__u32 *insn_off;
1283 
1284 			insn_off = data + existing_len + (i * record_size);
1285 			*insn_off = *insn_off / sizeof(struct bpf_insn) +
1286 				insns_cnt;
1287 		}
1288 		*info = data;
1289 		*cnt += sinfo->num_info;
1290 		return 0;
1291 	}
1292 
1293 	return -ENOENT;
1294 }
1295 
1296 int btf_ext__reloc_func_info(const struct btf *btf,
1297 			     const struct btf_ext *btf_ext,
1298 			     const char *sec_name, __u32 insns_cnt,
1299 			     void **func_info, __u32 *cnt)
1300 {
1301 	return btf_ext_reloc_info(btf, &btf_ext->func_info, sec_name,
1302 				  insns_cnt, func_info, cnt);
1303 }
1304 
1305 int btf_ext__reloc_line_info(const struct btf *btf,
1306 			     const struct btf_ext *btf_ext,
1307 			     const char *sec_name, __u32 insns_cnt,
1308 			     void **line_info, __u32 *cnt)
1309 {
1310 	return btf_ext_reloc_info(btf, &btf_ext->line_info, sec_name,
1311 				  insns_cnt, line_info, cnt);
1312 }
1313 
1314 __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext)
1315 {
1316 	return btf_ext->func_info.rec_size;
1317 }
1318 
1319 __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
1320 {
1321 	return btf_ext->line_info.rec_size;
1322 }
1323 
1324 struct btf_dedup;
1325 
1326 static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
1327 				       const struct btf_dedup_opts *opts);
1328 static void btf_dedup_free(struct btf_dedup *d);
1329 static int btf_dedup_strings(struct btf_dedup *d);
1330 static int btf_dedup_prim_types(struct btf_dedup *d);
1331 static int btf_dedup_struct_types(struct btf_dedup *d);
1332 static int btf_dedup_ref_types(struct btf_dedup *d);
1333 static int btf_dedup_compact_types(struct btf_dedup *d);
1334 static int btf_dedup_remap_types(struct btf_dedup *d);
1335 
1336 /*
1337  * Deduplicate BTF types and strings.
1338  *
1339  * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF
1340  * section with all BTF type descriptors and string data. It overwrites that
1341  * memory in-place with deduplicated types and strings without any loss of
1342  * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section
1343  * is provided, all the strings referenced from .BTF.ext section are honored
1344  * and updated to point to the right offsets after deduplication.
1345  *
1346  * If function returns with error, type/string data might be garbled and should
1347  * be discarded.
1348  *
1349  * A more verbose and detailed description of both the problem btf_dedup is
1350  * solving and the solution itself can be found at:
1351  * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
1352  *
1353  * Problem description and justification
1354  * =====================================
1355  *
1356  * BTF type information is typically emitted either as a result of conversion
1357  * from DWARF to BTF or directly by the compiler. In both cases, each
1358  * compilation unit contains information about a subset of all the types that
1359  * are used in an application. These subsets frequently overlap and contain a
1360  * lot of duplicated information when later concatenated together into a single
1361  * binary. This algorithm ensures that each unique type is represented by a
1362  * single BTF type descriptor, greatly reducing the resulting size of BTF data.
1363  *
1364  * Compilation unit isolation and subsequent duplication of data are not the
1365  * only problems. The same type hierarchy (e.g., a struct and all the types it
1366  * references) in different compilation units can be represented in BTF to
1367  * various degrees of completeness (or, rather, incompleteness) due to
1368  * struct/union forward declarations.
1369  *
1370  * Let's take a look at an example that we'll use to better understand the
1371  * problem (and solution). Suppose we have two compilation units, each using
1372  * the same `struct S`, but each of them having incomplete type information
1373  * about the struct's fields:
1374  *
1375  * // CU #1:
1376  * struct S;
1377  * struct A {
1378  *	int a;
1379  *	struct A* self;
1380  *	struct S* parent;
1381  * };
1382  * struct B;
1383  * struct S {
1384  *	struct A* a_ptr;
1385  *	struct B* b_ptr;
1386  * };
1387  *
1388  * // CU #2:
1389  * struct S;
1390  * struct A;
1391  * struct B {
1392  *	int b;
1393  *	struct B* self;
1394  *	struct S* parent;
1395  * };
1396  * struct S {
1397  *	struct A* a_ptr;
1398  *	struct B* b_ptr;
1399  * };
1400  *
1401  * In case of CU #1, BTF data will know only that `struct B` exists (but no
1402  * more), but will know the complete type information about `struct A`. While
1403  * for CU #2, it will know full type information about `struct B`, but will
1404  * only know about the forward declaration of `struct A` (in BTF terms, it
1405  * will have a `BTF_KIND_FWD` type descriptor with name `A`).
1406  *
1407  * This compilation unit isolation means that it's possible that there is no
1408  * single CU with complete type information describing structs `S`, `A`, and
1409  * `B`. Also, we might get tons of duplicated and redundant type information.
1410  *
1411  * An additional complication we need to keep in mind comes from the fact that
1412  * types, in general, can form graphs containing cycles, not just DAGs.
1413  *
1414  * While the algorithm does deduplication, it also merges and resolves type
1415  * information (unless disabled through `struct btf_dedup_opts`), whenever possible.
1416  * E.g., in the example above with two compilation units having partial type
1417  * information for structs `A` and `B`, the output of the algorithm will emit
1418  * a single copy of each BTF type that describes structs `A`, `B`, and `S`
1419  * (as well as type information for `int` and pointers), as if they were defined
1420  * in a single compilation unit as:
1421  *
1422  * struct A {
1423  *	int a;
1424  *	struct A* self;
1425  *	struct S* parent;
1426  * };
1427  * struct B {
1428  *	int b;
1429  *	struct B* self;
1430  *	struct S* parent;
1431  * };
1432  * struct S {
1433  *	struct A* a_ptr;
1434  *	struct B* b_ptr;
1435  * };
1436  *
1437  * Algorithm summary
1438  * =================
1439  *
1440  * Algorithm completes its work in 6 separate passes:
1441  *
1442  * 1. Strings deduplication.
1443  * 2. Primitive types deduplication (int, enum, fwd).
1444  * 3. Struct/union types deduplication.
1445  * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func
1446  *    protos, and const/volatile/restrict modifiers).
1447  * 5. Types compaction.
1448  * 6. Types remapping.
1449  *
1450  * The algorithm determines a canonical type descriptor, which is a single
1451  * representative type for each truly unique type. This canonical type is the
1452  * one that will go into final deduplicated BTF type information. For
1453  * structs/unions, it is also the type that the algorithm will merge additional
1454  * type information into (while resolving FWDs), as it discovers it from data in
1455  * other CUs. Each input BTF type eventually gets either mapped to itself, if
1456  * that type is canonical, or to some other type, if that type is equivalent
1457  * and was chosen as canonical representative. This mapping is stored in
1458  * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that
1459  * FWD type got resolved to.
1460  *
1461  * To facilitate fast discovery of canonical types, we also maintain a canonical
1462  * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash
1463  * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types
1464  * that match that signature. With a sufficiently good choice of type signature
1465  * hashing function, we can limit the number of canonical types for each unique
1466  * type signature to a very small number, allowing us to find the canonical
1467  * type for any duplicated type very quickly.
1468  *
1469  * Struct/union deduplication is the most critical part, and the algorithm for
1470  * deduplicating structs/unions is described in greater detail in the comments
1471  * for the `btf_dedup_is_equiv` function.
1472  */
1473 int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
1474 	       const struct btf_dedup_opts *opts)
1475 {
1476 	struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts);
1477 	int err;
1478 
1479 	if (IS_ERR(d)) {
1480 		pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
1481 		return -EINVAL;
1482 	}
1483 
1484 	err = btf_dedup_strings(d);
1485 	if (err < 0) {
1486 		pr_debug("btf_dedup_strings failed:%d\n", err);
1487 		goto done;
1488 	}
1489 	err = btf_dedup_prim_types(d);
1490 	if (err < 0) {
1491 		pr_debug("btf_dedup_prim_types failed:%d\n", err);
1492 		goto done;
1493 	}
1494 	err = btf_dedup_struct_types(d);
1495 	if (err < 0) {
1496 		pr_debug("btf_dedup_struct_types failed:%d\n", err);
1497 		goto done;
1498 	}
1499 	err = btf_dedup_ref_types(d);
1500 	if (err < 0) {
1501 		pr_debug("btf_dedup_ref_types failed:%d\n", err);
1502 		goto done;
1503 	}
1504 	err = btf_dedup_compact_types(d);
1505 	if (err < 0) {
1506 		pr_debug("btf_dedup_compact_types failed:%d\n", err);
1507 		goto done;
1508 	}
1509 	err = btf_dedup_remap_types(d);
1510 	if (err < 0) {
1511 		pr_debug("btf_dedup_remap_types failed:%d\n", err);
1512 		goto done;
1513 	}
1514 
1515 done:
1516 	btf_dedup_free(d);
1517 	return err;
1518 }
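
/*
 * Example usage (an illustrative sketch, not part of the original
 * source): deduplicating a freshly parsed object's BTF in-place, with
 * forward declaration resolution left enabled:
 *
 *	struct btf_dedup_opts opts = { .dont_resolve_fwds = false };
 *
 *	err = btf__dedup(btf, btf_ext, &opts);
 *	if (err)
 *		return err;	// type/string data may be garbled now
 */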
1519 
1520 #define BTF_UNPROCESSED_ID ((__u32)-1)
1521 #define BTF_IN_PROGRESS_ID ((__u32)-2)
1522 
1523 struct btf_dedup {
1524 	/* .BTF section to be deduped in-place */
1525 	struct btf *btf;
1526 	/*
1527 	 * Optional .BTF.ext section. When provided, any strings referenced
1528 	 * from it will be taken into account when deduping strings.
1529 	 */
1530 	struct btf_ext *btf_ext;
1531 	/*
1532 	 * This is a map from any type's signature hash to a list of possible
1533 	 * canonical representative type candidates. Hash collisions are
1534 	 * ignored, so even types of various kinds can share the same list of
1535 	 * candidates, which is fine because we rely on subsequent
1536 	 * btf_xxx_equal() checks to authoritatively verify type equality.
1537 	 */
1538 	struct hashmap *dedup_table;
1539 	/* Canonical types map */
1540 	__u32 *map;
1541 	/* Hypothetical mapping, used during type graph equivalence checks */
1542 	__u32 *hypot_map;
1543 	__u32 *hypot_list;
1544 	size_t hypot_cnt;
1545 	size_t hypot_cap;
1546 	/* Various options modifying the behavior of the algorithm */
1547 	struct btf_dedup_opts opts;
1548 };
1549 
1550 struct btf_str_ptr {
1551 	const char *str;
1552 	__u32 new_off;
1553 	bool used;
1554 };
1555 
1556 struct btf_str_ptrs {
1557 	struct btf_str_ptr *ptrs;
1558 	const char *data;
1559 	__u32 cnt;
1560 	__u32 cap;
1561 };
1562 
1563 static long hash_combine(long h, long value)
1564 {
1565 	return h * 31 + value;
1566 }
1567 
1568 #define for_each_dedup_cand(d, node, hash) \
1569 	hashmap__for_each_key_entry(d->dedup_table, node, (void *)hash)
1570 
1571 static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
1572 {
1573 	return hashmap__append(d->dedup_table,
1574 			       (void *)hash, (void *)(long)type_id);
1575 }
1576 
1577 static int btf_dedup_hypot_map_add(struct btf_dedup *d,
1578 				   __u32 from_id, __u32 to_id)
1579 {
1580 	if (d->hypot_cnt == d->hypot_cap) {
1581 		__u32 *new_list;
1582 
1583 		d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
1584 		new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap);
1585 		if (!new_list)
1586 			return -ENOMEM;
1587 		d->hypot_list = new_list;
1588 	}
1589 	d->hypot_list[d->hypot_cnt++] = from_id;
1590 	d->hypot_map[from_id] = to_id;
1591 	return 0;
1592 }
1593 
1594 static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
1595 {
1596 	int i;
1597 
1598 	for (i = 0; i < d->hypot_cnt; i++)
1599 		d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
1600 	d->hypot_cnt = 0;
1601 }
1602 
1603 static void btf_dedup_free(struct btf_dedup *d)
1604 {
1605 	hashmap__free(d->dedup_table);
1606 	d->dedup_table = NULL;
1607 
1608 	free(d->map);
1609 	d->map = NULL;
1610 
1611 	free(d->hypot_map);
1612 	d->hypot_map = NULL;
1613 
1614 	free(d->hypot_list);
1615 	d->hypot_list = NULL;
1616 
1617 	free(d);
1618 }
1619 
1620 static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx)
1621 {
1622 	return (size_t)key;
1623 }
1624 
1625 static size_t btf_dedup_collision_hash_fn(const void *key, void *ctx)
1626 {
1627 	return 0;
1628 }
1629 
1630 static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx)
1631 {
1632 	return k1 == k2;
1633 }
1634 
1635 static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
1636 				       const struct btf_dedup_opts *opts)
1637 {
1638 	struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
1639 	hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
1640 	int i, err = 0;
1641 
1642 	if (!d)
1643 		return ERR_PTR(-ENOMEM);
1644 
1645 	d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
1646 	/* dedup_table_size is now used only to force collisions in tests */
1647 	if (opts && opts->dedup_table_size == 1)
1648 		hash_fn = btf_dedup_collision_hash_fn;
1649 
1650 	d->btf = btf;
1651 	d->btf_ext = btf_ext;
1652 
1653 	d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
1654 	if (IS_ERR(d->dedup_table)) {
1655 		err = PTR_ERR(d->dedup_table);
1656 		d->dedup_table = NULL;
1657 		goto done;
1658 	}
1659 
1660 	d->map = malloc(sizeof(__u32) * (1 + btf->nr_types));
1661 	if (!d->map) {
1662 		err = -ENOMEM;
1663 		goto done;
1664 	}
1665 	/* special BTF "void" type is made canonical immediately */
1666 	d->map[0] = 0;
1667 	for (i = 1; i <= btf->nr_types; i++) {
1668 		struct btf_type *t = d->btf->types[i];
1669 
1670 		/* VAR and DATASEC are never deduped and are self-canonical */
1671 		if (btf_is_var(t) || btf_is_datasec(t))
1672 			d->map[i] = i;
1673 		else
1674 			d->map[i] = BTF_UNPROCESSED_ID;
1675 	}
1676 
1677 	d->hypot_map = malloc(sizeof(__u32) * (1 + btf->nr_types));
1678 	if (!d->hypot_map) {
1679 		err = -ENOMEM;
1680 		goto done;
1681 	}
1682 	for (i = 0; i <= btf->nr_types; i++)
1683 		d->hypot_map[i] = BTF_UNPROCESSED_ID;
1684 
1685 done:
1686 	if (err) {
1687 		btf_dedup_free(d);
1688 		return ERR_PTR(err);
1689 	}
1690 
1691 	return d;
1692 }
1693 
1694 typedef int (*str_off_fn_t)(__u32 *str_off_ptr, void *ctx);
1695 
1696 /*
1697  * Iterate over all possible places in .BTF and .BTF.ext that can reference
1698  * a string, and pass a pointer to each one to the provided callback `fn`.
1699  */
1700 static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
1701 {
1702 	void *line_data_cur, *line_data_end;
1703 	int i, j, r, rec_size;
1704 	struct btf_type *t;
1705 
1706 	for (i = 1; i <= d->btf->nr_types; i++) {
1707 		t = d->btf->types[i];
1708 		r = fn(&t->name_off, ctx);
1709 		if (r)
1710 			return r;
1711 
1712 		switch (btf_kind(t)) {
1713 		case BTF_KIND_STRUCT:
1714 		case BTF_KIND_UNION: {
1715 			struct btf_member *m = btf_members(t);
1716 			__u16 vlen = btf_vlen(t);
1717 
1718 			for (j = 0; j < vlen; j++) {
1719 				r = fn(&m->name_off, ctx);
1720 				if (r)
1721 					return r;
1722 				m++;
1723 			}
1724 			break;
1725 		}
1726 		case BTF_KIND_ENUM: {
1727 			struct btf_enum *m = btf_enum(t);
1728 			__u16 vlen = btf_vlen(t);
1729 
1730 			for (j = 0; j < vlen; j++) {
1731 				r = fn(&m->name_off, ctx);
1732 				if (r)
1733 					return r;
1734 				m++;
1735 			}
1736 			break;
1737 		}
1738 		case BTF_KIND_FUNC_PROTO: {
1739 			struct btf_param *m = btf_params(t);
1740 			__u16 vlen = btf_vlen(t);
1741 
1742 			for (j = 0; j < vlen; j++) {
1743 				r = fn(&m->name_off, ctx);
1744 				if (r)
1745 					return r;
1746 				m++;
1747 			}
1748 			break;
1749 		}
1750 		default:
1751 			break;
1752 		}
1753 	}
1754 
1755 	if (!d->btf_ext)
1756 		return 0;
1757 
1758 	line_data_cur = d->btf_ext->line_info.info;
1759 	line_data_end = d->btf_ext->line_info.info + d->btf_ext->line_info.len;
1760 	rec_size = d->btf_ext->line_info.rec_size;
1761 
1762 	while (line_data_cur < line_data_end) {
1763 		struct btf_ext_info_sec *sec = line_data_cur;
1764 		struct bpf_line_info_min *line_info;
1765 		__u32 num_info = sec->num_info;
1766 
1767 		r = fn(&sec->sec_name_off, ctx);
1768 		if (r)
1769 			return r;
1770 
1771 		line_data_cur += sizeof(struct btf_ext_info_sec);
1772 		for (i = 0; i < num_info; i++) {
1773 			line_info = line_data_cur;
1774 			r = fn(&line_info->file_name_off, ctx);
1775 			if (r)
1776 				return r;
1777 			r = fn(&line_info->line_off, ctx);
1778 			if (r)
1779 				return r;
1780 			line_data_cur += rec_size;
1781 		}
1782 	}
1783 
1784 	return 0;
1785 }
1786 
1787 static int str_sort_by_content(const void *a1, const void *a2)
1788 {
1789 	const struct btf_str_ptr *p1 = a1;
1790 	const struct btf_str_ptr *p2 = a2;
1791 
1792 	return strcmp(p1->str, p2->str);
1793 }
1794 
1795 static int str_sort_by_offset(const void *a1, const void *a2)
1796 {
1797 	const struct btf_str_ptr *p1 = a1;
1798 	const struct btf_str_ptr *p2 = a2;
1799 
1800 	if (p1->str != p2->str)
1801 		return p1->str < p2->str ? -1 : 1;
1802 	return 0;
1803 }
1804 
1805 static int btf_dedup_str_ptr_cmp(const void *str_ptr, const void *pelem)
1806 {
1807 	const struct btf_str_ptr *p = pelem;
1808 
1809 	if (str_ptr != p->str)
1810 		return (const char *)str_ptr < p->str ? -1 : 1;
1811 	return 0;
1812 }
1813 
1814 static int btf_str_mark_as_used(__u32 *str_off_ptr, void *ctx)
1815 {
1816 	struct btf_str_ptrs *strs;
1817 	struct btf_str_ptr *s;
1818 
1819 	if (*str_off_ptr == 0)
1820 		return 0;
1821 
1822 	strs = ctx;
1823 	s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
1824 		    sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
1825 	if (!s)
1826 		return -EINVAL;
1827 	s->used = true;
1828 	return 0;
1829 }
1830 
1831 static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx)
1832 {
1833 	struct btf_str_ptrs *strs;
1834 	struct btf_str_ptr *s;
1835 
1836 	if (*str_off_ptr == 0)
1837 		return 0;
1838 
1839 	strs = ctx;
1840 	s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
1841 		    sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
1842 	if (!s)
1843 		return -EINVAL;
1844 	*str_off_ptr = s->new_off;
1845 	return 0;
1846 }
1847 
1848 /*
1849  * Dedup strings and filter out those that are not referenced from either the
1850  * .BTF or .BTF.ext (if provided) section.
1851  *
1852  * This is done by building an index of all strings in BTF's string section,
1853  * then iterating over all entities that can reference strings (e.g., type
1854  * names, struct field names, .BTF.ext line info, etc) and marking the
1855  * corresponding strings as used. After that, all used strings are deduped and
1856  * compacted into a sequential blob of memory with new offsets calculated. Then
1857  * all the string references are iterated again and rewritten using new offsets.
1858  */
1859 static int btf_dedup_strings(struct btf_dedup *d)
1860 {
1861 	const struct btf_header *hdr = d->btf->hdr;
1862 	char *start = (char *)d->btf->nohdr_data + hdr->str_off;
1863 	char *end = start + d->btf->hdr->str_len;
1864 	char *p = start, *tmp_strs = NULL;
1865 	struct btf_str_ptrs strs = {
1866 		.cnt = 0,
1867 		.cap = 0,
1868 		.ptrs = NULL,
1869 		.data = start,
1870 	};
1871 	int i, j, err = 0, grp_idx;
1872 	bool grp_used;
1873 
1874 	/* build index of all strings */
1875 	while (p < end) {
1876 		if (strs.cnt + 1 > strs.cap) {
1877 			struct btf_str_ptr *new_ptrs;
1878 
1879 			strs.cap += max(strs.cnt / 2, 16U);
1880 			new_ptrs = realloc(strs.ptrs,
1881 					   sizeof(strs.ptrs[0]) * strs.cap);
1882 			if (!new_ptrs) {
1883 				err = -ENOMEM;
1884 				goto done;
1885 			}
1886 			strs.ptrs = new_ptrs;
1887 		}
1888 
1889 		strs.ptrs[strs.cnt].str = p;
1890 		strs.ptrs[strs.cnt].used = false;
1891 
1892 		p += strlen(p) + 1;
1893 		strs.cnt++;
1894 	}
1895 
1896 	/* temporary storage for deduplicated strings */
1897 	tmp_strs = malloc(d->btf->hdr->str_len);
1898 	if (!tmp_strs) {
1899 		err = -ENOMEM;
1900 		goto done;
1901 	}
1902 
1903 	/* mark all used strings */
1904 	strs.ptrs[0].used = true;
1905 	err = btf_for_each_str_off(d, btf_str_mark_as_used, &strs);
1906 	if (err)
1907 		goto done;
1908 
1909 	/* sort strings by content, so that we can identify duplicates */
1910 	qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_content);
1911 
1912 	/*
1913 	 * iterate groups of equal strings and if any instance in a group was
1914 	 * referenced, emit a single instance and remember its new offset
1915 	 */
1916 	p = tmp_strs;
1917 	grp_idx = 0;
1918 	grp_used = strs.ptrs[0].used;
1919 	/* iterate past end to avoid code duplication after loop */
1920 	for (i = 1; i <= strs.cnt; i++) {
1921 		/*
1922 		 * when i == strs.cnt, we want to skip string comparison and go
1923 		 * straight to handling last group of strings (otherwise we'd
1924 		 * need to handle last group after the loop w/ duplicated code)
1925 		 */
1926 		if (i < strs.cnt &&
1927 		    !strcmp(strs.ptrs[i].str, strs.ptrs[grp_idx].str)) {
1928 			grp_used = grp_used || strs.ptrs[i].used;
1929 			continue;
1930 		}
1931 
1932 		/*
1933 		 * this check would have been required after the loop to handle
1934 		 * last group of strings, but due to <= condition in a loop
1935 		 * we avoid that duplication
1936 		 */
1937 		if (grp_used) {
1938 			int new_off = p - tmp_strs;
1939 			__u32 len = strlen(strs.ptrs[grp_idx].str);
1940 
1941 			memmove(p, strs.ptrs[grp_idx].str, len + 1);
1942 			for (j = grp_idx; j < i; j++)
1943 				strs.ptrs[j].new_off = new_off;
1944 			p += len + 1;
1945 		}
1946 
1947 		if (i < strs.cnt) {
1948 			grp_idx = i;
1949 			grp_used = strs.ptrs[i].used;
1950 		}
1951 	}
1952 
1953 	/* replace original strings with deduped ones */
1954 	d->btf->hdr->str_len = p - tmp_strs;
1955 	memmove(start, tmp_strs, d->btf->hdr->str_len);
1956 	end = start + d->btf->hdr->str_len;
1957 
1958 	/* restore original order for further binary search lookups */
1959 	qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_offset);
1960 
1961 	/* remap string offsets */
1962 	err = btf_for_each_str_off(d, btf_str_remap_offset, &strs);
1963 	if (err)
1964 		goto done;
1965 
1966 	d->btf->hdr->str_len = end - start;
1967 
1968 done:
1969 	free(tmp_strs);
1970 	free(strs.ptrs);
1971 	return err;
1972 }
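
/*
 * Worked example (illustrative, not part of the original source): given
 * a string section "\0int\0a\0int\0" with two copies of "int", the
 * content sort above groups the copies together, a single "int" (plus
 * "a", if referenced) is emitted into tmp_strs, and every name_off that
 * pointed at either copy is remapped to the surviving instance's new
 * offset.
 */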
1973 
1974 static long btf_hash_common(struct btf_type *t)
1975 {
1976 	long h;
1977 
1978 	h = hash_combine(0, t->name_off);
1979 	h = hash_combine(h, t->info);
1980 	h = hash_combine(h, t->size);
1981 	return h;
1982 }
1983 
1984 static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
1985 {
1986 	return t1->name_off == t2->name_off &&
1987 	       t1->info == t2->info &&
1988 	       t1->size == t2->size;
1989 }
1990 
1991 /* Calculate type signature hash of INT. */
1992 static long btf_hash_int(struct btf_type *t)
1993 {
1994 	__u32 info = *(__u32 *)(t + 1);
1995 	long h;
1996 
1997 	h = btf_hash_common(t);
1998 	h = hash_combine(h, info);
1999 	return h;
2000 }
2001 
2002 /* Check structural equality of two INTs. */
2003 static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
2004 {
2005 	__u32 info1, info2;
2006 
2007 	if (!btf_equal_common(t1, t2))
2008 		return false;
2009 	info1 = *(__u32 *)(t1 + 1);
2010 	info2 = *(__u32 *)(t2 + 1);
2011 	return info1 == info2;
2012 }
2013 
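/*
 * Editor's note (illustrative, not part of the original source): the extra
 * __u32 following a BTF_KIND_INT's struct btf_type packs encoding, bit offset
 * and bit size, which is why hashing and comparing that single word is enough.
 * Per include/uapi/linux/btf.h it unpacks roughly as:
 *
 *	__u32 info     = *(__u32 *)(t + 1);
 *	__u32 encoding = BTF_INT_ENCODING(info);  (SIGNED/CHAR/BOOL flags)
 *	__u32 offset   = BTF_INT_OFFSET(info);    (bit offset within the unit)
 *	__u32 bits     = BTF_INT_BITS(info);      (bit size, e.g. 32 for 'int')
 */
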
2014 /* Calculate type signature hash of ENUM. */
2015 static long btf_hash_enum(struct btf_type *t)
2016 {
2017 	long h;
2018 
2019 	/* don't hash vlen and enum members to support enum fwd resolving */
2020 	h = hash_combine(0, t->name_off);
2021 	h = hash_combine(h, t->info & ~0xffff);
2022 	h = hash_combine(h, t->size);
2023 	return h;
2024 }
2025 
2026 /* Check structural equality of two ENUMs. */
2027 static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
2028 {
2029 	const struct btf_enum *m1, *m2;
2030 	__u16 vlen;
2031 	int i;
2032 
2033 	if (!btf_equal_common(t1, t2))
2034 		return false;
2035 
2036 	vlen = btf_vlen(t1);
2037 	m1 = btf_enum(t1);
2038 	m2 = btf_enum(t2);
2039 	for (i = 0; i < vlen; i++) {
2040 		if (m1->name_off != m2->name_off || m1->val != m2->val)
2041 			return false;
2042 		m1++;
2043 		m2++;
2044 	}
2045 	return true;
2046 }
2047 
2048 static inline bool btf_is_enum_fwd(struct btf_type *t)
2049 {
2050 	return btf_is_enum(t) && btf_vlen(t) == 0;
2051 }
2052 
2053 static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
2054 {
2055 	if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
2056 		return btf_equal_enum(t1, t2);
2057 	/* ignore vlen when comparing */
2058 	return t1->name_off == t2->name_off &&
2059 	       (t1->info & ~0xffff) == (t2->info & ~0xffff) &&
2060 	       t1->size == t2->size;
2061 }
2062 
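/*
 * Editor's note (hypothetical example): a forward-declared enum such as
 *
 *	enum state;                      (emitted as ENUM with vlen == 0)
 *	enum state { ON = 1, OFF = 2 };  (full definition, vlen == 2)
 *
 * is why btf_hash_enum() masks vlen out of info and skips members: both
 * variants land in the same hash bucket, and btf_compat_enum() then allows
 * the empty one to be resolved to the full definition, provided name and
 * size also match.
 */
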
2063 /*
2064  * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
2065  * as referenced type IDs equivalence is established separately during type
2066  * graph equivalence check algorithm.
2067  */
2068 static long btf_hash_struct(struct btf_type *t)
2069 {
2070 	const struct btf_member *member = btf_members(t);
2071 	__u32 vlen = btf_vlen(t);
2072 	long h = btf_hash_common(t);
2073 	int i;
2074 
2075 	for (i = 0; i < vlen; i++) {
2076 		h = hash_combine(h, member->name_off);
2077 		h = hash_combine(h, member->offset);
2078 		/* no hashing of referenced type ID, it may still be unresolved */
2079 		member++;
2080 	}
2081 	return h;
2082 }
2083 
2084 /*
2085  * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced type
2086  * IDs. This check is performed during type graph equivalence check and
2087  * referenced types equivalence is checked separately.
2088  */
2089 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
2090 {
2091 	const struct btf_member *m1, *m2;
2092 	__u16 vlen;
2093 	int i;
2094 
2095 	if (!btf_equal_common(t1, t2))
2096 		return false;
2097 
2098 	vlen = btf_vlen(t1);
2099 	m1 = btf_members(t1);
2100 	m2 = btf_members(t2);
2101 	for (i = 0; i < vlen; i++) {
2102 		if (m1->name_off != m2->name_off || m1->offset != m2->offset)
2103 			return false;
2104 		m1++;
2105 		m2++;
2106 	}
2107 	return true;
2108 }
2109 
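/*
 * Editor's note (hypothetical example): member type IDs are deliberately left
 * out of the hash and the shallow comparison above. Two compilation units may
 * both contain
 *
 *	struct list { struct node *head; };
 *
 * yet reference 'struct node' through different, not yet deduplicated, type
 * IDs. Shallow equality still matches the two 'struct list' copies; their
 * referenced types are compared later by btf_dedup_is_equiv() during the type
 * graph equivalence check.
 */
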
2110 /*
2111  * Calculate type signature hash of ARRAY, including referenced type IDs,
2112  * under assumption that they were already resolved to canonical type IDs and
2113  * are not going to change.
2114  */
2115 static long btf_hash_array(struct btf_type *t)
2116 {
2117 	const struct btf_array *info = btf_array(t);
2118 	long h = btf_hash_common(t);
2119 
2120 	h = hash_combine(h, info->type);
2121 	h = hash_combine(h, info->index_type);
2122 	h = hash_combine(h, info->nelems);
2123 	return h;
2124 }
2125 
2126 /*
2127  * Check exact equality of two ARRAYs, taking into account referenced
2128  * type IDs, under assumption that they were already resolved to canonical
2129  * type IDs and are not going to change.
2130  * This function is called during reference types deduplication to compare
2131  * ARRAY to potential canonical representative.
2132  */
2133 static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
2134 {
2135 	const struct btf_array *info1, *info2;
2136 
2137 	if (!btf_equal_common(t1, t2))
2138 		return false;
2139 
2140 	info1 = btf_array(t1);
2141 	info2 = btf_array(t2);
2142 	return info1->type == info2->type &&
2143 	       info1->index_type == info2->index_type &&
2144 	       info1->nelems == info2->nelems;
2145 }
2146 
2147 /*
2148  * Check structural compatibility of two ARRAYs, ignoring referenced type
2149  * IDs. This check is performed during type graph equivalence check and
2150  * referenced types equivalence is checked separately.
2151  */
2152 static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
2153 {
2154 	if (!btf_equal_common(t1, t2))
2155 		return false;
2156 
2157 	return btf_array(t1)->nelems == btf_array(t2)->nelems;
2158 }
2159 
2160 /*
2161  * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
2162  * under assumption that they were already resolved to canonical type IDs and
2163  * are not going to change.
2164  */
2165 static long btf_hash_fnproto(struct btf_type *t)
2166 {
2167 	const struct btf_param *member = btf_params(t);
2168 	__u16 vlen = btf_vlen(t);
2169 	long h = btf_hash_common(t);
2170 	int i;
2171 
2172 	for (i = 0; i < vlen; i++) {
2173 		h = hash_combine(h, member->name_off);
2174 		h = hash_combine(h, member->type);
2175 		member++;
2176 	}
2177 	return h;
2178 }
2179 
2180 /*
2181  * Check exact equality of two FUNC_PROTOs, taking into account referenced
2182  * type IDs, under assumption that they were already resolved to canonical
2183  * type IDs and are not going to change.
2184  * This function is called during reference types deduplication to compare
2185  * FUNC_PROTO to potential canonical representative.
2186  */
2187 static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
2188 {
2189 	const struct btf_param *m1, *m2;
2190 	__u16 vlen;
2191 	int i;
2192 
2193 	if (!btf_equal_common(t1, t2))
2194 		return false;
2195 
2196 	vlen = btf_vlen(t1);
2197 	m1 = btf_params(t1);
2198 	m2 = btf_params(t2);
2199 	for (i = 0; i < vlen; i++) {
2200 		if (m1->name_off != m2->name_off || m1->type != m2->type)
2201 			return false;
2202 		m1++;
2203 		m2++;
2204 	}
2205 	return true;
2206 }
2207 
2208 /*
2209  * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
2210  * IDs. This check is performed during type graph equivalence check and
2211  * referenced types equivalence is checked separately.
2212  */
2213 static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
2214 {
2215 	const struct btf_param *m1, *m2;
2216 	__u16 vlen;
2217 	int i;
2218 
2219 	/* skip return type ID */
2220 	if (t1->name_off != t2->name_off || t1->info != t2->info)
2221 		return false;
2222 
2223 	vlen = btf_vlen(t1);
2224 	m1 = btf_params(t1);
2225 	m2 = btf_params(t2);
2226 	for (i = 0; i < vlen; i++) {
2227 		if (m1->name_off != m2->name_off)
2228 			return false;
2229 		m1++;
2230 		m2++;
2231 	}
2232 	return true;
2233 }
2234 
2235 /*
2236  * Deduplicate primitive types, which can't reference other types, by calculating
2237  * their type signature hash and comparing them with any possible canonical
2238  * candidate. If no canonical candidate matches, the type itself is marked as
2239  * canonical and is added into `btf_dedup->dedup_table` as another candidate.
2240  */
2241 static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
2242 {
2243 	struct btf_type *t = d->btf->types[type_id];
2244 	struct hashmap_entry *hash_entry;
2245 	struct btf_type *cand;
2246 	/* if we don't find equivalent type, then we are canonical */
2247 	__u32 new_id = type_id;
2248 	__u32 cand_id;
2249 	long h;
2250 
2251 	switch (btf_kind(t)) {
2252 	case BTF_KIND_CONST:
2253 	case BTF_KIND_VOLATILE:
2254 	case BTF_KIND_RESTRICT:
2255 	case BTF_KIND_PTR:
2256 	case BTF_KIND_TYPEDEF:
2257 	case BTF_KIND_ARRAY:
2258 	case BTF_KIND_STRUCT:
2259 	case BTF_KIND_UNION:
2260 	case BTF_KIND_FUNC:
2261 	case BTF_KIND_FUNC_PROTO:
2262 	case BTF_KIND_VAR:
2263 	case BTF_KIND_DATASEC:
2264 		return 0;
2265 
2266 	case BTF_KIND_INT:
2267 		h = btf_hash_int(t);
2268 		for_each_dedup_cand(d, hash_entry, h) {
2269 			cand_id = (__u32)(long)hash_entry->value;
2270 			cand = d->btf->types[cand_id];
2271 			if (btf_equal_int(t, cand)) {
2272 				new_id = cand_id;
2273 				break;
2274 			}
2275 		}
2276 		break;
2277 
2278 	case BTF_KIND_ENUM:
2279 		h = btf_hash_enum(t);
2280 		for_each_dedup_cand(d, hash_entry, h) {
2281 			cand_id = (__u32)(long)hash_entry->value;
2282 			cand = d->btf->types[cand_id];
2283 			if (btf_equal_enum(t, cand)) {
2284 				new_id = cand_id;
2285 				break;
2286 			}
2287 			if (d->opts.dont_resolve_fwds)
2288 				continue;
2289 			if (btf_compat_enum(t, cand)) {
2290 				if (btf_is_enum_fwd(t)) {
2291 					/* resolve fwd to full enum */
2292 					new_id = cand_id;
2293 					break;
2294 				}
2295 				/* resolve canonical enum fwd to full enum */
2296 				d->map[cand_id] = type_id;
2297 			}
2298 		}
2299 		break;
2300 
2301 	case BTF_KIND_FWD:
2302 		h = btf_hash_common(t);
2303 		for_each_dedup_cand(d, hash_entry, h) {
2304 			cand_id = (__u32)(long)hash_entry->value;
2305 			cand = d->btf->types[cand_id];
2306 			if (btf_equal_common(t, cand)) {
2307 				new_id = cand_id;
2308 				break;
2309 			}
2310 		}
2311 		break;
2312 
2313 	default:
2314 		return -EINVAL;
2315 	}
2316 
2317 	d->map[type_id] = new_id;
2318 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
2319 		return -ENOMEM;
2320 
2321 	return 0;
2322 }
2323 
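/*
 * Editor's sketch (hypothetical type IDs): if types [1] and [3] are both
 * 'int' with identical encoding, then after btf_dedup_prim_type() has run for
 * both of them:
 *
 *	d->map[1] = 1;   (first 'int' becomes its own canonical type)
 *	d->map[3] = 1;   (duplicate 'int' maps to the canonical one)
 *
 * and only type 1 ends up in d->dedup_table for future candidate lookups.
 */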
2324 static int btf_dedup_prim_types(struct btf_dedup *d)
2325 {
2326 	int i, err;
2327 
2328 	for (i = 1; i <= d->btf->nr_types; i++) {
2329 		err = btf_dedup_prim_type(d, i);
2330 		if (err)
2331 			return err;
2332 	}
2333 	return 0;
2334 }
2335 
2336 /*
2337  * Check whether type is already mapped into a canonical one (could be to itself).
2338  */
2339 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
2340 {
2341 	return d->map[type_id] <= BTF_MAX_NR_TYPES;
2342 }
2343 
2344 /*
2345  * Resolve type ID into its canonical type ID, if any; otherwise return original
2346  * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
2347  * STRUCT/UNION link and resolve it into canonical type ID as well.
2348  */
2349 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
2350 {
2351 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
2352 		type_id = d->map[type_id];
2353 	return type_id;
2354 }
2355 
2356 /*
2357  * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
2358  * type ID.
2359  */
2360 static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
2361 {
2362 	__u32 orig_type_id = type_id;
2363 
2364 	if (!btf_is_fwd(d->btf->types[type_id]))
2365 		return type_id;
2366 
2367 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
2368 		type_id = d->map[type_id];
2369 
2370 	if (!btf_is_fwd(d->btf->types[type_id]))
2371 		return type_id;
2372 
2373 	return orig_type_id;
2374 }
2375 
2376 
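/*
 * For BTF_KIND_FWD, the kind flag records what the forward declaration stands
 * for: 0 means struct, 1 means union.
 */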
2377 static inline __u16 btf_fwd_kind(struct btf_type *t)
2378 {
2379 	return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
2380 }
2381 
2382 /*
2383  * Check equivalence of BTF type graph formed by candidate struct/union (we'll
2384  * call it "candidate graph" in this description for brevity) to a type graph
2385  * formed by (potential) canonical struct/union ("canonical graph" for brevity
2386  * here, though keep in mind that not all types in canonical graph are
2387  * necessarily canonical representatives themselves, some of them might be
2388  * duplicates or their uniqueness might not have been established yet).
2389  * Returns:
2390  *  - >0, if type graphs are equivalent;
2391  *  -  0, if not equivalent;
2392  *  - <0, on error.
2393  *
2394  * Algorithm performs side-by-side DFS traversal of both type graphs and checks
2395  * equivalence of BTF types at each step. If at any point BTF types in candidate
2396  * and canonical graphs are not compatible structurally, whole graphs are
2397  * incompatible. If types are structurally equivalent (i.e., all information
2398  * except referenced type IDs is exactly the same), a mapping from `canon_id` to
2399  * a `cand_id` is recorded in the hypothetical mapping (`btf_dedup->hypot_map`).
2400  * If a type references other types, then those referenced types are checked
2401  * for equivalence recursively.
2402  *
2403  * During DFS traversal, if we find that for current `canon_id` type we
2404  * already have some mapping in hypothetical map, we check for two possible
2405  * situations:
2406  *   - `canon_id` is mapped to exactly the same type as `cand_id`. This will
2407  *     happen when type graphs have cycles. In this case we assume those two
2408  *     types are equivalent.
2409  *   - `canon_id` is mapped to a different type. This is a contradiction in our
2410  *     hypothetical mapping, because the same type in canonical graph corresponds
2411  *     to two different types in candidate graph, which for equivalent type
2412  *     graphs shouldn't happen. This condition terminates equivalence check
2413  *     with negative result.
2414  *
2415  * If type graph traversal exhausts types to check and finds no contradiction,
2416  * then type graphs are equivalent.
2417  *
2418  * When checking types for equivalence, there is one special case: FWD types.
2419  * If FWD type resolution is allowed and one of the types (either from canonical
2420  * or candidate graph) is FWD and other is STRUCT/UNION (depending on FWD's kind
2421  * flag) and their names match, hypothetical mapping is updated to point from
2422  * FWD to STRUCT/UNION. If the graphs are successfully determined to be equivalent,
2423  * this mapping will be used to record FWD -> STRUCT/UNION mapping permanently.
2424  *
2425  * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
2426  * if there are two identically named (or anonymous) structs/unions that are
2427  * structurally compatible, one of which has a FWD field, while the other is a
2428  * concrete STRUCT/UNION, but according to the C sources they are different
2429  * structs/unions referencing different types with the same name. This is
2430  * extremely unlikely to happen, but the btf_dedup API allows disabling FWD
2431  * resolution if this logic causes problems.
2432  *
2433  * Doing FWD resolution means that both candidate and/or canonical graphs can
2434  * consist of portions of the graph that come from multiple compilation units.
2435  * This is due to the fact that types within a single compilation unit are always
2436  * deduplicated and FWDs are already resolved, if the referenced struct/union
2437  * definition is available. So, if we had an unresolved FWD and found a corresponding
2438  * STRUCT/UNION, they will be from different compilation units. This
2439  * consequently means that when we "link" FWD to corresponding STRUCT/UNION,
2440  * type graph will likely have at least two different BTF types that describe
2441  * same type (e.g., most probably there will be two different BTF types for the
2442  * same 'int' primitive type) and could even have "overlapping" parts of type
2443  * graph that describe same subset of types.
2444  *
2445  * This in turn means that our assumption that each type in canonical graph
2446  * must correspond to exactly one type in candidate graph might not hold
2447  * anymore, which makes it harder to detect contradictions using the hypothetical
2448  * map. To handle this problem, we allow FWD -> STRUCT/UNION
2449  * resolution only in canonical graph. FWDs in candidate graphs are never
2450  * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs
2451  * that can occur:
2452  *   - Both types in canonical and candidate graphs are FWDs. If they are
2453  *     structurally equivalent, then they can either be both resolved to the
2454  *     same STRUCT/UNION or not resolved at all. In both cases they are
2455  *     equivalent and there is no need to resolve FWD on candidate side.
2456  *   - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
2457  *     so there is nothing to resolve; the algorithm will check equivalence anyway.
2458  *   - Type in canonical graph is FWD, while type in candidate is concrete
2459  *     STRUCT/UNION. In this case candidate graph comes from single compilation
2460  *     unit, so there is exactly one BTF type for each unique C type. After
2461  *     resolving FWD into STRUCT/UNION, there might be more than one BTF type
2462  *     in canonical graph mapping to single BTF type in candidate graph, but
2463  *     because hypothetical mapping maps from canonical to candidate types, it's
2464  *     alright, and we still maintain the property of having single `canon_id`
2465  *     mapping to single `cand_id` (there could be two different `canon_id`
2466  *     mapped to the same `cand_id`, but it's not contradictory).
2467  *   - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
2468  *     graph is FWD. In this case we are just going to check compatibility of
2469  *     STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
2470  *     assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
2471  *     a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
2472  *     turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
2473  *     canonical graph.
2474  */
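/*
 * Editor's sketch (hypothetical C input, not from the original sources), to
 * make the FWD handling above concrete. Imagine two compilation units:
 *
 *	CU #1: struct s;              struct a { struct s *s; };
 *	CU #2: struct s { int x; };   struct a { struct s *s; };
 *
 * When 'struct a' from CU #1 (candidate) is checked against 'struct a' from
 * CU #2 (canonical), the traversal reaches FWD 'struct s' vs concrete
 * 'struct s'. Names and FWD kind match, so the pair is noted in the
 * hypothetical map; once the whole graph proves equivalent,
 * btf_dedup_merge_hypot_map() records FWD -> STRUCT in d->map and the forward
 * declaration is resolved permanently.
 */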
2475 static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
2476 			      __u32 canon_id)
2477 {
2478 	struct btf_type *cand_type;
2479 	struct btf_type *canon_type;
2480 	__u32 hypot_type_id;
2481 	__u16 cand_kind;
2482 	__u16 canon_kind;
2483 	int i, eq;
2484 
2485 	/* if both resolve to the same canonical, they must be equivalent */
2486 	if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
2487 		return 1;
2488 
2489 	canon_id = resolve_fwd_id(d, canon_id);
2490 
2491 	hypot_type_id = d->hypot_map[canon_id];
2492 	if (hypot_type_id <= BTF_MAX_NR_TYPES)
2493 		return hypot_type_id == cand_id;
2494 
2495 	if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
2496 		return -ENOMEM;
2497 
2498 	cand_type = d->btf->types[cand_id];
2499 	canon_type = d->btf->types[canon_id];
2500 	cand_kind = btf_kind(cand_type);
2501 	canon_kind = btf_kind(canon_type);
2502 
2503 	if (cand_type->name_off != canon_type->name_off)
2504 		return 0;
2505 
2506 	/* FWD <--> STRUCT/UNION equivalence check, if enabled */
2507 	if (!d->opts.dont_resolve_fwds
2508 	    && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
2509 	    && cand_kind != canon_kind) {
2510 		__u16 real_kind;
2511 		__u16 fwd_kind;
2512 
2513 		if (cand_kind == BTF_KIND_FWD) {
2514 			real_kind = canon_kind;
2515 			fwd_kind = btf_fwd_kind(cand_type);
2516 		} else {
2517 			real_kind = cand_kind;
2518 			fwd_kind = btf_fwd_kind(canon_type);
2519 		}
2520 		return fwd_kind == real_kind;
2521 	}
2522 
2523 	if (cand_kind != canon_kind)
2524 		return 0;
2525 
2526 	switch (cand_kind) {
2527 	case BTF_KIND_INT:
2528 		return btf_equal_int(cand_type, canon_type);
2529 
2530 	case BTF_KIND_ENUM:
2531 		if (d->opts.dont_resolve_fwds)
2532 			return btf_equal_enum(cand_type, canon_type);
2533 		else
2534 			return btf_compat_enum(cand_type, canon_type);
2535 
2536 	case BTF_KIND_FWD:
2537 		return btf_equal_common(cand_type, canon_type);
2538 
2539 	case BTF_KIND_CONST:
2540 	case BTF_KIND_VOLATILE:
2541 	case BTF_KIND_RESTRICT:
2542 	case BTF_KIND_PTR:
2543 	case BTF_KIND_TYPEDEF:
2544 	case BTF_KIND_FUNC:
2545 		if (cand_type->info != canon_type->info)
2546 			return 0;
2547 		return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
2548 
2549 	case BTF_KIND_ARRAY: {
2550 		const struct btf_array *cand_arr, *canon_arr;
2551 
2552 		if (!btf_compat_array(cand_type, canon_type))
2553 			return 0;
2554 		cand_arr = btf_array(cand_type);
2555 		canon_arr = btf_array(canon_type);
2556 		eq = btf_dedup_is_equiv(d,
2557 			cand_arr->index_type, canon_arr->index_type);
2558 		if (eq <= 0)
2559 			return eq;
2560 		return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
2561 	}
2562 
2563 	case BTF_KIND_STRUCT:
2564 	case BTF_KIND_UNION: {
2565 		const struct btf_member *cand_m, *canon_m;
2566 		__u16 vlen;
2567 
2568 		if (!btf_shallow_equal_struct(cand_type, canon_type))
2569 			return 0;
2570 		vlen = btf_vlen(cand_type);
2571 		cand_m = btf_members(cand_type);
2572 		canon_m = btf_members(canon_type);
2573 		for (i = 0; i < vlen; i++) {
2574 			eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
2575 			if (eq <= 0)
2576 				return eq;
2577 			cand_m++;
2578 			canon_m++;
2579 		}
2580 
2581 		return 1;
2582 	}
2583 
2584 	case BTF_KIND_FUNC_PROTO: {
2585 		const struct btf_param *cand_p, *canon_p;
2586 		__u16 vlen;
2587 
2588 		if (!btf_compat_fnproto(cand_type, canon_type))
2589 			return 0;
2590 		eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
2591 		if (eq <= 0)
2592 			return eq;
2593 		vlen = btf_vlen(cand_type);
2594 		cand_p = btf_params(cand_type);
2595 		canon_p = btf_params(canon_type);
2596 		for (i = 0; i < vlen; i++) {
2597 			eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
2598 			if (eq <= 0)
2599 				return eq;
2600 			cand_p++;
2601 			canon_p++;
2602 		}
2603 		return 1;
2604 	}
2605 
2606 	default:
2607 		return -EINVAL;
2608 	}
2609 	return 0;
2610 }
2611 
2612 /*
2613  * Use hypothetical mapping, produced by successful type graph equivalence
2614  * check, to augment existing struct/union canonical mapping, where possible.
2615  *
2616  * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
2617  * FWD -> STRUCT/UNION correspondence. FWD resolution is bidirectional:
2618  * it doesn't matter if FWD type was part of canonical graph or candidate one,
2619  * we are recording the mapping anyway. As opposed to the care required
2620  * for struct/union correspondence mapping (described below), for FWD resolution
2621  * it's not important, because by the time that FWD type (a reference type) is
2622  * deduplicated, all structs/unions will have been deduped already anyway.
2623  *
2624  * Recording STRUCT/UNION mapping is purely a performance optimization and is
2625  * not required for correctness. It needs to be done carefully to ensure that
2626  * struct/union from candidate's type graph is not mapped into corresponding
2627  * struct/union from canonical type graph that itself hasn't been resolved into
2628  * canonical representative. The only guarantee we have is that canonical
2629  * struct/union was determined as canonical and that won't change. But any
2630  * types referenced through that struct/union's fields might not have been
2631  * resolved yet, so in a case like that it's too early to establish any kind of
2632  * correspondence between structs/unions.
2633  *
2634  * No canonical correspondence is derived for primitive types (they are already
2635  * deduplicated completely anyway) or reference types (they rely on
2636  * stability of struct/union canonical relationship for equivalence checks).
2637  */
2638 static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
2639 {
2640 	__u32 cand_type_id, targ_type_id;
2641 	__u16 t_kind, c_kind;
2642 	__u32 t_id, c_id;
2643 	int i;
2644 
2645 	for (i = 0; i < d->hypot_cnt; i++) {
2646 		cand_type_id = d->hypot_list[i];
2647 		targ_type_id = d->hypot_map[cand_type_id];
2648 		t_id = resolve_type_id(d, targ_type_id);
2649 		c_id = resolve_type_id(d, cand_type_id);
2650 		t_kind = btf_kind(d->btf->types[t_id]);
2651 		c_kind = btf_kind(d->btf->types[c_id]);
2652 		/*
2653 		 * Resolve FWD into STRUCT/UNION.
2654 		 * It's ok to resolve FWD into STRUCT/UNION that's not yet
2655 		 * mapped to canonical representative (as opposed to
2656 		 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
2657 		 * eventually that struct is going to be mapped and all resolved
2658 		 * FWDs will automatically resolve to correct canonical
2659 		 * representative. This will happen before ref type deduping,
2660 		 * which critically depends on stability of these mappings. This
2661 		 * stability is not a requirement for STRUCT/UNION equivalence
2662 		 * checks, though.
2663 		 */
2664 		if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
2665 			d->map[c_id] = t_id;
2666 		else if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
2667 			d->map[t_id] = c_id;
2668 
2669 		if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
2670 		    c_kind != BTF_KIND_FWD &&
2671 		    is_type_mapped(d, c_id) &&
2672 		    !is_type_mapped(d, t_id)) {
2673 			/*
2674 			 * as a perf optimization, we can map struct/union
2675 			 * that's part of type graph we just verified for
2676 			 * equivalence. We can do that for struct/union that has
2677 			 * canonical representative only, though.
2678 			 */
2679 			d->map[t_id] = c_id;
2680 		}
2681 	}
2682 }
2683 
2684 /*
2685  * Deduplicate struct/union types.
2686  *
2687  * For each struct/union type its type signature hash is calculated, taking
2688  * into account the type's name, size, number, order and names of fields, but
2689  * ignoring type IDs referenced from fields, because they might not be deduped
2690  * completely until after the reference types deduplication phase. This type hash
2691  * is used to iterate over all potential canonical types sharing the same hash.
2692  * For each canonical candidate we check whether the type graphs that they form
2693  * (through referenced types in fields and so on) are equivalent using the algorithm
2694  * implemented in `btf_dedup_is_equiv`. If such equivalence is found and
2695  * BTF_KIND_FWD resolution is allowed, then hypothetical mapping
2696  * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence
2697  * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to
2698  * potentially map other structs/unions to their canonical representatives,
2699  * if such relationship hasn't yet been established. This speeds up algorithm
2700  * by eliminating some of the duplicate work.
2701  *
2702  * If no matching canonical representative was found, the struct/union is marked
2703  * as canonical for itself and is added into the btf_dedup->dedup_table hash map
2704  * for further lookups.
2705  */
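/*
 * Editor's sketch (hypothetical input): two compilation units that each define
 *
 *	struct list_head { struct list_head *next, *prev; };
 *
 * produce two STRUCT types with the same name, size and member layout, and
 * therefore the same btf_hash_struct() value. The one processed second finds
 * the first through the dedup table, passes btf_dedup_is_equiv() (the
 * self-referential member is handled by the cycle check in the hypothetical
 * map), and is mapped to the first one instead of becoming canonical itself.
 */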
2706 static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
2707 {
2708 	struct btf_type *cand_type, *t;
2709 	struct hashmap_entry *hash_entry;
2710 	/* if we don't find equivalent type, then we are canonical */
2711 	__u32 new_id = type_id;
2712 	__u16 kind;
2713 	long h;
2714 
2715 	/* already deduped or is in process of deduping (loop detected) */
2716 	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
2717 		return 0;
2718 
2719 	t = d->btf->types[type_id];
2720 	kind = btf_kind(t);
2721 
2722 	if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
2723 		return 0;
2724 
2725 	h = btf_hash_struct(t);
2726 	for_each_dedup_cand(d, hash_entry, h) {
2727 		__u32 cand_id = (__u32)(long)hash_entry->value;
2728 		int eq;
2729 
2730 		/*
2731 		 * Even though btf_dedup_is_equiv() checks for
2732 		 * btf_shallow_equal_struct() internally when checking two
2733 		 * structs (unions) for equivalence, we need to guard here
2734 		 * from picking matching FWD type as a dedup candidate.
2735 		 * This can happen due to hash collision. In such case just
2736 		 * relying on btf_dedup_is_equiv() would lead to potentially
2737 		 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
2738 		 * FWD and compatible STRUCT/UNION are considered equivalent.
2739 		 */
2740 		cand_type = d->btf->types[cand_id];
2741 		if (!btf_shallow_equal_struct(t, cand_type))
2742 			continue;
2743 
2744 		btf_dedup_clear_hypot_map(d);
2745 		eq = btf_dedup_is_equiv(d, type_id, cand_id);
2746 		if (eq < 0)
2747 			return eq;
2748 		if (!eq)
2749 			continue;
2750 		new_id = cand_id;
2751 		btf_dedup_merge_hypot_map(d);
2752 		break;
2753 	}
2754 
2755 	d->map[type_id] = new_id;
2756 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
2757 		return -ENOMEM;
2758 
2759 	return 0;
2760 }
2761 
2762 static int btf_dedup_struct_types(struct btf_dedup *d)
2763 {
2764 	int i, err;
2765 
2766 	for (i = 1; i <= d->btf->nr_types; i++) {
2767 		err = btf_dedup_struct_type(d, i);
2768 		if (err)
2769 			return err;
2770 	}
2771 	return 0;
2772 }
2773 
2774 /*
2775  * Deduplicate reference type.
2776  *
2777  * Once all primitive and struct/union types got deduplicated, we can easily
2778  * deduplicate all other (reference) BTF types. This is done in two steps:
2779  *
2780  * 1. Resolve all referenced type IDs into their canonical type IDs. This
2781  * resolution can be done either immediately for primitive or struct/union types
2782  * (because they were deduped in previous two phases) or recursively for
2783  * reference types. Recursion will always terminate at either primitive or
2784  * struct/union type, at which point we can "unwind" chain of reference types
2785  * one by one. There is no danger of encountering cycles because in C type
2786  * system the only way to form type cycle is through struct/union, so any chain
2787  * of reference types, even those taking part in a type cycle, will inevitably
2788  * reach struct/union at some point.
2789  *
2790  * 2. Once all referenced type IDs are resolved into canonical ones, BTF type
2791  * becomes "stable", in the sense that no further deduplication will cause
2792  * any changes to it. With that, it's now possible to calculate type's signature
2793  * hash (this time taking into account referenced type IDs) and loop over all
2794  * potential canonical representatives. If no match was found, current type
2795  * will become canonical representative of itself and will be added into
2796  * btf_dedup->dedup_table as another possible canonical representative.
2797  */
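/*
 * Editor's sketch (hypothetical chain): for a 'const int *' value the chain
 * PTR -> CONST -> INT is deduplicated bottom-up. Deduping the PTR first
 * recursively dedupes the CONST, which resolves the INT (already canonical
 * after the primitive pass); each link is then rewritten to the canonical
 * referenced ID, hashed, and either matched against an existing identical
 * reference type or registered as a new canonical one in the dedup table.
 */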
2798 static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
2799 {
2800 	struct hashmap_entry *hash_entry;
2801 	__u32 new_id = type_id, cand_id;
2802 	struct btf_type *t, *cand;
2803 	/* if we don't find equivalent type, then we are representative type */
2804 	int ref_type_id;
2805 	long h;
2806 
2807 	if (d->map[type_id] == BTF_IN_PROGRESS_ID)
2808 		return -ELOOP;
2809 	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
2810 		return resolve_type_id(d, type_id);
2811 
2812 	t = d->btf->types[type_id];
2813 	d->map[type_id] = BTF_IN_PROGRESS_ID;
2814 
2815 	switch (btf_kind(t)) {
2816 	case BTF_KIND_CONST:
2817 	case BTF_KIND_VOLATILE:
2818 	case BTF_KIND_RESTRICT:
2819 	case BTF_KIND_PTR:
2820 	case BTF_KIND_TYPEDEF:
2821 	case BTF_KIND_FUNC:
2822 		ref_type_id = btf_dedup_ref_type(d, t->type);
2823 		if (ref_type_id < 0)
2824 			return ref_type_id;
2825 		t->type = ref_type_id;
2826 
2827 		h = btf_hash_common(t);
2828 		for_each_dedup_cand(d, hash_entry, h) {
2829 			cand_id = (__u32)(long)hash_entry->value;
2830 			cand = d->btf->types[cand_id];
2831 			if (btf_equal_common(t, cand)) {
2832 				new_id = cand_id;
2833 				break;
2834 			}
2835 		}
2836 		break;
2837 
2838 	case BTF_KIND_ARRAY: {
2839 		struct btf_array *info = btf_array(t);
2840 
2841 		ref_type_id = btf_dedup_ref_type(d, info->type);
2842 		if (ref_type_id < 0)
2843 			return ref_type_id;
2844 		info->type = ref_type_id;
2845 
2846 		ref_type_id = btf_dedup_ref_type(d, info->index_type);
2847 		if (ref_type_id < 0)
2848 			return ref_type_id;
2849 		info->index_type = ref_type_id;
2850 
2851 		h = btf_hash_array(t);
2852 		for_each_dedup_cand(d, hash_entry, h) {
2853 			cand_id = (__u32)(long)hash_entry->value;
2854 			cand = d->btf->types[cand_id];
2855 			if (btf_equal_array(t, cand)) {
2856 				new_id = cand_id;
2857 				break;
2858 			}
2859 		}
2860 		break;
2861 	}
2862 
2863 	case BTF_KIND_FUNC_PROTO: {
2864 		struct btf_param *param;
2865 		__u16 vlen;
2866 		int i;
2867 
2868 		ref_type_id = btf_dedup_ref_type(d, t->type);
2869 		if (ref_type_id < 0)
2870 			return ref_type_id;
2871 		t->type = ref_type_id;
2872 
2873 		vlen = btf_vlen(t);
2874 		param = btf_params(t);
2875 		for (i = 0; i < vlen; i++) {
2876 			ref_type_id = btf_dedup_ref_type(d, param->type);
2877 			if (ref_type_id < 0)
2878 				return ref_type_id;
2879 			param->type = ref_type_id;
2880 			param++;
2881 		}
2882 
2883 		h = btf_hash_fnproto(t);
2884 		for_each_dedup_cand(d, hash_entry, h) {
2885 			cand_id = (__u32)(long)hash_entry->value;
2886 			cand = d->btf->types[cand_id];
2887 			if (btf_equal_fnproto(t, cand)) {
2888 				new_id = cand_id;
2889 				break;
2890 			}
2891 		}
2892 		break;
2893 	}
2894 
2895 	default:
2896 		return -EINVAL;
2897 	}
2898 
2899 	d->map[type_id] = new_id;
2900 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
2901 		return -ENOMEM;
2902 
2903 	return new_id;
2904 }
2905 
2906 static int btf_dedup_ref_types(struct btf_dedup *d)
2907 {
2908 	int i, err;
2909 
2910 	for (i = 1; i <= d->btf->nr_types; i++) {
2911 		err = btf_dedup_ref_type(d, i);
2912 		if (err < 0)
2913 			return err;
2914 	}
2915 	/* we won't need d->dedup_table anymore */
2916 	hashmap__free(d->dedup_table);
2917 	d->dedup_table = NULL;
2918 	return 0;
2919 }
2920 
2921 /*
2922  * Compact types.
2923  *
2924  * After we established for each type its corresponding canonical representative
2925  * type, we can now eliminate types that are not canonical and leave only
2926  * canonical ones laid out sequentially in memory by copying them over
2927  * duplicates. During compaction btf_dedup->hypot_map array is reused to store
2928  * a map from original type ID to a new compacted type ID, which will be used
2929  * during next phase to "fix up" type IDs, referenced from struct/union and
2930  * reference types.
2931  */
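/*
 * Editor's sketch (hypothetical IDs): with d->map = {1->1, 2->1, 3->3, 4->3},
 * only types 1 and 3 are canonical, so after compaction they become types 1
 * and 2 respectively and the reused hypot_map records the move:
 *
 *	hypot_map[1] = 1, hypot_map[3] = 2
 *
 * while the non-canonical IDs 2 and 4 keep BTF_UNPROCESSED_ID and are never
 * referenced again once the remap phase rewrites all type references.
 */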
2932 static int btf_dedup_compact_types(struct btf_dedup *d)
2933 {
2934 	struct btf_type **new_types;
2935 	__u32 next_type_id = 1;
2936 	char *types_start, *p;
2937 	int i, len;
2938 
2939 	/* we are going to reuse hypot_map to store compaction remapping */
2940 	d->hypot_map[0] = 0;
2941 	for (i = 1; i <= d->btf->nr_types; i++)
2942 		d->hypot_map[i] = BTF_UNPROCESSED_ID;
2943 
2944 	types_start = d->btf->nohdr_data + d->btf->hdr->type_off;
2945 	p = types_start;
2946 
2947 	for (i = 1; i <= d->btf->nr_types; i++) {
2948 		if (d->map[i] != i)
2949 			continue;
2950 
2951 		len = btf_type_size(d->btf->types[i]);
2952 		if (len < 0)
2953 			return len;
2954 
2955 		memmove(p, d->btf->types[i], len);
2956 		d->hypot_map[i] = next_type_id;
2957 		d->btf->types[next_type_id] = (struct btf_type *)p;
2958 		p += len;
2959 		next_type_id++;
2960 	}
2961 
2962 	/* shrink struct btf's internal types index and update btf_header */
2963 	d->btf->nr_types = next_type_id - 1;
2964 	d->btf->types_size = d->btf->nr_types;
2965 	d->btf->hdr->type_len = p - types_start;
2966 	new_types = realloc(d->btf->types,
2967 			    (1 + d->btf->nr_types) * sizeof(struct btf_type *));
2968 	if (!new_types)
2969 		return -ENOMEM;
2970 	d->btf->types = new_types;
2971 
2972 	/* make sure string section follows type information without gaps */
2973 	d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data;
2974 	memmove(p, d->btf->strings, d->btf->hdr->str_len);
2975 	d->btf->strings = p;
2976 	p += d->btf->hdr->str_len;
2977 
2978 	d->btf->data_size = p - (char *)d->btf->data;
2979 	return 0;
2980 }
2981 
2982 /*
2983  * Figure out final (deduplicated and compacted) type ID for provided original
2984  * `type_id` by first resolving it into corresponding canonical type ID and
2985  * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
2986  * which is populated during compaction phase.
2987  */
2988 static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id)
2989 {
2990 	__u32 resolved_type_id, new_type_id;
2991 
2992 	resolved_type_id = resolve_type_id(d, type_id);
2993 	new_type_id = d->hypot_map[resolved_type_id];
2994 	if (new_type_id > BTF_MAX_NR_TYPES)
2995 		return -EINVAL;
2996 	return new_type_id;
2997 }
2998 
2999 /*
3000  * Remap referenced type IDs into deduped type IDs.
3001  *
3002  * After BTF types are deduplicated and compacted, their final type IDs may
3003  * differ from original ones. The map from original to a corresponding
3004  * deduped type ID is stored in btf_dedup->hypot_map and is populated during
3005  * compaction phase. During remapping phase we are rewriting all type IDs
3006  * referenced from any BTF type (e.g., struct fields, func proto args, etc) to
3007  * their final deduped type IDs.
3008  */
3009 static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
3010 {
3011 	struct btf_type *t = d->btf->types[type_id];
3012 	int i, r;
3013 
3014 	switch (btf_kind(t)) {
3015 	case BTF_KIND_INT:
3016 	case BTF_KIND_ENUM:
3017 		break;
3018 
3019 	case BTF_KIND_FWD:
3020 	case BTF_KIND_CONST:
3021 	case BTF_KIND_VOLATILE:
3022 	case BTF_KIND_RESTRICT:
3023 	case BTF_KIND_PTR:
3024 	case BTF_KIND_TYPEDEF:
3025 	case BTF_KIND_FUNC:
3026 	case BTF_KIND_VAR:
3027 		r = btf_dedup_remap_type_id(d, t->type);
3028 		if (r < 0)
3029 			return r;
3030 		t->type = r;
3031 		break;
3032 
3033 	case BTF_KIND_ARRAY: {
3034 		struct btf_array *arr_info = btf_array(t);
3035 
3036 		r = btf_dedup_remap_type_id(d, arr_info->type);
3037 		if (r < 0)
3038 			return r;
3039 		arr_info->type = r;
3040 		r = btf_dedup_remap_type_id(d, arr_info->index_type);
3041 		if (r < 0)
3042 			return r;
3043 		arr_info->index_type = r;
3044 		break;
3045 	}
3046 
3047 	case BTF_KIND_STRUCT:
3048 	case BTF_KIND_UNION: {
3049 		struct btf_member *member = btf_members(t);
3050 		__u16 vlen = btf_vlen(t);
3051 
3052 		for (i = 0; i < vlen; i++) {
3053 			r = btf_dedup_remap_type_id(d, member->type);
3054 			if (r < 0)
3055 				return r;
3056 			member->type = r;
3057 			member++;
3058 		}
3059 		break;
3060 	}
3061 
3062 	case BTF_KIND_FUNC_PROTO: {
3063 		struct btf_param *param = btf_params(t);
3064 		__u16 vlen = btf_vlen(t);
3065 
3066 		r = btf_dedup_remap_type_id(d, t->type);
3067 		if (r < 0)
3068 			return r;
3069 		t->type = r;
3070 
3071 		for (i = 0; i < vlen; i++) {
3072 			r = btf_dedup_remap_type_id(d, param->type);
3073 			if (r < 0)
3074 				return r;
3075 			param->type = r;
3076 			param++;
3077 		}
3078 		break;
3079 	}
3080 
3081 	case BTF_KIND_DATASEC: {
3082 		struct btf_var_secinfo *var = btf_var_secinfos(t);
3083 		__u16 vlen = btf_vlen(t);
3084 
3085 		for (i = 0; i < vlen; i++) {
3086 			r = btf_dedup_remap_type_id(d, var->type);
3087 			if (r < 0)
3088 				return r;
3089 			var->type = r;
3090 			var++;
3091 		}
3092 		break;
3093 	}
3094 
3095 	default:
3096 		return -EINVAL;
3097 	}
3098 
3099 	return 0;
3100 }
3101 
3102 static int btf_dedup_remap_types(struct btf_dedup *d)
3103 {
3104 	int i, r;
3105 
3106 	for (i = 1; i <= d->btf->nr_types; i++) {
3107 		r = btf_dedup_remap_type(d, i);
3108 		if (r < 0)
3109 			return r;
3110 	}
3111 	return 0;
3112 }
3113 
3114 /*
3115  * Probe a few well-known locations for the vmlinux kernel image and try to load BTF
3116  * data out of it to use for target BTF.
3117  */
3118 struct btf *libbpf_find_kernel_btf(void)
3119 {
3120 	struct {
3121 		const char *path_fmt;
3122 		bool raw_btf;
3123 	} locations[] = {
3124 		/* try canonical vmlinux BTF through sysfs first */
3125 		{ "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
3126 		/* fall back to trying to find vmlinux ELF on disk otherwise */
3127 		{ "/boot/vmlinux-%1$s" },
3128 		{ "/lib/modules/%1$s/vmlinux-%1$s" },
3129 		{ "/lib/modules/%1$s/build/vmlinux" },
3130 		{ "/usr/lib/modules/%1$s/kernel/vmlinux" },
3131 		{ "/usr/lib/debug/boot/vmlinux-%1$s" },
3132 		{ "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
3133 		{ "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
3134 	};
3135 	char path[PATH_MAX + 1];
3136 	struct utsname buf;
3137 	struct btf *btf;
3138 	int i;
3139 
3140 	uname(&buf);
3141 
3142 	for (i = 0; i < ARRAY_SIZE(locations); i++) {
3143 		snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);
3144 
3145 		if (access(path, R_OK))
3146 			continue;
3147 
3148 		if (locations[i].raw_btf)
3149 			btf = btf__parse_raw(path);
3150 		else
3151 			btf = btf__parse_elf(path, NULL);
3152 
3153 		pr_debug("loading kernel BTF '%s': %ld\n",
3154 			 path, IS_ERR(btf) ? PTR_ERR(btf) : 0);
3155 		if (IS_ERR(btf))
3156 			continue;
3157 
3158 		return btf;
3159 	}
3160 
3161 	pr_warn("failed to find valid kernel BTF\n");
3162 	return ERR_PTR(-ESRCH);
3163 }
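
/*
 * Editor's sketch of typical usage (not part of the original source): callers
 * are expected to check the IS_ERR()-encoded result and to release it with
 * btf__free(), e.g.:
 *
 *	struct btf *kern_btf = libbpf_find_kernel_btf();
 *	__s32 id;
 *
 *	if (IS_ERR(kern_btf))
 *		return PTR_ERR(kern_btf);
 *	id = btf__find_by_name(kern_btf, "task_struct");
 *	...
 *	btf__free(kern_btf);
 */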
3164