xref: /linux/tools/lib/bpf/btf.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 /* Copyright (c) 2018 Facebook */
3 
4 #include <byteswap.h>
5 #include <endian.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <fcntl.h>
10 #include <unistd.h>
11 #include <errno.h>
12 #include <sys/utsname.h>
13 #include <sys/param.h>
14 #include <sys/stat.h>
15 #include <linux/kernel.h>
16 #include <linux/err.h>
17 #include <linux/btf.h>
18 #include <gelf.h>
19 #include "btf.h"
20 #include "bpf.h"
21 #include "libbpf.h"
22 #include "libbpf_internal.h"
23 #include "hashmap.h"
24 #include "strset.h"
25 
26 #define BTF_MAX_NR_TYPES 0x7fffffffU
27 #define BTF_MAX_STR_OFFSET 0x7fffffffU
28 
29 static struct btf_type btf_void;
30 
31 struct btf {
32 	/* raw BTF data in native endianness */
33 	void *raw_data;
34 	/* raw BTF data in non-native endianness */
35 	void *raw_data_swapped;
36 	__u32 raw_size;
37 	/* whether target endianness differs from the native one */
38 	bool swapped_endian;
39 
40 	/*
41 	 * When BTF is loaded from an ELF or raw memory it is stored
42 	 * in a contiguous memory block. The hdr, type_data, and, strs_data
43 	 * point inside that memory region to their respective parts of BTF
44 	 * representation:
45 	 *
46 	 * +--------------------------------+
47 	 * |  Header  |  Types  |  Strings  |
48 	 * +--------------------------------+
49 	 * ^          ^         ^
50 	 * |          |         |
51 	 * hdr        |         |
52 	 * types_data-+         |
53 	 * strs_data------------+
54 	 *
55 	 * If BTF data is later modified, e.g., due to types added or
56 	 * removed, BTF deduplication performed, etc, this contiguous
57 	 * representation is broken up into three independently allocated
58 	 * memory regions to be able to modify them independently.
59 	 * raw_data is nulled out at that point, but can be later allocated
60 	 * and cached again if user calls btf__raw_data(), at which point
61 	 * raw_data will contain a contiguous copy of header, types, and
62 	 * strings:
63 	 *
64 	 * +----------+  +---------+  +-----------+
65 	 * |  Header  |  |  Types  |  |  Strings  |
66 	 * +----------+  +---------+  +-----------+
67 	 * ^             ^            ^
68 	 * |             |            |
69 	 * hdr           |            |
70 	 * types_data----+            |
71 	 * strset__data(strs_set)-----+
72 	 *
73 	 *               +----------+---------+-----------+
74 	 *               |  Header  |  Types  |  Strings  |
75 	 * raw_data----->+----------+---------+-----------+
76 	 */
77 	struct btf_header *hdr;
78 
79 	void *types_data;
80 	size_t types_data_cap; /* used size stored in hdr->type_len */
81 
82 	/* type ID to `struct btf_type *` lookup index
83 	 * type_offs[0] corresponds to the first non-VOID type:
84 	 *   - for base BTF it's type [1];
85 	 *   - for split BTF it's the first non-base BTF type.
86 	 */
87 	__u32 *type_offs;
88 	size_t type_offs_cap;
89 	/* number of types in this BTF instance:
90 	 *   - doesn't include special [0] void type;
91 	 *   - for split BTF counts number of types added on top of base BTF.
92 	 */
93 	__u32 nr_types;
94 	/* if not NULL, points to the base BTF on top of which the current
95 	 * split BTF is based
96 	 */
97 	struct btf *base_btf;
98 	/* BTF type ID of the first type in this BTF instance:
99 	 *   - for base BTF it's equal to 1;
100 	 *   - for split BTF it's equal to biggest type ID of base BTF plus 1.
101 	 */
102 	int start_id;
103 	/* logical string offset of this BTF instance:
104 	 *   - for base BTF it's equal to 0;
105 	 *   - for split BTF it's equal to total size of base BTF's string section size.
106 	 */
107 	int start_str_off;
108 
109 	/* only one of strs_data or strs_set can be non-NULL, depending on
110 	 * whether BTF is in a modifiable state (strs_set is used) or not
111 	 * (strs_data points inside raw_data)
112 	 */
113 	void *strs_data;
114 	/* a set of unique strings */
115 	struct strset *strs_set;
116 	/* whether strings are already deduplicated */
117 	bool strs_deduped;
118 
119 	/* whether base_btf should be freed in btf_free for this instance */
120 	bool owns_base;
121 
122 	/* BTF object FD, if loaded into kernel */
123 	int fd;
124 
125 	/* Pointer size (in bytes) for a target architecture of this BTF */
126 	int ptr_sz;
127 };
128 
ptr_to_u64(const void * ptr)129 static inline __u64 ptr_to_u64(const void *ptr)
130 {
131 	return (__u64) (unsigned long) ptr;
132 }
133 
134 /* Ensure given dynamically allocated memory region pointed to by *data* with
135  * capacity of *cap_cnt* elements each taking *elem_sz* bytes has enough
136  * memory to accommodate *add_cnt* new elements, assuming *cur_cnt* elements
137  * are already used. At most *max_cnt* elements can be ever allocated.
138  * If necessary, memory is reallocated and all existing data is copied over,
139  * new pointer to the memory region is stored at *data, new memory region
140  * capacity (in number of elements) is stored in *cap.
141  * On success, memory pointer to the beginning of unused memory is returned.
142  * On error, NULL is returned.
143  */
libbpf_add_mem(void ** data,size_t * cap_cnt,size_t elem_sz,size_t cur_cnt,size_t max_cnt,size_t add_cnt)144 void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
145 		     size_t cur_cnt, size_t max_cnt, size_t add_cnt)
146 {
147 	size_t new_cnt;
148 	void *new_data;
149 
150 	if (cur_cnt + add_cnt <= *cap_cnt)
151 		return *data + cur_cnt * elem_sz;
152 
153 	/* requested more than the set limit */
154 	if (cur_cnt + add_cnt > max_cnt)
155 		return NULL;
156 
157 	new_cnt = *cap_cnt;
158 	new_cnt += new_cnt / 4;		  /* expand by 25% */
159 	if (new_cnt < 16)		  /* but at least 16 elements */
160 		new_cnt = 16;
161 	if (new_cnt > max_cnt)		  /* but not exceeding a set limit */
162 		new_cnt = max_cnt;
163 	if (new_cnt < cur_cnt + add_cnt)  /* also ensure we have enough memory */
164 		new_cnt = cur_cnt + add_cnt;
165 
166 	new_data = libbpf_reallocarray(*data, new_cnt, elem_sz);
167 	if (!new_data)
168 		return NULL;
169 
170 	/* zero out newly allocated portion of memory */
171 	memset(new_data + (*cap_cnt) * elem_sz, 0, (new_cnt - *cap_cnt) * elem_sz);
172 
173 	*data = new_data;
174 	*cap_cnt = new_cnt;
175 	return new_data + cur_cnt * elem_sz;
176 }
177 
178 /* Ensure given dynamically allocated memory region has enough allocated space
179  * to accommodate *need_cnt* elements of size *elem_sz* bytes each
180  */
libbpf_ensure_mem(void ** data,size_t * cap_cnt,size_t elem_sz,size_t need_cnt)181 int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
182 {
183 	void *p;
184 
185 	if (need_cnt <= *cap_cnt)
186 		return 0;
187 
188 	p = libbpf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt);
189 	if (!p)
190 		return -ENOMEM;
191 
192 	return 0;
193 }
194 
btf_add_type_offs_mem(struct btf * btf,size_t add_cnt)195 static void *btf_add_type_offs_mem(struct btf *btf, size_t add_cnt)
196 {
197 	return libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
198 			      btf->nr_types, BTF_MAX_NR_TYPES, add_cnt);
199 }
200 
btf_add_type_idx_entry(struct btf * btf,__u32 type_off)201 static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
202 {
203 	__u32 *p;
204 
205 	p = btf_add_type_offs_mem(btf, 1);
206 	if (!p)
207 		return -ENOMEM;
208 
209 	*p = type_off;
210 	return 0;
211 }
212 
btf_bswap_hdr(struct btf_header * h)213 static void btf_bswap_hdr(struct btf_header *h)
214 {
215 	h->magic = bswap_16(h->magic);
216 	h->hdr_len = bswap_32(h->hdr_len);
217 	h->type_off = bswap_32(h->type_off);
218 	h->type_len = bswap_32(h->type_len);
219 	h->str_off = bswap_32(h->str_off);
220 	h->str_len = bswap_32(h->str_len);
221 }
222 
btf_parse_hdr(struct btf * btf)223 static int btf_parse_hdr(struct btf *btf)
224 {
225 	struct btf_header *hdr = btf->hdr;
226 	__u32 meta_left;
227 
228 	if (btf->raw_size < sizeof(struct btf_header)) {
229 		pr_debug("BTF header not found\n");
230 		return -EINVAL;
231 	}
232 
233 	if (hdr->magic == bswap_16(BTF_MAGIC)) {
234 		btf->swapped_endian = true;
235 		if (bswap_32(hdr->hdr_len) != sizeof(struct btf_header)) {
236 			pr_warn("Can't load BTF with non-native endianness due to unsupported header length %u\n",
237 				bswap_32(hdr->hdr_len));
238 			return -ENOTSUP;
239 		}
240 		btf_bswap_hdr(hdr);
241 	} else if (hdr->magic != BTF_MAGIC) {
242 		pr_debug("Invalid BTF magic: %x\n", hdr->magic);
243 		return -EINVAL;
244 	}
245 
246 	if (btf->raw_size < hdr->hdr_len) {
247 		pr_debug("BTF header len %u larger than data size %u\n",
248 			 hdr->hdr_len, btf->raw_size);
249 		return -EINVAL;
250 	}
251 
252 	meta_left = btf->raw_size - hdr->hdr_len;
253 	if (meta_left < (long long)hdr->str_off + hdr->str_len) {
254 		pr_debug("Invalid BTF total size: %u\n", btf->raw_size);
255 		return -EINVAL;
256 	}
257 
258 	if ((long long)hdr->type_off + hdr->type_len > hdr->str_off) {
259 		pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n",
260 			 hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len);
261 		return -EINVAL;
262 	}
263 
264 	if (hdr->type_off % 4) {
265 		pr_debug("BTF type section is not aligned to 4 bytes\n");
266 		return -EINVAL;
267 	}
268 
269 	return 0;
270 }
271 
btf_parse_str_sec(struct btf * btf)272 static int btf_parse_str_sec(struct btf *btf)
273 {
274 	const struct btf_header *hdr = btf->hdr;
275 	const char *start = btf->strs_data;
276 	const char *end = start + btf->hdr->str_len;
277 
278 	if (btf->base_btf && hdr->str_len == 0)
279 		return 0;
280 	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || end[-1]) {
281 		pr_debug("Invalid BTF string section\n");
282 		return -EINVAL;
283 	}
284 	if (!btf->base_btf && start[0]) {
285 		pr_debug("Invalid BTF string section\n");
286 		return -EINVAL;
287 	}
288 	return 0;
289 }
290 
btf_type_size(const struct btf_type * t)291 static int btf_type_size(const struct btf_type *t)
292 {
293 	const int base_size = sizeof(struct btf_type);
294 	__u16 vlen = btf_vlen(t);
295 
296 	switch (btf_kind(t)) {
297 	case BTF_KIND_FWD:
298 	case BTF_KIND_CONST:
299 	case BTF_KIND_VOLATILE:
300 	case BTF_KIND_RESTRICT:
301 	case BTF_KIND_PTR:
302 	case BTF_KIND_TYPEDEF:
303 	case BTF_KIND_FUNC:
304 	case BTF_KIND_FLOAT:
305 	case BTF_KIND_TYPE_TAG:
306 		return base_size;
307 	case BTF_KIND_INT:
308 		return base_size + sizeof(__u32);
309 	case BTF_KIND_ENUM:
310 		return base_size + vlen * sizeof(struct btf_enum);
311 	case BTF_KIND_ENUM64:
312 		return base_size + vlen * sizeof(struct btf_enum64);
313 	case BTF_KIND_ARRAY:
314 		return base_size + sizeof(struct btf_array);
315 	case BTF_KIND_STRUCT:
316 	case BTF_KIND_UNION:
317 		return base_size + vlen * sizeof(struct btf_member);
318 	case BTF_KIND_FUNC_PROTO:
319 		return base_size + vlen * sizeof(struct btf_param);
320 	case BTF_KIND_VAR:
321 		return base_size + sizeof(struct btf_var);
322 	case BTF_KIND_DATASEC:
323 		return base_size + vlen * sizeof(struct btf_var_secinfo);
324 	case BTF_KIND_DECL_TAG:
325 		return base_size + sizeof(struct btf_decl_tag);
326 	default:
327 		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
328 		return -EINVAL;
329 	}
330 }
331 
btf_bswap_type_base(struct btf_type * t)332 static void btf_bswap_type_base(struct btf_type *t)
333 {
334 	t->name_off = bswap_32(t->name_off);
335 	t->info = bswap_32(t->info);
336 	t->type = bswap_32(t->type);
337 }
338 
btf_bswap_type_rest(struct btf_type * t)339 static int btf_bswap_type_rest(struct btf_type *t)
340 {
341 	struct btf_var_secinfo *v;
342 	struct btf_enum64 *e64;
343 	struct btf_member *m;
344 	struct btf_array *a;
345 	struct btf_param *p;
346 	struct btf_enum *e;
347 	__u16 vlen = btf_vlen(t);
348 	int i;
349 
350 	switch (btf_kind(t)) {
351 	case BTF_KIND_FWD:
352 	case BTF_KIND_CONST:
353 	case BTF_KIND_VOLATILE:
354 	case BTF_KIND_RESTRICT:
355 	case BTF_KIND_PTR:
356 	case BTF_KIND_TYPEDEF:
357 	case BTF_KIND_FUNC:
358 	case BTF_KIND_FLOAT:
359 	case BTF_KIND_TYPE_TAG:
360 		return 0;
361 	case BTF_KIND_INT:
362 		*(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1));
363 		return 0;
364 	case BTF_KIND_ENUM:
365 		for (i = 0, e = btf_enum(t); i < vlen; i++, e++) {
366 			e->name_off = bswap_32(e->name_off);
367 			e->val = bswap_32(e->val);
368 		}
369 		return 0;
370 	case BTF_KIND_ENUM64:
371 		for (i = 0, e64 = btf_enum64(t); i < vlen; i++, e64++) {
372 			e64->name_off = bswap_32(e64->name_off);
373 			e64->val_lo32 = bswap_32(e64->val_lo32);
374 			e64->val_hi32 = bswap_32(e64->val_hi32);
375 		}
376 		return 0;
377 	case BTF_KIND_ARRAY:
378 		a = btf_array(t);
379 		a->type = bswap_32(a->type);
380 		a->index_type = bswap_32(a->index_type);
381 		a->nelems = bswap_32(a->nelems);
382 		return 0;
383 	case BTF_KIND_STRUCT:
384 	case BTF_KIND_UNION:
385 		for (i = 0, m = btf_members(t); i < vlen; i++, m++) {
386 			m->name_off = bswap_32(m->name_off);
387 			m->type = bswap_32(m->type);
388 			m->offset = bswap_32(m->offset);
389 		}
390 		return 0;
391 	case BTF_KIND_FUNC_PROTO:
392 		for (i = 0, p = btf_params(t); i < vlen; i++, p++) {
393 			p->name_off = bswap_32(p->name_off);
394 			p->type = bswap_32(p->type);
395 		}
396 		return 0;
397 	case BTF_KIND_VAR:
398 		btf_var(t)->linkage = bswap_32(btf_var(t)->linkage);
399 		return 0;
400 	case BTF_KIND_DATASEC:
401 		for (i = 0, v = btf_var_secinfos(t); i < vlen; i++, v++) {
402 			v->type = bswap_32(v->type);
403 			v->offset = bswap_32(v->offset);
404 			v->size = bswap_32(v->size);
405 		}
406 		return 0;
407 	case BTF_KIND_DECL_TAG:
408 		btf_decl_tag(t)->component_idx = bswap_32(btf_decl_tag(t)->component_idx);
409 		return 0;
410 	default:
411 		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
412 		return -EINVAL;
413 	}
414 }
415 
btf_parse_type_sec(struct btf * btf)416 static int btf_parse_type_sec(struct btf *btf)
417 {
418 	struct btf_header *hdr = btf->hdr;
419 	void *next_type = btf->types_data;
420 	void *end_type = next_type + hdr->type_len;
421 	int err, type_size;
422 
423 	while (next_type + sizeof(struct btf_type) <= end_type) {
424 		if (btf->swapped_endian)
425 			btf_bswap_type_base(next_type);
426 
427 		type_size = btf_type_size(next_type);
428 		if (type_size < 0)
429 			return type_size;
430 		if (next_type + type_size > end_type) {
431 			pr_warn("BTF type [%d] is malformed\n", btf->start_id + btf->nr_types);
432 			return -EINVAL;
433 		}
434 
435 		if (btf->swapped_endian && btf_bswap_type_rest(next_type))
436 			return -EINVAL;
437 
438 		err = btf_add_type_idx_entry(btf, next_type - btf->types_data);
439 		if (err)
440 			return err;
441 
442 		next_type += type_size;
443 		btf->nr_types++;
444 	}
445 
446 	if (next_type != end_type) {
447 		pr_warn("BTF types data is malformed\n");
448 		return -EINVAL;
449 	}
450 
451 	return 0;
452 }
453 
btf_validate_str(const struct btf * btf,__u32 str_off,const char * what,__u32 type_id)454 static int btf_validate_str(const struct btf *btf, __u32 str_off, const char *what, __u32 type_id)
455 {
456 	const char *s;
457 
458 	s = btf__str_by_offset(btf, str_off);
459 	if (!s) {
460 		pr_warn("btf: type [%u]: invalid %s (string offset %u)\n", type_id, what, str_off);
461 		return -EINVAL;
462 	}
463 
464 	return 0;
465 }
466 
btf_validate_id(const struct btf * btf,__u32 id,__u32 ctx_id)467 static int btf_validate_id(const struct btf *btf, __u32 id, __u32 ctx_id)
468 {
469 	const struct btf_type *t;
470 
471 	t = btf__type_by_id(btf, id);
472 	if (!t) {
473 		pr_warn("btf: type [%u]: invalid referenced type ID %u\n", ctx_id, id);
474 		return -EINVAL;
475 	}
476 
477 	return 0;
478 }
479 
btf_validate_type(const struct btf * btf,const struct btf_type * t,__u32 id)480 static int btf_validate_type(const struct btf *btf, const struct btf_type *t, __u32 id)
481 {
482 	__u32 kind = btf_kind(t);
483 	int err, i, n;
484 
485 	err = btf_validate_str(btf, t->name_off, "type name", id);
486 	if (err)
487 		return err;
488 
489 	switch (kind) {
490 	case BTF_KIND_UNKN:
491 	case BTF_KIND_INT:
492 	case BTF_KIND_FWD:
493 	case BTF_KIND_FLOAT:
494 		break;
495 	case BTF_KIND_PTR:
496 	case BTF_KIND_TYPEDEF:
497 	case BTF_KIND_VOLATILE:
498 	case BTF_KIND_CONST:
499 	case BTF_KIND_RESTRICT:
500 	case BTF_KIND_VAR:
501 	case BTF_KIND_DECL_TAG:
502 	case BTF_KIND_TYPE_TAG:
503 		err = btf_validate_id(btf, t->type, id);
504 		if (err)
505 			return err;
506 		break;
507 	case BTF_KIND_ARRAY: {
508 		const struct btf_array *a = btf_array(t);
509 
510 		err = btf_validate_id(btf, a->type, id);
511 		err = err ?: btf_validate_id(btf, a->index_type, id);
512 		if (err)
513 			return err;
514 		break;
515 	}
516 	case BTF_KIND_STRUCT:
517 	case BTF_KIND_UNION: {
518 		const struct btf_member *m = btf_members(t);
519 
520 		n = btf_vlen(t);
521 		for (i = 0; i < n; i++, m++) {
522 			err = btf_validate_str(btf, m->name_off, "field name", id);
523 			err = err ?: btf_validate_id(btf, m->type, id);
524 			if (err)
525 				return err;
526 		}
527 		break;
528 	}
529 	case BTF_KIND_ENUM: {
530 		const struct btf_enum *m = btf_enum(t);
531 
532 		n = btf_vlen(t);
533 		for (i = 0; i < n; i++, m++) {
534 			err = btf_validate_str(btf, m->name_off, "enum name", id);
535 			if (err)
536 				return err;
537 		}
538 		break;
539 	}
540 	case BTF_KIND_ENUM64: {
541 		const struct btf_enum64 *m = btf_enum64(t);
542 
543 		n = btf_vlen(t);
544 		for (i = 0; i < n; i++, m++) {
545 			err = btf_validate_str(btf, m->name_off, "enum name", id);
546 			if (err)
547 				return err;
548 		}
549 		break;
550 	}
551 	case BTF_KIND_FUNC: {
552 		const struct btf_type *ft;
553 
554 		err = btf_validate_id(btf, t->type, id);
555 		if (err)
556 			return err;
557 		ft = btf__type_by_id(btf, t->type);
558 		if (btf_kind(ft) != BTF_KIND_FUNC_PROTO) {
559 			pr_warn("btf: type [%u]: referenced type [%u] is not FUNC_PROTO\n", id, t->type);
560 			return -EINVAL;
561 		}
562 		break;
563 	}
564 	case BTF_KIND_FUNC_PROTO: {
565 		const struct btf_param *m = btf_params(t);
566 
567 		n = btf_vlen(t);
568 		for (i = 0; i < n; i++, m++) {
569 			err = btf_validate_str(btf, m->name_off, "param name", id);
570 			err = err ?: btf_validate_id(btf, m->type, id);
571 			if (err)
572 				return err;
573 		}
574 		break;
575 	}
576 	case BTF_KIND_DATASEC: {
577 		const struct btf_var_secinfo *m = btf_var_secinfos(t);
578 
579 		n = btf_vlen(t);
580 		for (i = 0; i < n; i++, m++) {
581 			err = btf_validate_id(btf, m->type, id);
582 			if (err)
583 				return err;
584 		}
585 		break;
586 	}
587 	default:
588 		pr_warn("btf: type [%u]: unrecognized kind %u\n", id, kind);
589 		return -EINVAL;
590 	}
591 	return 0;
592 }
593 
594 /* Validate basic sanity of BTF. It's intentionally less thorough than
595  * kernel's validation and validates only properties of BTF that libbpf relies
596  * on to be correct (e.g., valid type IDs, valid string offsets, etc)
597  */
btf_sanity_check(const struct btf * btf)598 static int btf_sanity_check(const struct btf *btf)
599 {
600 	const struct btf_type *t;
601 	__u32 i, n = btf__type_cnt(btf);
602 	int err;
603 
604 	for (i = btf->start_id; i < n; i++) {
605 		t = btf_type_by_id(btf, i);
606 		err = btf_validate_type(btf, t, i);
607 		if (err)
608 			return err;
609 	}
610 	return 0;
611 }
612 
btf__type_cnt(const struct btf * btf)613 __u32 btf__type_cnt(const struct btf *btf)
614 {
615 	return btf->start_id + btf->nr_types;
616 }
617 
btf__base_btf(const struct btf * btf)618 const struct btf *btf__base_btf(const struct btf *btf)
619 {
620 	return btf->base_btf;
621 }
622 
623 /* internal helper returning non-const pointer to a type */
btf_type_by_id(const struct btf * btf,__u32 type_id)624 struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id)
625 {
626 	if (type_id == 0)
627 		return &btf_void;
628 	if (type_id < btf->start_id)
629 		return btf_type_by_id(btf->base_btf, type_id);
630 	return btf->types_data + btf->type_offs[type_id - btf->start_id];
631 }
632 
btf__type_by_id(const struct btf * btf,__u32 type_id)633 const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
634 {
635 	if (type_id >= btf->start_id + btf->nr_types)
636 		return errno = EINVAL, NULL;
637 	return btf_type_by_id((struct btf *)btf, type_id);
638 }
639 
determine_ptr_size(const struct btf * btf)640 static int determine_ptr_size(const struct btf *btf)
641 {
642 	static const char * const long_aliases[] = {
643 		"long",
644 		"long int",
645 		"int long",
646 		"unsigned long",
647 		"long unsigned",
648 		"unsigned long int",
649 		"unsigned int long",
650 		"long unsigned int",
651 		"long int unsigned",
652 		"int unsigned long",
653 		"int long unsigned",
654 	};
655 	const struct btf_type *t;
656 	const char *name;
657 	int i, j, n;
658 
659 	if (btf->base_btf && btf->base_btf->ptr_sz > 0)
660 		return btf->base_btf->ptr_sz;
661 
662 	n = btf__type_cnt(btf);
663 	for (i = 1; i < n; i++) {
664 		t = btf__type_by_id(btf, i);
665 		if (!btf_is_int(t))
666 			continue;
667 
668 		if (t->size != 4 && t->size != 8)
669 			continue;
670 
671 		name = btf__name_by_offset(btf, t->name_off);
672 		if (!name)
673 			continue;
674 
675 		for (j = 0; j < ARRAY_SIZE(long_aliases); j++) {
676 			if (strcmp(name, long_aliases[j]) == 0)
677 				return t->size;
678 		}
679 	}
680 
681 	return -1;
682 }
683 
btf_ptr_sz(const struct btf * btf)684 static size_t btf_ptr_sz(const struct btf *btf)
685 {
686 	if (!btf->ptr_sz)
687 		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
688 	return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
689 }
690 
691 /* Return pointer size this BTF instance assumes. The size is heuristically
692  * determined by looking for 'long' or 'unsigned long' integer type and
693  * recording its size in bytes. If BTF type information doesn't have any such
694  * type, this function returns 0. In the latter case, native architecture's
695  * pointer size is assumed, so will be either 4 or 8, depending on
696  * architecture that libbpf was compiled for. It's possible to override
697  * guessed value by using btf__set_pointer_size() API.
698  */
btf__pointer_size(const struct btf * btf)699 size_t btf__pointer_size(const struct btf *btf)
700 {
701 	if (!btf->ptr_sz)
702 		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
703 
704 	if (btf->ptr_sz < 0)
705 		/* not enough BTF type info to guess */
706 		return 0;
707 
708 	return btf->ptr_sz;
709 }
710 
711 /* Override or set pointer size in bytes. Only values of 4 and 8 are
712  * supported.
713  */
btf__set_pointer_size(struct btf * btf,size_t ptr_sz)714 int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
715 {
716 	if (ptr_sz != 4 && ptr_sz != 8)
717 		return libbpf_err(-EINVAL);
718 	btf->ptr_sz = ptr_sz;
719 	return 0;
720 }
721 
is_host_big_endian(void)722 static bool is_host_big_endian(void)
723 {
724 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
725 	return false;
726 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
727 	return true;
728 #else
729 # error "Unrecognized __BYTE_ORDER__"
730 #endif
731 }
732 
btf__endianness(const struct btf * btf)733 enum btf_endianness btf__endianness(const struct btf *btf)
734 {
735 	if (is_host_big_endian())
736 		return btf->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
737 	else
738 		return btf->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
739 }
740 
btf__set_endianness(struct btf * btf,enum btf_endianness endian)741 int btf__set_endianness(struct btf *btf, enum btf_endianness endian)
742 {
743 	if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
744 		return libbpf_err(-EINVAL);
745 
746 	btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
747 	if (!btf->swapped_endian) {
748 		free(btf->raw_data_swapped);
749 		btf->raw_data_swapped = NULL;
750 	}
751 	return 0;
752 }
753 
btf_type_is_void(const struct btf_type * t)754 static bool btf_type_is_void(const struct btf_type *t)
755 {
756 	return t == &btf_void || btf_is_fwd(t);
757 }
758 
btf_type_is_void_or_null(const struct btf_type * t)759 static bool btf_type_is_void_or_null(const struct btf_type *t)
760 {
761 	return !t || btf_type_is_void(t);
762 }
763 
764 #define MAX_RESOLVE_DEPTH 32
765 
btf__resolve_size(const struct btf * btf,__u32 type_id)766 __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
767 {
768 	const struct btf_array *array;
769 	const struct btf_type *t;
770 	__u32 nelems = 1;
771 	__s64 size = -1;
772 	int i;
773 
774 	t = btf__type_by_id(btf, type_id);
775 	for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) {
776 		switch (btf_kind(t)) {
777 		case BTF_KIND_INT:
778 		case BTF_KIND_STRUCT:
779 		case BTF_KIND_UNION:
780 		case BTF_KIND_ENUM:
781 		case BTF_KIND_ENUM64:
782 		case BTF_KIND_DATASEC:
783 		case BTF_KIND_FLOAT:
784 			size = t->size;
785 			goto done;
786 		case BTF_KIND_PTR:
787 			size = btf_ptr_sz(btf);
788 			goto done;
789 		case BTF_KIND_TYPEDEF:
790 		case BTF_KIND_VOLATILE:
791 		case BTF_KIND_CONST:
792 		case BTF_KIND_RESTRICT:
793 		case BTF_KIND_VAR:
794 		case BTF_KIND_DECL_TAG:
795 		case BTF_KIND_TYPE_TAG:
796 			type_id = t->type;
797 			break;
798 		case BTF_KIND_ARRAY:
799 			array = btf_array(t);
800 			if (nelems && array->nelems > UINT32_MAX / nelems)
801 				return libbpf_err(-E2BIG);
802 			nelems *= array->nelems;
803 			type_id = array->type;
804 			break;
805 		default:
806 			return libbpf_err(-EINVAL);
807 		}
808 
809 		t = btf__type_by_id(btf, type_id);
810 	}
811 
812 done:
813 	if (size < 0)
814 		return libbpf_err(-EINVAL);
815 	if (nelems && size > UINT32_MAX / nelems)
816 		return libbpf_err(-E2BIG);
817 
818 	return nelems * size;
819 }
820 
btf__align_of(const struct btf * btf,__u32 id)821 int btf__align_of(const struct btf *btf, __u32 id)
822 {
823 	const struct btf_type *t = btf__type_by_id(btf, id);
824 	__u16 kind = btf_kind(t);
825 
826 	switch (kind) {
827 	case BTF_KIND_INT:
828 	case BTF_KIND_ENUM:
829 	case BTF_KIND_ENUM64:
830 	case BTF_KIND_FLOAT:
831 		return min(btf_ptr_sz(btf), (size_t)t->size);
832 	case BTF_KIND_PTR:
833 		return btf_ptr_sz(btf);
834 	case BTF_KIND_TYPEDEF:
835 	case BTF_KIND_VOLATILE:
836 	case BTF_KIND_CONST:
837 	case BTF_KIND_RESTRICT:
838 	case BTF_KIND_TYPE_TAG:
839 		return btf__align_of(btf, t->type);
840 	case BTF_KIND_ARRAY:
841 		return btf__align_of(btf, btf_array(t)->type);
842 	case BTF_KIND_STRUCT:
843 	case BTF_KIND_UNION: {
844 		const struct btf_member *m = btf_members(t);
845 		__u16 vlen = btf_vlen(t);
846 		int i, max_align = 1, align;
847 
848 		for (i = 0; i < vlen; i++, m++) {
849 			align = btf__align_of(btf, m->type);
850 			if (align <= 0)
851 				return libbpf_err(align);
852 			max_align = max(max_align, align);
853 
854 			/* if field offset isn't aligned according to field
855 			 * type's alignment, then struct must be packed
856 			 */
857 			if (btf_member_bitfield_size(t, i) == 0 &&
858 			    (m->offset % (8 * align)) != 0)
859 				return 1;
860 		}
861 
862 		/* if struct/union size isn't a multiple of its alignment,
863 		 * then struct must be packed
864 		 */
865 		if ((t->size % max_align) != 0)
866 			return 1;
867 
868 		return max_align;
869 	}
870 	default:
871 		pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
872 		return errno = EINVAL, 0;
873 	}
874 }
875 
btf__resolve_type(const struct btf * btf,__u32 type_id)876 int btf__resolve_type(const struct btf *btf, __u32 type_id)
877 {
878 	const struct btf_type *t;
879 	int depth = 0;
880 
881 	t = btf__type_by_id(btf, type_id);
882 	while (depth < MAX_RESOLVE_DEPTH &&
883 	       !btf_type_is_void_or_null(t) &&
884 	       (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
885 		type_id = t->type;
886 		t = btf__type_by_id(btf, type_id);
887 		depth++;
888 	}
889 
890 	if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
891 		return libbpf_err(-EINVAL);
892 
893 	return type_id;
894 }
895 
btf__find_by_name(const struct btf * btf,const char * type_name)896 __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
897 {
898 	__u32 i, nr_types = btf__type_cnt(btf);
899 
900 	if (!strcmp(type_name, "void"))
901 		return 0;
902 
903 	for (i = 1; i < nr_types; i++) {
904 		const struct btf_type *t = btf__type_by_id(btf, i);
905 		const char *name = btf__name_by_offset(btf, t->name_off);
906 
907 		if (name && !strcmp(type_name, name))
908 			return i;
909 	}
910 
911 	return libbpf_err(-ENOENT);
912 }
913 
btf_find_by_name_kind(const struct btf * btf,int start_id,const char * type_name,__u32 kind)914 static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
915 				   const char *type_name, __u32 kind)
916 {
917 	__u32 i, nr_types = btf__type_cnt(btf);
918 
919 	if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
920 		return 0;
921 
922 	for (i = start_id; i < nr_types; i++) {
923 		const struct btf_type *t = btf__type_by_id(btf, i);
924 		const char *name;
925 
926 		if (btf_kind(t) != kind)
927 			continue;
928 		name = btf__name_by_offset(btf, t->name_off);
929 		if (name && !strcmp(type_name, name))
930 			return i;
931 	}
932 
933 	return libbpf_err(-ENOENT);
934 }
935 
btf__find_by_name_kind_own(const struct btf * btf,const char * type_name,__u32 kind)936 __s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
937 				 __u32 kind)
938 {
939 	return btf_find_by_name_kind(btf, btf->start_id, type_name, kind);
940 }
941 
btf__find_by_name_kind(const struct btf * btf,const char * type_name,__u32 kind)942 __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
943 			     __u32 kind)
944 {
945 	return btf_find_by_name_kind(btf, 1, type_name, kind);
946 }
947 
btf_is_modifiable(const struct btf * btf)948 static bool btf_is_modifiable(const struct btf *btf)
949 {
950 	return (void *)btf->hdr != btf->raw_data;
951 }
952 
btf__free(struct btf * btf)953 void btf__free(struct btf *btf)
954 {
955 	if (IS_ERR_OR_NULL(btf))
956 		return;
957 
958 	if (btf->fd >= 0)
959 		close(btf->fd);
960 
961 	if (btf_is_modifiable(btf)) {
962 		/* if BTF was modified after loading, it will have a split
963 		 * in-memory representation for header, types, and strings
964 		 * sections, so we need to free all of them individually. It
965 		 * might still have a cached contiguous raw data present,
966 		 * which will be unconditionally freed below.
967 		 */
968 		free(btf->hdr);
969 		free(btf->types_data);
970 		strset__free(btf->strs_set);
971 	}
972 	free(btf->raw_data);
973 	free(btf->raw_data_swapped);
974 	free(btf->type_offs);
975 	if (btf->owns_base)
976 		btf__free(btf->base_btf);
977 	free(btf);
978 }
979 
btf_new_empty(struct btf * base_btf)980 static struct btf *btf_new_empty(struct btf *base_btf)
981 {
982 	struct btf *btf;
983 
984 	btf = calloc(1, sizeof(*btf));
985 	if (!btf)
986 		return ERR_PTR(-ENOMEM);
987 
988 	btf->nr_types = 0;
989 	btf->start_id = 1;
990 	btf->start_str_off = 0;
991 	btf->fd = -1;
992 	btf->ptr_sz = sizeof(void *);
993 	btf->swapped_endian = false;
994 
995 	if (base_btf) {
996 		btf->base_btf = base_btf;
997 		btf->start_id = btf__type_cnt(base_btf);
998 		btf->start_str_off = base_btf->hdr->str_len;
999 	}
1000 
1001 	/* +1 for empty string at offset 0 */
1002 	btf->raw_size = sizeof(struct btf_header) + (base_btf ? 0 : 1);
1003 	btf->raw_data = calloc(1, btf->raw_size);
1004 	if (!btf->raw_data) {
1005 		free(btf);
1006 		return ERR_PTR(-ENOMEM);
1007 	}
1008 
1009 	btf->hdr = btf->raw_data;
1010 	btf->hdr->hdr_len = sizeof(struct btf_header);
1011 	btf->hdr->magic = BTF_MAGIC;
1012 	btf->hdr->version = BTF_VERSION;
1013 
1014 	btf->types_data = btf->raw_data + btf->hdr->hdr_len;
1015 	btf->strs_data = btf->raw_data + btf->hdr->hdr_len;
1016 	btf->hdr->str_len = base_btf ? 0 : 1; /* empty string at offset 0 */
1017 
1018 	return btf;
1019 }
1020 
btf__new_empty(void)1021 struct btf *btf__new_empty(void)
1022 {
1023 	return libbpf_ptr(btf_new_empty(NULL));
1024 }
1025 
btf__new_empty_split(struct btf * base_btf)1026 struct btf *btf__new_empty_split(struct btf *base_btf)
1027 {
1028 	return libbpf_ptr(btf_new_empty(base_btf));
1029 }
1030 
btf_new(const void * data,__u32 size,struct btf * base_btf)1031 static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
1032 {
1033 	struct btf *btf;
1034 	int err;
1035 
1036 	btf = calloc(1, sizeof(struct btf));
1037 	if (!btf)
1038 		return ERR_PTR(-ENOMEM);
1039 
1040 	btf->nr_types = 0;
1041 	btf->start_id = 1;
1042 	btf->start_str_off = 0;
1043 	btf->fd = -1;
1044 
1045 	if (base_btf) {
1046 		btf->base_btf = base_btf;
1047 		btf->start_id = btf__type_cnt(base_btf);
1048 		btf->start_str_off = base_btf->hdr->str_len;
1049 	}
1050 
1051 	btf->raw_data = malloc(size);
1052 	if (!btf->raw_data) {
1053 		err = -ENOMEM;
1054 		goto done;
1055 	}
1056 	memcpy(btf->raw_data, data, size);
1057 	btf->raw_size = size;
1058 
1059 	btf->hdr = btf->raw_data;
1060 	err = btf_parse_hdr(btf);
1061 	if (err)
1062 		goto done;
1063 
1064 	btf->strs_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->str_off;
1065 	btf->types_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->type_off;
1066 
1067 	err = btf_parse_str_sec(btf);
1068 	err = err ?: btf_parse_type_sec(btf);
1069 	err = err ?: btf_sanity_check(btf);
1070 	if (err)
1071 		goto done;
1072 
1073 done:
1074 	if (err) {
1075 		btf__free(btf);
1076 		return ERR_PTR(err);
1077 	}
1078 
1079 	return btf;
1080 }
1081 
btf__new(const void * data,__u32 size)1082 struct btf *btf__new(const void *data, __u32 size)
1083 {
1084 	return libbpf_ptr(btf_new(data, size, NULL));
1085 }
1086 
btf__new_split(const void * data,__u32 size,struct btf * base_btf)1087 struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf)
1088 {
1089 	return libbpf_ptr(btf_new(data, size, base_btf));
1090 }
1091 
1092 struct btf_elf_secs {
1093 	Elf_Data *btf_data;
1094 	Elf_Data *btf_ext_data;
1095 	Elf_Data *btf_base_data;
1096 };
1097 
btf_find_elf_sections(Elf * elf,const char * path,struct btf_elf_secs * secs)1098 static int btf_find_elf_sections(Elf *elf, const char *path, struct btf_elf_secs *secs)
1099 {
1100 	Elf_Scn *scn = NULL;
1101 	Elf_Data *data;
1102 	GElf_Ehdr ehdr;
1103 	size_t shstrndx;
1104 	int idx = 0;
1105 
1106 	if (!gelf_getehdr(elf, &ehdr)) {
1107 		pr_warn("failed to get EHDR from %s\n", path);
1108 		goto err;
1109 	}
1110 
1111 	if (elf_getshdrstrndx(elf, &shstrndx)) {
1112 		pr_warn("failed to get section names section index for %s\n",
1113 			path);
1114 		goto err;
1115 	}
1116 
1117 	if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
1118 		pr_warn("failed to get e_shstrndx from %s\n", path);
1119 		goto err;
1120 	}
1121 
1122 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
1123 		Elf_Data **field;
1124 		GElf_Shdr sh;
1125 		char *name;
1126 
1127 		idx++;
1128 		if (gelf_getshdr(scn, &sh) != &sh) {
1129 			pr_warn("failed to get section(%d) header from %s\n",
1130 				idx, path);
1131 			goto err;
1132 		}
1133 		name = elf_strptr(elf, shstrndx, sh.sh_name);
1134 		if (!name) {
1135 			pr_warn("failed to get section(%d) name from %s\n",
1136 				idx, path);
1137 			goto err;
1138 		}
1139 
1140 		if (strcmp(name, BTF_ELF_SEC) == 0)
1141 			field = &secs->btf_data;
1142 		else if (strcmp(name, BTF_EXT_ELF_SEC) == 0)
1143 			field = &secs->btf_ext_data;
1144 		else if (strcmp(name, BTF_BASE_ELF_SEC) == 0)
1145 			field = &secs->btf_base_data;
1146 		else
1147 			continue;
1148 
1149 		data = elf_getdata(scn, 0);
1150 		if (!data) {
1151 			pr_warn("failed to get section(%d, %s) data from %s\n",
1152 				idx, name, path);
1153 			goto err;
1154 		}
1155 		*field = data;
1156 	}
1157 
1158 	return 0;
1159 
1160 err:
1161 	return -LIBBPF_ERRNO__FORMAT;
1162 }
1163 
btf_parse_elf(const char * path,struct btf * base_btf,struct btf_ext ** btf_ext)1164 static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
1165 				 struct btf_ext **btf_ext)
1166 {
1167 	struct btf_elf_secs secs = {};
1168 	struct btf *dist_base_btf = NULL;
1169 	struct btf *btf = NULL;
1170 	int err = 0, fd = -1;
1171 	Elf *elf = NULL;
1172 
1173 	if (elf_version(EV_CURRENT) == EV_NONE) {
1174 		pr_warn("failed to init libelf for %s\n", path);
1175 		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
1176 	}
1177 
1178 	fd = open(path, O_RDONLY | O_CLOEXEC);
1179 	if (fd < 0) {
1180 		err = -errno;
1181 		pr_warn("failed to open %s: %s\n", path, strerror(errno));
1182 		return ERR_PTR(err);
1183 	}
1184 
1185 	elf = elf_begin(fd, ELF_C_READ, NULL);
1186 	if (!elf) {
1187 		pr_warn("failed to open %s as ELF file\n", path);
1188 		goto done;
1189 	}
1190 
1191 	err = btf_find_elf_sections(elf, path, &secs);
1192 	if (err)
1193 		goto done;
1194 
1195 	if (!secs.btf_data) {
1196 		pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path);
1197 		err = -ENODATA;
1198 		goto done;
1199 	}
1200 
1201 	if (secs.btf_base_data) {
1202 		dist_base_btf = btf_new(secs.btf_base_data->d_buf, secs.btf_base_data->d_size,
1203 					NULL);
1204 		if (IS_ERR(dist_base_btf)) {
1205 			err = PTR_ERR(dist_base_btf);
1206 			dist_base_btf = NULL;
1207 			goto done;
1208 		}
1209 	}
1210 
1211 	btf = btf_new(secs.btf_data->d_buf, secs.btf_data->d_size,
1212 		      dist_base_btf ?: base_btf);
1213 	if (IS_ERR(btf)) {
1214 		err = PTR_ERR(btf);
1215 		goto done;
1216 	}
1217 	if (dist_base_btf && base_btf) {
1218 		err = btf__relocate(btf, base_btf);
1219 		if (err)
1220 			goto done;
1221 		btf__free(dist_base_btf);
1222 		dist_base_btf = NULL;
1223 	}
1224 
1225 	if (dist_base_btf)
1226 		btf->owns_base = true;
1227 
1228 	switch (gelf_getclass(elf)) {
1229 	case ELFCLASS32:
1230 		btf__set_pointer_size(btf, 4);
1231 		break;
1232 	case ELFCLASS64:
1233 		btf__set_pointer_size(btf, 8);
1234 		break;
1235 	default:
1236 		pr_warn("failed to get ELF class (bitness) for %s\n", path);
1237 		break;
1238 	}
1239 
1240 	if (btf_ext && secs.btf_ext_data) {
1241 		*btf_ext = btf_ext__new(secs.btf_ext_data->d_buf, secs.btf_ext_data->d_size);
1242 		if (IS_ERR(*btf_ext)) {
1243 			err = PTR_ERR(*btf_ext);
1244 			goto done;
1245 		}
1246 	} else if (btf_ext) {
1247 		*btf_ext = NULL;
1248 	}
1249 done:
1250 	if (elf)
1251 		elf_end(elf);
1252 	close(fd);
1253 
1254 	if (!err)
1255 		return btf;
1256 
1257 	if (btf_ext)
1258 		btf_ext__free(*btf_ext);
1259 	btf__free(dist_base_btf);
1260 	btf__free(btf);
1261 
1262 	return ERR_PTR(err);
1263 }
1264 
btf__parse_elf(const char * path,struct btf_ext ** btf_ext)1265 struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
1266 {
1267 	return libbpf_ptr(btf_parse_elf(path, NULL, btf_ext));
1268 }
1269 
btf__parse_elf_split(const char * path,struct btf * base_btf)1270 struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf)
1271 {
1272 	return libbpf_ptr(btf_parse_elf(path, base_btf, NULL));
1273 }
1274 
btf_parse_raw(const char * path,struct btf * base_btf)1275 static struct btf *btf_parse_raw(const char *path, struct btf *base_btf)
1276 {
1277 	struct btf *btf = NULL;
1278 	void *data = NULL;
1279 	FILE *f = NULL;
1280 	__u16 magic;
1281 	int err = 0;
1282 	long sz;
1283 
1284 	f = fopen(path, "rbe");
1285 	if (!f) {
1286 		err = -errno;
1287 		goto err_out;
1288 	}
1289 
1290 	/* check BTF magic */
1291 	if (fread(&magic, 1, sizeof(magic), f) < sizeof(magic)) {
1292 		err = -EIO;
1293 		goto err_out;
1294 	}
1295 	if (magic != BTF_MAGIC && magic != bswap_16(BTF_MAGIC)) {
1296 		/* definitely not a raw BTF */
1297 		err = -EPROTO;
1298 		goto err_out;
1299 	}
1300 
1301 	/* get file size */
1302 	if (fseek(f, 0, SEEK_END)) {
1303 		err = -errno;
1304 		goto err_out;
1305 	}
1306 	sz = ftell(f);
1307 	if (sz < 0) {
1308 		err = -errno;
1309 		goto err_out;
1310 	}
1311 	/* rewind to the start */
1312 	if (fseek(f, 0, SEEK_SET)) {
1313 		err = -errno;
1314 		goto err_out;
1315 	}
1316 
1317 	/* pre-alloc memory and read all of BTF data */
1318 	data = malloc(sz);
1319 	if (!data) {
1320 		err = -ENOMEM;
1321 		goto err_out;
1322 	}
1323 	if (fread(data, 1, sz, f) < sz) {
1324 		err = -EIO;
1325 		goto err_out;
1326 	}
1327 
1328 	/* finally parse BTF data */
1329 	btf = btf_new(data, sz, base_btf);
1330 
1331 err_out:
1332 	free(data);
1333 	if (f)
1334 		fclose(f);
1335 	return err ? ERR_PTR(err) : btf;
1336 }
1337 
btf__parse_raw(const char * path)1338 struct btf *btf__parse_raw(const char *path)
1339 {
1340 	return libbpf_ptr(btf_parse_raw(path, NULL));
1341 }
1342 
btf__parse_raw_split(const char * path,struct btf * base_btf)1343 struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf)
1344 {
1345 	return libbpf_ptr(btf_parse_raw(path, base_btf));
1346 }
1347 
btf_parse(const char * path,struct btf * base_btf,struct btf_ext ** btf_ext)1348 static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext)
1349 {
1350 	struct btf *btf;
1351 	int err;
1352 
1353 	if (btf_ext)
1354 		*btf_ext = NULL;
1355 
1356 	btf = btf_parse_raw(path, base_btf);
1357 	err = libbpf_get_error(btf);
1358 	if (!err)
1359 		return btf;
1360 	if (err != -EPROTO)
1361 		return ERR_PTR(err);
1362 	return btf_parse_elf(path, base_btf, btf_ext);
1363 }
1364 
btf__parse(const char * path,struct btf_ext ** btf_ext)1365 struct btf *btf__parse(const char *path, struct btf_ext **btf_ext)
1366 {
1367 	return libbpf_ptr(btf_parse(path, NULL, btf_ext));
1368 }
1369 
btf__parse_split(const char * path,struct btf * base_btf)1370 struct btf *btf__parse_split(const char *path, struct btf *base_btf)
1371 {
1372 	return libbpf_ptr(btf_parse(path, base_btf, NULL));
1373 }
1374 
1375 static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);
1376 
btf_load_into_kernel(struct btf * btf,char * log_buf,size_t log_sz,__u32 log_level,int token_fd)1377 int btf_load_into_kernel(struct btf *btf,
1378 			 char *log_buf, size_t log_sz, __u32 log_level,
1379 			 int token_fd)
1380 {
1381 	LIBBPF_OPTS(bpf_btf_load_opts, opts);
1382 	__u32 buf_sz = 0, raw_size;
1383 	char *buf = NULL, *tmp;
1384 	void *raw_data;
1385 	int err = 0;
1386 
1387 	if (btf->fd >= 0)
1388 		return libbpf_err(-EEXIST);
1389 	if (log_sz && !log_buf)
1390 		return libbpf_err(-EINVAL);
1391 
1392 	/* cache native raw data representation */
1393 	raw_data = btf_get_raw_data(btf, &raw_size, false);
1394 	if (!raw_data) {
1395 		err = -ENOMEM;
1396 		goto done;
1397 	}
1398 	btf->raw_size = raw_size;
1399 	btf->raw_data = raw_data;
1400 
1401 retry_load:
1402 	/* if log_level is 0, we won't provide log_buf/log_size to the kernel,
1403 	 * initially. Only if BTF loading fails, we bump log_level to 1 and
1404 	 * retry, using either auto-allocated or custom log_buf. This way
1405 	 * non-NULL custom log_buf provides a buffer just in case, but hopes
1406 	 * for successful load and no need for log_buf.
1407 	 */
1408 	if (log_level) {
1409 		/* if caller didn't provide custom log_buf, we'll keep
1410 		 * allocating our own progressively bigger buffers for BTF
1411 		 * verification log
1412 		 */
1413 		if (!log_buf) {
1414 			buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2);
1415 			tmp = realloc(buf, buf_sz);
1416 			if (!tmp) {
1417 				err = -ENOMEM;
1418 				goto done;
1419 			}
1420 			buf = tmp;
1421 			buf[0] = '\0';
1422 		}
1423 
1424 		opts.log_buf = log_buf ? log_buf : buf;
1425 		opts.log_size = log_buf ? log_sz : buf_sz;
1426 		opts.log_level = log_level;
1427 	}
1428 
1429 	opts.token_fd = token_fd;
1430 	if (token_fd)
1431 		opts.btf_flags |= BPF_F_TOKEN_FD;
1432 
1433 	btf->fd = bpf_btf_load(raw_data, raw_size, &opts);
1434 	if (btf->fd < 0) {
1435 		/* time to turn on verbose mode and try again */
1436 		if (log_level == 0) {
1437 			log_level = 1;
1438 			goto retry_load;
1439 		}
1440 		/* only retry if caller didn't provide custom log_buf, but
1441 		 * make sure we can never overflow buf_sz
1442 		 */
1443 		if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2)
1444 			goto retry_load;
1445 
1446 		err = -errno;
1447 		pr_warn("BTF loading error: %d\n", err);
1448 		/* don't print out contents of custom log_buf */
1449 		if (!log_buf && buf[0])
1450 			pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf);
1451 	}
1452 
1453 done:
1454 	free(buf);
1455 	return libbpf_err(err);
1456 }
1457 
btf__load_into_kernel(struct btf * btf)1458 int btf__load_into_kernel(struct btf *btf)
1459 {
1460 	return btf_load_into_kernel(btf, NULL, 0, 0, 0);
1461 }
1462 
btf__fd(const struct btf * btf)1463 int btf__fd(const struct btf *btf)
1464 {
1465 	return btf->fd;
1466 }
1467 
btf__set_fd(struct btf * btf,int fd)1468 void btf__set_fd(struct btf *btf, int fd)
1469 {
1470 	btf->fd = fd;
1471 }
1472 
btf_strs_data(const struct btf * btf)1473 static const void *btf_strs_data(const struct btf *btf)
1474 {
1475 	return btf->strs_data ? btf->strs_data : strset__data(btf->strs_set);
1476 }
1477 
btf_get_raw_data(const struct btf * btf,__u32 * size,bool swap_endian)1478 static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian)
1479 {
1480 	struct btf_header *hdr = btf->hdr;
1481 	struct btf_type *t;
1482 	void *data, *p;
1483 	__u32 data_sz;
1484 	int i;
1485 
1486 	data = swap_endian ? btf->raw_data_swapped : btf->raw_data;
1487 	if (data) {
1488 		*size = btf->raw_size;
1489 		return data;
1490 	}
1491 
1492 	data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len;
1493 	data = calloc(1, data_sz);
1494 	if (!data)
1495 		return NULL;
1496 	p = data;
1497 
1498 	memcpy(p, hdr, hdr->hdr_len);
1499 	if (swap_endian)
1500 		btf_bswap_hdr(p);
1501 	p += hdr->hdr_len;
1502 
1503 	memcpy(p, btf->types_data, hdr->type_len);
1504 	if (swap_endian) {
1505 		for (i = 0; i < btf->nr_types; i++) {
1506 			t = p + btf->type_offs[i];
1507 			/* btf_bswap_type_rest() relies on native t->info, so
1508 			 * we swap base type info after we swapped all the
1509 			 * additional information
1510 			 */
1511 			if (btf_bswap_type_rest(t))
1512 				goto err_out;
1513 			btf_bswap_type_base(t);
1514 		}
1515 	}
1516 	p += hdr->type_len;
1517 
1518 	memcpy(p, btf_strs_data(btf), hdr->str_len);
1519 	p += hdr->str_len;
1520 
1521 	*size = data_sz;
1522 	return data;
1523 err_out:
1524 	free(data);
1525 	return NULL;
1526 }
1527 
btf__raw_data(const struct btf * btf_ro,__u32 * size)1528 const void *btf__raw_data(const struct btf *btf_ro, __u32 *size)
1529 {
1530 	struct btf *btf = (struct btf *)btf_ro;
1531 	__u32 data_sz;
1532 	void *data;
1533 
1534 	data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian);
1535 	if (!data)
1536 		return errno = ENOMEM, NULL;
1537 
1538 	btf->raw_size = data_sz;
1539 	if (btf->swapped_endian)
1540 		btf->raw_data_swapped = data;
1541 	else
1542 		btf->raw_data = data;
1543 	*size = data_sz;
1544 	return data;
1545 }
1546 
1547 __attribute__((alias("btf__raw_data")))
1548 const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
1549 
btf__str_by_offset(const struct btf * btf,__u32 offset)1550 const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
1551 {
1552 	if (offset < btf->start_str_off)
1553 		return btf__str_by_offset(btf->base_btf, offset);
1554 	else if (offset - btf->start_str_off < btf->hdr->str_len)
1555 		return btf_strs_data(btf) + (offset - btf->start_str_off);
1556 	else
1557 		return errno = EINVAL, NULL;
1558 }
1559 
btf__name_by_offset(const struct btf * btf,__u32 offset)1560 const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
1561 {
1562 	return btf__str_by_offset(btf, offset);
1563 }
1564 
btf_get_from_fd(int btf_fd,struct btf * base_btf)1565 struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
1566 {
1567 	struct bpf_btf_info btf_info;
1568 	__u32 len = sizeof(btf_info);
1569 	__u32 last_size;
1570 	struct btf *btf;
1571 	void *ptr;
1572 	int err;
1573 
1574 	/* we won't know btf_size until we call bpf_btf_get_info_by_fd(). so
1575 	 * let's start with a sane default - 4KiB here - and resize it only if
1576 	 * bpf_btf_get_info_by_fd() needs a bigger buffer.
1577 	 */
1578 	last_size = 4096;
1579 	ptr = malloc(last_size);
1580 	if (!ptr)
1581 		return ERR_PTR(-ENOMEM);
1582 
1583 	memset(&btf_info, 0, sizeof(btf_info));
1584 	btf_info.btf = ptr_to_u64(ptr);
1585 	btf_info.btf_size = last_size;
1586 	err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
1587 
1588 	if (!err && btf_info.btf_size > last_size) {
1589 		void *temp_ptr;
1590 
1591 		last_size = btf_info.btf_size;
1592 		temp_ptr = realloc(ptr, last_size);
1593 		if (!temp_ptr) {
1594 			btf = ERR_PTR(-ENOMEM);
1595 			goto exit_free;
1596 		}
1597 		ptr = temp_ptr;
1598 
1599 		len = sizeof(btf_info);
1600 		memset(&btf_info, 0, sizeof(btf_info));
1601 		btf_info.btf = ptr_to_u64(ptr);
1602 		btf_info.btf_size = last_size;
1603 
1604 		err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
1605 	}
1606 
1607 	if (err || btf_info.btf_size > last_size) {
1608 		btf = err ? ERR_PTR(-errno) : ERR_PTR(-E2BIG);
1609 		goto exit_free;
1610 	}
1611 
1612 	btf = btf_new(ptr, btf_info.btf_size, base_btf);
1613 
1614 exit_free:
1615 	free(ptr);
1616 	return btf;
1617 }
1618 
btf__load_from_kernel_by_id_split(__u32 id,struct btf * base_btf)1619 struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
1620 {
1621 	struct btf *btf;
1622 	int btf_fd;
1623 
1624 	btf_fd = bpf_btf_get_fd_by_id(id);
1625 	if (btf_fd < 0)
1626 		return libbpf_err_ptr(-errno);
1627 
1628 	btf = btf_get_from_fd(btf_fd, base_btf);
1629 	close(btf_fd);
1630 
1631 	return libbpf_ptr(btf);
1632 }
1633 
btf__load_from_kernel_by_id(__u32 id)1634 struct btf *btf__load_from_kernel_by_id(__u32 id)
1635 {
1636 	return btf__load_from_kernel_by_id_split(id, NULL);
1637 }
1638 
btf_invalidate_raw_data(struct btf * btf)1639 static void btf_invalidate_raw_data(struct btf *btf)
1640 {
1641 	if (btf->raw_data) {
1642 		free(btf->raw_data);
1643 		btf->raw_data = NULL;
1644 	}
1645 	if (btf->raw_data_swapped) {
1646 		free(btf->raw_data_swapped);
1647 		btf->raw_data_swapped = NULL;
1648 	}
1649 }
1650 
1651 /* Ensure BTF is ready to be modified (by splitting into a three memory
1652  * regions for header, types, and strings). Also invalidate cached
1653  * raw_data, if any.
1654  */
btf_ensure_modifiable(struct btf * btf)1655 static int btf_ensure_modifiable(struct btf *btf)
1656 {
1657 	void *hdr, *types;
1658 	struct strset *set = NULL;
1659 	int err = -ENOMEM;
1660 
1661 	if (btf_is_modifiable(btf)) {
1662 		/* any BTF modification invalidates raw_data */
1663 		btf_invalidate_raw_data(btf);
1664 		return 0;
1665 	}
1666 
1667 	/* split raw data into three memory regions */
1668 	hdr = malloc(btf->hdr->hdr_len);
1669 	types = malloc(btf->hdr->type_len);
1670 	if (!hdr || !types)
1671 		goto err_out;
1672 
1673 	memcpy(hdr, btf->hdr, btf->hdr->hdr_len);
1674 	memcpy(types, btf->types_data, btf->hdr->type_len);
1675 
1676 	/* build lookup index for all strings */
1677 	set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr->str_len);
1678 	if (IS_ERR(set)) {
1679 		err = PTR_ERR(set);
1680 		goto err_out;
1681 	}
1682 
1683 	/* only when everything was successful, update internal state */
1684 	btf->hdr = hdr;
1685 	btf->types_data = types;
1686 	btf->types_data_cap = btf->hdr->type_len;
1687 	btf->strs_data = NULL;
1688 	btf->strs_set = set;
1689 	/* if BTF was created from scratch, all strings are guaranteed to be
1690 	 * unique and deduplicated
1691 	 */
1692 	if (btf->hdr->str_len == 0)
1693 		btf->strs_deduped = true;
1694 	if (!btf->base_btf && btf->hdr->str_len == 1)
1695 		btf->strs_deduped = true;
1696 
1697 	/* invalidate raw_data representation */
1698 	btf_invalidate_raw_data(btf);
1699 
1700 	return 0;
1701 
1702 err_out:
1703 	strset__free(set);
1704 	free(hdr);
1705 	free(types);
1706 	return err;
1707 }
1708 
1709 /* Find an offset in BTF string section that corresponds to a given string *s*.
1710  * Returns:
1711  *   - >0 offset into string section, if string is found;
1712  *   - -ENOENT, if string is not in the string section;
1713  *   - <0, on any other error.
1714  */
btf__find_str(struct btf * btf,const char * s)1715 int btf__find_str(struct btf *btf, const char *s)
1716 {
1717 	int off;
1718 
1719 	if (btf->base_btf) {
1720 		off = btf__find_str(btf->base_btf, s);
1721 		if (off != -ENOENT)
1722 			return off;
1723 	}
1724 
1725 	/* BTF needs to be in a modifiable state to build string lookup index */
1726 	if (btf_ensure_modifiable(btf))
1727 		return libbpf_err(-ENOMEM);
1728 
1729 	off = strset__find_str(btf->strs_set, s);
1730 	if (off < 0)
1731 		return libbpf_err(off);
1732 
1733 	return btf->start_str_off + off;
1734 }
1735 
1736 /* Add a string s to the BTF string section.
1737  * Returns:
1738  *   - > 0 offset into string section, on success;
1739  *   - < 0, on error.
1740  */
btf__add_str(struct btf * btf,const char * s)1741 int btf__add_str(struct btf *btf, const char *s)
1742 {
1743 	int off;
1744 
1745 	if (btf->base_btf) {
1746 		off = btf__find_str(btf->base_btf, s);
1747 		if (off != -ENOENT)
1748 			return off;
1749 	}
1750 
1751 	if (btf_ensure_modifiable(btf))
1752 		return libbpf_err(-ENOMEM);
1753 
1754 	off = strset__add_str(btf->strs_set, s);
1755 	if (off < 0)
1756 		return libbpf_err(off);
1757 
1758 	btf->hdr->str_len = strset__data_size(btf->strs_set);
1759 
1760 	return btf->start_str_off + off;
1761 }
1762 
btf_add_type_mem(struct btf * btf,size_t add_sz)1763 static void *btf_add_type_mem(struct btf *btf, size_t add_sz)
1764 {
1765 	return libbpf_add_mem(&btf->types_data, &btf->types_data_cap, 1,
1766 			      btf->hdr->type_len, UINT_MAX, add_sz);
1767 }
1768 
btf_type_inc_vlen(struct btf_type * t)1769 static void btf_type_inc_vlen(struct btf_type *t)
1770 {
1771 	t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t));
1772 }
1773 
btf_commit_type(struct btf * btf,int data_sz)1774 static int btf_commit_type(struct btf *btf, int data_sz)
1775 {
1776 	int err;
1777 
1778 	err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
1779 	if (err)
1780 		return libbpf_err(err);
1781 
1782 	btf->hdr->type_len += data_sz;
1783 	btf->hdr->str_off += data_sz;
1784 	btf->nr_types++;
1785 	return btf->start_id + btf->nr_types - 1;
1786 }
1787 
1788 struct btf_pipe {
1789 	const struct btf *src;
1790 	struct btf *dst;
1791 	struct hashmap *str_off_map; /* map string offsets from src to dst */
1792 };
1793 
btf_rewrite_str(struct btf_pipe * p,__u32 * str_off)1794 static int btf_rewrite_str(struct btf_pipe *p, __u32 *str_off)
1795 {
1796 	long mapped_off;
1797 	int off, err;
1798 
1799 	if (!*str_off) /* nothing to do for empty strings */
1800 		return 0;
1801 
1802 	if (p->str_off_map &&
1803 	    hashmap__find(p->str_off_map, *str_off, &mapped_off)) {
1804 		*str_off = mapped_off;
1805 		return 0;
1806 	}
1807 
1808 	off = btf__add_str(p->dst, btf__str_by_offset(p->src, *str_off));
1809 	if (off < 0)
1810 		return off;
1811 
1812 	/* Remember string mapping from src to dst.  It avoids
1813 	 * performing expensive string comparisons.
1814 	 */
1815 	if (p->str_off_map) {
1816 		err = hashmap__append(p->str_off_map, *str_off, off);
1817 		if (err)
1818 			return err;
1819 	}
1820 
1821 	*str_off = off;
1822 	return 0;
1823 }
1824 
btf_add_type(struct btf_pipe * p,const struct btf_type * src_type)1825 static int btf_add_type(struct btf_pipe *p, const struct btf_type *src_type)
1826 {
1827 	struct btf_field_iter it;
1828 	struct btf_type *t;
1829 	__u32 *str_off;
1830 	int sz, err;
1831 
1832 	sz = btf_type_size(src_type);
1833 	if (sz < 0)
1834 		return libbpf_err(sz);
1835 
1836 	/* deconstruct BTF, if necessary, and invalidate raw_data */
1837 	if (btf_ensure_modifiable(p->dst))
1838 		return libbpf_err(-ENOMEM);
1839 
1840 	t = btf_add_type_mem(p->dst, sz);
1841 	if (!t)
1842 		return libbpf_err(-ENOMEM);
1843 
1844 	memcpy(t, src_type, sz);
1845 
1846 	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
1847 	if (err)
1848 		return libbpf_err(err);
1849 
1850 	while ((str_off = btf_field_iter_next(&it))) {
1851 		err = btf_rewrite_str(p, str_off);
1852 		if (err)
1853 			return libbpf_err(err);
1854 	}
1855 
1856 	return btf_commit_type(p->dst, sz);
1857 }
1858 
btf__add_type(struct btf * btf,const struct btf * src_btf,const struct btf_type * src_type)1859 int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
1860 {
1861 	struct btf_pipe p = { .src = src_btf, .dst = btf };
1862 
1863 	return btf_add_type(&p, src_type);
1864 }
1865 
1866 static size_t btf_dedup_identity_hash_fn(long key, void *ctx);
1867 static bool btf_dedup_equal_fn(long k1, long k2, void *ctx);
1868 
btf__add_btf(struct btf * btf,const struct btf * src_btf)1869 int btf__add_btf(struct btf *btf, const struct btf *src_btf)
1870 {
1871 	struct btf_pipe p = { .src = src_btf, .dst = btf };
1872 	int data_sz, sz, cnt, i, err, old_strs_len;
1873 	__u32 *off;
1874 	void *t;
1875 
1876 	/* appending split BTF isn't supported yet */
1877 	if (src_btf->base_btf)
1878 		return libbpf_err(-ENOTSUP);
1879 
1880 	/* deconstruct BTF, if necessary, and invalidate raw_data */
1881 	if (btf_ensure_modifiable(btf))
1882 		return libbpf_err(-ENOMEM);
1883 
1884 	/* remember original strings section size if we have to roll back
1885 	 * partial strings section changes
1886 	 */
1887 	old_strs_len = btf->hdr->str_len;
1888 
1889 	data_sz = src_btf->hdr->type_len;
1890 	cnt = btf__type_cnt(src_btf) - 1;
1891 
1892 	/* pre-allocate enough memory for new types */
1893 	t = btf_add_type_mem(btf, data_sz);
1894 	if (!t)
1895 		return libbpf_err(-ENOMEM);
1896 
1897 	/* pre-allocate enough memory for type offset index for new types */
1898 	off = btf_add_type_offs_mem(btf, cnt);
1899 	if (!off)
1900 		return libbpf_err(-ENOMEM);
1901 
1902 	/* Map the string offsets from src_btf to the offsets from btf to improve performance */
1903 	p.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
1904 	if (IS_ERR(p.str_off_map))
1905 		return libbpf_err(-ENOMEM);
1906 
1907 	/* bulk copy types data for all types from src_btf */
1908 	memcpy(t, src_btf->types_data, data_sz);
1909 
1910 	for (i = 0; i < cnt; i++) {
1911 		struct btf_field_iter it;
1912 		__u32 *type_id, *str_off;
1913 
1914 		sz = btf_type_size(t);
1915 		if (sz < 0) {
1916 			/* unlikely, has to be corrupted src_btf */
1917 			err = sz;
1918 			goto err_out;
1919 		}
1920 
1921 		/* fill out type ID to type offset mapping for lookups by type ID */
1922 		*off = t - btf->types_data;
1923 
1924 		/* add, dedup, and remap strings referenced by this BTF type */
1925 		err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
1926 		if (err)
1927 			goto err_out;
1928 		while ((str_off = btf_field_iter_next(&it))) {
1929 			err = btf_rewrite_str(&p, str_off);
1930 			if (err)
1931 				goto err_out;
1932 		}
1933 
1934 		/* remap all type IDs referenced from this BTF type */
1935 		err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
1936 		if (err)
1937 			goto err_out;
1938 
1939 		while ((type_id = btf_field_iter_next(&it))) {
1940 			if (!*type_id) /* nothing to do for VOID references */
1941 				continue;
1942 
1943 			/* we haven't updated btf's type count yet, so
1944 			 * btf->start_id + btf->nr_types - 1 is the type ID offset we should
1945 			 * add to all newly added BTF types
1946 			 */
1947 			*type_id += btf->start_id + btf->nr_types - 1;
1948 		}
1949 
1950 		/* go to next type data and type offset index entry */
1951 		t += sz;
1952 		off++;
1953 	}
1954 
1955 	/* Up until now any of the copied type data was effectively invisible,
1956 	 * so if we exited early before this point due to error, BTF would be
1957 	 * effectively unmodified. There would be extra internal memory
1958 	 * pre-allocated, but it would not be available for querying.  But now
1959 	 * that we've copied and rewritten all the data successfully, we can
1960 	 * update type count and various internal offsets and sizes to
1961 	 * "commit" the changes and made them visible to the outside world.
1962 	 */
1963 	btf->hdr->type_len += data_sz;
1964 	btf->hdr->str_off += data_sz;
1965 	btf->nr_types += cnt;
1966 
1967 	hashmap__free(p.str_off_map);
1968 
1969 	/* return type ID of the first added BTF type */
1970 	return btf->start_id + btf->nr_types - cnt;
1971 err_out:
1972 	/* zero out preallocated memory as if it was just allocated with
1973 	 * libbpf_add_mem()
1974 	 */
1975 	memset(btf->types_data + btf->hdr->type_len, 0, data_sz);
1976 	memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len);
1977 
1978 	/* and now restore original strings section size; types data size
1979 	 * wasn't modified, so doesn't need restoring, see big comment above
1980 	 */
1981 	btf->hdr->str_len = old_strs_len;
1982 
1983 	hashmap__free(p.str_off_map);
1984 
1985 	return libbpf_err(err);
1986 }
1987 
1988 /*
1989  * Append new BTF_KIND_INT type with:
1990  *   - *name* - non-empty, non-NULL type name;
1991  *   - *sz* - power-of-2 (1, 2, 4, ..) size of the type, in bytes;
1992  *   - encoding is a combination of BTF_INT_SIGNED, BTF_INT_CHAR, BTF_INT_BOOL.
1993  * Returns:
1994  *   - >0, type ID of newly added BTF type;
1995  *   - <0, on error.
1996  */
btf__add_int(struct btf * btf,const char * name,size_t byte_sz,int encoding)1997 int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding)
1998 {
1999 	struct btf_type *t;
2000 	int sz, name_off;
2001 
2002 	/* non-empty name */
2003 	if (!name || !name[0])
2004 		return libbpf_err(-EINVAL);
2005 	/* byte_sz must be power of 2 */
2006 	if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16)
2007 		return libbpf_err(-EINVAL);
2008 	if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL))
2009 		return libbpf_err(-EINVAL);
2010 
2011 	/* deconstruct BTF, if necessary, and invalidate raw_data */
2012 	if (btf_ensure_modifiable(btf))
2013 		return libbpf_err(-ENOMEM);
2014 
2015 	sz = sizeof(struct btf_type) + sizeof(int);
2016 	t = btf_add_type_mem(btf, sz);
2017 	if (!t)
2018 		return libbpf_err(-ENOMEM);
2019 
2020 	/* if something goes wrong later, we might end up with an extra string,
2021 	 * but that shouldn't be a problem, because BTF can't be constructed
2022 	 * completely anyway and will most probably be just discarded
2023 	 */
2024 	name_off = btf__add_str(btf, name);
2025 	if (name_off < 0)
2026 		return name_off;
2027 
2028 	t->name_off = name_off;
2029 	t->info = btf_type_info(BTF_KIND_INT, 0, 0);
2030 	t->size = byte_sz;
2031 	/* set INT info, we don't allow setting legacy bit offset/size */
2032 	*(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8);
2033 
2034 	return btf_commit_type(btf, sz);
2035 }
2036 
2037 /*
2038  * Append new BTF_KIND_FLOAT type with:
2039  *   - *name* - non-empty, non-NULL type name;
2040  *   - *sz* - size of the type, in bytes;
2041  * Returns:
2042  *   - >0, type ID of newly added BTF type;
2043  *   - <0, on error.
2044  */
btf__add_float(struct btf * btf,const char * name,size_t byte_sz)2045 int btf__add_float(struct btf *btf, const char *name, size_t byte_sz)
2046 {
2047 	struct btf_type *t;
2048 	int sz, name_off;
2049 
2050 	/* non-empty name */
2051 	if (!name || !name[0])
2052 		return libbpf_err(-EINVAL);
2053 
2054 	/* byte_sz must be one of the explicitly allowed values */
2055 	if (byte_sz != 2 && byte_sz != 4 && byte_sz != 8 && byte_sz != 12 &&
2056 	    byte_sz != 16)
2057 		return libbpf_err(-EINVAL);
2058 
2059 	if (btf_ensure_modifiable(btf))
2060 		return libbpf_err(-ENOMEM);
2061 
2062 	sz = sizeof(struct btf_type);
2063 	t = btf_add_type_mem(btf, sz);
2064 	if (!t)
2065 		return libbpf_err(-ENOMEM);
2066 
2067 	name_off = btf__add_str(btf, name);
2068 	if (name_off < 0)
2069 		return name_off;
2070 
2071 	t->name_off = name_off;
2072 	t->info = btf_type_info(BTF_KIND_FLOAT, 0, 0);
2073 	t->size = byte_sz;
2074 
2075 	return btf_commit_type(btf, sz);
2076 }
2077 
2078 /* it's completely legal to append BTF types with type IDs pointing forward to
2079  * types that haven't been appended yet, so we only make sure that id looks
2080  * sane, we can't guarantee that ID will always be valid
2081  */
validate_type_id(int id)2082 static int validate_type_id(int id)
2083 {
2084 	if (id < 0 || id > BTF_MAX_NR_TYPES)
2085 		return -EINVAL;
2086 	return 0;
2087 }
2088 
2089 /* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT */
btf_add_ref_kind(struct btf * btf,int kind,const char * name,int ref_type_id)2090 static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id)
2091 {
2092 	struct btf_type *t;
2093 	int sz, name_off = 0;
2094 
2095 	if (validate_type_id(ref_type_id))
2096 		return libbpf_err(-EINVAL);
2097 
2098 	if (btf_ensure_modifiable(btf))
2099 		return libbpf_err(-ENOMEM);
2100 
2101 	sz = sizeof(struct btf_type);
2102 	t = btf_add_type_mem(btf, sz);
2103 	if (!t)
2104 		return libbpf_err(-ENOMEM);
2105 
2106 	if (name && name[0]) {
2107 		name_off = btf__add_str(btf, name);
2108 		if (name_off < 0)
2109 			return name_off;
2110 	}
2111 
2112 	t->name_off = name_off;
2113 	t->info = btf_type_info(kind, 0, 0);
2114 	t->type = ref_type_id;
2115 
2116 	return btf_commit_type(btf, sz);
2117 }
2118 
2119 /*
2120  * Append new BTF_KIND_PTR type with:
2121  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2122  * Returns:
2123  *   - >0, type ID of newly added BTF type;
2124  *   - <0, on error.
2125  */
btf__add_ptr(struct btf * btf,int ref_type_id)2126 int btf__add_ptr(struct btf *btf, int ref_type_id)
2127 {
2128 	return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id);
2129 }
2130 
2131 /*
2132  * Append new BTF_KIND_ARRAY type with:
2133  *   - *index_type_id* - type ID of the type describing array index;
2134  *   - *elem_type_id* - type ID of the type describing array element;
2135  *   - *nr_elems* - the size of the array;
2136  * Returns:
2137  *   - >0, type ID of newly added BTF type;
2138  *   - <0, on error.
2139  */
btf__add_array(struct btf * btf,int index_type_id,int elem_type_id,__u32 nr_elems)2140 int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems)
2141 {
2142 	struct btf_type *t;
2143 	struct btf_array *a;
2144 	int sz;
2145 
2146 	if (validate_type_id(index_type_id) || validate_type_id(elem_type_id))
2147 		return libbpf_err(-EINVAL);
2148 
2149 	if (btf_ensure_modifiable(btf))
2150 		return libbpf_err(-ENOMEM);
2151 
2152 	sz = sizeof(struct btf_type) + sizeof(struct btf_array);
2153 	t = btf_add_type_mem(btf, sz);
2154 	if (!t)
2155 		return libbpf_err(-ENOMEM);
2156 
2157 	t->name_off = 0;
2158 	t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0);
2159 	t->size = 0;
2160 
2161 	a = btf_array(t);
2162 	a->type = elem_type_id;
2163 	a->index_type = index_type_id;
2164 	a->nelems = nr_elems;
2165 
2166 	return btf_commit_type(btf, sz);
2167 }
2168 
2169 /* generic STRUCT/UNION append function */
btf_add_composite(struct btf * btf,int kind,const char * name,__u32 bytes_sz)2170 static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz)
2171 {
2172 	struct btf_type *t;
2173 	int sz, name_off = 0;
2174 
2175 	if (btf_ensure_modifiable(btf))
2176 		return libbpf_err(-ENOMEM);
2177 
2178 	sz = sizeof(struct btf_type);
2179 	t = btf_add_type_mem(btf, sz);
2180 	if (!t)
2181 		return libbpf_err(-ENOMEM);
2182 
2183 	if (name && name[0]) {
2184 		name_off = btf__add_str(btf, name);
2185 		if (name_off < 0)
2186 			return name_off;
2187 	}
2188 
2189 	/* start out with vlen=0 and no kflag; this will be adjusted when
2190 	 * adding each member
2191 	 */
2192 	t->name_off = name_off;
2193 	t->info = btf_type_info(kind, 0, 0);
2194 	t->size = bytes_sz;
2195 
2196 	return btf_commit_type(btf, sz);
2197 }
2198 
2199 /*
2200  * Append new BTF_KIND_STRUCT type with:
2201  *   - *name* - name of the struct, can be NULL or empty for anonymous structs;
2202  *   - *byte_sz* - size of the struct, in bytes;
2203  *
2204  * Struct initially has no fields in it. Fields can be added by
2205  * btf__add_field() right after btf__add_struct() succeeds.
2206  *
2207  * Returns:
2208  *   - >0, type ID of newly added BTF type;
2209  *   - <0, on error.
2210  */
btf__add_struct(struct btf * btf,const char * name,__u32 byte_sz)2211 int btf__add_struct(struct btf *btf, const char *name, __u32 byte_sz)
2212 {
2213 	return btf_add_composite(btf, BTF_KIND_STRUCT, name, byte_sz);
2214 }
2215 
2216 /*
2217  * Append new BTF_KIND_UNION type with:
2218  *   - *name* - name of the union, can be NULL or empty for anonymous union;
2219  *   - *byte_sz* - size of the union, in bytes;
2220  *
2221  * Union initially has no fields in it. Fields can be added by
2222  * btf__add_field() right after btf__add_union() succeeds. All fields
2223  * should have *bit_offset* of 0.
2224  *
2225  * Returns:
2226  *   - >0, type ID of newly added BTF type;
2227  *   - <0, on error.
2228  */
btf__add_union(struct btf * btf,const char * name,__u32 byte_sz)2229 int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz)
2230 {
2231 	return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz);
2232 }
2233 
btf_last_type(struct btf * btf)2234 static struct btf_type *btf_last_type(struct btf *btf)
2235 {
2236 	return btf_type_by_id(btf, btf__type_cnt(btf) - 1);
2237 }
2238 
2239 /*
2240  * Append new field for the current STRUCT/UNION type with:
2241  *   - *name* - name of the field, can be NULL or empty for anonymous field;
2242  *   - *type_id* - type ID for the type describing field type;
2243  *   - *bit_offset* - bit offset of the start of the field within struct/union;
2244  *   - *bit_size* - bit size of a bitfield, 0 for non-bitfield fields;
2245  * Returns:
2246  *   -  0, on success;
2247  *   - <0, on error.
2248  */
btf__add_field(struct btf * btf,const char * name,int type_id,__u32 bit_offset,__u32 bit_size)2249 int btf__add_field(struct btf *btf, const char *name, int type_id,
2250 		   __u32 bit_offset, __u32 bit_size)
2251 {
2252 	struct btf_type *t;
2253 	struct btf_member *m;
2254 	bool is_bitfield;
2255 	int sz, name_off = 0;
2256 
2257 	/* last type should be union/struct */
2258 	if (btf->nr_types == 0)
2259 		return libbpf_err(-EINVAL);
2260 	t = btf_last_type(btf);
2261 	if (!btf_is_composite(t))
2262 		return libbpf_err(-EINVAL);
2263 
2264 	if (validate_type_id(type_id))
2265 		return libbpf_err(-EINVAL);
2266 	/* best-effort bit field offset/size enforcement */
2267 	is_bitfield = bit_size || (bit_offset % 8 != 0);
2268 	if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff))
2269 		return libbpf_err(-EINVAL);
2270 
2271 	/* only offset 0 is allowed for unions */
2272 	if (btf_is_union(t) && bit_offset)
2273 		return libbpf_err(-EINVAL);
2274 
2275 	/* decompose and invalidate raw data */
2276 	if (btf_ensure_modifiable(btf))
2277 		return libbpf_err(-ENOMEM);
2278 
2279 	sz = sizeof(struct btf_member);
2280 	m = btf_add_type_mem(btf, sz);
2281 	if (!m)
2282 		return libbpf_err(-ENOMEM);
2283 
2284 	if (name && name[0]) {
2285 		name_off = btf__add_str(btf, name);
2286 		if (name_off < 0)
2287 			return name_off;
2288 	}
2289 
2290 	m->name_off = name_off;
2291 	m->type = type_id;
2292 	m->offset = bit_offset | (bit_size << 24);
2293 
2294 	/* btf_add_type_mem can invalidate t pointer */
2295 	t = btf_last_type(btf);
2296 	/* update parent type's vlen and kflag */
2297 	t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t));
2298 
2299 	btf->hdr->type_len += sz;
2300 	btf->hdr->str_off += sz;
2301 	return 0;
2302 }
2303 
btf_add_enum_common(struct btf * btf,const char * name,__u32 byte_sz,bool is_signed,__u8 kind)2304 static int btf_add_enum_common(struct btf *btf, const char *name, __u32 byte_sz,
2305 			       bool is_signed, __u8 kind)
2306 {
2307 	struct btf_type *t;
2308 	int sz, name_off = 0;
2309 
2310 	/* byte_sz must be power of 2 */
2311 	if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8)
2312 		return libbpf_err(-EINVAL);
2313 
2314 	if (btf_ensure_modifiable(btf))
2315 		return libbpf_err(-ENOMEM);
2316 
2317 	sz = sizeof(struct btf_type);
2318 	t = btf_add_type_mem(btf, sz);
2319 	if (!t)
2320 		return libbpf_err(-ENOMEM);
2321 
2322 	if (name && name[0]) {
2323 		name_off = btf__add_str(btf, name);
2324 		if (name_off < 0)
2325 			return name_off;
2326 	}
2327 
2328 	/* start out with vlen=0; it will be adjusted when adding enum values */
2329 	t->name_off = name_off;
2330 	t->info = btf_type_info(kind, 0, is_signed);
2331 	t->size = byte_sz;
2332 
2333 	return btf_commit_type(btf, sz);
2334 }
2335 
2336 /*
2337  * Append new BTF_KIND_ENUM type with:
2338  *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
2339  *   - *byte_sz* - size of the enum, in bytes.
2340  *
2341  * Enum initially has no enum values in it (and corresponds to enum forward
2342  * declaration). Enumerator values can be added by btf__add_enum_value()
2343  * immediately after btf__add_enum() succeeds.
2344  *
2345  * Returns:
2346  *   - >0, type ID of newly added BTF type;
2347  *   - <0, on error.
2348  */
btf__add_enum(struct btf * btf,const char * name,__u32 byte_sz)2349 int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
2350 {
2351 	/*
2352 	 * set the signedness to be unsigned, it will change to signed
2353 	 * if any later enumerator is negative.
2354 	 */
2355 	return btf_add_enum_common(btf, name, byte_sz, false, BTF_KIND_ENUM);
2356 }
2357 
2358 /*
2359  * Append new enum value for the current ENUM type with:
2360  *   - *name* - name of the enumerator value, can't be NULL or empty;
2361  *   - *value* - integer value corresponding to enum value *name*;
2362  * Returns:
2363  *   -  0, on success;
2364  *   - <0, on error.
2365  */
btf__add_enum_value(struct btf * btf,const char * name,__s64 value)2366 int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
2367 {
2368 	struct btf_type *t;
2369 	struct btf_enum *v;
2370 	int sz, name_off;
2371 
2372 	/* last type should be BTF_KIND_ENUM */
2373 	if (btf->nr_types == 0)
2374 		return libbpf_err(-EINVAL);
2375 	t = btf_last_type(btf);
2376 	if (!btf_is_enum(t))
2377 		return libbpf_err(-EINVAL);
2378 
2379 	/* non-empty name */
2380 	if (!name || !name[0])
2381 		return libbpf_err(-EINVAL);
2382 	if (value < INT_MIN || value > UINT_MAX)
2383 		return libbpf_err(-E2BIG);
2384 
2385 	/* decompose and invalidate raw data */
2386 	if (btf_ensure_modifiable(btf))
2387 		return libbpf_err(-ENOMEM);
2388 
2389 	sz = sizeof(struct btf_enum);
2390 	v = btf_add_type_mem(btf, sz);
2391 	if (!v)
2392 		return libbpf_err(-ENOMEM);
2393 
2394 	name_off = btf__add_str(btf, name);
2395 	if (name_off < 0)
2396 		return name_off;
2397 
2398 	v->name_off = name_off;
2399 	v->val = value;
2400 
2401 	/* update parent type's vlen */
2402 	t = btf_last_type(btf);
2403 	btf_type_inc_vlen(t);
2404 
2405 	/* if negative value, set signedness to signed */
2406 	if (value < 0)
2407 		t->info = btf_type_info(btf_kind(t), btf_vlen(t), true);
2408 
2409 	btf->hdr->type_len += sz;
2410 	btf->hdr->str_off += sz;
2411 	return 0;
2412 }
2413 
2414 /*
2415  * Append new BTF_KIND_ENUM64 type with:
2416  *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
2417  *   - *byte_sz* - size of the enum, in bytes.
2418  *   - *is_signed* - whether the enum values are signed or not;
2419  *
2420  * Enum initially has no enum values in it (and corresponds to enum forward
2421  * declaration). Enumerator values can be added by btf__add_enum64_value()
2422  * immediately after btf__add_enum64() succeeds.
2423  *
2424  * Returns:
2425  *   - >0, type ID of newly added BTF type;
2426  *   - <0, on error.
2427  */
btf__add_enum64(struct btf * btf,const char * name,__u32 byte_sz,bool is_signed)2428 int btf__add_enum64(struct btf *btf, const char *name, __u32 byte_sz,
2429 		    bool is_signed)
2430 {
2431 	return btf_add_enum_common(btf, name, byte_sz, is_signed,
2432 				   BTF_KIND_ENUM64);
2433 }
2434 
2435 /*
2436  * Append new enum value for the current ENUM64 type with:
2437  *   - *name* - name of the enumerator value, can't be NULL or empty;
2438  *   - *value* - integer value corresponding to enum value *name*;
2439  * Returns:
2440  *   -  0, on success;
2441  *   - <0, on error.
2442  */
btf__add_enum64_value(struct btf * btf,const char * name,__u64 value)2443 int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value)
2444 {
2445 	struct btf_enum64 *v;
2446 	struct btf_type *t;
2447 	int sz, name_off;
2448 
2449 	/* last type should be BTF_KIND_ENUM64 */
2450 	if (btf->nr_types == 0)
2451 		return libbpf_err(-EINVAL);
2452 	t = btf_last_type(btf);
2453 	if (!btf_is_enum64(t))
2454 		return libbpf_err(-EINVAL);
2455 
2456 	/* non-empty name */
2457 	if (!name || !name[0])
2458 		return libbpf_err(-EINVAL);
2459 
2460 	/* decompose and invalidate raw data */
2461 	if (btf_ensure_modifiable(btf))
2462 		return libbpf_err(-ENOMEM);
2463 
2464 	sz = sizeof(struct btf_enum64);
2465 	v = btf_add_type_mem(btf, sz);
2466 	if (!v)
2467 		return libbpf_err(-ENOMEM);
2468 
2469 	name_off = btf__add_str(btf, name);
2470 	if (name_off < 0)
2471 		return name_off;
2472 
2473 	v->name_off = name_off;
2474 	v->val_lo32 = (__u32)value;
2475 	v->val_hi32 = value >> 32;
2476 
2477 	/* update parent type's vlen */
2478 	t = btf_last_type(btf);
2479 	btf_type_inc_vlen(t);
2480 
2481 	btf->hdr->type_len += sz;
2482 	btf->hdr->str_off += sz;
2483 	return 0;
2484 }
2485 
2486 /*
2487  * Append new BTF_KIND_FWD type with:
2488  *   - *name*, non-empty/non-NULL name;
2489  *   - *fwd_kind*, kind of forward declaration, one of BTF_FWD_STRUCT,
2490  *     BTF_FWD_UNION, or BTF_FWD_ENUM;
2491  * Returns:
2492  *   - >0, type ID of newly added BTF type;
2493  *   - <0, on error.
2494  */
btf__add_fwd(struct btf * btf,const char * name,enum btf_fwd_kind fwd_kind)2495 int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind)
2496 {
2497 	if (!name || !name[0])
2498 		return libbpf_err(-EINVAL);
2499 
2500 	switch (fwd_kind) {
2501 	case BTF_FWD_STRUCT:
2502 	case BTF_FWD_UNION: {
2503 		struct btf_type *t;
2504 		int id;
2505 
2506 		id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0);
2507 		if (id <= 0)
2508 			return id;
2509 		t = btf_type_by_id(btf, id);
2510 		t->info = btf_type_info(BTF_KIND_FWD, 0, fwd_kind == BTF_FWD_UNION);
2511 		return id;
2512 	}
2513 	case BTF_FWD_ENUM:
2514 		/* enum forward in BTF currently is just an enum with no enum
2515 		 * values; we also assume a standard 4-byte size for it
2516 		 */
2517 		return btf__add_enum(btf, name, sizeof(int));
2518 	default:
2519 		return libbpf_err(-EINVAL);
2520 	}
2521 }
2522 
2523 /*
2524  * Append new BTF_KING_TYPEDEF type with:
2525  *   - *name*, non-empty/non-NULL name;
2526  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2527  * Returns:
2528  *   - >0, type ID of newly added BTF type;
2529  *   - <0, on error.
2530  */
btf__add_typedef(struct btf * btf,const char * name,int ref_type_id)2531 int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id)
2532 {
2533 	if (!name || !name[0])
2534 		return libbpf_err(-EINVAL);
2535 
2536 	return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id);
2537 }
2538 
2539 /*
2540  * Append new BTF_KIND_VOLATILE type with:
2541  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2542  * Returns:
2543  *   - >0, type ID of newly added BTF type;
2544  *   - <0, on error.
2545  */
btf__add_volatile(struct btf * btf,int ref_type_id)2546 int btf__add_volatile(struct btf *btf, int ref_type_id)
2547 {
2548 	return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id);
2549 }
2550 
2551 /*
2552  * Append new BTF_KIND_CONST type with:
2553  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2554  * Returns:
2555  *   - >0, type ID of newly added BTF type;
2556  *   - <0, on error.
2557  */
btf__add_const(struct btf * btf,int ref_type_id)2558 int btf__add_const(struct btf *btf, int ref_type_id)
2559 {
2560 	return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id);
2561 }
2562 
2563 /*
2564  * Append new BTF_KIND_RESTRICT type with:
2565  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2566  * Returns:
2567  *   - >0, type ID of newly added BTF type;
2568  *   - <0, on error.
2569  */
btf__add_restrict(struct btf * btf,int ref_type_id)2570 int btf__add_restrict(struct btf *btf, int ref_type_id)
2571 {
2572 	return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id);
2573 }
2574 
2575 /*
2576  * Append new BTF_KIND_TYPE_TAG type with:
2577  *   - *value*, non-empty/non-NULL tag value;
2578  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2579  * Returns:
2580  *   - >0, type ID of newly added BTF type;
2581  *   - <0, on error.
2582  */
btf__add_type_tag(struct btf * btf,const char * value,int ref_type_id)2583 int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
2584 {
2585 	if (!value || !value[0])
2586 		return libbpf_err(-EINVAL);
2587 
2588 	return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id);
2589 }
2590 
2591 /*
2592  * Append new BTF_KIND_FUNC type with:
2593  *   - *name*, non-empty/non-NULL name;
2594  *   - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet;
2595  * Returns:
2596  *   - >0, type ID of newly added BTF type;
2597  *   - <0, on error.
2598  */
btf__add_func(struct btf * btf,const char * name,enum btf_func_linkage linkage,int proto_type_id)2599 int btf__add_func(struct btf *btf, const char *name,
2600 		  enum btf_func_linkage linkage, int proto_type_id)
2601 {
2602 	int id;
2603 
2604 	if (!name || !name[0])
2605 		return libbpf_err(-EINVAL);
2606 	if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL &&
2607 	    linkage != BTF_FUNC_EXTERN)
2608 		return libbpf_err(-EINVAL);
2609 
2610 	id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id);
2611 	if (id > 0) {
2612 		struct btf_type *t = btf_type_by_id(btf, id);
2613 
2614 		t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0);
2615 	}
2616 	return libbpf_err(id);
2617 }
2618 
2619 /*
2620  * Append new BTF_KIND_FUNC_PROTO with:
2621  *   - *ret_type_id* - type ID for return result of a function.
2622  *
2623  * Function prototype initially has no arguments, but they can be added by
2624  * btf__add_func_param() one by one, immediately after
2625  * btf__add_func_proto() succeeded.
2626  *
2627  * Returns:
2628  *   - >0, type ID of newly added BTF type;
2629  *   - <0, on error.
2630  */
btf__add_func_proto(struct btf * btf,int ret_type_id)2631 int btf__add_func_proto(struct btf *btf, int ret_type_id)
2632 {
2633 	struct btf_type *t;
2634 	int sz;
2635 
2636 	if (validate_type_id(ret_type_id))
2637 		return libbpf_err(-EINVAL);
2638 
2639 	if (btf_ensure_modifiable(btf))
2640 		return libbpf_err(-ENOMEM);
2641 
2642 	sz = sizeof(struct btf_type);
2643 	t = btf_add_type_mem(btf, sz);
2644 	if (!t)
2645 		return libbpf_err(-ENOMEM);
2646 
2647 	/* start out with vlen=0; this will be adjusted when adding enum
2648 	 * values, if necessary
2649 	 */
2650 	t->name_off = 0;
2651 	t->info = btf_type_info(BTF_KIND_FUNC_PROTO, 0, 0);
2652 	t->type = ret_type_id;
2653 
2654 	return btf_commit_type(btf, sz);
2655 }
2656 
2657 /*
2658  * Append new function parameter for current FUNC_PROTO type with:
2659  *   - *name* - parameter name, can be NULL or empty;
2660  *   - *type_id* - type ID describing the type of the parameter.
2661  * Returns:
2662  *   -  0, on success;
2663  *   - <0, on error.
2664  */
btf__add_func_param(struct btf * btf,const char * name,int type_id)2665 int btf__add_func_param(struct btf *btf, const char *name, int type_id)
2666 {
2667 	struct btf_type *t;
2668 	struct btf_param *p;
2669 	int sz, name_off = 0;
2670 
2671 	if (validate_type_id(type_id))
2672 		return libbpf_err(-EINVAL);
2673 
2674 	/* last type should be BTF_KIND_FUNC_PROTO */
2675 	if (btf->nr_types == 0)
2676 		return libbpf_err(-EINVAL);
2677 	t = btf_last_type(btf);
2678 	if (!btf_is_func_proto(t))
2679 		return libbpf_err(-EINVAL);
2680 
2681 	/* decompose and invalidate raw data */
2682 	if (btf_ensure_modifiable(btf))
2683 		return libbpf_err(-ENOMEM);
2684 
2685 	sz = sizeof(struct btf_param);
2686 	p = btf_add_type_mem(btf, sz);
2687 	if (!p)
2688 		return libbpf_err(-ENOMEM);
2689 
2690 	if (name && name[0]) {
2691 		name_off = btf__add_str(btf, name);
2692 		if (name_off < 0)
2693 			return name_off;
2694 	}
2695 
2696 	p->name_off = name_off;
2697 	p->type = type_id;
2698 
2699 	/* update parent type's vlen */
2700 	t = btf_last_type(btf);
2701 	btf_type_inc_vlen(t);
2702 
2703 	btf->hdr->type_len += sz;
2704 	btf->hdr->str_off += sz;
2705 	return 0;
2706 }
2707 
2708 /*
2709  * Append new BTF_KIND_VAR type with:
2710  *   - *name* - non-empty/non-NULL name;
2711  *   - *linkage* - variable linkage, one of BTF_VAR_STATIC,
2712  *     BTF_VAR_GLOBAL_ALLOCATED, or BTF_VAR_GLOBAL_EXTERN;
2713  *   - *type_id* - type ID of the type describing the type of the variable.
2714  * Returns:
2715  *   - >0, type ID of newly added BTF type;
2716  *   - <0, on error.
2717  */
btf__add_var(struct btf * btf,const char * name,int linkage,int type_id)2718 int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id)
2719 {
2720 	struct btf_type *t;
2721 	struct btf_var *v;
2722 	int sz, name_off;
2723 
2724 	/* non-empty name */
2725 	if (!name || !name[0])
2726 		return libbpf_err(-EINVAL);
2727 	if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED &&
2728 	    linkage != BTF_VAR_GLOBAL_EXTERN)
2729 		return libbpf_err(-EINVAL);
2730 	if (validate_type_id(type_id))
2731 		return libbpf_err(-EINVAL);
2732 
2733 	/* deconstruct BTF, if necessary, and invalidate raw_data */
2734 	if (btf_ensure_modifiable(btf))
2735 		return libbpf_err(-ENOMEM);
2736 
2737 	sz = sizeof(struct btf_type) + sizeof(struct btf_var);
2738 	t = btf_add_type_mem(btf, sz);
2739 	if (!t)
2740 		return libbpf_err(-ENOMEM);
2741 
2742 	name_off = btf__add_str(btf, name);
2743 	if (name_off < 0)
2744 		return name_off;
2745 
2746 	t->name_off = name_off;
2747 	t->info = btf_type_info(BTF_KIND_VAR, 0, 0);
2748 	t->type = type_id;
2749 
2750 	v = btf_var(t);
2751 	v->linkage = linkage;
2752 
2753 	return btf_commit_type(btf, sz);
2754 }
2755 
2756 /*
2757  * Append new BTF_KIND_DATASEC type with:
2758  *   - *name* - non-empty/non-NULL name;
2759  *   - *byte_sz* - data section size, in bytes.
2760  *
2761  * Data section is initially empty. Variables info can be added with
2762  * btf__add_datasec_var_info() calls, after btf__add_datasec() succeeds.
2763  *
2764  * Returns:
2765  *   - >0, type ID of newly added BTF type;
2766  *   - <0, on error.
2767  */
btf__add_datasec(struct btf * btf,const char * name,__u32 byte_sz)2768 int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz)
2769 {
2770 	struct btf_type *t;
2771 	int sz, name_off;
2772 
2773 	/* non-empty name */
2774 	if (!name || !name[0])
2775 		return libbpf_err(-EINVAL);
2776 
2777 	if (btf_ensure_modifiable(btf))
2778 		return libbpf_err(-ENOMEM);
2779 
2780 	sz = sizeof(struct btf_type);
2781 	t = btf_add_type_mem(btf, sz);
2782 	if (!t)
2783 		return libbpf_err(-ENOMEM);
2784 
2785 	name_off = btf__add_str(btf, name);
2786 	if (name_off < 0)
2787 		return name_off;
2788 
2789 	/* start with vlen=0, which will be update as var_secinfos are added */
2790 	t->name_off = name_off;
2791 	t->info = btf_type_info(BTF_KIND_DATASEC, 0, 0);
2792 	t->size = byte_sz;
2793 
2794 	return btf_commit_type(btf, sz);
2795 }
2796 
2797 /*
2798  * Append new data section variable information entry for current DATASEC type:
2799  *   - *var_type_id* - type ID, describing type of the variable;
2800  *   - *offset* - variable offset within data section, in bytes;
2801  *   - *byte_sz* - variable size, in bytes.
2802  *
2803  * Returns:
2804  *   -  0, on success;
2805  *   - <0, on error.
2806  */
btf__add_datasec_var_info(struct btf * btf,int var_type_id,__u32 offset,__u32 byte_sz)2807 int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz)
2808 {
2809 	struct btf_type *t;
2810 	struct btf_var_secinfo *v;
2811 	int sz;
2812 
2813 	/* last type should be BTF_KIND_DATASEC */
2814 	if (btf->nr_types == 0)
2815 		return libbpf_err(-EINVAL);
2816 	t = btf_last_type(btf);
2817 	if (!btf_is_datasec(t))
2818 		return libbpf_err(-EINVAL);
2819 
2820 	if (validate_type_id(var_type_id))
2821 		return libbpf_err(-EINVAL);
2822 
2823 	/* decompose and invalidate raw data */
2824 	if (btf_ensure_modifiable(btf))
2825 		return libbpf_err(-ENOMEM);
2826 
2827 	sz = sizeof(struct btf_var_secinfo);
2828 	v = btf_add_type_mem(btf, sz);
2829 	if (!v)
2830 		return libbpf_err(-ENOMEM);
2831 
2832 	v->type = var_type_id;
2833 	v->offset = offset;
2834 	v->size = byte_sz;
2835 
2836 	/* update parent type's vlen */
2837 	t = btf_last_type(btf);
2838 	btf_type_inc_vlen(t);
2839 
2840 	btf->hdr->type_len += sz;
2841 	btf->hdr->str_off += sz;
2842 	return 0;
2843 }
2844 
2845 /*
2846  * Append new BTF_KIND_DECL_TAG type with:
2847  *   - *value* - non-empty/non-NULL string;
2848  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2849  *   - *component_idx* - -1 for tagging reference type, otherwise struct/union
2850  *     member or function argument index;
2851  * Returns:
2852  *   - >0, type ID of newly added BTF type;
2853  *   - <0, on error.
2854  */
btf__add_decl_tag(struct btf * btf,const char * value,int ref_type_id,int component_idx)2855 int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
2856 		 int component_idx)
2857 {
2858 	struct btf_type *t;
2859 	int sz, value_off;
2860 
2861 	if (!value || !value[0] || component_idx < -1)
2862 		return libbpf_err(-EINVAL);
2863 
2864 	if (validate_type_id(ref_type_id))
2865 		return libbpf_err(-EINVAL);
2866 
2867 	if (btf_ensure_modifiable(btf))
2868 		return libbpf_err(-ENOMEM);
2869 
2870 	sz = sizeof(struct btf_type) + sizeof(struct btf_decl_tag);
2871 	t = btf_add_type_mem(btf, sz);
2872 	if (!t)
2873 		return libbpf_err(-ENOMEM);
2874 
2875 	value_off = btf__add_str(btf, value);
2876 	if (value_off < 0)
2877 		return value_off;
2878 
2879 	t->name_off = value_off;
2880 	t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, false);
2881 	t->type = ref_type_id;
2882 	btf_decl_tag(t)->component_idx = component_idx;
2883 
2884 	return btf_commit_type(btf, sz);
2885 }
2886 
2887 struct btf_ext_sec_setup_param {
2888 	__u32 off;
2889 	__u32 len;
2890 	__u32 min_rec_size;
2891 	struct btf_ext_info *ext_info;
2892 	const char *desc;
2893 };
2894 
btf_ext_setup_info(struct btf_ext * btf_ext,struct btf_ext_sec_setup_param * ext_sec)2895 static int btf_ext_setup_info(struct btf_ext *btf_ext,
2896 			      struct btf_ext_sec_setup_param *ext_sec)
2897 {
2898 	const struct btf_ext_info_sec *sinfo;
2899 	struct btf_ext_info *ext_info;
2900 	__u32 info_left, record_size;
2901 	size_t sec_cnt = 0;
2902 	/* The start of the info sec (including the __u32 record_size). */
2903 	void *info;
2904 
2905 	if (ext_sec->len == 0)
2906 		return 0;
2907 
2908 	if (ext_sec->off & 0x03) {
2909 		pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
2910 		     ext_sec->desc);
2911 		return -EINVAL;
2912 	}
2913 
2914 	info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
2915 	info_left = ext_sec->len;
2916 
2917 	if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
2918 		pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
2919 			 ext_sec->desc, ext_sec->off, ext_sec->len);
2920 		return -EINVAL;
2921 	}
2922 
2923 	/* At least a record size */
2924 	if (info_left < sizeof(__u32)) {
2925 		pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
2926 		return -EINVAL;
2927 	}
2928 
2929 	/* The record size needs to meet the minimum standard */
2930 	record_size = *(__u32 *)info;
2931 	if (record_size < ext_sec->min_rec_size ||
2932 	    record_size & 0x03) {
2933 		pr_debug("%s section in .BTF.ext has invalid record size %u\n",
2934 			 ext_sec->desc, record_size);
2935 		return -EINVAL;
2936 	}
2937 
2938 	sinfo = info + sizeof(__u32);
2939 	info_left -= sizeof(__u32);
2940 
2941 	/* If no records, return failure now so .BTF.ext won't be used. */
2942 	if (!info_left) {
2943 		pr_debug("%s section in .BTF.ext has no records", ext_sec->desc);
2944 		return -EINVAL;
2945 	}
2946 
2947 	while (info_left) {
2948 		unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
2949 		__u64 total_record_size;
2950 		__u32 num_records;
2951 
2952 		if (info_left < sec_hdrlen) {
2953 			pr_debug("%s section header is not found in .BTF.ext\n",
2954 			     ext_sec->desc);
2955 			return -EINVAL;
2956 		}
2957 
2958 		num_records = sinfo->num_info;
2959 		if (num_records == 0) {
2960 			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
2961 			     ext_sec->desc);
2962 			return -EINVAL;
2963 		}
2964 
2965 		total_record_size = sec_hdrlen + (__u64)num_records * record_size;
2966 		if (info_left < total_record_size) {
2967 			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
2968 			     ext_sec->desc);
2969 			return -EINVAL;
2970 		}
2971 
2972 		info_left -= total_record_size;
2973 		sinfo = (void *)sinfo + total_record_size;
2974 		sec_cnt++;
2975 	}
2976 
2977 	ext_info = ext_sec->ext_info;
2978 	ext_info->len = ext_sec->len - sizeof(__u32);
2979 	ext_info->rec_size = record_size;
2980 	ext_info->info = info + sizeof(__u32);
2981 	ext_info->sec_cnt = sec_cnt;
2982 
2983 	return 0;
2984 }
2985 
btf_ext_setup_func_info(struct btf_ext * btf_ext)2986 static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
2987 {
2988 	struct btf_ext_sec_setup_param param = {
2989 		.off = btf_ext->hdr->func_info_off,
2990 		.len = btf_ext->hdr->func_info_len,
2991 		.min_rec_size = sizeof(struct bpf_func_info_min),
2992 		.ext_info = &btf_ext->func_info,
2993 		.desc = "func_info"
2994 	};
2995 
2996 	return btf_ext_setup_info(btf_ext, &param);
2997 }
2998 
btf_ext_setup_line_info(struct btf_ext * btf_ext)2999 static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
3000 {
3001 	struct btf_ext_sec_setup_param param = {
3002 		.off = btf_ext->hdr->line_info_off,
3003 		.len = btf_ext->hdr->line_info_len,
3004 		.min_rec_size = sizeof(struct bpf_line_info_min),
3005 		.ext_info = &btf_ext->line_info,
3006 		.desc = "line_info",
3007 	};
3008 
3009 	return btf_ext_setup_info(btf_ext, &param);
3010 }
3011 
btf_ext_setup_core_relos(struct btf_ext * btf_ext)3012 static int btf_ext_setup_core_relos(struct btf_ext *btf_ext)
3013 {
3014 	struct btf_ext_sec_setup_param param = {
3015 		.off = btf_ext->hdr->core_relo_off,
3016 		.len = btf_ext->hdr->core_relo_len,
3017 		.min_rec_size = sizeof(struct bpf_core_relo),
3018 		.ext_info = &btf_ext->core_relo_info,
3019 		.desc = "core_relo",
3020 	};
3021 
3022 	return btf_ext_setup_info(btf_ext, &param);
3023 }
3024 
btf_ext_parse_hdr(__u8 * data,__u32 data_size)3025 static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
3026 {
3027 	const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
3028 
3029 	if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
3030 	    data_size < hdr->hdr_len) {
3031 		pr_debug("BTF.ext header not found");
3032 		return -EINVAL;
3033 	}
3034 
3035 	if (hdr->magic == bswap_16(BTF_MAGIC)) {
3036 		pr_warn("BTF.ext in non-native endianness is not supported\n");
3037 		return -ENOTSUP;
3038 	} else if (hdr->magic != BTF_MAGIC) {
3039 		pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
3040 		return -EINVAL;
3041 	}
3042 
3043 	if (hdr->version != BTF_VERSION) {
3044 		pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
3045 		return -ENOTSUP;
3046 	}
3047 
3048 	if (hdr->flags) {
3049 		pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
3050 		return -ENOTSUP;
3051 	}
3052 
3053 	if (data_size == hdr->hdr_len) {
3054 		pr_debug("BTF.ext has no data\n");
3055 		return -EINVAL;
3056 	}
3057 
3058 	return 0;
3059 }
3060 
btf_ext__free(struct btf_ext * btf_ext)3061 void btf_ext__free(struct btf_ext *btf_ext)
3062 {
3063 	if (IS_ERR_OR_NULL(btf_ext))
3064 		return;
3065 	free(btf_ext->func_info.sec_idxs);
3066 	free(btf_ext->line_info.sec_idxs);
3067 	free(btf_ext->core_relo_info.sec_idxs);
3068 	free(btf_ext->data);
3069 	free(btf_ext);
3070 }
3071 
btf_ext__new(const __u8 * data,__u32 size)3072 struct btf_ext *btf_ext__new(const __u8 *data, __u32 size)
3073 {
3074 	struct btf_ext *btf_ext;
3075 	int err;
3076 
3077 	btf_ext = calloc(1, sizeof(struct btf_ext));
3078 	if (!btf_ext)
3079 		return libbpf_err_ptr(-ENOMEM);
3080 
3081 	btf_ext->data_size = size;
3082 	btf_ext->data = malloc(size);
3083 	if (!btf_ext->data) {
3084 		err = -ENOMEM;
3085 		goto done;
3086 	}
3087 	memcpy(btf_ext->data, data, size);
3088 
3089 	err = btf_ext_parse_hdr(btf_ext->data, size);
3090 	if (err)
3091 		goto done;
3092 
3093 	if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
3094 		err = -EINVAL;
3095 		goto done;
3096 	}
3097 
3098 	err = btf_ext_setup_func_info(btf_ext);
3099 	if (err)
3100 		goto done;
3101 
3102 	err = btf_ext_setup_line_info(btf_ext);
3103 	if (err)
3104 		goto done;
3105 
3106 	if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
3107 		goto done; /* skip core relos parsing */
3108 
3109 	err = btf_ext_setup_core_relos(btf_ext);
3110 	if (err)
3111 		goto done;
3112 
3113 done:
3114 	if (err) {
3115 		btf_ext__free(btf_ext);
3116 		return libbpf_err_ptr(err);
3117 	}
3118 
3119 	return btf_ext;
3120 }
3121 
btf_ext__raw_data(const struct btf_ext * btf_ext,__u32 * size)3122 const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size)
3123 {
3124 	*size = btf_ext->data_size;
3125 	return btf_ext->data;
3126 }
3127 
3128 __attribute__((alias("btf_ext__raw_data")))
3129 const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size);
3130 
3131 
3132 struct btf_dedup;
3133 
3134 static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
3135 static void btf_dedup_free(struct btf_dedup *d);
3136 static int btf_dedup_prep(struct btf_dedup *d);
3137 static int btf_dedup_strings(struct btf_dedup *d);
3138 static int btf_dedup_prim_types(struct btf_dedup *d);
3139 static int btf_dedup_struct_types(struct btf_dedup *d);
3140 static int btf_dedup_ref_types(struct btf_dedup *d);
3141 static int btf_dedup_resolve_fwds(struct btf_dedup *d);
3142 static int btf_dedup_compact_types(struct btf_dedup *d);
3143 static int btf_dedup_remap_types(struct btf_dedup *d);
3144 
3145 /*
3146  * Deduplicate BTF types and strings.
3147  *
3148  * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF
3149  * section with all BTF type descriptors and string data. It overwrites that
3150  * memory in-place with deduplicated types and strings without any loss of
3151  * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section
3152  * is provided, all the strings referenced from .BTF.ext section are honored
3153  * and updated to point to the right offsets after deduplication.
3154  *
3155  * If function returns with error, type/string data might be garbled and should
3156  * be discarded.
3157  *
3158  * More verbose and detailed description of both problem btf_dedup is solving,
3159  * as well as solution could be found at:
3160  * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
3161  *
3162  * Problem description and justification
3163  * =====================================
3164  *
3165  * BTF type information is typically emitted either as a result of conversion
3166  * from DWARF to BTF or directly by compiler. In both cases, each compilation
3167  * unit contains information about a subset of all the types that are used
3168  * in an application. These subsets are frequently overlapping and contain a lot
3169  * of duplicated information when later concatenated together into a single
3170  * binary. This algorithm ensures that each unique type is represented by single
3171  * BTF type descriptor, greatly reducing resulting size of BTF data.
3172  *
3173  * Compilation unit isolation and subsequent duplication of data is not the only
3174  * problem. The same type hierarchy (e.g., struct and all the type that struct
3175  * references) in different compilation units can be represented in BTF to
3176  * various degrees of completeness (or, rather, incompleteness) due to
3177  * struct/union forward declarations.
3178  *
3179  * Let's take a look at an example, that we'll use to better understand the
3180  * problem (and solution). Suppose we have two compilation units, each using
3181  * same `struct S`, but each of them having incomplete type information about
3182  * struct's fields:
3183  *
3184  * // CU #1:
3185  * struct S;
3186  * struct A {
3187  *	int a;
3188  *	struct A* self;
3189  *	struct S* parent;
3190  * };
3191  * struct B;
3192  * struct S {
3193  *	struct A* a_ptr;
3194  *	struct B* b_ptr;
3195  * };
3196  *
3197  * // CU #2:
3198  * struct S;
3199  * struct A;
3200  * struct B {
3201  *	int b;
3202  *	struct B* self;
3203  *	struct S* parent;
3204  * };
3205  * struct S {
3206  *	struct A* a_ptr;
3207  *	struct B* b_ptr;
3208  * };
3209  *
3210  * In case of CU #1, BTF data will know only that `struct B` exist (but no
3211  * more), but will know the complete type information about `struct A`. While
3212  * for CU #2, it will know full type information about `struct B`, but will
3213  * only know about forward declaration of `struct A` (in BTF terms, it will
3214  * have `BTF_KIND_FWD` type descriptor with name `B`).
3215  *
3216  * This compilation unit isolation means that it's possible that there is no
3217  * single CU with complete type information describing structs `S`, `A`, and
3218  * `B`. Also, we might get tons of duplicated and redundant type information.
3219  *
3220  * Additional complication we need to keep in mind comes from the fact that
3221  * types, in general, can form graphs containing cycles, not just DAGs.
3222  *
3223  * While algorithm does deduplication, it also merges and resolves type
3224  * information (unless disabled throught `struct btf_opts`), whenever possible.
3225  * E.g., in the example above with two compilation units having partial type
3226  * information for structs `A` and `B`, the output of algorithm will emit
3227  * a single copy of each BTF type that describes structs `A`, `B`, and `S`
3228  * (as well as type information for `int` and pointers), as if they were defined
3229  * in a single compilation unit as:
3230  *
3231  * struct A {
3232  *	int a;
3233  *	struct A* self;
3234  *	struct S* parent;
3235  * };
3236  * struct B {
3237  *	int b;
3238  *	struct B* self;
3239  *	struct S* parent;
3240  * };
3241  * struct S {
3242  *	struct A* a_ptr;
3243  *	struct B* b_ptr;
3244  * };
3245  *
3246  * Algorithm summary
3247  * =================
3248  *
3249  * Algorithm completes its work in 7 separate passes:
3250  *
3251  * 1. Strings deduplication.
3252  * 2. Primitive types deduplication (int, enum, fwd).
3253  * 3. Struct/union types deduplication.
3254  * 4. Resolve unambiguous forward declarations.
3255  * 5. Reference types deduplication (pointers, typedefs, arrays, funcs, func
3256  *    protos, and const/volatile/restrict modifiers).
3257  * 6. Types compaction.
3258  * 7. Types remapping.
3259  *
3260  * Algorithm determines canonical type descriptor, which is a single
3261  * representative type for each truly unique type. This canonical type is the
3262  * one that will go into final deduplicated BTF type information. For
3263  * struct/unions, it is also the type that algorithm will merge additional type
3264  * information into (while resolving FWDs), as it discovers it from data in
3265  * other CUs. Each input BTF type eventually gets either mapped to itself, if
3266  * that type is canonical, or to some other type, if that type is equivalent
3267  * and was chosen as canonical representative. This mapping is stored in
3268  * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that
3269  * FWD type got resolved to.
3270  *
3271  * To facilitate fast discovery of canonical types, we also maintain canonical
3272  * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash
3273  * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types
3274  * that match that signature. With sufficiently good choice of type signature
3275  * hashing function, we can limit number of canonical types for each unique type
3276  * signature to a very small number, allowing to find canonical type for any
3277  * duplicated type very quickly.
3278  *
3279  * Struct/union deduplication is the most critical part and algorithm for
3280  * deduplicating structs/unions is described in greater details in comments for
3281  * `btf_dedup_is_equiv` function.
3282  */
btf__dedup(struct btf * btf,const struct btf_dedup_opts * opts)3283 int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
3284 {
3285 	struct btf_dedup *d;
3286 	int err;
3287 
3288 	if (!OPTS_VALID(opts, btf_dedup_opts))
3289 		return libbpf_err(-EINVAL);
3290 
3291 	d = btf_dedup_new(btf, opts);
3292 	if (IS_ERR(d)) {
3293 		pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
3294 		return libbpf_err(-EINVAL);
3295 	}
3296 
3297 	if (btf_ensure_modifiable(btf)) {
3298 		err = -ENOMEM;
3299 		goto done;
3300 	}
3301 
3302 	err = btf_dedup_prep(d);
3303 	if (err) {
3304 		pr_debug("btf_dedup_prep failed:%d\n", err);
3305 		goto done;
3306 	}
3307 	err = btf_dedup_strings(d);
3308 	if (err < 0) {
3309 		pr_debug("btf_dedup_strings failed:%d\n", err);
3310 		goto done;
3311 	}
3312 	err = btf_dedup_prim_types(d);
3313 	if (err < 0) {
3314 		pr_debug("btf_dedup_prim_types failed:%d\n", err);
3315 		goto done;
3316 	}
3317 	err = btf_dedup_struct_types(d);
3318 	if (err < 0) {
3319 		pr_debug("btf_dedup_struct_types failed:%d\n", err);
3320 		goto done;
3321 	}
3322 	err = btf_dedup_resolve_fwds(d);
3323 	if (err < 0) {
3324 		pr_debug("btf_dedup_resolve_fwds failed:%d\n", err);
3325 		goto done;
3326 	}
3327 	err = btf_dedup_ref_types(d);
3328 	if (err < 0) {
3329 		pr_debug("btf_dedup_ref_types failed:%d\n", err);
3330 		goto done;
3331 	}
3332 	err = btf_dedup_compact_types(d);
3333 	if (err < 0) {
3334 		pr_debug("btf_dedup_compact_types failed:%d\n", err);
3335 		goto done;
3336 	}
3337 	err = btf_dedup_remap_types(d);
3338 	if (err < 0) {
3339 		pr_debug("btf_dedup_remap_types failed:%d\n", err);
3340 		goto done;
3341 	}
3342 
3343 done:
3344 	btf_dedup_free(d);
3345 	return libbpf_err(err);
3346 }
3347 
3348 #define BTF_UNPROCESSED_ID ((__u32)-1)
3349 #define BTF_IN_PROGRESS_ID ((__u32)-2)
3350 
3351 struct btf_dedup {
3352 	/* .BTF section to be deduped in-place */
3353 	struct btf *btf;
3354 	/*
3355 	 * Optional .BTF.ext section. When provided, any strings referenced
3356 	 * from it will be taken into account when deduping strings
3357 	 */
3358 	struct btf_ext *btf_ext;
3359 	/*
3360 	 * This is a map from any type's signature hash to a list of possible
3361 	 * canonical representative type candidates. Hash collisions are
3362 	 * ignored, so even types of various kinds can share same list of
3363 	 * candidates, which is fine because we rely on subsequent
3364 	 * btf_xxx_equal() checks to authoritatively verify type equality.
3365 	 */
3366 	struct hashmap *dedup_table;
3367 	/* Canonical types map */
3368 	__u32 *map;
3369 	/* Hypothetical mapping, used during type graph equivalence checks */
3370 	__u32 *hypot_map;
3371 	__u32 *hypot_list;
3372 	size_t hypot_cnt;
3373 	size_t hypot_cap;
3374 	/* Whether hypothetical mapping, if successful, would need to adjust
3375 	 * already canonicalized types (due to a new forward declaration to
3376 	 * concrete type resolution). In such case, during split BTF dedup
3377 	 * candidate type would still be considered as different, because base
3378 	 * BTF is considered to be immutable.
3379 	 */
3380 	bool hypot_adjust_canon;
3381 	/* Various option modifying behavior of algorithm */
3382 	struct btf_dedup_opts opts;
3383 	/* temporary strings deduplication state */
3384 	struct strset *strs_set;
3385 };
3386 
hash_combine(long h,long value)3387 static long hash_combine(long h, long value)
3388 {
3389 	return h * 31 + value;
3390 }
3391 
3392 #define for_each_dedup_cand(d, node, hash) \
3393 	hashmap__for_each_key_entry(d->dedup_table, node, hash)
3394 
btf_dedup_table_add(struct btf_dedup * d,long hash,__u32 type_id)3395 static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
3396 {
3397 	return hashmap__append(d->dedup_table, hash, type_id);
3398 }
3399 
btf_dedup_hypot_map_add(struct btf_dedup * d,__u32 from_id,__u32 to_id)3400 static int btf_dedup_hypot_map_add(struct btf_dedup *d,
3401 				   __u32 from_id, __u32 to_id)
3402 {
3403 	if (d->hypot_cnt == d->hypot_cap) {
3404 		__u32 *new_list;
3405 
3406 		d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
3407 		new_list = libbpf_reallocarray(d->hypot_list, d->hypot_cap, sizeof(__u32));
3408 		if (!new_list)
3409 			return -ENOMEM;
3410 		d->hypot_list = new_list;
3411 	}
3412 	d->hypot_list[d->hypot_cnt++] = from_id;
3413 	d->hypot_map[from_id] = to_id;
3414 	return 0;
3415 }
3416 
btf_dedup_clear_hypot_map(struct btf_dedup * d)3417 static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
3418 {
3419 	int i;
3420 
3421 	for (i = 0; i < d->hypot_cnt; i++)
3422 		d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
3423 	d->hypot_cnt = 0;
3424 	d->hypot_adjust_canon = false;
3425 }
3426 
btf_dedup_free(struct btf_dedup * d)3427 static void btf_dedup_free(struct btf_dedup *d)
3428 {
3429 	hashmap__free(d->dedup_table);
3430 	d->dedup_table = NULL;
3431 
3432 	free(d->map);
3433 	d->map = NULL;
3434 
3435 	free(d->hypot_map);
3436 	d->hypot_map = NULL;
3437 
3438 	free(d->hypot_list);
3439 	d->hypot_list = NULL;
3440 
3441 	free(d);
3442 }
3443 
btf_dedup_identity_hash_fn(long key,void * ctx)3444 static size_t btf_dedup_identity_hash_fn(long key, void *ctx)
3445 {
3446 	return key;
3447 }
3448 
btf_dedup_collision_hash_fn(long key,void * ctx)3449 static size_t btf_dedup_collision_hash_fn(long key, void *ctx)
3450 {
3451 	return 0;
3452 }
3453 
btf_dedup_equal_fn(long k1,long k2,void * ctx)3454 static bool btf_dedup_equal_fn(long k1, long k2, void *ctx)
3455 {
3456 	return k1 == k2;
3457 }
3458 
btf_dedup_new(struct btf * btf,const struct btf_dedup_opts * opts)3459 static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts)
3460 {
3461 	struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
3462 	hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
3463 	int i, err = 0, type_cnt;
3464 
3465 	if (!d)
3466 		return ERR_PTR(-ENOMEM);
3467 
3468 	if (OPTS_GET(opts, force_collisions, false))
3469 		hash_fn = btf_dedup_collision_hash_fn;
3470 
3471 	d->btf = btf;
3472 	d->btf_ext = OPTS_GET(opts, btf_ext, NULL);
3473 
3474 	d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
3475 	if (IS_ERR(d->dedup_table)) {
3476 		err = PTR_ERR(d->dedup_table);
3477 		d->dedup_table = NULL;
3478 		goto done;
3479 	}
3480 
3481 	type_cnt = btf__type_cnt(btf);
3482 	d->map = malloc(sizeof(__u32) * type_cnt);
3483 	if (!d->map) {
3484 		err = -ENOMEM;
3485 		goto done;
3486 	}
3487 	/* special BTF "void" type is made canonical immediately */
3488 	d->map[0] = 0;
3489 	for (i = 1; i < type_cnt; i++) {
3490 		struct btf_type *t = btf_type_by_id(d->btf, i);
3491 
3492 		/* VAR and DATASEC are never deduped and are self-canonical */
3493 		if (btf_is_var(t) || btf_is_datasec(t))
3494 			d->map[i] = i;
3495 		else
3496 			d->map[i] = BTF_UNPROCESSED_ID;
3497 	}
3498 
3499 	d->hypot_map = malloc(sizeof(__u32) * type_cnt);
3500 	if (!d->hypot_map) {
3501 		err = -ENOMEM;
3502 		goto done;
3503 	}
3504 	for (i = 0; i < type_cnt; i++)
3505 		d->hypot_map[i] = BTF_UNPROCESSED_ID;
3506 
3507 done:
3508 	if (err) {
3509 		btf_dedup_free(d);
3510 		return ERR_PTR(err);
3511 	}
3512 
3513 	return d;
3514 }
3515 
3516 /*
3517  * Iterate over all possible places in .BTF and .BTF.ext that can reference
3518  * string and pass pointer to it to a provided callback `fn`.
3519  */
btf_for_each_str_off(struct btf_dedup * d,str_off_visit_fn fn,void * ctx)3520 static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *ctx)
3521 {
3522 	int i, r;
3523 
3524 	for (i = 0; i < d->btf->nr_types; i++) {
3525 		struct btf_field_iter it;
3526 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
3527 		__u32 *str_off;
3528 
3529 		r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
3530 		if (r)
3531 			return r;
3532 
3533 		while ((str_off = btf_field_iter_next(&it))) {
3534 			r = fn(str_off, ctx);
3535 			if (r)
3536 				return r;
3537 		}
3538 	}
3539 
3540 	if (!d->btf_ext)
3541 		return 0;
3542 
3543 	r = btf_ext_visit_str_offs(d->btf_ext, fn, ctx);
3544 	if (r)
3545 		return r;
3546 
3547 	return 0;
3548 }
3549 
strs_dedup_remap_str_off(__u32 * str_off_ptr,void * ctx)3550 static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx)
3551 {
3552 	struct btf_dedup *d = ctx;
3553 	__u32 str_off = *str_off_ptr;
3554 	const char *s;
3555 	int off, err;
3556 
3557 	/* don't touch empty string or string in main BTF */
3558 	if (str_off == 0 || str_off < d->btf->start_str_off)
3559 		return 0;
3560 
3561 	s = btf__str_by_offset(d->btf, str_off);
3562 	if (d->btf->base_btf) {
3563 		err = btf__find_str(d->btf->base_btf, s);
3564 		if (err >= 0) {
3565 			*str_off_ptr = err;
3566 			return 0;
3567 		}
3568 		if (err != -ENOENT)
3569 			return err;
3570 	}
3571 
3572 	off = strset__add_str(d->strs_set, s);
3573 	if (off < 0)
3574 		return off;
3575 
3576 	*str_off_ptr = d->btf->start_str_off + off;
3577 	return 0;
3578 }
3579 
3580 /*
3581  * Dedup string and filter out those that are not referenced from either .BTF
3582  * or .BTF.ext (if provided) sections.
3583  *
3584  * This is done by building index of all strings in BTF's string section,
3585  * then iterating over all entities that can reference strings (e.g., type
3586  * names, struct field names, .BTF.ext line info, etc) and marking corresponding
3587  * strings as used. After that all used strings are deduped and compacted into
3588  * sequential blob of memory and new offsets are calculated. Then all the string
3589  * references are iterated again and rewritten using new offsets.
3590  */
btf_dedup_strings(struct btf_dedup * d)3591 static int btf_dedup_strings(struct btf_dedup *d)
3592 {
3593 	int err;
3594 
3595 	if (d->btf->strs_deduped)
3596 		return 0;
3597 
3598 	d->strs_set = strset__new(BTF_MAX_STR_OFFSET, NULL, 0);
3599 	if (IS_ERR(d->strs_set)) {
3600 		err = PTR_ERR(d->strs_set);
3601 		goto err_out;
3602 	}
3603 
3604 	if (!d->btf->base_btf) {
3605 		/* insert empty string; we won't be looking it up during strings
3606 		 * dedup, but it's good to have it for generic BTF string lookups
3607 		 */
3608 		err = strset__add_str(d->strs_set, "");
3609 		if (err < 0)
3610 			goto err_out;
3611 	}
3612 
3613 	/* remap string offsets */
3614 	err = btf_for_each_str_off(d, strs_dedup_remap_str_off, d);
3615 	if (err)
3616 		goto err_out;
3617 
3618 	/* replace BTF string data and hash with deduped ones */
3619 	strset__free(d->btf->strs_set);
3620 	d->btf->hdr->str_len = strset__data_size(d->strs_set);
3621 	d->btf->strs_set = d->strs_set;
3622 	d->strs_set = NULL;
3623 	d->btf->strs_deduped = true;
3624 	return 0;
3625 
3626 err_out:
3627 	strset__free(d->strs_set);
3628 	d->strs_set = NULL;
3629 
3630 	return err;
3631 }
3632 
btf_hash_common(struct btf_type * t)3633 static long btf_hash_common(struct btf_type *t)
3634 {
3635 	long h;
3636 
3637 	h = hash_combine(0, t->name_off);
3638 	h = hash_combine(h, t->info);
3639 	h = hash_combine(h, t->size);
3640 	return h;
3641 }
3642 
btf_equal_common(struct btf_type * t1,struct btf_type * t2)3643 static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
3644 {
3645 	return t1->name_off == t2->name_off &&
3646 	       t1->info == t2->info &&
3647 	       t1->size == t2->size;
3648 }
3649 
3650 /* Calculate type signature hash of INT or TAG. */
btf_hash_int_decl_tag(struct btf_type * t)3651 static long btf_hash_int_decl_tag(struct btf_type *t)
3652 {
3653 	__u32 info = *(__u32 *)(t + 1);
3654 	long h;
3655 
3656 	h = btf_hash_common(t);
3657 	h = hash_combine(h, info);
3658 	return h;
3659 }
3660 
3661 /* Check structural equality of two INTs or TAGs. */
btf_equal_int_tag(struct btf_type * t1,struct btf_type * t2)3662 static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2)
3663 {
3664 	__u32 info1, info2;
3665 
3666 	if (!btf_equal_common(t1, t2))
3667 		return false;
3668 	info1 = *(__u32 *)(t1 + 1);
3669 	info2 = *(__u32 *)(t2 + 1);
3670 	return info1 == info2;
3671 }
3672 
3673 /* Calculate type signature hash of ENUM/ENUM64. */
btf_hash_enum(struct btf_type * t)3674 static long btf_hash_enum(struct btf_type *t)
3675 {
3676 	long h;
3677 
3678 	/* don't hash vlen, enum members and size to support enum fwd resolving */
3679 	h = hash_combine(0, t->name_off);
3680 	return h;
3681 }
3682 
btf_equal_enum_members(struct btf_type * t1,struct btf_type * t2)3683 static bool btf_equal_enum_members(struct btf_type *t1, struct btf_type *t2)
3684 {
3685 	const struct btf_enum *m1, *m2;
3686 	__u16 vlen;
3687 	int i;
3688 
3689 	vlen = btf_vlen(t1);
3690 	m1 = btf_enum(t1);
3691 	m2 = btf_enum(t2);
3692 	for (i = 0; i < vlen; i++) {
3693 		if (m1->name_off != m2->name_off || m1->val != m2->val)
3694 			return false;
3695 		m1++;
3696 		m2++;
3697 	}
3698 	return true;
3699 }
3700 
btf_equal_enum64_members(struct btf_type * t1,struct btf_type * t2)3701 static bool btf_equal_enum64_members(struct btf_type *t1, struct btf_type *t2)
3702 {
3703 	const struct btf_enum64 *m1, *m2;
3704 	__u16 vlen;
3705 	int i;
3706 
3707 	vlen = btf_vlen(t1);
3708 	m1 = btf_enum64(t1);
3709 	m2 = btf_enum64(t2);
3710 	for (i = 0; i < vlen; i++) {
3711 		if (m1->name_off != m2->name_off || m1->val_lo32 != m2->val_lo32 ||
3712 		    m1->val_hi32 != m2->val_hi32)
3713 			return false;
3714 		m1++;
3715 		m2++;
3716 	}
3717 	return true;
3718 }
3719 
3720 /* Check structural equality of two ENUMs or ENUM64s. */
btf_equal_enum(struct btf_type * t1,struct btf_type * t2)3721 static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
3722 {
3723 	if (!btf_equal_common(t1, t2))
3724 		return false;
3725 
3726 	/* t1 & t2 kinds are identical because of btf_equal_common */
3727 	if (btf_kind(t1) == BTF_KIND_ENUM)
3728 		return btf_equal_enum_members(t1, t2);
3729 	else
3730 		return btf_equal_enum64_members(t1, t2);
3731 }
3732 
btf_is_enum_fwd(struct btf_type * t)3733 static inline bool btf_is_enum_fwd(struct btf_type *t)
3734 {
3735 	return btf_is_any_enum(t) && btf_vlen(t) == 0;
3736 }
3737 
btf_compat_enum(struct btf_type * t1,struct btf_type * t2)3738 static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
3739 {
3740 	if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
3741 		return btf_equal_enum(t1, t2);
3742 	/* At this point either t1 or t2 or both are forward declarations, thus:
3743 	 * - skip comparing vlen because it is zero for forward declarations;
3744 	 * - skip comparing size to allow enum forward declarations
3745 	 *   to be compatible with enum64 full declarations;
3746 	 * - skip comparing kind for the same reason.
3747 	 */
3748 	return t1->name_off == t2->name_off &&
3749 	       btf_is_any_enum(t1) && btf_is_any_enum(t2);
3750 }
3751 
3752 /*
3753  * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
3754  * as referenced type IDs equivalence is established separately during type
3755  * graph equivalence check algorithm.
3756  */
btf_hash_struct(struct btf_type * t)3757 static long btf_hash_struct(struct btf_type *t)
3758 {
3759 	const struct btf_member *member = btf_members(t);
3760 	__u32 vlen = btf_vlen(t);
3761 	long h = btf_hash_common(t);
3762 	int i;
3763 
3764 	for (i = 0; i < vlen; i++) {
3765 		h = hash_combine(h, member->name_off);
3766 		h = hash_combine(h, member->offset);
3767 		/* no hashing of referenced type ID, it can be unresolved yet */
3768 		member++;
3769 	}
3770 	return h;
3771 }
3772 
3773 /*
3774  * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced
3775  * type IDs. This check is performed during type graph equivalence check and
3776  * referenced types equivalence is checked separately.
3777  */
btf_shallow_equal_struct(struct btf_type * t1,struct btf_type * t2)3778 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
3779 {
3780 	const struct btf_member *m1, *m2;
3781 	__u16 vlen;
3782 	int i;
3783 
3784 	if (!btf_equal_common(t1, t2))
3785 		return false;
3786 
3787 	vlen = btf_vlen(t1);
3788 	m1 = btf_members(t1);
3789 	m2 = btf_members(t2);
3790 	for (i = 0; i < vlen; i++) {
3791 		if (m1->name_off != m2->name_off || m1->offset != m2->offset)
3792 			return false;
3793 		m1++;
3794 		m2++;
3795 	}
3796 	return true;
3797 }
3798 
3799 /*
3800  * Calculate type signature hash of ARRAY, including referenced type IDs,
3801  * under assumption that they were already resolved to canonical type IDs and
3802  * are not going to change.
3803  */
btf_hash_array(struct btf_type * t)3804 static long btf_hash_array(struct btf_type *t)
3805 {
3806 	const struct btf_array *info = btf_array(t);
3807 	long h = btf_hash_common(t);
3808 
3809 	h = hash_combine(h, info->type);
3810 	h = hash_combine(h, info->index_type);
3811 	h = hash_combine(h, info->nelems);
3812 	return h;
3813 }
3814 
3815 /*
3816  * Check exact equality of two ARRAYs, taking into account referenced
3817  * type IDs, under assumption that they were already resolved to canonical
3818  * type IDs and are not going to change.
3819  * This function is called during reference types deduplication to compare
3820  * ARRAY to potential canonical representative.
3821  */
btf_equal_array(struct btf_type * t1,struct btf_type * t2)3822 static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
3823 {
3824 	const struct btf_array *info1, *info2;
3825 
3826 	if (!btf_equal_common(t1, t2))
3827 		return false;
3828 
3829 	info1 = btf_array(t1);
3830 	info2 = btf_array(t2);
3831 	return info1->type == info2->type &&
3832 	       info1->index_type == info2->index_type &&
3833 	       info1->nelems == info2->nelems;
3834 }
3835 
3836 /*
3837  * Check structural compatibility of two ARRAYs, ignoring referenced type
3838  * IDs. This check is performed during type graph equivalence check and
3839  * referenced types equivalence is checked separately.
3840  */
btf_compat_array(struct btf_type * t1,struct btf_type * t2)3841 static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
3842 {
3843 	if (!btf_equal_common(t1, t2))
3844 		return false;
3845 
3846 	return btf_array(t1)->nelems == btf_array(t2)->nelems;
3847 }
3848 
3849 /*
3850  * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
3851  * under assumption that they were already resolved to canonical type IDs and
3852  * are not going to change.
3853  */
btf_hash_fnproto(struct btf_type * t)3854 static long btf_hash_fnproto(struct btf_type *t)
3855 {
3856 	const struct btf_param *member = btf_params(t);
3857 	__u16 vlen = btf_vlen(t);
3858 	long h = btf_hash_common(t);
3859 	int i;
3860 
3861 	for (i = 0; i < vlen; i++) {
3862 		h = hash_combine(h, member->name_off);
3863 		h = hash_combine(h, member->type);
3864 		member++;
3865 	}
3866 	return h;
3867 }
3868 
3869 /*
3870  * Check exact equality of two FUNC_PROTOs, taking into account referenced
3871  * type IDs, under assumption that they were already resolved to canonical
3872  * type IDs and are not going to change.
3873  * This function is called during reference types deduplication to compare
3874  * FUNC_PROTO to potential canonical representative.
3875  */
btf_equal_fnproto(struct btf_type * t1,struct btf_type * t2)3876 static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
3877 {
3878 	const struct btf_param *m1, *m2;
3879 	__u16 vlen;
3880 	int i;
3881 
3882 	if (!btf_equal_common(t1, t2))
3883 		return false;
3884 
3885 	vlen = btf_vlen(t1);
3886 	m1 = btf_params(t1);
3887 	m2 = btf_params(t2);
3888 	for (i = 0; i < vlen; i++) {
3889 		if (m1->name_off != m2->name_off || m1->type != m2->type)
3890 			return false;
3891 		m1++;
3892 		m2++;
3893 	}
3894 	return true;
3895 }
3896 
3897 /*
3898  * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
3899  * IDs. This check is performed during type graph equivalence check and
3900  * referenced types equivalence is checked separately.
3901  */
btf_compat_fnproto(struct btf_type * t1,struct btf_type * t2)3902 static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
3903 {
3904 	const struct btf_param *m1, *m2;
3905 	__u16 vlen;
3906 	int i;
3907 
3908 	/* skip return type ID */
3909 	if (t1->name_off != t2->name_off || t1->info != t2->info)
3910 		return false;
3911 
3912 	vlen = btf_vlen(t1);
3913 	m1 = btf_params(t1);
3914 	m2 = btf_params(t2);
3915 	for (i = 0; i < vlen; i++) {
3916 		if (m1->name_off != m2->name_off)
3917 			return false;
3918 		m1++;
3919 		m2++;
3920 	}
3921 	return true;
3922 }
3923 
3924 /* Prepare split BTF for deduplication by calculating hashes of base BTF's
3925  * types and initializing the rest of the state (canonical type mapping) for
3926  * the fixed base BTF part.
3927  */
btf_dedup_prep(struct btf_dedup * d)3928 static int btf_dedup_prep(struct btf_dedup *d)
3929 {
3930 	struct btf_type *t;
3931 	int type_id;
3932 	long h;
3933 
3934 	if (!d->btf->base_btf)
3935 		return 0;
3936 
3937 	for (type_id = 1; type_id < d->btf->start_id; type_id++) {
3938 		t = btf_type_by_id(d->btf, type_id);
3939 
3940 		/* all base BTF types are self-canonical by definition */
3941 		d->map[type_id] = type_id;
3942 
3943 		switch (btf_kind(t)) {
3944 		case BTF_KIND_VAR:
3945 		case BTF_KIND_DATASEC:
3946 			/* VAR and DATASEC are never hash/deduplicated */
3947 			continue;
3948 		case BTF_KIND_CONST:
3949 		case BTF_KIND_VOLATILE:
3950 		case BTF_KIND_RESTRICT:
3951 		case BTF_KIND_PTR:
3952 		case BTF_KIND_FWD:
3953 		case BTF_KIND_TYPEDEF:
3954 		case BTF_KIND_FUNC:
3955 		case BTF_KIND_FLOAT:
3956 		case BTF_KIND_TYPE_TAG:
3957 			h = btf_hash_common(t);
3958 			break;
3959 		case BTF_KIND_INT:
3960 		case BTF_KIND_DECL_TAG:
3961 			h = btf_hash_int_decl_tag(t);
3962 			break;
3963 		case BTF_KIND_ENUM:
3964 		case BTF_KIND_ENUM64:
3965 			h = btf_hash_enum(t);
3966 			break;
3967 		case BTF_KIND_STRUCT:
3968 		case BTF_KIND_UNION:
3969 			h = btf_hash_struct(t);
3970 			break;
3971 		case BTF_KIND_ARRAY:
3972 			h = btf_hash_array(t);
3973 			break;
3974 		case BTF_KIND_FUNC_PROTO:
3975 			h = btf_hash_fnproto(t);
3976 			break;
3977 		default:
3978 			pr_debug("unknown kind %d for type [%d]\n", btf_kind(t), type_id);
3979 			return -EINVAL;
3980 		}
3981 		if (btf_dedup_table_add(d, h, type_id))
3982 			return -ENOMEM;
3983 	}
3984 
3985 	return 0;
3986 }
3987 
3988 /*
3989  * Deduplicate primitive types, that can't reference other types, by calculating
3990  * their type signature hash and comparing them with any possible canonical
3991  * candidate. If no canonical candidate matches, type itself is marked as
3992  * canonical and is added into `btf_dedup->dedup_table` as another candidate.
3993  */
btf_dedup_prim_type(struct btf_dedup * d,__u32 type_id)3994 static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
3995 {
3996 	struct btf_type *t = btf_type_by_id(d->btf, type_id);
3997 	struct hashmap_entry *hash_entry;
3998 	struct btf_type *cand;
3999 	/* if we don't find equivalent type, then we are canonical */
4000 	__u32 new_id = type_id;
4001 	__u32 cand_id;
4002 	long h;
4003 
4004 	switch (btf_kind(t)) {
4005 	case BTF_KIND_CONST:
4006 	case BTF_KIND_VOLATILE:
4007 	case BTF_KIND_RESTRICT:
4008 	case BTF_KIND_PTR:
4009 	case BTF_KIND_TYPEDEF:
4010 	case BTF_KIND_ARRAY:
4011 	case BTF_KIND_STRUCT:
4012 	case BTF_KIND_UNION:
4013 	case BTF_KIND_FUNC:
4014 	case BTF_KIND_FUNC_PROTO:
4015 	case BTF_KIND_VAR:
4016 	case BTF_KIND_DATASEC:
4017 	case BTF_KIND_DECL_TAG:
4018 	case BTF_KIND_TYPE_TAG:
4019 		return 0;
4020 
4021 	case BTF_KIND_INT:
4022 		h = btf_hash_int_decl_tag(t);
4023 		for_each_dedup_cand(d, hash_entry, h) {
4024 			cand_id = hash_entry->value;
4025 			cand = btf_type_by_id(d->btf, cand_id);
4026 			if (btf_equal_int_tag(t, cand)) {
4027 				new_id = cand_id;
4028 				break;
4029 			}
4030 		}
4031 		break;
4032 
4033 	case BTF_KIND_ENUM:
4034 	case BTF_KIND_ENUM64:
4035 		h = btf_hash_enum(t);
4036 		for_each_dedup_cand(d, hash_entry, h) {
4037 			cand_id = hash_entry->value;
4038 			cand = btf_type_by_id(d->btf, cand_id);
4039 			if (btf_equal_enum(t, cand)) {
4040 				new_id = cand_id;
4041 				break;
4042 			}
4043 			if (btf_compat_enum(t, cand)) {
4044 				if (btf_is_enum_fwd(t)) {
4045 					/* resolve fwd to full enum */
4046 					new_id = cand_id;
4047 					break;
4048 				}
4049 				/* resolve canonical enum fwd to full enum */
4050 				d->map[cand_id] = type_id;
4051 			}
4052 		}
4053 		break;
4054 
4055 	case BTF_KIND_FWD:
4056 	case BTF_KIND_FLOAT:
4057 		h = btf_hash_common(t);
4058 		for_each_dedup_cand(d, hash_entry, h) {
4059 			cand_id = hash_entry->value;
4060 			cand = btf_type_by_id(d->btf, cand_id);
4061 			if (btf_equal_common(t, cand)) {
4062 				new_id = cand_id;
4063 				break;
4064 			}
4065 		}
4066 		break;
4067 
4068 	default:
4069 		return -EINVAL;
4070 	}
4071 
4072 	d->map[type_id] = new_id;
4073 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
4074 		return -ENOMEM;
4075 
4076 	return 0;
4077 }
4078 
btf_dedup_prim_types(struct btf_dedup * d)4079 static int btf_dedup_prim_types(struct btf_dedup *d)
4080 {
4081 	int i, err;
4082 
4083 	for (i = 0; i < d->btf->nr_types; i++) {
4084 		err = btf_dedup_prim_type(d, d->btf->start_id + i);
4085 		if (err)
4086 			return err;
4087 	}
4088 	return 0;
4089 }
4090 
4091 /*
4092  * Check whether type is already mapped into canonical one (could be to itself).
4093  */
is_type_mapped(struct btf_dedup * d,uint32_t type_id)4094 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
4095 {
4096 	return d->map[type_id] <= BTF_MAX_NR_TYPES;
4097 }
4098 
4099 /*
4100  * Resolve type ID into its canonical type ID, if any; otherwise return original
4101  * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
4102  * STRUCT/UNION link and resolve it into canonical type ID as well.
4103  */
resolve_type_id(struct btf_dedup * d,__u32 type_id)4104 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
4105 {
4106 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
4107 		type_id = d->map[type_id];
4108 	return type_id;
4109 }
4110 
4111 /*
4112  * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
4113  * type ID.
4114  */
resolve_fwd_id(struct btf_dedup * d,uint32_t type_id)4115 static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
4116 {
4117 	__u32 orig_type_id = type_id;
4118 
4119 	if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
4120 		return type_id;
4121 
4122 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
4123 		type_id = d->map[type_id];
4124 
4125 	if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
4126 		return type_id;
4127 
4128 	return orig_type_id;
4129 }
4130 
4131 
btf_fwd_kind(struct btf_type * t)4132 static inline __u16 btf_fwd_kind(struct btf_type *t)
4133 {
4134 	return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
4135 }
4136 
4137 /* Check if given two types are identical ARRAY definitions */
btf_dedup_identical_arrays(struct btf_dedup * d,__u32 id1,__u32 id2)4138 static bool btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
4139 {
4140 	struct btf_type *t1, *t2;
4141 
4142 	t1 = btf_type_by_id(d->btf, id1);
4143 	t2 = btf_type_by_id(d->btf, id2);
4144 	if (!btf_is_array(t1) || !btf_is_array(t2))
4145 		return false;
4146 
4147 	return btf_equal_array(t1, t2);
4148 }
4149 
4150 /* Check if given two types are identical STRUCT/UNION definitions */
btf_dedup_identical_structs(struct btf_dedup * d,__u32 id1,__u32 id2)4151 static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id2)
4152 {
4153 	const struct btf_member *m1, *m2;
4154 	struct btf_type *t1, *t2;
4155 	int n, i;
4156 
4157 	t1 = btf_type_by_id(d->btf, id1);
4158 	t2 = btf_type_by_id(d->btf, id2);
4159 
4160 	if (!btf_is_composite(t1) || btf_kind(t1) != btf_kind(t2))
4161 		return false;
4162 
4163 	if (!btf_shallow_equal_struct(t1, t2))
4164 		return false;
4165 
4166 	m1 = btf_members(t1);
4167 	m2 = btf_members(t2);
4168 	for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
4169 		if (m1->type != m2->type &&
4170 		    !btf_dedup_identical_arrays(d, m1->type, m2->type) &&
4171 		    !btf_dedup_identical_structs(d, m1->type, m2->type))
4172 			return false;
4173 	}
4174 	return true;
4175 }
4176 
4177 /*
4178  * Check equivalence of BTF type graph formed by candidate struct/union (we'll
4179  * call it "candidate graph" in this description for brevity) to a type graph
4180  * formed by (potential) canonical struct/union ("canonical graph" for brevity
4181  * here, though keep in mind that not all types in canonical graph are
4182  * necessarily canonical representatives themselves, some of them might be
4183  * duplicates or its uniqueness might not have been established yet).
4184  * Returns:
4185  *  - >0, if type graphs are equivalent;
4186  *  -  0, if not equivalent;
4187  *  - <0, on error.
4188  *
4189  * Algorithm performs side-by-side DFS traversal of both type graphs and checks
4190  * equivalence of BTF types at each step. If at any point BTF types in candidate
4191  * and canonical graphs are not compatible structurally, whole graphs are
4192  * incompatible. If types are structurally equivalent (i.e., all information
4193  * except referenced type IDs is exactly the same), a mapping from `canon_id` to
4194  * a `cand_id` is recored in hypothetical mapping (`btf_dedup->hypot_map`).
4195  * If a type references other types, then those referenced types are checked
4196  * for equivalence recursively.
4197  *
4198  * During DFS traversal, if we find that for current `canon_id` type we
4199  * already have some mapping in hypothetical map, we check for two possible
4200  * situations:
4201  *   - `canon_id` is mapped to exactly the same type as `cand_id`. This will
4202  *     happen when type graphs have cycles. In this case we assume those two
4203  *     types are equivalent.
4204  *   - `canon_id` is mapped to different type. This is contradiction in our
4205  *     hypothetical mapping, because same graph in canonical graph corresponds
4206  *     to two different types in candidate graph, which for equivalent type
4207  *     graphs shouldn't happen. This condition terminates equivalence check
4208  *     with negative result.
4209  *
4210  * If type graphs traversal exhausts types to check and find no contradiction,
4211  * then type graphs are equivalent.
4212  *
4213  * When checking types for equivalence, there is one special case: FWD types.
4214  * If FWD type resolution is allowed and one of the types (either from canonical
4215  * or candidate graph) is FWD and other is STRUCT/UNION (depending on FWD's kind
4216  * flag) and their names match, hypothetical mapping is updated to point from
4217  * FWD to STRUCT/UNION. If graphs will be determined as equivalent successfully,
4218  * this mapping will be used to record FWD -> STRUCT/UNION mapping permanently.
4219  *
4220  * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
4221  * if there are two exactly named (or anonymous) structs/unions that are
4222  * compatible structurally, one of which has FWD field, while other is concrete
4223  * STRUCT/UNION, but according to C sources they are different structs/unions
4224  * that are referencing different types with the same name. This is extremely
4225  * unlikely to happen, but btf_dedup API allows to disable FWD resolution if
4226  * this logic is causing problems.
4227  *
4228  * Doing FWD resolution means that both candidate and/or canonical graphs can
4229  * consists of portions of the graph that come from multiple compilation units.
4230  * This is due to the fact that types within single compilation unit are always
4231  * deduplicated and FWDs are already resolved, if referenced struct/union
4232  * definiton is available. So, if we had unresolved FWD and found corresponding
4233  * STRUCT/UNION, they will be from different compilation units. This
4234  * consequently means that when we "link" FWD to corresponding STRUCT/UNION,
4235  * type graph will likely have at least two different BTF types that describe
4236  * same type (e.g., most probably there will be two different BTF types for the
4237  * same 'int' primitive type) and could even have "overlapping" parts of type
4238  * graph that describe same subset of types.
4239  *
4240  * This in turn means that our assumption that each type in canonical graph
4241  * must correspond to exactly one type in candidate graph might not hold
4242  * anymore and will make it harder to detect contradictions using hypothetical
4243  * map. To handle this problem, we allow to follow FWD -> STRUCT/UNION
4244  * resolution only in canonical graph. FWDs in candidate graphs are never
4245  * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs
4246  * that can occur:
4247  *   - Both types in canonical and candidate graphs are FWDs. If they are
4248  *     structurally equivalent, then they can either be both resolved to the
4249  *     same STRUCT/UNION or not resolved at all. In both cases they are
4250  *     equivalent and there is no need to resolve FWD on candidate side.
4251  *   - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
4252  *     so nothing to resolve as well, algorithm will check equivalence anyway.
4253  *   - Type in canonical graph is FWD, while type in candidate is concrete
4254  *     STRUCT/UNION. In this case candidate graph comes from single compilation
4255  *     unit, so there is exactly one BTF type for each unique C type. After
4256  *     resolving FWD into STRUCT/UNION, there might be more than one BTF type
4257  *     in canonical graph mapping to single BTF type in candidate graph, but
4258  *     because hypothetical mapping maps from canonical to candidate types, it's
4259  *     alright, and we still maintain the property of having single `canon_id`
4260  *     mapping to single `cand_id` (there could be two different `canon_id`
4261  *     mapped to the same `cand_id`, but it's not contradictory).
4262  *   - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
4263  *     graph is FWD. In this case we are just going to check compatibility of
4264  *     STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
4265  *     assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
4266  *     a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
4267  *     turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
4268  *     canonical graph.
4269  */
btf_dedup_is_equiv(struct btf_dedup * d,__u32 cand_id,__u32 canon_id)4270 static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
4271 			      __u32 canon_id)
4272 {
4273 	struct btf_type *cand_type;
4274 	struct btf_type *canon_type;
4275 	__u32 hypot_type_id;
4276 	__u16 cand_kind;
4277 	__u16 canon_kind;
4278 	int i, eq;
4279 
4280 	/* if both resolve to the same canonical, they must be equivalent */
4281 	if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
4282 		return 1;
4283 
4284 	canon_id = resolve_fwd_id(d, canon_id);
4285 
4286 	hypot_type_id = d->hypot_map[canon_id];
4287 	if (hypot_type_id <= BTF_MAX_NR_TYPES) {
4288 		if (hypot_type_id == cand_id)
4289 			return 1;
4290 		/* In some cases compiler will generate different DWARF types
4291 		 * for *identical* array type definitions and use them for
4292 		 * different fields within the *same* struct. This breaks type
4293 		 * equivalence check, which makes an assumption that candidate
4294 		 * types sub-graph has a consistent and deduped-by-compiler
4295 		 * types within a single CU. So work around that by explicitly
4296 		 * allowing identical array types here.
4297 		 */
4298 		if (btf_dedup_identical_arrays(d, hypot_type_id, cand_id))
4299 			return 1;
4300 		/* It turns out that similar situation can happen with
4301 		 * struct/union sometimes, sigh... Handle the case where
4302 		 * structs/unions are exactly the same, down to the referenced
4303 		 * type IDs. Anything more complicated (e.g., if referenced
4304 		 * types are different, but equivalent) is *way more*
4305 		 * complicated and requires a many-to-many equivalence mapping.
4306 		 */
4307 		if (btf_dedup_identical_structs(d, hypot_type_id, cand_id))
4308 			return 1;
4309 		return 0;
4310 	}
4311 
4312 	if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
4313 		return -ENOMEM;
4314 
4315 	cand_type = btf_type_by_id(d->btf, cand_id);
4316 	canon_type = btf_type_by_id(d->btf, canon_id);
4317 	cand_kind = btf_kind(cand_type);
4318 	canon_kind = btf_kind(canon_type);
4319 
4320 	if (cand_type->name_off != canon_type->name_off)
4321 		return 0;
4322 
4323 	/* FWD <--> STRUCT/UNION equivalence check, if enabled */
4324 	if ((cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
4325 	    && cand_kind != canon_kind) {
4326 		__u16 real_kind;
4327 		__u16 fwd_kind;
4328 
4329 		if (cand_kind == BTF_KIND_FWD) {
4330 			real_kind = canon_kind;
4331 			fwd_kind = btf_fwd_kind(cand_type);
4332 		} else {
4333 			real_kind = cand_kind;
4334 			fwd_kind = btf_fwd_kind(canon_type);
4335 			/* we'd need to resolve base FWD to STRUCT/UNION */
4336 			if (fwd_kind == real_kind && canon_id < d->btf->start_id)
4337 				d->hypot_adjust_canon = true;
4338 		}
4339 		return fwd_kind == real_kind;
4340 	}
4341 
4342 	if (cand_kind != canon_kind)
4343 		return 0;
4344 
4345 	switch (cand_kind) {
4346 	case BTF_KIND_INT:
4347 		return btf_equal_int_tag(cand_type, canon_type);
4348 
4349 	case BTF_KIND_ENUM:
4350 	case BTF_KIND_ENUM64:
4351 		return btf_compat_enum(cand_type, canon_type);
4352 
4353 	case BTF_KIND_FWD:
4354 	case BTF_KIND_FLOAT:
4355 		return btf_equal_common(cand_type, canon_type);
4356 
4357 	case BTF_KIND_CONST:
4358 	case BTF_KIND_VOLATILE:
4359 	case BTF_KIND_RESTRICT:
4360 	case BTF_KIND_PTR:
4361 	case BTF_KIND_TYPEDEF:
4362 	case BTF_KIND_FUNC:
4363 	case BTF_KIND_TYPE_TAG:
4364 		if (cand_type->info != canon_type->info)
4365 			return 0;
4366 		return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
4367 
4368 	case BTF_KIND_ARRAY: {
4369 		const struct btf_array *cand_arr, *canon_arr;
4370 
4371 		if (!btf_compat_array(cand_type, canon_type))
4372 			return 0;
4373 		cand_arr = btf_array(cand_type);
4374 		canon_arr = btf_array(canon_type);
4375 		eq = btf_dedup_is_equiv(d, cand_arr->index_type, canon_arr->index_type);
4376 		if (eq <= 0)
4377 			return eq;
4378 		return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
4379 	}
4380 
4381 	case BTF_KIND_STRUCT:
4382 	case BTF_KIND_UNION: {
4383 		const struct btf_member *cand_m, *canon_m;
4384 		__u16 vlen;
4385 
4386 		if (!btf_shallow_equal_struct(cand_type, canon_type))
4387 			return 0;
4388 		vlen = btf_vlen(cand_type);
4389 		cand_m = btf_members(cand_type);
4390 		canon_m = btf_members(canon_type);
4391 		for (i = 0; i < vlen; i++) {
4392 			eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
4393 			if (eq <= 0)
4394 				return eq;
4395 			cand_m++;
4396 			canon_m++;
4397 		}
4398 
4399 		return 1;
4400 	}
4401 
4402 	case BTF_KIND_FUNC_PROTO: {
4403 		const struct btf_param *cand_p, *canon_p;
4404 		__u16 vlen;
4405 
4406 		if (!btf_compat_fnproto(cand_type, canon_type))
4407 			return 0;
4408 		eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
4409 		if (eq <= 0)
4410 			return eq;
4411 		vlen = btf_vlen(cand_type);
4412 		cand_p = btf_params(cand_type);
4413 		canon_p = btf_params(canon_type);
4414 		for (i = 0; i < vlen; i++) {
4415 			eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
4416 			if (eq <= 0)
4417 				return eq;
4418 			cand_p++;
4419 			canon_p++;
4420 		}
4421 		return 1;
4422 	}
4423 
4424 	default:
4425 		return -EINVAL;
4426 	}
4427 	return 0;
4428 }
4429 
4430 /*
4431  * Use hypothetical mapping, produced by successful type graph equivalence
4432  * check, to augment existing struct/union canonical mapping, where possible.
4433  *
4434  * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
4435  * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional:
4436  * it doesn't matter if FWD type was part of canonical graph or candidate one,
4437  * we are recording the mapping anyway. As opposed to carefulness required
4438  * for struct/union correspondence mapping (described below), for FWD resolution
4439  * it's not important, as by the time that FWD type (reference type) will be
4440  * deduplicated all structs/unions will be deduped already anyway.
4441  *
4442  * Recording STRUCT/UNION mapping is purely a performance optimization and is
4443  * not required for correctness. It needs to be done carefully to ensure that
4444  * struct/union from candidate's type graph is not mapped into corresponding
4445  * struct/union from canonical type graph that itself hasn't been resolved into
4446  * canonical representative. The only guarantee we have is that canonical
4447  * struct/union was determined as canonical and that won't change. But any
4448  * types referenced through that struct/union fields could have been not yet
4449  * resolved, so in case like that it's too early to establish any kind of
4450  * correspondence between structs/unions.
4451  *
4452  * No canonical correspondence is derived for primitive types (they are already
4453  * deduplicated completely already anyway) or reference types (they rely on
4454  * stability of struct/union canonical relationship for equivalence checks).
4455  */
btf_dedup_merge_hypot_map(struct btf_dedup * d)4456 static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
4457 {
4458 	__u32 canon_type_id, targ_type_id;
4459 	__u16 t_kind, c_kind;
4460 	__u32 t_id, c_id;
4461 	int i;
4462 
4463 	for (i = 0; i < d->hypot_cnt; i++) {
4464 		canon_type_id = d->hypot_list[i];
4465 		targ_type_id = d->hypot_map[canon_type_id];
4466 		t_id = resolve_type_id(d, targ_type_id);
4467 		c_id = resolve_type_id(d, canon_type_id);
4468 		t_kind = btf_kind(btf__type_by_id(d->btf, t_id));
4469 		c_kind = btf_kind(btf__type_by_id(d->btf, c_id));
4470 		/*
4471 		 * Resolve FWD into STRUCT/UNION.
4472 		 * It's ok to resolve FWD into STRUCT/UNION that's not yet
4473 		 * mapped to canonical representative (as opposed to
4474 		 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
4475 		 * eventually that struct is going to be mapped and all resolved
4476 		 * FWDs will automatically resolve to correct canonical
4477 		 * representative. This will happen before ref type deduping,
4478 		 * which critically depends on stability of these mapping. This
4479 		 * stability is not a requirement for STRUCT/UNION equivalence
4480 		 * checks, though.
4481 		 */
4482 
4483 		/* if it's the split BTF case, we still need to point base FWD
4484 		 * to STRUCT/UNION in a split BTF, because FWDs from split BTF
4485 		 * will be resolved against base FWD. If we don't point base
4486 		 * canonical FWD to the resolved STRUCT/UNION, then all the
4487 		 * FWDs in split BTF won't be correctly resolved to a proper
4488 		 * STRUCT/UNION.
4489 		 */
4490 		if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
4491 			d->map[c_id] = t_id;
4492 
4493 		/* if graph equivalence determined that we'd need to adjust
4494 		 * base canonical types, then we need to only point base FWDs
4495 		 * to STRUCTs/UNIONs and do no more modifications. For all
4496 		 * other purposes the type graphs were not equivalent.
4497 		 */
4498 		if (d->hypot_adjust_canon)
4499 			continue;
4500 
4501 		if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
4502 			d->map[t_id] = c_id;
4503 
4504 		if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
4505 		    c_kind != BTF_KIND_FWD &&
4506 		    is_type_mapped(d, c_id) &&
4507 		    !is_type_mapped(d, t_id)) {
4508 			/*
4509 			 * as a perf optimization, we can map struct/union
4510 			 * that's part of type graph we just verified for
4511 			 * equivalence. We can do that for struct/union that has
4512 			 * canonical representative only, though.
4513 			 */
4514 			d->map[t_id] = c_id;
4515 		}
4516 	}
4517 }
4518 
4519 /*
4520  * Deduplicate struct/union types.
4521  *
4522  * For each struct/union type its type signature hash is calculated, taking
4523  * into account type's name, size, number, order and names of fields, but
4524  * ignoring type ID's referenced from fields, because they might not be deduped
4525  * completely until after reference types deduplication phase. This type hash
4526  * is used to iterate over all potential canonical types, sharing same hash.
4527  * For each canonical candidate we check whether type graphs that they form
4528  * (through referenced types in fields and so on) are equivalent using algorithm
4529  * implemented in `btf_dedup_is_equiv`. If such equivalence is found and
4530  * BTF_KIND_FWD resolution is allowed, then hypothetical mapping
4531  * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence
4532  * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to
4533  * potentially map other structs/unions to their canonical representatives,
4534  * if such relationship hasn't yet been established. This speeds up algorithm
4535  * by eliminating some of the duplicate work.
4536  *
4537  * If no matching canonical representative was found, struct/union is marked
4538  * as canonical for itself and is added into btf_dedup->dedup_table hash map
4539  * for further look ups.
4540  */
btf_dedup_struct_type(struct btf_dedup * d,__u32 type_id)4541 static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
4542 {
4543 	struct btf_type *cand_type, *t;
4544 	struct hashmap_entry *hash_entry;
4545 	/* if we don't find equivalent type, then we are canonical */
4546 	__u32 new_id = type_id;
4547 	__u16 kind;
4548 	long h;
4549 
4550 	/* already deduped or is in process of deduping (loop detected) */
4551 	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
4552 		return 0;
4553 
4554 	t = btf_type_by_id(d->btf, type_id);
4555 	kind = btf_kind(t);
4556 
4557 	if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
4558 		return 0;
4559 
4560 	h = btf_hash_struct(t);
4561 	for_each_dedup_cand(d, hash_entry, h) {
4562 		__u32 cand_id = hash_entry->value;
4563 		int eq;
4564 
4565 		/*
4566 		 * Even though btf_dedup_is_equiv() checks for
4567 		 * btf_shallow_equal_struct() internally when checking two
4568 		 * structs (unions) for equivalence, we need to guard here
4569 		 * from picking matching FWD type as a dedup candidate.
4570 		 * This can happen due to hash collision. In such case just
4571 		 * relying on btf_dedup_is_equiv() would lead to potentially
4572 		 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
4573 		 * FWD and compatible STRUCT/UNION are considered equivalent.
4574 		 */
4575 		cand_type = btf_type_by_id(d->btf, cand_id);
4576 		if (!btf_shallow_equal_struct(t, cand_type))
4577 			continue;
4578 
4579 		btf_dedup_clear_hypot_map(d);
4580 		eq = btf_dedup_is_equiv(d, type_id, cand_id);
4581 		if (eq < 0)
4582 			return eq;
4583 		if (!eq)
4584 			continue;
4585 		btf_dedup_merge_hypot_map(d);
4586 		if (d->hypot_adjust_canon) /* not really equivalent */
4587 			continue;
4588 		new_id = cand_id;
4589 		break;
4590 	}
4591 
4592 	d->map[type_id] = new_id;
4593 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
4594 		return -ENOMEM;
4595 
4596 	return 0;
4597 }
4598 
btf_dedup_struct_types(struct btf_dedup * d)4599 static int btf_dedup_struct_types(struct btf_dedup *d)
4600 {
4601 	int i, err;
4602 
4603 	for (i = 0; i < d->btf->nr_types; i++) {
4604 		err = btf_dedup_struct_type(d, d->btf->start_id + i);
4605 		if (err)
4606 			return err;
4607 	}
4608 	return 0;
4609 }
4610 
4611 /*
4612  * Deduplicate reference type.
4613  *
4614  * Once all primitive and struct/union types got deduplicated, we can easily
4615  * deduplicate all other (reference) BTF types. This is done in two steps:
4616  *
4617  * 1. Resolve all referenced type IDs into their canonical type IDs. This
4618  * resolution can be done either immediately for primitive or struct/union types
4619  * (because they were deduped in previous two phases) or recursively for
4620  * reference types. Recursion will always terminate at either primitive or
4621  * struct/union type, at which point we can "unwind" chain of reference types
4622  * one by one. There is no danger of encountering cycles because in C type
4623  * system the only way to form type cycle is through struct/union, so any chain
4624  * of reference types, even those taking part in a type cycle, will inevitably
4625  * reach struct/union at some point.
4626  *
4627  * 2. Once all referenced type IDs are resolved into canonical ones, BTF type
4628  * becomes "stable", in the sense that no further deduplication will cause
4629  * any changes to it. With that, it's now possible to calculate type's signature
4630  * hash (this time taking into account referenced type IDs) and loop over all
4631  * potential canonical representatives. If no match was found, current type
4632  * will become canonical representative of itself and will be added into
4633  * btf_dedup->dedup_table as another possible canonical representative.
4634  */
btf_dedup_ref_type(struct btf_dedup * d,__u32 type_id)4635 static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
4636 {
4637 	struct hashmap_entry *hash_entry;
4638 	__u32 new_id = type_id, cand_id;
4639 	struct btf_type *t, *cand;
4640 	/* if we don't find equivalent type, then we are representative type */
4641 	int ref_type_id;
4642 	long h;
4643 
4644 	if (d->map[type_id] == BTF_IN_PROGRESS_ID)
4645 		return -ELOOP;
4646 	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
4647 		return resolve_type_id(d, type_id);
4648 
4649 	t = btf_type_by_id(d->btf, type_id);
4650 	d->map[type_id] = BTF_IN_PROGRESS_ID;
4651 
4652 	switch (btf_kind(t)) {
4653 	case BTF_KIND_CONST:
4654 	case BTF_KIND_VOLATILE:
4655 	case BTF_KIND_RESTRICT:
4656 	case BTF_KIND_PTR:
4657 	case BTF_KIND_TYPEDEF:
4658 	case BTF_KIND_FUNC:
4659 	case BTF_KIND_TYPE_TAG:
4660 		ref_type_id = btf_dedup_ref_type(d, t->type);
4661 		if (ref_type_id < 0)
4662 			return ref_type_id;
4663 		t->type = ref_type_id;
4664 
4665 		h = btf_hash_common(t);
4666 		for_each_dedup_cand(d, hash_entry, h) {
4667 			cand_id = hash_entry->value;
4668 			cand = btf_type_by_id(d->btf, cand_id);
4669 			if (btf_equal_common(t, cand)) {
4670 				new_id = cand_id;
4671 				break;
4672 			}
4673 		}
4674 		break;
4675 
4676 	case BTF_KIND_DECL_TAG:
4677 		ref_type_id = btf_dedup_ref_type(d, t->type);
4678 		if (ref_type_id < 0)
4679 			return ref_type_id;
4680 		t->type = ref_type_id;
4681 
4682 		h = btf_hash_int_decl_tag(t);
4683 		for_each_dedup_cand(d, hash_entry, h) {
4684 			cand_id = hash_entry->value;
4685 			cand = btf_type_by_id(d->btf, cand_id);
4686 			if (btf_equal_int_tag(t, cand)) {
4687 				new_id = cand_id;
4688 				break;
4689 			}
4690 		}
4691 		break;
4692 
4693 	case BTF_KIND_ARRAY: {
4694 		struct btf_array *info = btf_array(t);
4695 
4696 		ref_type_id = btf_dedup_ref_type(d, info->type);
4697 		if (ref_type_id < 0)
4698 			return ref_type_id;
4699 		info->type = ref_type_id;
4700 
4701 		ref_type_id = btf_dedup_ref_type(d, info->index_type);
4702 		if (ref_type_id < 0)
4703 			return ref_type_id;
4704 		info->index_type = ref_type_id;
4705 
4706 		h = btf_hash_array(t);
4707 		for_each_dedup_cand(d, hash_entry, h) {
4708 			cand_id = hash_entry->value;
4709 			cand = btf_type_by_id(d->btf, cand_id);
4710 			if (btf_equal_array(t, cand)) {
4711 				new_id = cand_id;
4712 				break;
4713 			}
4714 		}
4715 		break;
4716 	}
4717 
4718 	case BTF_KIND_FUNC_PROTO: {
4719 		struct btf_param *param;
4720 		__u16 vlen;
4721 		int i;
4722 
4723 		ref_type_id = btf_dedup_ref_type(d, t->type);
4724 		if (ref_type_id < 0)
4725 			return ref_type_id;
4726 		t->type = ref_type_id;
4727 
4728 		vlen = btf_vlen(t);
4729 		param = btf_params(t);
4730 		for (i = 0; i < vlen; i++) {
4731 			ref_type_id = btf_dedup_ref_type(d, param->type);
4732 			if (ref_type_id < 0)
4733 				return ref_type_id;
4734 			param->type = ref_type_id;
4735 			param++;
4736 		}
4737 
4738 		h = btf_hash_fnproto(t);
4739 		for_each_dedup_cand(d, hash_entry, h) {
4740 			cand_id = hash_entry->value;
4741 			cand = btf_type_by_id(d->btf, cand_id);
4742 			if (btf_equal_fnproto(t, cand)) {
4743 				new_id = cand_id;
4744 				break;
4745 			}
4746 		}
4747 		break;
4748 	}
4749 
4750 	default:
4751 		return -EINVAL;
4752 	}
4753 
4754 	d->map[type_id] = new_id;
4755 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
4756 		return -ENOMEM;
4757 
4758 	return new_id;
4759 }
4760 
btf_dedup_ref_types(struct btf_dedup * d)4761 static int btf_dedup_ref_types(struct btf_dedup *d)
4762 {
4763 	int i, err;
4764 
4765 	for (i = 0; i < d->btf->nr_types; i++) {
4766 		err = btf_dedup_ref_type(d, d->btf->start_id + i);
4767 		if (err < 0)
4768 			return err;
4769 	}
4770 	/* we won't need d->dedup_table anymore */
4771 	hashmap__free(d->dedup_table);
4772 	d->dedup_table = NULL;
4773 	return 0;
4774 }
4775 
4776 /*
4777  * Collect a map from type names to type ids for all canonical structs
4778  * and unions. If the same name is shared by several canonical types
4779  * use a special value 0 to indicate this fact.
4780  */
btf_dedup_fill_unique_names_map(struct btf_dedup * d,struct hashmap * names_map)4781 static int btf_dedup_fill_unique_names_map(struct btf_dedup *d, struct hashmap *names_map)
4782 {
4783 	__u32 nr_types = btf__type_cnt(d->btf);
4784 	struct btf_type *t;
4785 	__u32 type_id;
4786 	__u16 kind;
4787 	int err;
4788 
4789 	/*
4790 	 * Iterate over base and split module ids in order to get all
4791 	 * available structs in the map.
4792 	 */
4793 	for (type_id = 1; type_id < nr_types; ++type_id) {
4794 		t = btf_type_by_id(d->btf, type_id);
4795 		kind = btf_kind(t);
4796 
4797 		if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
4798 			continue;
4799 
4800 		/* Skip non-canonical types */
4801 		if (type_id != d->map[type_id])
4802 			continue;
4803 
4804 		err = hashmap__add(names_map, t->name_off, type_id);
4805 		if (err == -EEXIST)
4806 			err = hashmap__set(names_map, t->name_off, 0, NULL, NULL);
4807 
4808 		if (err)
4809 			return err;
4810 	}
4811 
4812 	return 0;
4813 }
4814 
btf_dedup_resolve_fwd(struct btf_dedup * d,struct hashmap * names_map,__u32 type_id)4815 static int btf_dedup_resolve_fwd(struct btf_dedup *d, struct hashmap *names_map, __u32 type_id)
4816 {
4817 	struct btf_type *t = btf_type_by_id(d->btf, type_id);
4818 	enum btf_fwd_kind fwd_kind = btf_kflag(t);
4819 	__u16 cand_kind, kind = btf_kind(t);
4820 	struct btf_type *cand_t;
4821 	uintptr_t cand_id;
4822 
4823 	if (kind != BTF_KIND_FWD)
4824 		return 0;
4825 
4826 	/* Skip if this FWD already has a mapping */
4827 	if (type_id != d->map[type_id])
4828 		return 0;
4829 
4830 	if (!hashmap__find(names_map, t->name_off, &cand_id))
4831 		return 0;
4832 
4833 	/* Zero is a special value indicating that name is not unique */
4834 	if (!cand_id)
4835 		return 0;
4836 
4837 	cand_t = btf_type_by_id(d->btf, cand_id);
4838 	cand_kind = btf_kind(cand_t);
4839 	if ((cand_kind == BTF_KIND_STRUCT && fwd_kind != BTF_FWD_STRUCT) ||
4840 	    (cand_kind == BTF_KIND_UNION && fwd_kind != BTF_FWD_UNION))
4841 		return 0;
4842 
4843 	d->map[type_id] = cand_id;
4844 
4845 	return 0;
4846 }
4847 
4848 /*
4849  * Resolve unambiguous forward declarations.
4850  *
4851  * The lion's share of all FWD declarations is resolved during
4852  * `btf_dedup_struct_types` phase when different type graphs are
4853  * compared against each other. However, if in some compilation unit a
4854  * FWD declaration is not a part of a type graph compared against
4855  * another type graph that declaration's canonical type would not be
4856  * changed. Example:
4857  *
4858  * CU #1:
4859  *
4860  * struct foo;
4861  * struct foo *some_global;
4862  *
4863  * CU #2:
4864  *
4865  * struct foo { int u; };
4866  * struct foo *another_global;
4867  *
4868  * After `btf_dedup_struct_types` the BTF looks as follows:
4869  *
4870  * [1] STRUCT 'foo' size=4 vlen=1 ...
4871  * [2] INT 'int' size=4 ...
4872  * [3] PTR '(anon)' type_id=1
4873  * [4] FWD 'foo' fwd_kind=struct
4874  * [5] PTR '(anon)' type_id=4
4875  *
4876  * This pass assumes that such FWD declarations should be mapped to
4877  * structs or unions with identical name in case if the name is not
4878  * ambiguous.
4879  */
btf_dedup_resolve_fwds(struct btf_dedup * d)4880 static int btf_dedup_resolve_fwds(struct btf_dedup *d)
4881 {
4882 	int i, err;
4883 	struct hashmap *names_map;
4884 
4885 	names_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
4886 	if (IS_ERR(names_map))
4887 		return PTR_ERR(names_map);
4888 
4889 	err = btf_dedup_fill_unique_names_map(d, names_map);
4890 	if (err < 0)
4891 		goto exit;
4892 
4893 	for (i = 0; i < d->btf->nr_types; i++) {
4894 		err = btf_dedup_resolve_fwd(d, names_map, d->btf->start_id + i);
4895 		if (err < 0)
4896 			break;
4897 	}
4898 
4899 exit:
4900 	hashmap__free(names_map);
4901 	return err;
4902 }
4903 
4904 /*
4905  * Compact types.
4906  *
4907  * After we established for each type its corresponding canonical representative
4908  * type, we now can eliminate types that are not canonical and leave only
4909  * canonical ones layed out sequentially in memory by copying them over
4910  * duplicates. During compaction btf_dedup->hypot_map array is reused to store
4911  * a map from original type ID to a new compacted type ID, which will be used
4912  * during next phase to "fix up" type IDs, referenced from struct/union and
4913  * reference types.
4914  */
btf_dedup_compact_types(struct btf_dedup * d)4915 static int btf_dedup_compact_types(struct btf_dedup *d)
4916 {
4917 	__u32 *new_offs;
4918 	__u32 next_type_id = d->btf->start_id;
4919 	const struct btf_type *t;
4920 	void *p;
4921 	int i, id, len;
4922 
4923 	/* we are going to reuse hypot_map to store compaction remapping */
4924 	d->hypot_map[0] = 0;
4925 	/* base BTF types are not renumbered */
4926 	for (id = 1; id < d->btf->start_id; id++)
4927 		d->hypot_map[id] = id;
4928 	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++)
4929 		d->hypot_map[id] = BTF_UNPROCESSED_ID;
4930 
4931 	p = d->btf->types_data;
4932 
4933 	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) {
4934 		if (d->map[id] != id)
4935 			continue;
4936 
4937 		t = btf__type_by_id(d->btf, id);
4938 		len = btf_type_size(t);
4939 		if (len < 0)
4940 			return len;
4941 
4942 		memmove(p, t, len);
4943 		d->hypot_map[id] = next_type_id;
4944 		d->btf->type_offs[next_type_id - d->btf->start_id] = p - d->btf->types_data;
4945 		p += len;
4946 		next_type_id++;
4947 	}
4948 
4949 	/* shrink struct btf's internal types index and update btf_header */
4950 	d->btf->nr_types = next_type_id - d->btf->start_id;
4951 	d->btf->type_offs_cap = d->btf->nr_types;
4952 	d->btf->hdr->type_len = p - d->btf->types_data;
4953 	new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap,
4954 				       sizeof(*new_offs));
4955 	if (d->btf->type_offs_cap && !new_offs)
4956 		return -ENOMEM;
4957 	d->btf->type_offs = new_offs;
4958 	d->btf->hdr->str_off = d->btf->hdr->type_len;
4959 	d->btf->raw_size = d->btf->hdr->hdr_len + d->btf->hdr->type_len + d->btf->hdr->str_len;
4960 	return 0;
4961 }
4962 
4963 /*
4964  * Figure out final (deduplicated and compacted) type ID for provided original
4965  * `type_id` by first resolving it into corresponding canonical type ID and
4966  * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
4967  * which is populated during compaction phase.
4968  */
btf_dedup_remap_type_id(__u32 * type_id,void * ctx)4969 static int btf_dedup_remap_type_id(__u32 *type_id, void *ctx)
4970 {
4971 	struct btf_dedup *d = ctx;
4972 	__u32 resolved_type_id, new_type_id;
4973 
4974 	resolved_type_id = resolve_type_id(d, *type_id);
4975 	new_type_id = d->hypot_map[resolved_type_id];
4976 	if (new_type_id > BTF_MAX_NR_TYPES)
4977 		return -EINVAL;
4978 
4979 	*type_id = new_type_id;
4980 	return 0;
4981 }
4982 
4983 /*
4984  * Remap referenced type IDs into deduped type IDs.
4985  *
4986  * After BTF types are deduplicated and compacted, their final type IDs may
4987  * differ from original ones. The map from original to a corresponding
4988  * deduped type ID is stored in btf_dedup->hypot_map and is populated during
4989  * compaction phase. During remapping phase we are rewriting all type IDs
4990  * referenced from any BTF type (e.g., struct fields, func proto args, etc) to
4991  * their final deduped type IDs.
4992  */
btf_dedup_remap_types(struct btf_dedup * d)4993 static int btf_dedup_remap_types(struct btf_dedup *d)
4994 {
4995 	int i, r;
4996 
4997 	for (i = 0; i < d->btf->nr_types; i++) {
4998 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
4999 		struct btf_field_iter it;
5000 		__u32 *type_id;
5001 
5002 		r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
5003 		if (r)
5004 			return r;
5005 
5006 		while ((type_id = btf_field_iter_next(&it))) {
5007 			__u32 resolved_id, new_id;
5008 
5009 			resolved_id = resolve_type_id(d, *type_id);
5010 			new_id = d->hypot_map[resolved_id];
5011 			if (new_id > BTF_MAX_NR_TYPES)
5012 				return -EINVAL;
5013 
5014 			*type_id = new_id;
5015 		}
5016 	}
5017 
5018 	if (!d->btf_ext)
5019 		return 0;
5020 
5021 	r = btf_ext_visit_type_ids(d->btf_ext, btf_dedup_remap_type_id, d);
5022 	if (r)
5023 		return r;
5024 
5025 	return 0;
5026 }
5027 
5028 /*
5029  * Probe few well-known locations for vmlinux kernel image and try to load BTF
5030  * data out of it to use for target BTF.
5031  */
btf__load_vmlinux_btf(void)5032 struct btf *btf__load_vmlinux_btf(void)
5033 {
5034 	const char *sysfs_btf_path = "/sys/kernel/btf/vmlinux";
5035 	/* fall back locations, trying to find vmlinux on disk */
5036 	const char *locations[] = {
5037 		"/boot/vmlinux-%1$s",
5038 		"/lib/modules/%1$s/vmlinux-%1$s",
5039 		"/lib/modules/%1$s/build/vmlinux",
5040 		"/usr/lib/modules/%1$s/kernel/vmlinux",
5041 		"/usr/lib/debug/boot/vmlinux-%1$s",
5042 		"/usr/lib/debug/boot/vmlinux-%1$s.debug",
5043 		"/usr/lib/debug/lib/modules/%1$s/vmlinux",
5044 	};
5045 	char path[PATH_MAX + 1];
5046 	struct utsname buf;
5047 	struct btf *btf;
5048 	int i, err;
5049 
5050 	/* is canonical sysfs location accessible? */
5051 	if (faccessat(AT_FDCWD, sysfs_btf_path, F_OK, AT_EACCESS) < 0) {
5052 		pr_warn("kernel BTF is missing at '%s', was CONFIG_DEBUG_INFO_BTF enabled?\n",
5053 			sysfs_btf_path);
5054 	} else {
5055 		btf = btf__parse(sysfs_btf_path, NULL);
5056 		if (!btf) {
5057 			err = -errno;
5058 			pr_warn("failed to read kernel BTF from '%s': %d\n", sysfs_btf_path, err);
5059 			return libbpf_err_ptr(err);
5060 		}
5061 		pr_debug("loaded kernel BTF from '%s'\n", sysfs_btf_path);
5062 		return btf;
5063 	}
5064 
5065 	/* try fallback locations */
5066 	uname(&buf);
5067 	for (i = 0; i < ARRAY_SIZE(locations); i++) {
5068 		snprintf(path, PATH_MAX, locations[i], buf.release);
5069 
5070 		if (faccessat(AT_FDCWD, path, R_OK, AT_EACCESS))
5071 			continue;
5072 
5073 		btf = btf__parse(path, NULL);
5074 		err = libbpf_get_error(btf);
5075 		pr_debug("loading kernel BTF '%s': %d\n", path, err);
5076 		if (err)
5077 			continue;
5078 
5079 		return btf;
5080 	}
5081 
5082 	pr_warn("failed to find valid kernel BTF\n");
5083 	return libbpf_err_ptr(-ESRCH);
5084 }
5085 
5086 struct btf *libbpf_find_kernel_btf(void) __attribute__((alias("btf__load_vmlinux_btf")));
5087 
btf__load_module_btf(const char * module_name,struct btf * vmlinux_btf)5088 struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf)
5089 {
5090 	char path[80];
5091 
5092 	snprintf(path, sizeof(path), "/sys/kernel/btf/%s", module_name);
5093 	return btf__parse_split(path, vmlinux_btf);
5094 }
5095 
btf_ext_visit_type_ids(struct btf_ext * btf_ext,type_id_visit_fn visit,void * ctx)5096 int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
5097 {
5098 	const struct btf_ext_info *seg;
5099 	struct btf_ext_info_sec *sec;
5100 	int i, err;
5101 
5102 	seg = &btf_ext->func_info;
5103 	for_each_btf_ext_sec(seg, sec) {
5104 		struct bpf_func_info_min *rec;
5105 
5106 		for_each_btf_ext_rec(seg, sec, i, rec) {
5107 			err = visit(&rec->type_id, ctx);
5108 			if (err < 0)
5109 				return err;
5110 		}
5111 	}
5112 
5113 	seg = &btf_ext->core_relo_info;
5114 	for_each_btf_ext_sec(seg, sec) {
5115 		struct bpf_core_relo *rec;
5116 
5117 		for_each_btf_ext_rec(seg, sec, i, rec) {
5118 			err = visit(&rec->type_id, ctx);
5119 			if (err < 0)
5120 				return err;
5121 		}
5122 	}
5123 
5124 	return 0;
5125 }
5126 
btf_ext_visit_str_offs(struct btf_ext * btf_ext,str_off_visit_fn visit,void * ctx)5127 int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx)
5128 {
5129 	const struct btf_ext_info *seg;
5130 	struct btf_ext_info_sec *sec;
5131 	int i, err;
5132 
5133 	seg = &btf_ext->func_info;
5134 	for_each_btf_ext_sec(seg, sec) {
5135 		err = visit(&sec->sec_name_off, ctx);
5136 		if (err)
5137 			return err;
5138 	}
5139 
5140 	seg = &btf_ext->line_info;
5141 	for_each_btf_ext_sec(seg, sec) {
5142 		struct bpf_line_info_min *rec;
5143 
5144 		err = visit(&sec->sec_name_off, ctx);
5145 		if (err)
5146 			return err;
5147 
5148 		for_each_btf_ext_rec(seg, sec, i, rec) {
5149 			err = visit(&rec->file_name_off, ctx);
5150 			if (err)
5151 				return err;
5152 			err = visit(&rec->line_off, ctx);
5153 			if (err)
5154 				return err;
5155 		}
5156 	}
5157 
5158 	seg = &btf_ext->core_relo_info;
5159 	for_each_btf_ext_sec(seg, sec) {
5160 		struct bpf_core_relo *rec;
5161 
5162 		err = visit(&sec->sec_name_off, ctx);
5163 		if (err)
5164 			return err;
5165 
5166 		for_each_btf_ext_rec(seg, sec, i, rec) {
5167 			err = visit(&rec->access_str_off, ctx);
5168 			if (err)
5169 				return err;
5170 		}
5171 	}
5172 
5173 	return 0;
5174 }
5175 
5176 struct btf_distill {
5177 	struct btf_pipe pipe;
5178 	int *id_map;
5179 	unsigned int split_start_id;
5180 	unsigned int split_start_str;
5181 	int diff_id;
5182 };
5183 
btf_add_distilled_type_ids(struct btf_distill * dist,__u32 i)5184 static int btf_add_distilled_type_ids(struct btf_distill *dist, __u32 i)
5185 {
5186 	struct btf_type *split_t = btf_type_by_id(dist->pipe.src, i);
5187 	struct btf_field_iter it;
5188 	__u32 *id;
5189 	int err;
5190 
5191 	err = btf_field_iter_init(&it, split_t, BTF_FIELD_ITER_IDS);
5192 	if (err)
5193 		return err;
5194 	while ((id = btf_field_iter_next(&it))) {
5195 		struct btf_type *base_t;
5196 
5197 		if (!*id)
5198 			continue;
5199 		/* split BTF id, not needed */
5200 		if (*id >= dist->split_start_id)
5201 			continue;
5202 		/* already added ? */
5203 		if (dist->id_map[*id] > 0)
5204 			continue;
5205 
5206 		/* only a subset of base BTF types should be referenced from
5207 		 * split BTF; ensure nothing unexpected is referenced.
5208 		 */
5209 		base_t = btf_type_by_id(dist->pipe.src, *id);
5210 		switch (btf_kind(base_t)) {
5211 		case BTF_KIND_INT:
5212 		case BTF_KIND_FLOAT:
5213 		case BTF_KIND_FWD:
5214 		case BTF_KIND_ARRAY:
5215 		case BTF_KIND_STRUCT:
5216 		case BTF_KIND_UNION:
5217 		case BTF_KIND_TYPEDEF:
5218 		case BTF_KIND_ENUM:
5219 		case BTF_KIND_ENUM64:
5220 		case BTF_KIND_PTR:
5221 		case BTF_KIND_CONST:
5222 		case BTF_KIND_RESTRICT:
5223 		case BTF_KIND_VOLATILE:
5224 		case BTF_KIND_FUNC_PROTO:
5225 		case BTF_KIND_TYPE_TAG:
5226 			dist->id_map[*id] = *id;
5227 			break;
5228 		default:
5229 			pr_warn("unexpected reference to base type[%u] of kind [%u] when creating distilled base BTF.\n",
5230 				*id, btf_kind(base_t));
5231 			return -EINVAL;
5232 		}
5233 		/* If a base type is used, ensure types it refers to are
5234 		 * marked as used also; so for example if we find a PTR to INT
5235 		 * we need both the PTR and INT.
5236 		 *
5237 		 * The only exception is named struct/unions, since distilled
5238 		 * base BTF composite types have no members.
5239 		 */
5240 		if (btf_is_composite(base_t) && base_t->name_off)
5241 			continue;
5242 		err = btf_add_distilled_type_ids(dist, *id);
5243 		if (err)
5244 			return err;
5245 	}
5246 	return 0;
5247 }
5248 
btf_add_distilled_types(struct btf_distill * dist)5249 static int btf_add_distilled_types(struct btf_distill *dist)
5250 {
5251 	bool adding_to_base = dist->pipe.dst->start_id == 1;
5252 	int id = btf__type_cnt(dist->pipe.dst);
5253 	struct btf_type *t;
5254 	int i, err = 0;
5255 
5256 
5257 	/* Add types for each of the required references to either distilled
5258 	 * base or split BTF, depending on type characteristics.
5259 	 */
5260 	for (i = 1; i < dist->split_start_id; i++) {
5261 		const char *name;
5262 		int kind;
5263 
5264 		if (!dist->id_map[i])
5265 			continue;
5266 		t = btf_type_by_id(dist->pipe.src, i);
5267 		kind = btf_kind(t);
5268 		name = btf__name_by_offset(dist->pipe.src, t->name_off);
5269 
5270 		switch (kind) {
5271 		case BTF_KIND_INT:
5272 		case BTF_KIND_FLOAT:
5273 		case BTF_KIND_FWD:
5274 			/* Named int, float, fwd are added to base. */
5275 			if (!adding_to_base)
5276 				continue;
5277 			err = btf_add_type(&dist->pipe, t);
5278 			break;
5279 		case BTF_KIND_STRUCT:
5280 		case BTF_KIND_UNION:
5281 			/* Named struct/union are added to base as 0-vlen
5282 			 * struct/union of same size.  Anonymous struct/unions
5283 			 * are added to split BTF as-is.
5284 			 */
5285 			if (adding_to_base) {
5286 				if (!t->name_off)
5287 					continue;
5288 				err = btf_add_composite(dist->pipe.dst, kind, name, t->size);
5289 			} else {
5290 				if (t->name_off)
5291 					continue;
5292 				err = btf_add_type(&dist->pipe, t);
5293 			}
5294 			break;
5295 		case BTF_KIND_ENUM:
5296 		case BTF_KIND_ENUM64:
5297 			/* Named enum[64]s are added to base as a sized
5298 			 * enum; relocation will match with appropriately-named
5299 			 * and sized enum or enum64.
5300 			 *
5301 			 * Anonymous enums are added to split BTF as-is.
5302 			 */
5303 			if (adding_to_base) {
5304 				if (!t->name_off)
5305 					continue;
5306 				err = btf__add_enum(dist->pipe.dst, name, t->size);
5307 			} else {
5308 				if (t->name_off)
5309 					continue;
5310 				err = btf_add_type(&dist->pipe, t);
5311 			}
5312 			break;
5313 		case BTF_KIND_ARRAY:
5314 		case BTF_KIND_TYPEDEF:
5315 		case BTF_KIND_PTR:
5316 		case BTF_KIND_CONST:
5317 		case BTF_KIND_RESTRICT:
5318 		case BTF_KIND_VOLATILE:
5319 		case BTF_KIND_FUNC_PROTO:
5320 		case BTF_KIND_TYPE_TAG:
5321 			/* All other types are added to split BTF. */
5322 			if (adding_to_base)
5323 				continue;
5324 			err = btf_add_type(&dist->pipe, t);
5325 			break;
5326 		default:
5327 			pr_warn("unexpected kind when adding base type '%s'[%u] of kind [%u] to distilled base BTF.\n",
5328 				name, i, kind);
5329 			return -EINVAL;
5330 
5331 		}
5332 		if (err < 0)
5333 			break;
5334 		dist->id_map[i] = id++;
5335 	}
5336 	return err;
5337 }
5338 
5339 /* Split BTF ids without a mapping will be shifted downwards since distilled
5340  * base BTF is smaller than the original base BTF.  For those that have a
5341  * mapping (either to base or updated split BTF), update the id based on
5342  * that mapping.
5343  */
btf_update_distilled_type_ids(struct btf_distill * dist,__u32 i)5344 static int btf_update_distilled_type_ids(struct btf_distill *dist, __u32 i)
5345 {
5346 	struct btf_type *t = btf_type_by_id(dist->pipe.dst, i);
5347 	struct btf_field_iter it;
5348 	__u32 *id;
5349 	int err;
5350 
5351 	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
5352 	if (err)
5353 		return err;
5354 	while ((id = btf_field_iter_next(&it))) {
5355 		if (dist->id_map[*id])
5356 			*id = dist->id_map[*id];
5357 		else if (*id >= dist->split_start_id)
5358 			*id -= dist->diff_id;
5359 	}
5360 	return 0;
5361 }
5362 
5363 /* Create updated split BTF with distilled base BTF; distilled base BTF
5364  * consists of BTF information required to clarify the types that split
5365  * BTF refers to, omitting unneeded details.  Specifically it will contain
5366  * base types and memberless definitions of named structs, unions and enumerated
5367  * types. Associated reference types like pointers, arrays and anonymous
5368  * structs, unions and enumerated types will be added to split BTF.
5369  * Size is recorded for named struct/unions to help guide matching to the
5370  * target base BTF during later relocation.
5371  *
5372  * The only case where structs, unions or enumerated types are fully represented
5373  * is when they are anonymous; in such cases, the anonymous type is added to
5374  * split BTF in full.
5375  *
5376  * We return newly-created split BTF where the split BTF refers to a newly-created
5377  * distilled base BTF. Both must be freed separately by the caller.
5378  */
btf__distill_base(const struct btf * src_btf,struct btf ** new_base_btf,struct btf ** new_split_btf)5379 int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf,
5380 		      struct btf **new_split_btf)
5381 {
5382 	struct btf *new_base = NULL, *new_split = NULL;
5383 	const struct btf *old_base;
5384 	unsigned int n = btf__type_cnt(src_btf);
5385 	struct btf_distill dist = {};
5386 	struct btf_type *t;
5387 	int i, err = 0;
5388 
5389 	/* src BTF must be split BTF. */
5390 	old_base = btf__base_btf(src_btf);
5391 	if (!new_base_btf || !new_split_btf || !old_base)
5392 		return libbpf_err(-EINVAL);
5393 
5394 	new_base = btf__new_empty();
5395 	if (!new_base)
5396 		return libbpf_err(-ENOMEM);
5397 	dist.id_map = calloc(n, sizeof(*dist.id_map));
5398 	if (!dist.id_map) {
5399 		err = -ENOMEM;
5400 		goto done;
5401 	}
5402 	dist.pipe.src = src_btf;
5403 	dist.pipe.dst = new_base;
5404 	dist.pipe.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
5405 	if (IS_ERR(dist.pipe.str_off_map)) {
5406 		err = -ENOMEM;
5407 		goto done;
5408 	}
5409 	dist.split_start_id = btf__type_cnt(old_base);
5410 	dist.split_start_str = old_base->hdr->str_len;
5411 
5412 	/* Pass over src split BTF; generate the list of base BTF type ids it
5413 	 * references; these will constitute our distilled BTF set to be
5414 	 * distributed over base and split BTF as appropriate.
5415 	 */
5416 	for (i = src_btf->start_id; i < n; i++) {
5417 		err = btf_add_distilled_type_ids(&dist, i);
5418 		if (err < 0)
5419 			goto done;
5420 	}
5421 	/* Next add types for each of the required references to base BTF and split BTF
5422 	 * in turn.
5423 	 */
5424 	err = btf_add_distilled_types(&dist);
5425 	if (err < 0)
5426 		goto done;
5427 
5428 	/* Create new split BTF with distilled base BTF as its base; the final
5429 	 * state is split BTF with distilled base BTF that represents enough
5430 	 * about its base references to allow it to be relocated with the base
5431 	 * BTF available.
5432 	 */
5433 	new_split = btf__new_empty_split(new_base);
5434 	if (!new_split) {
5435 		err = -errno;
5436 		goto done;
5437 	}
5438 	dist.pipe.dst = new_split;
5439 	/* First add all split types */
5440 	for (i = src_btf->start_id; i < n; i++) {
5441 		t = btf_type_by_id(src_btf, i);
5442 		err = btf_add_type(&dist.pipe, t);
5443 		if (err < 0)
5444 			goto done;
5445 	}
5446 	/* Now add distilled types to split BTF that are not added to base. */
5447 	err = btf_add_distilled_types(&dist);
5448 	if (err < 0)
5449 		goto done;
5450 
5451 	/* All split BTF ids will be shifted downwards since there are less base
5452 	 * BTF ids in distilled base BTF.
5453 	 */
5454 	dist.diff_id = dist.split_start_id - btf__type_cnt(new_base);
5455 
5456 	n = btf__type_cnt(new_split);
5457 	/* Now update base/split BTF ids. */
5458 	for (i = 1; i < n; i++) {
5459 		err = btf_update_distilled_type_ids(&dist, i);
5460 		if (err < 0)
5461 			break;
5462 	}
5463 done:
5464 	free(dist.id_map);
5465 	hashmap__free(dist.pipe.str_off_map);
5466 	if (err) {
5467 		btf__free(new_split);
5468 		btf__free(new_base);
5469 		return libbpf_err(err);
5470 	}
5471 	*new_base_btf = new_base;
5472 	*new_split_btf = new_split;
5473 
5474 	return 0;
5475 }
5476 
btf_header(const struct btf * btf)5477 const struct btf_header *btf_header(const struct btf *btf)
5478 {
5479 	return btf->hdr;
5480 }
5481 
btf_set_base_btf(struct btf * btf,const struct btf * base_btf)5482 void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
5483 {
5484 	btf->base_btf = (struct btf *)base_btf;
5485 	btf->start_id = btf__type_cnt(base_btf);
5486 	btf->start_str_off = base_btf->hdr->str_len;
5487 }
5488 
btf__relocate(struct btf * btf,const struct btf * base_btf)5489 int btf__relocate(struct btf *btf, const struct btf *base_btf)
5490 {
5491 	int err = btf_relocate(btf, base_btf, NULL);
5492 
5493 	if (!err)
5494 		btf->owns_base = false;
5495 	return libbpf_err(err);
5496 }
5497