xref: /linux/tools/lib/bpf/libbpf.c (revision c034a177d3c898f370f52877e7252da8c4f8235c)
1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 
3 /*
4  * Common eBPF ELF object loading operations.
5  *
6  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8  * Copyright (C) 2015 Huawei Inc.
9  * Copyright (C) 2017 Nicira, Inc.
10  */
11 
12 #define _GNU_SOURCE
13 #include <stdlib.h>
14 #include <stdio.h>
15 #include <stdarg.h>
16 #include <libgen.h>
17 #include <inttypes.h>
18 #include <string.h>
19 #include <unistd.h>
20 #include <fcntl.h>
21 #include <errno.h>
22 #include <asm/unistd.h>
23 #include <linux/err.h>
24 #include <linux/kernel.h>
25 #include <linux/bpf.h>
26 #include <linux/btf.h>
27 #include <linux/list.h>
28 #include <linux/limits.h>
29 #include <linux/perf_event.h>
30 #include <sys/stat.h>
31 #include <sys/types.h>
32 #include <sys/vfs.h>
33 #include <tools/libc_compat.h>
34 #include <libelf.h>
35 #include <gelf.h>
36 
37 #include "libbpf.h"
38 #include "bpf.h"
39 #include "btf.h"
40 #include "str_error.h"
41 
/* ELF machine type for eBPF objects; absent from older elf.h headers. */
#ifndef EM_BPF
#define EM_BPF 247
#endif

/* Superblock magic identifying a bpf filesystem mount. */
#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC		0xcafe4a11
#endif

/* Ask the compiler to type-check printf-style format arguments. */
#define __printf(a, b)	__attribute__((format(printf, a, b)))
51 
52 __printf(1, 2)
53 static int __base_pr(const char *format, ...)
54 {
55 	va_list args;
56 	int err;
57 
58 	va_start(args, format);
59 	err = vfprintf(stderr, format, args);
60 	va_end(args);
61 	return err;
62 }
63 
/*
 * Active print callbacks: warnings and info default to stderr via
 * __base_pr; debug output stays off (NULL) until the user installs
 * a callback with libbpf_set_print().
 */
static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_debug;

/* Invoke @func only when a callback is installed; prefix every message. */
#define __pr(func, fmt, ...)	\
do {				\
	if ((func))		\
		(func)("libbpf: " fmt, ##__VA_ARGS__); \
} while (0)

#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)
77 
78 void libbpf_set_print(libbpf_print_fn_t warn,
79 		      libbpf_print_fn_t info,
80 		      libbpf_print_fn_t debug)
81 {
82 	__pr_warning = warn;
83 	__pr_info = info;
84 	__pr_debug = debug;
85 }
86 
/* Size of scratch buffers passed to libbpf_strerror_r(). */
#define STRERR_BUFSIZE  128

/* Run @action; on non-zero result store it in @err and jump to @out. */
#define CHECK_ERR(action, err, out) do {	\
	err = action;			\
	if (err)			\
		goto out;		\
} while(0)


/* Copied from tools/perf/util/util.h */
#ifndef zfree
/* Free the pointee and NULL the pointer, guarding against double-free. */
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
/*
 * Close @fd only when it is valid (>= 0), then reset it to -1.
 * Evaluates to close()'s return value, or 0 if nothing was closed.
 */
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

/* Prefer mmap-based ELF reading when libelf supports it. */
#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif
115 
116 /*
117  * bpf_prog should be a better name but it has been used in
118  * linux/filter.h.
119  */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	char *name;		/* symbol name (or ".text"), owned */
	int prog_ifindex;
	char *section_name;	/* owned copy of the ELF section name */
	struct bpf_insn *insns;	/* owned instruction buffer */
	/*
	 * main_prog_cnt: instruction count before .text was appended by
	 * bpf_program__reloc_text(); stays 0 until that happens.
	 */
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;	/* defaults to BPF_PROG_TYPE_KPROBE */

	/* One entry per relocation record collected for this program. */
	struct reloc_desc {
		enum {
			RELO_LD64,	/* relocation against a map symbol */
			RELO_CALL,	/* bpf-to-bpf call into .text */
		} type;
		int insn_idx;		/* instruction the relocation applies to */
		union {
			int map_idx;	/* RELO_LD64: index into obj->maps */
			int text_off;	/* RELO_CALL: symbol value in .text */
		};
	} *reloc_desc;
	int nr_reloc;

	/* Per-instance program fds; nr == -1 means "never loaded". */
	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;	/* back pointer to the owning object */
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
};
155 
struct bpf_map {
	int fd;			/* -1 until the map is created or reused */
	char *name;		/* owned copy of the map symbol name */
	size_t offset;		/* symbol offset inside the "maps" section */
	int map_ifindex;
	struct bpf_map_def def;
	__u32 btf_key_type_id;	/* BTF type ids; 0 when no BTF is attached */
	__u32 btf_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
};
167 
/* Global registry of every opened bpf_object (linked via obj->list). */
static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char license[64];	/* NUL-terminated; filled from "license" section */
	__u32 kern_version;	/* from the "version" ELF section */

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;

	bool loaded;
	bool has_pseudo_calls;	/* set when a RELO_CALL relocation is seen */

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		void *obj_buf;		/* caller-owned in-memory ELF image */
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;		/* collected SHT_REL sections */
		int nr_reloc;
		int maps_shndx;		/* index of "maps" section, -1 if none */
		int text_shndx;		/* index of ".text" section */
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;	/* NULL when the object carries no usable BTF */

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	char path[];		/* flexible array member holding the ELF path */
};
#define obj_elf_valid(o)	((o)->efile.elf)
217 
218 void bpf_program__unload(struct bpf_program *prog)
219 {
220 	int i;
221 
222 	if (!prog)
223 		return;
224 
225 	/*
226 	 * If the object is opened but the program was never loaded,
227 	 * it is possible that prog->instances.nr == -1.
228 	 */
229 	if (prog->instances.nr > 0) {
230 		for (i = 0; i < prog->instances.nr; i++)
231 			zclose(prog->instances.fds[i]);
232 	} else if (prog->instances.nr != -1) {
233 		pr_warning("Internal error: instances.nr is %d\n",
234 			   prog->instances.nr);
235 	}
236 
237 	prog->instances.nr = -1;
238 	zfree(&prog->instances.fds);
239 }
240 
241 static void bpf_program__exit(struct bpf_program *prog)
242 {
243 	if (!prog)
244 		return;
245 
246 	if (prog->clear_priv)
247 		prog->clear_priv(prog, prog->priv);
248 
249 	prog->priv = NULL;
250 	prog->clear_priv = NULL;
251 
252 	bpf_program__unload(prog);
253 	zfree(&prog->name);
254 	zfree(&prog->section_name);
255 	zfree(&prog->insns);
256 	zfree(&prog->reloc_desc);
257 
258 	prog->nr_reloc = 0;
259 	prog->insns_cnt = 0;
260 	prog->idx = -1;
261 }
262 
263 static int
264 bpf_program__init(void *data, size_t size, char *section_name, int idx,
265 		  struct bpf_program *prog)
266 {
267 	if (size < sizeof(struct bpf_insn)) {
268 		pr_warning("corrupted section '%s'\n", section_name);
269 		return -EINVAL;
270 	}
271 
272 	bzero(prog, sizeof(*prog));
273 
274 	prog->section_name = strdup(section_name);
275 	if (!prog->section_name) {
276 		pr_warning("failed to alloc name for prog under section(%d) %s\n",
277 			   idx, section_name);
278 		goto errout;
279 	}
280 
281 	prog->insns = malloc(size);
282 	if (!prog->insns) {
283 		pr_warning("failed to alloc insns for prog under section %s\n",
284 			   section_name);
285 		goto errout;
286 	}
287 	prog->insns_cnt = size / sizeof(struct bpf_insn);
288 	memcpy(prog->insns, data,
289 	       prog->insns_cnt * sizeof(struct bpf_insn));
290 	prog->idx = idx;
291 	prog->instances.fds = NULL;
292 	prog->instances.nr = -1;
293 	prog->type = BPF_PROG_TYPE_KPROBE;
294 
295 	return 0;
296 errout:
297 	bpf_program__exit(prog);
298 	return -ENOMEM;
299 }
300 
301 static int
302 bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
303 			char *section_name, int idx)
304 {
305 	struct bpf_program prog, *progs;
306 	int nr_progs, err;
307 
308 	err = bpf_program__init(data, size, section_name, idx, &prog);
309 	if (err)
310 		return err;
311 
312 	progs = obj->programs;
313 	nr_progs = obj->nr_programs;
314 
315 	progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
316 	if (!progs) {
317 		/*
318 		 * In this case the original obj->programs
319 		 * is still valid, so don't need special treat for
320 		 * bpf_close_object().
321 		 */
322 		pr_warning("failed to alloc a new program under section '%s'\n",
323 			   section_name);
324 		bpf_program__exit(&prog);
325 		return -ENOMEM;
326 	}
327 
328 	pr_debug("found program %s\n", prog.section_name);
329 	obj->programs = progs;
330 	obj->nr_programs = nr_progs + 1;
331 	prog.obj = obj;
332 	progs[nr_progs] = prog;
333 	return 0;
334 }
335 
/*
 * Resolve a human-readable name for every program in @obj by scanning the
 * ELF symbol table for a STB_GLOBAL symbol defined in that program's
 * section.  Programs in .text fall back to the literal name ".text".
 *
 * Returns 0 on success; -LIBBPF_ERRNO__LIBELF when the string table lookup
 * fails, -EINVAL when no symbol names the program, -ENOMEM on OOM.
 */
static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		/* Stop at the first global symbol living in prog's section. */
		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warning("failed to get sym name string for prog %s\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		/* .text may carry no global symbol; name it explicitly. */
		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warning("failed to find sym for prog %s\n",
				   prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warning("failed to allocate memory for prog sym %s\n",
				   name);
			return -ENOMEM;
		}
	}

	return 0;
}
388 
389 static struct bpf_object *bpf_object__new(const char *path,
390 					  void *obj_buf,
391 					  size_t obj_buf_sz)
392 {
393 	struct bpf_object *obj;
394 
395 	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
396 	if (!obj) {
397 		pr_warning("alloc memory failed for %s\n", path);
398 		return ERR_PTR(-ENOMEM);
399 	}
400 
401 	strcpy(obj->path, path);
402 	obj->efile.fd = -1;
403 
404 	/*
405 	 * Caller of this function should also calls
406 	 * bpf_object__elf_finish() after data collection to return
407 	 * obj_buf to user. If not, we should duplicate the buffer to
408 	 * avoid user freeing them before elf finish.
409 	 */
410 	obj->efile.obj_buf = obj_buf;
411 	obj->efile.obj_buf_sz = obj_buf_sz;
412 	obj->efile.maps_shndx = -1;
413 
414 	obj->loaded = false;
415 
416 	INIT_LIST_HEAD(&obj->list);
417 	list_add(&obj->list, &bpf_objects_list);
418 	return obj;
419 }
420 
421 static void bpf_object__elf_finish(struct bpf_object *obj)
422 {
423 	if (!obj_elf_valid(obj))
424 		return;
425 
426 	if (obj->efile.elf) {
427 		elf_end(obj->efile.elf);
428 		obj->efile.elf = NULL;
429 	}
430 	obj->efile.symbols = NULL;
431 
432 	zfree(&obj->efile.reloc);
433 	obj->efile.nr_reloc = 0;
434 	zclose(obj->efile.fd);
435 	obj->efile.obj_buf = NULL;
436 	obj->efile.obj_buf_sz = 0;
437 }
438 
439 static int bpf_object__elf_init(struct bpf_object *obj)
440 {
441 	int err = 0;
442 	GElf_Ehdr *ep;
443 
444 	if (obj_elf_valid(obj)) {
445 		pr_warning("elf init: internal error\n");
446 		return -LIBBPF_ERRNO__LIBELF;
447 	}
448 
449 	if (obj->efile.obj_buf_sz > 0) {
450 		/*
451 		 * obj_buf should have been validated by
452 		 * bpf_object__open_buffer().
453 		 */
454 		obj->efile.elf = elf_memory(obj->efile.obj_buf,
455 					    obj->efile.obj_buf_sz);
456 	} else {
457 		obj->efile.fd = open(obj->path, O_RDONLY);
458 		if (obj->efile.fd < 0) {
459 			char errmsg[STRERR_BUFSIZE];
460 			char *cp = libbpf_strerror_r(errno, errmsg,
461 						     sizeof(errmsg));
462 
463 			pr_warning("failed to open %s: %s\n", obj->path, cp);
464 			return -errno;
465 		}
466 
467 		obj->efile.elf = elf_begin(obj->efile.fd,
468 				LIBBPF_ELF_C_READ_MMAP,
469 				NULL);
470 	}
471 
472 	if (!obj->efile.elf) {
473 		pr_warning("failed to open %s as ELF file\n",
474 				obj->path);
475 		err = -LIBBPF_ERRNO__LIBELF;
476 		goto errout;
477 	}
478 
479 	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
480 		pr_warning("failed to get EHDR from %s\n",
481 				obj->path);
482 		err = -LIBBPF_ERRNO__FORMAT;
483 		goto errout;
484 	}
485 	ep = &obj->efile.ehdr;
486 
487 	/* Old LLVM set e_machine to EM_NONE */
488 	if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
489 		pr_warning("%s is not an eBPF object file\n",
490 			obj->path);
491 		err = -LIBBPF_ERRNO__FORMAT;
492 		goto errout;
493 	}
494 
495 	return 0;
496 errout:
497 	bpf_object__elf_finish(obj);
498 	return err;
499 }
500 
501 static int
502 bpf_object__check_endianness(struct bpf_object *obj)
503 {
504 	static unsigned int const endian = 1;
505 
506 	switch (obj->efile.ehdr.e_ident[EI_DATA]) {
507 	case ELFDATA2LSB:
508 		/* We are big endian, BPF obj is little endian. */
509 		if (*(unsigned char const *)&endian != 1)
510 			goto mismatch;
511 		break;
512 
513 	case ELFDATA2MSB:
514 		/* We are little endian, BPF obj is big endian. */
515 		if (*(unsigned char const *)&endian != 0)
516 			goto mismatch;
517 		break;
518 	default:
519 		return -LIBBPF_ERRNO__ENDIAN;
520 	}
521 
522 	return 0;
523 
524 mismatch:
525 	pr_warning("Error: endianness mismatch.\n");
526 	return -LIBBPF_ERRNO__ENDIAN;
527 }
528 
529 static int
530 bpf_object__init_license(struct bpf_object *obj,
531 			 void *data, size_t size)
532 {
533 	memcpy(obj->license, data,
534 	       min(size, sizeof(obj->license) - 1));
535 	pr_debug("license of %s is %s\n", obj->path, obj->license);
536 	return 0;
537 }
538 
539 static int
540 bpf_object__init_kversion(struct bpf_object *obj,
541 			  void *data, size_t size)
542 {
543 	__u32 kver;
544 
545 	if (size != sizeof(kver)) {
546 		pr_warning("invalid kver section in %s\n", obj->path);
547 		return -LIBBPF_ERRNO__FORMAT;
548 	}
549 	memcpy(&kver, data, sizeof(kver));
550 	obj->kern_version = kver;
551 	pr_debug("kernel version of %s is %x\n", obj->path,
552 		 obj->kern_version);
553 	return 0;
554 }
555 
556 static int compare_bpf_map(const void *_a, const void *_b)
557 {
558 	const struct bpf_map *a = _a;
559 	const struct bpf_map *b = _b;
560 
561 	return a->offset - b->offset;
562 }
563 
564 static int
565 bpf_object__init_maps(struct bpf_object *obj, int flags)
566 {
567 	bool strict = !(flags & MAPS_RELAX_COMPAT);
568 	int i, map_idx, map_def_sz, nr_maps = 0;
569 	Elf_Scn *scn;
570 	Elf_Data *data;
571 	Elf_Data *symbols = obj->efile.symbols;
572 
573 	if (obj->efile.maps_shndx < 0)
574 		return -EINVAL;
575 	if (!symbols)
576 		return -EINVAL;
577 
578 	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
579 	if (scn)
580 		data = elf_getdata(scn, NULL);
581 	if (!scn || !data) {
582 		pr_warning("failed to get Elf_Data from map section %d\n",
583 			   obj->efile.maps_shndx);
584 		return -EINVAL;
585 	}
586 
587 	/*
588 	 * Count number of maps. Each map has a name.
589 	 * Array of maps is not supported: only the first element is
590 	 * considered.
591 	 *
592 	 * TODO: Detect array of map and report error.
593 	 */
594 	for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
595 		GElf_Sym sym;
596 
597 		if (!gelf_getsym(symbols, i, &sym))
598 			continue;
599 		if (sym.st_shndx != obj->efile.maps_shndx)
600 			continue;
601 		nr_maps++;
602 	}
603 
604 	/* Alloc obj->maps and fill nr_maps. */
605 	pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
606 		 nr_maps, data->d_size);
607 
608 	if (!nr_maps)
609 		return 0;
610 
611 	/* Assume equally sized map definitions */
612 	map_def_sz = data->d_size / nr_maps;
613 	if (!data->d_size || (data->d_size % nr_maps) != 0) {
614 		pr_warning("unable to determine map definition size "
615 			   "section %s, %d maps in %zd bytes\n",
616 			   obj->path, nr_maps, data->d_size);
617 		return -EINVAL;
618 	}
619 
620 	obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
621 	if (!obj->maps) {
622 		pr_warning("alloc maps for object failed\n");
623 		return -ENOMEM;
624 	}
625 	obj->nr_maps = nr_maps;
626 
627 	/*
628 	 * fill all fd with -1 so won't close incorrect
629 	 * fd (fd=0 is stdin) when failure (zclose won't close
630 	 * negative fd)).
631 	 */
632 	for (i = 0; i < nr_maps; i++)
633 		obj->maps[i].fd = -1;
634 
635 	/*
636 	 * Fill obj->maps using data in "maps" section.
637 	 */
638 	for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
639 		GElf_Sym sym;
640 		const char *map_name;
641 		struct bpf_map_def *def;
642 
643 		if (!gelf_getsym(symbols, i, &sym))
644 			continue;
645 		if (sym.st_shndx != obj->efile.maps_shndx)
646 			continue;
647 
648 		map_name = elf_strptr(obj->efile.elf,
649 				      obj->efile.strtabidx,
650 				      sym.st_name);
651 		obj->maps[map_idx].offset = sym.st_value;
652 		if (sym.st_value + map_def_sz > data->d_size) {
653 			pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
654 				   obj->path, map_name);
655 			return -EINVAL;
656 		}
657 
658 		obj->maps[map_idx].name = strdup(map_name);
659 		if (!obj->maps[map_idx].name) {
660 			pr_warning("failed to alloc map name\n");
661 			return -ENOMEM;
662 		}
663 		pr_debug("map %d is \"%s\"\n", map_idx,
664 			 obj->maps[map_idx].name);
665 		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
666 		/*
667 		 * If the definition of the map in the object file fits in
668 		 * bpf_map_def, copy it.  Any extra fields in our version
669 		 * of bpf_map_def will default to zero as a result of the
670 		 * calloc above.
671 		 */
672 		if (map_def_sz <= sizeof(struct bpf_map_def)) {
673 			memcpy(&obj->maps[map_idx].def, def, map_def_sz);
674 		} else {
675 			/*
676 			 * Here the map structure being read is bigger than what
677 			 * we expect, truncate if the excess bits are all zero.
678 			 * If they are not zero, reject this map as
679 			 * incompatible.
680 			 */
681 			char *b;
682 			for (b = ((char *)def) + sizeof(struct bpf_map_def);
683 			     b < ((char *)def) + map_def_sz; b++) {
684 				if (*b != 0) {
685 					pr_warning("maps section in %s: \"%s\" "
686 						   "has unrecognized, non-zero "
687 						   "options\n",
688 						   obj->path, map_name);
689 					if (strict)
690 						return -EINVAL;
691 				}
692 			}
693 			memcpy(&obj->maps[map_idx].def, def,
694 			       sizeof(struct bpf_map_def));
695 		}
696 		map_idx++;
697 	}
698 
699 	qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
700 	return 0;
701 }
702 
703 static bool section_have_execinstr(struct bpf_object *obj, int idx)
704 {
705 	Elf_Scn *scn;
706 	GElf_Shdr sh;
707 
708 	scn = elf_getscn(obj->efile.elf, idx);
709 	if (!scn)
710 		return false;
711 
712 	if (gelf_getshdr(scn, &sh) != &sh)
713 		return false;
714 
715 	if (sh.sh_flags & SHF_EXECINSTR)
716 		return true;
717 
718 	return false;
719 }
720 
721 static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
722 {
723 	Elf *elf = obj->efile.elf;
724 	GElf_Ehdr *ep = &obj->efile.ehdr;
725 	Elf_Scn *scn = NULL;
726 	int idx = 0, err = 0;
727 
728 	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
729 	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
730 		pr_warning("failed to get e_shstrndx from %s\n",
731 			   obj->path);
732 		return -LIBBPF_ERRNO__FORMAT;
733 	}
734 
735 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
736 		char *name;
737 		GElf_Shdr sh;
738 		Elf_Data *data;
739 
740 		idx++;
741 		if (gelf_getshdr(scn, &sh) != &sh) {
742 			pr_warning("failed to get section(%d) header from %s\n",
743 				   idx, obj->path);
744 			err = -LIBBPF_ERRNO__FORMAT;
745 			goto out;
746 		}
747 
748 		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
749 		if (!name) {
750 			pr_warning("failed to get section(%d) name from %s\n",
751 				   idx, obj->path);
752 			err = -LIBBPF_ERRNO__FORMAT;
753 			goto out;
754 		}
755 
756 		data = elf_getdata(scn, 0);
757 		if (!data) {
758 			pr_warning("failed to get section(%d) data from %s(%s)\n",
759 				   idx, name, obj->path);
760 			err = -LIBBPF_ERRNO__FORMAT;
761 			goto out;
762 		}
763 		pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
764 			 idx, name, (unsigned long)data->d_size,
765 			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
766 			 (int)sh.sh_type);
767 
768 		if (strcmp(name, "license") == 0)
769 			err = bpf_object__init_license(obj,
770 						       data->d_buf,
771 						       data->d_size);
772 		else if (strcmp(name, "version") == 0)
773 			err = bpf_object__init_kversion(obj,
774 							data->d_buf,
775 							data->d_size);
776 		else if (strcmp(name, "maps") == 0)
777 			obj->efile.maps_shndx = idx;
778 		else if (strcmp(name, BTF_ELF_SEC) == 0) {
779 			obj->btf = btf__new(data->d_buf, data->d_size,
780 					    __pr_debug);
781 			if (IS_ERR(obj->btf)) {
782 				pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
783 					   BTF_ELF_SEC, PTR_ERR(obj->btf));
784 				obj->btf = NULL;
785 			}
786 		} else if (sh.sh_type == SHT_SYMTAB) {
787 			if (obj->efile.symbols) {
788 				pr_warning("bpf: multiple SYMTAB in %s\n",
789 					   obj->path);
790 				err = -LIBBPF_ERRNO__FORMAT;
791 			} else {
792 				obj->efile.symbols = data;
793 				obj->efile.strtabidx = sh.sh_link;
794 			}
795 		} else if ((sh.sh_type == SHT_PROGBITS) &&
796 			   (sh.sh_flags & SHF_EXECINSTR) &&
797 			   (data->d_size > 0)) {
798 			if (strcmp(name, ".text") == 0)
799 				obj->efile.text_shndx = idx;
800 			err = bpf_object__add_program(obj, data->d_buf,
801 						      data->d_size, name, idx);
802 			if (err) {
803 				char errmsg[STRERR_BUFSIZE];
804 				char *cp = libbpf_strerror_r(-err, errmsg,
805 							     sizeof(errmsg));
806 
807 				pr_warning("failed to alloc program %s (%s): %s",
808 					   name, obj->path, cp);
809 			}
810 		} else if (sh.sh_type == SHT_REL) {
811 			void *reloc = obj->efile.reloc;
812 			int nr_reloc = obj->efile.nr_reloc + 1;
813 			int sec = sh.sh_info; /* points to other section */
814 
815 			/* Only do relo for section with exec instructions */
816 			if (!section_have_execinstr(obj, sec)) {
817 				pr_debug("skip relo %s(%d) for section(%d)\n",
818 					 name, idx, sec);
819 				continue;
820 			}
821 
822 			reloc = reallocarray(reloc, nr_reloc,
823 					     sizeof(*obj->efile.reloc));
824 			if (!reloc) {
825 				pr_warning("realloc failed\n");
826 				err = -ENOMEM;
827 			} else {
828 				int n = nr_reloc - 1;
829 
830 				obj->efile.reloc = reloc;
831 				obj->efile.nr_reloc = nr_reloc;
832 
833 				obj->efile.reloc[n].shdr = sh;
834 				obj->efile.reloc[n].data = data;
835 			}
836 		} else {
837 			pr_debug("skip section(%d) %s\n", idx, name);
838 		}
839 		if (err)
840 			goto out;
841 	}
842 
843 	if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
844 		pr_warning("Corrupted ELF file: index of strtab invalid\n");
845 		return LIBBPF_ERRNO__FORMAT;
846 	}
847 	if (obj->efile.maps_shndx >= 0) {
848 		err = bpf_object__init_maps(obj, flags);
849 		if (err)
850 			goto out;
851 	}
852 	err = bpf_object__init_prog_names(obj);
853 out:
854 	return err;
855 }
856 
857 static struct bpf_program *
858 bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
859 {
860 	struct bpf_program *prog;
861 	size_t i;
862 
863 	for (i = 0; i < obj->nr_programs; i++) {
864 		prog = &obj->programs[i];
865 		if (prog->idx == idx)
866 			return prog;
867 	}
868 	return NULL;
869 }
870 
871 struct bpf_program *
872 bpf_object__find_program_by_title(struct bpf_object *obj, const char *title)
873 {
874 	struct bpf_program *pos;
875 
876 	bpf_object__for_each_program(pos, obj) {
877 		if (pos->section_name && !strcmp(pos->section_name, title))
878 			return pos;
879 	}
880 	return NULL;
881 }
882 
883 static int
884 bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
885 			   Elf_Data *data, struct bpf_object *obj)
886 {
887 	Elf_Data *symbols = obj->efile.symbols;
888 	int text_shndx = obj->efile.text_shndx;
889 	int maps_shndx = obj->efile.maps_shndx;
890 	struct bpf_map *maps = obj->maps;
891 	size_t nr_maps = obj->nr_maps;
892 	int i, nrels;
893 
894 	pr_debug("collecting relocating info for: '%s'\n",
895 		 prog->section_name);
896 	nrels = shdr->sh_size / shdr->sh_entsize;
897 
898 	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
899 	if (!prog->reloc_desc) {
900 		pr_warning("failed to alloc memory in relocation\n");
901 		return -ENOMEM;
902 	}
903 	prog->nr_reloc = nrels;
904 
905 	for (i = 0; i < nrels; i++) {
906 		GElf_Sym sym;
907 		GElf_Rel rel;
908 		unsigned int insn_idx;
909 		struct bpf_insn *insns = prog->insns;
910 		size_t map_idx;
911 
912 		if (!gelf_getrel(data, i, &rel)) {
913 			pr_warning("relocation: failed to get %d reloc\n", i);
914 			return -LIBBPF_ERRNO__FORMAT;
915 		}
916 
917 		if (!gelf_getsym(symbols,
918 				 GELF_R_SYM(rel.r_info),
919 				 &sym)) {
920 			pr_warning("relocation: symbol %"PRIx64" not found\n",
921 				   GELF_R_SYM(rel.r_info));
922 			return -LIBBPF_ERRNO__FORMAT;
923 		}
924 		pr_debug("relo for %lld value %lld name %d\n",
925 			 (long long) (rel.r_info >> 32),
926 			 (long long) sym.st_value, sym.st_name);
927 
928 		if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
929 			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
930 				   prog->section_name, sym.st_shndx);
931 			return -LIBBPF_ERRNO__RELOC;
932 		}
933 
934 		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
935 		pr_debug("relocation: insn_idx=%u\n", insn_idx);
936 
937 		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
938 			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
939 				pr_warning("incorrect bpf_call opcode\n");
940 				return -LIBBPF_ERRNO__RELOC;
941 			}
942 			prog->reloc_desc[i].type = RELO_CALL;
943 			prog->reloc_desc[i].insn_idx = insn_idx;
944 			prog->reloc_desc[i].text_off = sym.st_value;
945 			obj->has_pseudo_calls = true;
946 			continue;
947 		}
948 
949 		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
950 			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
951 				   insn_idx, insns[insn_idx].code);
952 			return -LIBBPF_ERRNO__RELOC;
953 		}
954 
955 		/* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
956 		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
957 			if (maps[map_idx].offset == sym.st_value) {
958 				pr_debug("relocation: find map %zd (%s) for insn %u\n",
959 					 map_idx, maps[map_idx].name, insn_idx);
960 				break;
961 			}
962 		}
963 
964 		if (map_idx >= nr_maps) {
965 			pr_warning("bpf relocation: map_idx %d large than %d\n",
966 				   (int)map_idx, (int)nr_maps - 1);
967 			return -LIBBPF_ERRNO__RELOC;
968 		}
969 
970 		prog->reloc_desc[i].type = RELO_LD64;
971 		prog->reloc_desc[i].insn_idx = insn_idx;
972 		prog->reloc_desc[i].map_idx = map_idx;
973 	}
974 	return 0;
975 }
976 
977 static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
978 {
979 	const struct btf_type *container_type;
980 	const struct btf_member *key, *value;
981 	struct bpf_map_def *def = &map->def;
982 	const size_t max_name = 256;
983 	char container_name[max_name];
984 	__s64 key_size, value_size;
985 	__s32 container_id;
986 
987 	if (snprintf(container_name, max_name, "____btf_map_%s", map->name) ==
988 	    max_name) {
989 		pr_warning("map:%s length of '____btf_map_%s' is too long\n",
990 			   map->name, map->name);
991 		return -EINVAL;
992 	}
993 
994 	container_id = btf__find_by_name(btf, container_name);
995 	if (container_id < 0) {
996 		pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
997 			 map->name, container_name);
998 		return container_id;
999 	}
1000 
1001 	container_type = btf__type_by_id(btf, container_id);
1002 	if (!container_type) {
1003 		pr_warning("map:%s cannot find BTF type for container_id:%u\n",
1004 			   map->name, container_id);
1005 		return -EINVAL;
1006 	}
1007 
1008 	if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
1009 	    BTF_INFO_VLEN(container_type->info) < 2) {
1010 		pr_warning("map:%s container_name:%s is an invalid container struct\n",
1011 			   map->name, container_name);
1012 		return -EINVAL;
1013 	}
1014 
1015 	key = (struct btf_member *)(container_type + 1);
1016 	value = key + 1;
1017 
1018 	key_size = btf__resolve_size(btf, key->type);
1019 	if (key_size < 0) {
1020 		pr_warning("map:%s invalid BTF key_type_size\n",
1021 			   map->name);
1022 		return key_size;
1023 	}
1024 
1025 	if (def->key_size != key_size) {
1026 		pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
1027 			   map->name, (__u32)key_size, def->key_size);
1028 		return -EINVAL;
1029 	}
1030 
1031 	value_size = btf__resolve_size(btf, value->type);
1032 	if (value_size < 0) {
1033 		pr_warning("map:%s invalid BTF value_type_size\n", map->name);
1034 		return value_size;
1035 	}
1036 
1037 	if (def->value_size != value_size) {
1038 		pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
1039 			   map->name, (__u32)value_size, def->value_size);
1040 		return -EINVAL;
1041 	}
1042 
1043 	map->btf_key_type_id = key->type;
1044 	map->btf_value_type_id = value->type;
1045 
1046 	return 0;
1047 }
1048 
1049 int bpf_map__reuse_fd(struct bpf_map *map, int fd)
1050 {
1051 	struct bpf_map_info info = {};
1052 	__u32 len = sizeof(info);
1053 	int new_fd, err;
1054 	char *new_name;
1055 
1056 	err = bpf_obj_get_info_by_fd(fd, &info, &len);
1057 	if (err)
1058 		return err;
1059 
1060 	new_name = strdup(info.name);
1061 	if (!new_name)
1062 		return -errno;
1063 
1064 	new_fd = open("/", O_RDONLY | O_CLOEXEC);
1065 	if (new_fd < 0)
1066 		goto err_free_new_name;
1067 
1068 	new_fd = dup3(fd, new_fd, O_CLOEXEC);
1069 	if (new_fd < 0)
1070 		goto err_close_new_fd;
1071 
1072 	err = zclose(map->fd);
1073 	if (err)
1074 		goto err_close_new_fd;
1075 	free(map->name);
1076 
1077 	map->fd = new_fd;
1078 	map->name = new_name;
1079 	map->def.type = info.type;
1080 	map->def.key_size = info.key_size;
1081 	map->def.value_size = info.value_size;
1082 	map->def.max_entries = info.max_entries;
1083 	map->def.map_flags = info.map_flags;
1084 	map->btf_key_type_id = info.btf_key_type_id;
1085 	map->btf_value_type_id = info.btf_value_type_id;
1086 
1087 	return 0;
1088 
1089 err_close_new_fd:
1090 	close(new_fd);
1091 err_free_new_name:
1092 	free(new_name);
1093 	return -errno;
1094 }
1095 
1096 static int
1097 bpf_object__create_maps(struct bpf_object *obj)
1098 {
1099 	struct bpf_create_map_attr create_attr = {};
1100 	unsigned int i;
1101 	int err;
1102 
1103 	for (i = 0; i < obj->nr_maps; i++) {
1104 		struct bpf_map *map = &obj->maps[i];
1105 		struct bpf_map_def *def = &map->def;
1106 		char *cp, errmsg[STRERR_BUFSIZE];
1107 		int *pfd = &map->fd;
1108 
1109 		if (map->fd >= 0) {
1110 			pr_debug("skip map create (preset) %s: fd=%d\n",
1111 				 map->name, map->fd);
1112 			continue;
1113 		}
1114 
1115 		create_attr.name = map->name;
1116 		create_attr.map_ifindex = map->map_ifindex;
1117 		create_attr.map_type = def->type;
1118 		create_attr.map_flags = def->map_flags;
1119 		create_attr.key_size = def->key_size;
1120 		create_attr.value_size = def->value_size;
1121 		create_attr.max_entries = def->max_entries;
1122 		create_attr.btf_fd = 0;
1123 		create_attr.btf_key_type_id = 0;
1124 		create_attr.btf_value_type_id = 0;
1125 
1126 		if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
1127 			create_attr.btf_fd = btf__fd(obj->btf);
1128 			create_attr.btf_key_type_id = map->btf_key_type_id;
1129 			create_attr.btf_value_type_id = map->btf_value_type_id;
1130 		}
1131 
1132 		*pfd = bpf_create_map_xattr(&create_attr);
1133 		if (*pfd < 0 && create_attr.btf_key_type_id) {
1134 			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1135 			pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
1136 				   map->name, cp, errno);
1137 			create_attr.btf_fd = 0;
1138 			create_attr.btf_key_type_id = 0;
1139 			create_attr.btf_value_type_id = 0;
1140 			map->btf_key_type_id = 0;
1141 			map->btf_value_type_id = 0;
1142 			*pfd = bpf_create_map_xattr(&create_attr);
1143 		}
1144 
1145 		if (*pfd < 0) {
1146 			size_t j;
1147 
1148 			err = *pfd;
1149 			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1150 			pr_warning("failed to create map (name: '%s'): %s\n",
1151 				   map->name, cp);
1152 			for (j = 0; j < i; j++)
1153 				zclose(obj->maps[j].fd);
1154 			return err;
1155 		}
1156 		pr_debug("create map %s: fd=%d\n", map->name, *pfd);
1157 	}
1158 
1159 	return 0;
1160 }
1161 
/*
 * Resolve a pseudo-call relocation in @prog.
 *
 * On the first call relocation seen for a program, the whole .text
 * section (which holds all sub-functions) is appended to the program's
 * instruction array; subsequent calls reuse that copy.  The call
 * instruction's immediate is then rewritten to be PC-relative into the
 * appended .text copy.  Returns 0 or a negative LIBBPF_ERRNO__* code.
 */
static int
bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
			struct reloc_desc *relo)
{
	struct bpf_insn *insn, *new_insn;
	struct bpf_program *text;
	size_t new_cnt;

	if (relo->type != RELO_CALL)
		return -LIBBPF_ERRNO__RELOC;

	/* .text itself must not contain call relocations into .text */
	if (prog->idx == obj->efile.text_shndx) {
		pr_warning("relo in .text insn %d into off %d\n",
			   relo->insn_idx, relo->text_off);
		return -LIBBPF_ERRNO__RELOC;
	}

	/* main_prog_cnt == 0 means .text has not been appended yet */
	if (prog->main_prog_cnt == 0) {
		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
		if (!text) {
			pr_warning("no .text section found yet relo into text exist\n");
			return -LIBBPF_ERRNO__RELOC;
		}
		new_cnt = prog->insns_cnt + text->insns_cnt;
		new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
		if (!new_insn) {
			pr_warning("oom in prog realloc\n");
			return -ENOMEM;
		}
		/* append a private copy of .text after the main program */
		memcpy(new_insn + prog->insns_cnt, text->insns,
		       text->insns_cnt * sizeof(*insn));
		prog->insns = new_insn;
		prog->main_prog_cnt = prog->insns_cnt;
		prog->insns_cnt = new_cnt;
		pr_debug("added %zd insn from %s to prog %s\n",
			 text->insns_cnt, text->section_name,
			 prog->section_name);
	}
	/* make the call target relative to the appended .text copy */
	insn = &prog->insns[relo->insn_idx];
	insn->imm += prog->main_prog_cnt - relo->insn_idx;
	return 0;
}
1204 
1205 static int
1206 bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
1207 {
1208 	int i, err;
1209 
1210 	if (!prog || !prog->reloc_desc)
1211 		return 0;
1212 
1213 	for (i = 0; i < prog->nr_reloc; i++) {
1214 		if (prog->reloc_desc[i].type == RELO_LD64) {
1215 			struct bpf_insn *insns = prog->insns;
1216 			int insn_idx, map_idx;
1217 
1218 			insn_idx = prog->reloc_desc[i].insn_idx;
1219 			map_idx = prog->reloc_desc[i].map_idx;
1220 
1221 			if (insn_idx >= (int)prog->insns_cnt) {
1222 				pr_warning("relocation out of range: '%s'\n",
1223 					   prog->section_name);
1224 				return -LIBBPF_ERRNO__RELOC;
1225 			}
1226 			insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
1227 			insns[insn_idx].imm = obj->maps[map_idx].fd;
1228 		} else {
1229 			err = bpf_program__reloc_text(prog, obj,
1230 						      &prog->reloc_desc[i]);
1231 			if (err)
1232 				return err;
1233 		}
1234 	}
1235 
1236 	zfree(&prog->reloc_desc);
1237 	prog->nr_reloc = 0;
1238 	return 0;
1239 }
1240 
1241 
1242 static int
1243 bpf_object__relocate(struct bpf_object *obj)
1244 {
1245 	struct bpf_program *prog;
1246 	size_t i;
1247 	int err;
1248 
1249 	for (i = 0; i < obj->nr_programs; i++) {
1250 		prog = &obj->programs[i];
1251 
1252 		err = bpf_program__relocate(prog, obj);
1253 		if (err) {
1254 			pr_warning("failed to relocate '%s'\n",
1255 				   prog->section_name);
1256 			return err;
1257 		}
1258 	}
1259 	return 0;
1260 }
1261 
1262 static int bpf_object__collect_reloc(struct bpf_object *obj)
1263 {
1264 	int i, err;
1265 
1266 	if (!obj_elf_valid(obj)) {
1267 		pr_warning("Internal error: elf object is closed\n");
1268 		return -LIBBPF_ERRNO__INTERNAL;
1269 	}
1270 
1271 	for (i = 0; i < obj->efile.nr_reloc; i++) {
1272 		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
1273 		Elf_Data *data = obj->efile.reloc[i].data;
1274 		int idx = shdr->sh_info;
1275 		struct bpf_program *prog;
1276 
1277 		if (shdr->sh_type != SHT_REL) {
1278 			pr_warning("internal error at %d\n", __LINE__);
1279 			return -LIBBPF_ERRNO__INTERNAL;
1280 		}
1281 
1282 		prog = bpf_object__find_prog_by_idx(obj, idx);
1283 		if (!prog) {
1284 			pr_warning("relocation failed: no section(%d)\n", idx);
1285 			return -LIBBPF_ERRNO__RELOC;
1286 		}
1287 
1288 		err = bpf_program__collect_reloc(prog,
1289 						 shdr, data,
1290 						 obj);
1291 		if (err)
1292 			return err;
1293 	}
1294 	return 0;
1295 }
1296 
/*
 * Load a single BPF program image into the kernel.
 *
 * On success *pfd receives the new program fd and 0 is returned.  On
 * failure a negative LIBBPF_ERRNO__* code is returned, and heuristics
 * classify the failure: verifier rejection (non-empty log), program
 * too large, wrong program type (probed by retrying as a kprobe), or
 * kernel version mismatch.
 */
static int
load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
	     const char *name, struct bpf_insn *insns, int insns_cnt,
	     char *license, __u32 kern_version, int *pfd, int prog_ifindex)
{
	struct bpf_load_program_attr load_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	char *log_buf;
	int ret;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = type;
	load_attr.expected_attach_type = expected_attach_type;
	load_attr.name = name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;
	load_attr.prog_ifindex = prog_ifindex;

	if (!load_attr.insns || !load_attr.insns_cnt)
		return -EINVAL;

	/* the verifier log is optional: on OOM we load without it and
	 * just lose diagnostics
	 */
	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	ret = -LIBBPF_ERRNO__LOAD;
	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
	pr_warning("load bpf program failed: %s\n", cp);

	if (log_buf && log_buf[0] != '\0') {
		/* a non-empty log means the verifier ran and rejected us */
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%zu insns), at most %d insns\n",
			   load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? */
		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			/* probe: if the same insns load fine as a kprobe,
			 * the requested program type was the problem
			 */
			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
			load_attr.expected_attach_type = 0;
			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		/* empty log with the right type: likely kversion mismatch */
		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}
1368 
/*
 * Load all instances of @prog into the kernel.
 *
 * Without a preprocessor a single instance is loaded straight from
 * prog->insns.  With a preprocessor, each instance's instructions are
 * produced by the prep callback first; an instance may opt out by
 * returning no instructions.  prog->insns is freed afterwards in both
 * cases, so a program can only be loaded once.
 */
int
bpf_program__load(struct bpf_program *prog,
		  char *license, __u32 kern_version)
{
	int err = 0, fd, i;

	/* lazily set up the single-instance fd array */
	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			/* set_prep() should have allocated the fds already */
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	/* preprocessor path: load one kernel program per instance */
	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		/* no instructions returned: instance deliberately skipped */
		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
					i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	/* original instructions are no longer needed once loaded */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
1451 
1452 static bool bpf_program__is_function_storage(struct bpf_program *prog,
1453 					     struct bpf_object *obj)
1454 {
1455 	return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
1456 }
1457 
1458 static int
1459 bpf_object__load_progs(struct bpf_object *obj)
1460 {
1461 	size_t i;
1462 	int err;
1463 
1464 	for (i = 0; i < obj->nr_programs; i++) {
1465 		if (bpf_program__is_function_storage(&obj->programs[i], obj))
1466 			continue;
1467 		err = bpf_program__load(&obj->programs[i],
1468 					obj->license,
1469 					obj->kern_version);
1470 		if (err)
1471 			return err;
1472 	}
1473 	return 0;
1474 }
1475 
/*
 * Return whether programs of @type require a kernel version section in
 * the object file.  Networking program types never do; tracing types
 * (UNSPEC, KPROBE, TRACEPOINT, PERF_EVENT, RAW_TRACEPOINT) and any
 * unknown type conservatively do.
 */
static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
		return false;
	default:
		return true;
	}
}
1507 
1508 static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
1509 {
1510 	if (needs_kver && obj->kern_version == 0) {
1511 		pr_warning("%s doesn't provide kernel version\n",
1512 			   obj->path);
1513 		return -LIBBPF_ERRNO__KVERSION;
1514 	}
1515 	return 0;
1516 }
1517 
/*
 * Core open path shared by file- and buffer-based entry points: parse
 * the ELF, collect programs/maps/relocations, and validate.  The ELF
 * handle is released before returning; only bpf_object state survives.
 * Returns a bpf_object or an ERR_PTR-encoded error.
 */
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
		   bool needs_kver, int flags)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	/* each step sets err and jumps to out on failure */
	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);

	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}
1546 
1547 struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
1548 					    int flags)
1549 {
1550 	/* param validation */
1551 	if (!attr->file)
1552 		return NULL;
1553 
1554 	pr_debug("loading %s\n", attr->file);
1555 
1556 	return __bpf_object__open(attr->file, NULL, 0,
1557 				  bpf_prog_type__needs_kver(attr->prog_type),
1558 				  flags);
1559 }
1560 
/* Public attr-based open: no extra flags. */
struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
{
	return __bpf_object__open_xattr(attr, 0);
}
1565 
1566 struct bpf_object *bpf_object__open(const char *path)
1567 {
1568 	struct bpf_object_open_attr attr = {
1569 		.file		= path,
1570 		.prog_type	= BPF_PROG_TYPE_UNSPEC,
1571 	};
1572 
1573 	return bpf_object__open_xattr(&attr);
1574 }
1575 
/*
 * Open a BPF object from an in-memory ELF image.  If no @name is
 * given, one is synthesized from the buffer address and size so the
 * object is still identifiable in logs.
 */
struct bpf_object *bpf_object__open_buffer(void *obj_buf,
					   size_t obj_buf_sz,
					   const char *name)
{
	char tmp_name[64];

	/* param validation */
	if (!obj_buf || obj_buf_sz <= 0)
		return NULL;

	if (!name) {
		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
			 (unsigned long)obj_buf,
			 (unsigned long)obj_buf_sz);
		tmp_name[sizeof(tmp_name) - 1] = '\0';
		name = tmp_name;
	}
	pr_debug("loading object '%s' from buffer\n",
		 name);

	/* NOTE(review): the last argument is __bpf_object__open()'s int
	 * "flags"; "true" here sets flag bit 0 — presumably deliberate
	 * for buffer-based callers, but confirm against the flag
	 * definitions.
	 */
	return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
}
1598 
1599 int bpf_object__unload(struct bpf_object *obj)
1600 {
1601 	size_t i;
1602 
1603 	if (!obj)
1604 		return -EINVAL;
1605 
1606 	for (i = 0; i < obj->nr_maps; i++)
1607 		zclose(obj->maps[i].fd);
1608 
1609 	for (i = 0; i < obj->nr_programs; i++)
1610 		bpf_program__unload(&obj->programs[i]);
1611 
1612 	return 0;
1613 }
1614 
1615 int bpf_object__load(struct bpf_object *obj)
1616 {
1617 	int err;
1618 
1619 	if (!obj)
1620 		return -EINVAL;
1621 
1622 	if (obj->loaded) {
1623 		pr_warning("object should not be loaded twice\n");
1624 		return -EINVAL;
1625 	}
1626 
1627 	obj->loaded = true;
1628 
1629 	CHECK_ERR(bpf_object__create_maps(obj), err, out);
1630 	CHECK_ERR(bpf_object__relocate(obj), err, out);
1631 	CHECK_ERR(bpf_object__load_progs(obj), err, out);
1632 
1633 	return 0;
1634 out:
1635 	bpf_object__unload(obj);
1636 	pr_warning("failed to load object '%s'\n", obj->path);
1637 	return err;
1638 }
1639 
1640 static int check_path(const char *path)
1641 {
1642 	char *cp, errmsg[STRERR_BUFSIZE];
1643 	struct statfs st_fs;
1644 	char *dname, *dir;
1645 	int err = 0;
1646 
1647 	if (path == NULL)
1648 		return -EINVAL;
1649 
1650 	dname = strdup(path);
1651 	if (dname == NULL)
1652 		return -ENOMEM;
1653 
1654 	dir = dirname(dname);
1655 	if (statfs(dir, &st_fs)) {
1656 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1657 		pr_warning("failed to statfs %s: %s\n", dir, cp);
1658 		err = -errno;
1659 	}
1660 	free(dname);
1661 
1662 	if (!err && st_fs.f_type != BPF_FS_MAGIC) {
1663 		pr_warning("specified path %s is not on BPF FS\n", path);
1664 		err = -EINVAL;
1665 	}
1666 
1667 	return err;
1668 }
1669 
1670 int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
1671 			      int instance)
1672 {
1673 	char *cp, errmsg[STRERR_BUFSIZE];
1674 	int err;
1675 
1676 	err = check_path(path);
1677 	if (err)
1678 		return err;
1679 
1680 	if (prog == NULL) {
1681 		pr_warning("invalid program pointer\n");
1682 		return -EINVAL;
1683 	}
1684 
1685 	if (instance < 0 || instance >= prog->instances.nr) {
1686 		pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1687 			   instance, prog->section_name, prog->instances.nr);
1688 		return -EINVAL;
1689 	}
1690 
1691 	if (bpf_obj_pin(prog->instances.fds[instance], path)) {
1692 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1693 		pr_warning("failed to pin program: %s\n", cp);
1694 		return -errno;
1695 	}
1696 	pr_debug("pinned program '%s'\n", path);
1697 
1698 	return 0;
1699 }
1700 
1701 static int make_dir(const char *path)
1702 {
1703 	char *cp, errmsg[STRERR_BUFSIZE];
1704 	int err = 0;
1705 
1706 	if (mkdir(path, 0700) && errno != EEXIST)
1707 		err = -errno;
1708 
1709 	if (err) {
1710 		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
1711 		pr_warning("failed to mkdir %s: %s\n", path, cp);
1712 	}
1713 	return err;
1714 }
1715 
1716 int bpf_program__pin(struct bpf_program *prog, const char *path)
1717 {
1718 	int i, err;
1719 
1720 	err = check_path(path);
1721 	if (err)
1722 		return err;
1723 
1724 	if (prog == NULL) {
1725 		pr_warning("invalid program pointer\n");
1726 		return -EINVAL;
1727 	}
1728 
1729 	if (prog->instances.nr <= 0) {
1730 		pr_warning("no instances of prog %s to pin\n",
1731 			   prog->section_name);
1732 		return -EINVAL;
1733 	}
1734 
1735 	err = make_dir(path);
1736 	if (err)
1737 		return err;
1738 
1739 	for (i = 0; i < prog->instances.nr; i++) {
1740 		char buf[PATH_MAX];
1741 		int len;
1742 
1743 		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
1744 		if (len < 0)
1745 			return -EINVAL;
1746 		else if (len >= PATH_MAX)
1747 			return -ENAMETOOLONG;
1748 
1749 		err = bpf_program__pin_instance(prog, buf, i);
1750 		if (err)
1751 			return err;
1752 	}
1753 
1754 	return 0;
1755 }
1756 
1757 int bpf_map__pin(struct bpf_map *map, const char *path)
1758 {
1759 	char *cp, errmsg[STRERR_BUFSIZE];
1760 	int err;
1761 
1762 	err = check_path(path);
1763 	if (err)
1764 		return err;
1765 
1766 	if (map == NULL) {
1767 		pr_warning("invalid map pointer\n");
1768 		return -EINVAL;
1769 	}
1770 
1771 	if (bpf_obj_pin(map->fd, path)) {
1772 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1773 		pr_warning("failed to pin map: %s\n", cp);
1774 		return -errno;
1775 	}
1776 
1777 	pr_debug("pinned map '%s'\n", path);
1778 	return 0;
1779 }
1780 
1781 int bpf_object__pin(struct bpf_object *obj, const char *path)
1782 {
1783 	struct bpf_program *prog;
1784 	struct bpf_map *map;
1785 	int err;
1786 
1787 	if (!obj)
1788 		return -ENOENT;
1789 
1790 	if (!obj->loaded) {
1791 		pr_warning("object not yet loaded; load it first\n");
1792 		return -ENOENT;
1793 	}
1794 
1795 	err = make_dir(path);
1796 	if (err)
1797 		return err;
1798 
1799 	bpf_map__for_each(map, obj) {
1800 		char buf[PATH_MAX];
1801 		int len;
1802 
1803 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
1804 			       bpf_map__name(map));
1805 		if (len < 0)
1806 			return -EINVAL;
1807 		else if (len >= PATH_MAX)
1808 			return -ENAMETOOLONG;
1809 
1810 		err = bpf_map__pin(map, buf);
1811 		if (err)
1812 			return err;
1813 	}
1814 
1815 	bpf_object__for_each_program(prog, obj) {
1816 		char buf[PATH_MAX];
1817 		int len;
1818 
1819 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
1820 			       prog->section_name);
1821 		if (len < 0)
1822 			return -EINVAL;
1823 		else if (len >= PATH_MAX)
1824 			return -ENAMETOOLONG;
1825 
1826 		err = bpf_program__pin(prog, buf);
1827 		if (err)
1828 			return err;
1829 	}
1830 
1831 	return 0;
1832 }
1833 
/*
 * Destroy @obj: run private-data destructors, release ELF state,
 * unload everything from the kernel, free BTF, maps and programs, and
 * finally remove the object from the global list and free it.
 */
void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	/* object-level private data first, while obj is still intact */
	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);
	btf__free(obj->btf);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		/* per-map private data destructor, if registered */
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}
	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	/* unlink from the global bpf_objects_list */
	list_del(&obj->list);
	free(obj);
}
1868 
1869 struct bpf_object *
1870 bpf_object__next(struct bpf_object *prev)
1871 {
1872 	struct bpf_object *next;
1873 
1874 	if (!prev)
1875 		next = list_first_entry(&bpf_objects_list,
1876 					struct bpf_object,
1877 					list);
1878 	else
1879 		next = list_next_entry(prev, list);
1880 
1881 	/* Empty list is noticed here so don't need checking on entry. */
1882 	if (&next->list == &bpf_objects_list)
1883 		return NULL;
1884 
1885 	return next;
1886 }
1887 
1888 const char *bpf_object__name(struct bpf_object *obj)
1889 {
1890 	return obj ? obj->path : ERR_PTR(-EINVAL);
1891 }
1892 
1893 unsigned int bpf_object__kversion(struct bpf_object *obj)
1894 {
1895 	return obj ? obj->kern_version : 0;
1896 }
1897 
1898 int bpf_object__btf_fd(const struct bpf_object *obj)
1899 {
1900 	return obj->btf ? btf__fd(obj->btf) : -1;
1901 }
1902 
1903 int bpf_object__set_priv(struct bpf_object *obj, void *priv,
1904 			 bpf_object_clear_priv_t clear_priv)
1905 {
1906 	if (obj->priv && obj->clear_priv)
1907 		obj->clear_priv(obj, obj->priv);
1908 
1909 	obj->priv = priv;
1910 	obj->clear_priv = clear_priv;
1911 	return 0;
1912 }
1913 
1914 void *bpf_object__priv(struct bpf_object *obj)
1915 {
1916 	return obj ? obj->priv : ERR_PTR(-EINVAL);
1917 }
1918 
1919 static struct bpf_program *
1920 __bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
1921 {
1922 	size_t idx;
1923 
1924 	if (!obj->programs)
1925 		return NULL;
1926 	/* First handler */
1927 	if (prev == NULL)
1928 		return &obj->programs[0];
1929 
1930 	if (prev->obj != obj) {
1931 		pr_warning("error: program handler doesn't match object\n");
1932 		return NULL;
1933 	}
1934 
1935 	idx = (prev - obj->programs) + 1;
1936 	if (idx >= obj->nr_programs)
1937 		return NULL;
1938 	return &obj->programs[idx];
1939 }
1940 
/* Public program iterator: skips the .text function-storage entry. */
struct bpf_program *
bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
{
	struct bpf_program *prog = __bpf_program__next(prev, obj);

	while (prog && bpf_program__is_function_storage(prog, obj))
		prog = __bpf_program__next(prog, obj);

	return prog;
}
1952 
1953 int bpf_program__set_priv(struct bpf_program *prog, void *priv,
1954 			  bpf_program_clear_priv_t clear_priv)
1955 {
1956 	if (prog->priv && prog->clear_priv)
1957 		prog->clear_priv(prog, prog->priv);
1958 
1959 	prog->priv = priv;
1960 	prog->clear_priv = clear_priv;
1961 	return 0;
1962 }
1963 
1964 void *bpf_program__priv(struct bpf_program *prog)
1965 {
1966 	return prog ? prog->priv : ERR_PTR(-EINVAL);
1967 }
1968 
1969 void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
1970 {
1971 	prog->prog_ifindex = ifindex;
1972 }
1973 
1974 const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
1975 {
1976 	const char *title;
1977 
1978 	title = prog->section_name;
1979 	if (needs_copy) {
1980 		title = strdup(title);
1981 		if (!title) {
1982 			pr_warning("failed to strdup program title\n");
1983 			return ERR_PTR(-ENOMEM);
1984 		}
1985 	}
1986 
1987 	return title;
1988 }
1989 
/* Return the fd of the program's first (0th) instance. */
int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}
1994 
1995 int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
1996 			  bpf_program_prep_t prep)
1997 {
1998 	int *instances_fds;
1999 
2000 	if (nr_instances <= 0 || !prep)
2001 		return -EINVAL;
2002 
2003 	if (prog->instances.nr > 0 || prog->instances.fds) {
2004 		pr_warning("Can't set pre-processor after loading\n");
2005 		return -EINVAL;
2006 	}
2007 
2008 	instances_fds = malloc(sizeof(int) * nr_instances);
2009 	if (!instances_fds) {
2010 		pr_warning("alloc memory failed for fds\n");
2011 		return -ENOMEM;
2012 	}
2013 
2014 	/* fill all fd with -1 */
2015 	memset(instances_fds, -1, sizeof(int) * nr_instances);
2016 
2017 	prog->instances.nr = nr_instances;
2018 	prog->instances.fds = instances_fds;
2019 	prog->preprocessor = prep;
2020 	return 0;
2021 }
2022 
2023 int bpf_program__nth_fd(struct bpf_program *prog, int n)
2024 {
2025 	int fd;
2026 
2027 	if (!prog)
2028 		return -EINVAL;
2029 
2030 	if (n >= prog->instances.nr || n < 0) {
2031 		pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
2032 			   n, prog->section_name, prog->instances.nr);
2033 		return -EINVAL;
2034 	}
2035 
2036 	fd = prog->instances.fds[n];
2037 	if (fd < 0) {
2038 		pr_warning("%dth instance of program '%s' is invalid\n",
2039 			   n, prog->section_name);
2040 		return -ENOENT;
2041 	}
2042 
2043 	return fd;
2044 }
2045 
2046 void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
2047 {
2048 	prog->type = type;
2049 }
2050 
2051 static bool bpf_program__is_type(struct bpf_program *prog,
2052 				 enum bpf_prog_type type)
2053 {
2054 	return prog ? (prog->type == type) : false;
2055 }
2056 
/*
 * Generate the bpf_program__set_<NAME>() / bpf_program__is_<NAME>()
 * accessor pair for one program type.
 */
#define BPF_PROG_TYPE_FNS(NAME, TYPE)			\
int bpf_program__set_##NAME(struct bpf_program *prog)	\
{							\
	if (!prog)					\
		return -EINVAL;				\
	bpf_program__set_type(prog, TYPE);		\
	return 0;					\
}							\
							\
bool bpf_program__is_##NAME(struct bpf_program *prog)	\
{							\
	return bpf_program__is_type(prog, TYPE);	\
}							\

BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
2079 
2080 void bpf_program__set_expected_attach_type(struct bpf_program *prog,
2081 					   enum bpf_attach_type type)
2082 {
2083 	prog->expected_attach_type = type;
2084 }
2085 
#define BPF_PROG_SEC_IMPL(string, ptype, eatype, atype) \
	{ string, sizeof(string) - 1, ptype, eatype, atype }

/* Programs that can NOT be attached. */
#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, -EINVAL)

/* Programs that can be attached. */
#define BPF_APROG_SEC(string, ptype, atype) \
	BPF_PROG_SEC_IMPL(string, ptype, 0, atype)

/* Programs that must specify expected attach type at load time. */
#define BPF_EAPROG_SEC(string, ptype, eatype) \
	BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype)

/* Programs that can be attached but attach type can't be identified by section
 * name. Kept for backward compatibility.
 */
#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)

/* Maps an ELF section-name prefix to program/attach types; consulted
 * by libbpf_prog_type_by_name() and libbpf_attach_type_by_name() via
 * prefix match (first match wins, so more specific entries come
 * before their prefixes, e.g. "cgroup_skb/ingress" before "cgroup/skb").
 */
static const struct {
	const char *sec;
	size_t len;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	enum bpf_attach_type attach_type;
} section_names[] = {
	BPF_PROG_SEC("socket",			BPF_PROG_TYPE_SOCKET_FILTER),
	BPF_PROG_SEC("kprobe/",			BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("kretprobe/",		BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("classifier",		BPF_PROG_TYPE_SCHED_CLS),
	BPF_PROG_SEC("action",			BPF_PROG_TYPE_SCHED_ACT),
	BPF_PROG_SEC("tracepoint/",		BPF_PROG_TYPE_TRACEPOINT),
	BPF_PROG_SEC("raw_tracepoint/",		BPF_PROG_TYPE_RAW_TRACEPOINT),
	BPF_PROG_SEC("xdp",			BPF_PROG_TYPE_XDP),
	BPF_PROG_SEC("perf_event",		BPF_PROG_TYPE_PERF_EVENT),
	BPF_PROG_SEC("lwt_in",			BPF_PROG_TYPE_LWT_IN),
	BPF_PROG_SEC("lwt_out",			BPF_PROG_TYPE_LWT_OUT),
	BPF_PROG_SEC("lwt_xmit",		BPF_PROG_TYPE_LWT_XMIT),
	BPF_PROG_SEC("lwt_seg6local",		BPF_PROG_TYPE_LWT_SEG6LOCAL),
	BPF_APROG_SEC("cgroup_skb/ingress",	BPF_PROG_TYPE_CGROUP_SKB,
						BPF_CGROUP_INET_INGRESS),
	BPF_APROG_SEC("cgroup_skb/egress",	BPF_PROG_TYPE_CGROUP_SKB,
						BPF_CGROUP_INET_EGRESS),
	BPF_APROG_COMPAT("cgroup/skb",		BPF_PROG_TYPE_CGROUP_SKB),
	BPF_APROG_SEC("cgroup/sock",		BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET_SOCK_CREATE),
	BPF_EAPROG_SEC("cgroup/post_bind4",	BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET4_POST_BIND),
	BPF_EAPROG_SEC("cgroup/post_bind6",	BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET6_POST_BIND),
	BPF_APROG_SEC("cgroup/dev",		BPF_PROG_TYPE_CGROUP_DEVICE,
						BPF_CGROUP_DEVICE),
	BPF_APROG_SEC("sockops",		BPF_PROG_TYPE_SOCK_OPS,
						BPF_CGROUP_SOCK_OPS),
	BPF_APROG_SEC("sk_skb/stream_parser",	BPF_PROG_TYPE_SK_SKB,
						BPF_SK_SKB_STREAM_PARSER),
	BPF_APROG_SEC("sk_skb/stream_verdict",	BPF_PROG_TYPE_SK_SKB,
						BPF_SK_SKB_STREAM_VERDICT),
	BPF_APROG_COMPAT("sk_skb",		BPF_PROG_TYPE_SK_SKB),
	BPF_APROG_SEC("sk_msg",			BPF_PROG_TYPE_SK_MSG,
						BPF_SK_MSG_VERDICT),
	BPF_APROG_SEC("lirc_mode2",		BPF_PROG_TYPE_LIRC_MODE2,
						BPF_LIRC_MODE2),
	BPF_APROG_SEC("flow_dissector",		BPF_PROG_TYPE_FLOW_DISSECTOR,
						BPF_FLOW_DISSECTOR),
	BPF_EAPROG_SEC("cgroup/bind4",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_BIND),
	BPF_EAPROG_SEC("cgroup/bind6",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_BIND),
	BPF_EAPROG_SEC("cgroup/connect4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_CONNECT),
	BPF_EAPROG_SEC("cgroup/connect6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_CONNECT),
	BPF_EAPROG_SEC("cgroup/sendmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP4_SENDMSG),
	BPF_EAPROG_SEC("cgroup/sendmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP6_SENDMSG),
};

#undef BPF_PROG_SEC_IMPL
#undef BPF_PROG_SEC
#undef BPF_APROG_SEC
#undef BPF_EAPROG_SEC
#undef BPF_APROG_COMPAT
2170 
2171 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
2172 			     enum bpf_attach_type *expected_attach_type)
2173 {
2174 	int i;
2175 
2176 	if (!name)
2177 		return -EINVAL;
2178 
2179 	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2180 		if (strncmp(name, section_names[i].sec, section_names[i].len))
2181 			continue;
2182 		*prog_type = section_names[i].prog_type;
2183 		*expected_attach_type = section_names[i].expected_attach_type;
2184 		return 0;
2185 	}
2186 	return -EINVAL;
2187 }
2188 
2189 int libbpf_attach_type_by_name(const char *name,
2190 			       enum bpf_attach_type *attach_type)
2191 {
2192 	int i;
2193 
2194 	if (!name)
2195 		return -EINVAL;
2196 
2197 	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2198 		if (strncmp(name, section_names[i].sec, section_names[i].len))
2199 			continue;
2200 		if (section_names[i].attach_type == -EINVAL)
2201 			return -EINVAL;
2202 		*attach_type = section_names[i].attach_type;
2203 		return 0;
2204 	}
2205 	return -EINVAL;
2206 }
2207 
2208 static int
2209 bpf_program__identify_section(struct bpf_program *prog,
2210 			      enum bpf_prog_type *prog_type,
2211 			      enum bpf_attach_type *expected_attach_type)
2212 {
2213 	return libbpf_prog_type_by_name(prog->section_name, prog_type,
2214 					expected_attach_type);
2215 }
2216 
2217 int bpf_map__fd(struct bpf_map *map)
2218 {
2219 	return map ? map->fd : -EINVAL;
2220 }
2221 
2222 const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
2223 {
2224 	return map ? &map->def : ERR_PTR(-EINVAL);
2225 }
2226 
2227 const char *bpf_map__name(struct bpf_map *map)
2228 {
2229 	return map ? map->name : NULL;
2230 }
2231 
2232 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
2233 {
2234 	return map ? map->btf_key_type_id : 0;
2235 }
2236 
2237 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
2238 {
2239 	return map ? map->btf_value_type_id : 0;
2240 }
2241 
2242 int bpf_map__set_priv(struct bpf_map *map, void *priv,
2243 		     bpf_map_clear_priv_t clear_priv)
2244 {
2245 	if (!map)
2246 		return -EINVAL;
2247 
2248 	if (map->priv) {
2249 		if (map->clear_priv)
2250 			map->clear_priv(map, map->priv);
2251 	}
2252 
2253 	map->priv = priv;
2254 	map->clear_priv = clear_priv;
2255 	return 0;
2256 }
2257 
2258 void *bpf_map__priv(struct bpf_map *map)
2259 {
2260 	return map ? map->priv : ERR_PTR(-EINVAL);
2261 }
2262 
2263 bool bpf_map__is_offload_neutral(struct bpf_map *map)
2264 {
2265 	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
2266 }
2267 
/* Bind the map to a network device (by ifindex) for hardware offload. */
void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
	map->map_ifindex = ifindex;
}
2272 
2273 struct bpf_map *
2274 bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
2275 {
2276 	size_t idx;
2277 	struct bpf_map *s, *e;
2278 
2279 	if (!obj || !obj->maps)
2280 		return NULL;
2281 
2282 	s = obj->maps;
2283 	e = obj->maps + obj->nr_maps;
2284 
2285 	if (prev == NULL)
2286 		return s;
2287 
2288 	if ((prev < s) || (prev >= e)) {
2289 		pr_warning("error in %s: map handler doesn't belong to object\n",
2290 			   __func__);
2291 		return NULL;
2292 	}
2293 
2294 	idx = (prev - obj->maps) + 1;
2295 	if (idx >= obj->nr_maps)
2296 		return NULL;
2297 	return &obj->maps[idx];
2298 }
2299 
2300 struct bpf_map *
2301 bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
2302 {
2303 	struct bpf_map *pos;
2304 
2305 	bpf_map__for_each(pos, obj) {
2306 		if (pos->name && !strcmp(pos->name, name))
2307 			return pos;
2308 	}
2309 	return NULL;
2310 }
2311 
2312 struct bpf_map *
2313 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
2314 {
2315 	int i;
2316 
2317 	for (i = 0; i < obj->nr_maps; i++) {
2318 		if (obj->maps[i].offset == offset)
2319 			return &obj->maps[i];
2320 	}
2321 	return ERR_PTR(-ENOENT);
2322 }
2323 
/*
 * Extract the negative errno encoded in a libbpf pointer return value,
 * or 0 when @ptr is a valid pointer.
 */
long libbpf_get_error(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}
2330 
2331 int bpf_prog_load(const char *file, enum bpf_prog_type type,
2332 		  struct bpf_object **pobj, int *prog_fd)
2333 {
2334 	struct bpf_prog_load_attr attr;
2335 
2336 	memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
2337 	attr.file = file;
2338 	attr.prog_type = type;
2339 	attr.expected_attach_type = 0;
2340 
2341 	return bpf_prog_load_xattr(&attr, pobj, prog_fd);
2342 }
2343 
2344 int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
2345 			struct bpf_object **pobj, int *prog_fd)
2346 {
2347 	struct bpf_object_open_attr open_attr = {
2348 		.file		= attr->file,
2349 		.prog_type	= attr->prog_type,
2350 	};
2351 	struct bpf_program *prog, *first_prog = NULL;
2352 	enum bpf_attach_type expected_attach_type;
2353 	enum bpf_prog_type prog_type;
2354 	struct bpf_object *obj;
2355 	struct bpf_map *map;
2356 	int err;
2357 
2358 	if (!attr)
2359 		return -EINVAL;
2360 	if (!attr->file)
2361 		return -EINVAL;
2362 
2363 	obj = bpf_object__open_xattr(&open_attr);
2364 	if (IS_ERR_OR_NULL(obj))
2365 		return -ENOENT;
2366 
2367 	bpf_object__for_each_program(prog, obj) {
2368 		/*
2369 		 * If type is not specified, try to guess it based on
2370 		 * section name.
2371 		 */
2372 		prog_type = attr->prog_type;
2373 		prog->prog_ifindex = attr->ifindex;
2374 		expected_attach_type = attr->expected_attach_type;
2375 		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
2376 			err = bpf_program__identify_section(prog, &prog_type,
2377 							    &expected_attach_type);
2378 			if (err < 0) {
2379 				pr_warning("failed to guess program type based on section name %s\n",
2380 					   prog->section_name);
2381 				bpf_object__close(obj);
2382 				return -EINVAL;
2383 			}
2384 		}
2385 
2386 		bpf_program__set_type(prog, prog_type);
2387 		bpf_program__set_expected_attach_type(prog,
2388 						      expected_attach_type);
2389 
2390 		if (!first_prog)
2391 			first_prog = prog;
2392 	}
2393 
2394 	bpf_map__for_each(map, obj) {
2395 		if (!bpf_map__is_offload_neutral(map))
2396 			map->map_ifindex = attr->ifindex;
2397 	}
2398 
2399 	if (!first_prog) {
2400 		pr_warning("object file doesn't contain bpf program\n");
2401 		bpf_object__close(obj);
2402 		return -ENOENT;
2403 	}
2404 
2405 	err = bpf_object__load(obj);
2406 	if (err) {
2407 		bpf_object__close(obj);
2408 		return -EINVAL;
2409 	}
2410 
2411 	*pobj = obj;
2412 	*prog_fd = bpf_program__fd(first_prog);
2413 	return 0;
2414 }
2415 
/*
 * Drain pending records from a perf event mmap ring buffer.
 *
 * @mem points at the perf mmap area: one control page (struct
 * perf_event_mmap_page, hence @page_size) followed by @size bytes of
 * record data.  Each record is handed to @fn with @priv; iteration
 * stops early when @fn returns anything other than
 * LIBBPF_PERF_EVENT_CONT.
 *
 * Records that wrap around the end of the ring are linearized into
 * *@buf, which is grown with malloc() as needed; *@buf_len tracks its
 * capacity and the caller owns (and eventually frees) the buffer.
 *
 * Returns the last value @fn returned, LIBBPF_PERF_EVENT_CONT when the
 * ring was empty, or LIBBPF_PERF_EVENT_ERROR on allocation failure.
 */
enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mem, unsigned long size,
			   unsigned long page_size, void **buf, size_t *buf_len,
			   bpf_perf_event_print_t fn, void *priv)
{
	volatile struct perf_event_mmap_page *header = mem;
	/* Snapshot head/tail: kernel advances data_head as producer,
	 * we advance data_tail as consumer.
	 */
	__u64 data_tail = header->data_tail;
	__u64 data_head = header->data_head;
	int ret = LIBBPF_PERF_EVENT_ERROR;
	void *base, *begin, *end;

	asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
	if (data_head == data_tail)
		return LIBBPF_PERF_EVENT_CONT;

	/* Record data starts one control page past the mmap base. */
	base = ((char *)header) + page_size;

	begin = base + data_tail % size;
	end = base + data_head % size;

	while (begin != end) {
		struct perf_event_header *ehdr;

		ehdr = begin;
		if (begin + ehdr->size > base + size) {
			/*
			 * Record wraps past the end of the ring: copy
			 * both pieces into the linear *buf, growing the
			 * buffer first if the record does not fit.
			 */
			long len = base + size - begin;

			if (*buf_len < ehdr->size) {
				free(*buf);
				*buf = malloc(ehdr->size);
				if (!*buf) {
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*buf_len = ehdr->size;
			}

			memcpy(*buf, begin, len);
			memcpy(*buf + len, base, ehdr->size - len);
			ehdr = (void *)*buf;
			begin = base + ehdr->size - len;
		} else if (begin + ehdr->size == base + size) {
			/* Record ends exactly at the ring's end: wrap. */
			begin = base;
		} else {
			begin += ehdr->size;
		}

		ret = fn(ehdr, priv);
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;

		/* Only records actually handed to @fn count as consumed. */
		data_tail += ehdr->size;
	}

	/* Publish the consumed position so the kernel may reuse the space. */
	__sync_synchronize(); /* smp_mb() */
	header->data_tail = data_tail;

	return ret;
}
2475