xref: /linux/tools/lib/bpf/libbpf.c (revision 77380998d91dee8aafdbe42634776ba1ef692f1e)
1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 
3 /*
4  * Common eBPF ELF object loading operations.
5  *
6  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8  * Copyright (C) 2015 Huawei Inc.
9  * Copyright (C) 2017 Nicira, Inc.
10  */
11 
12 #define _GNU_SOURCE
13 #include <stdlib.h>
14 #include <stdio.h>
15 #include <stdarg.h>
16 #include <libgen.h>
17 #include <inttypes.h>
18 #include <string.h>
19 #include <unistd.h>
20 #include <fcntl.h>
21 #include <errno.h>
22 #include <asm/unistd.h>
23 #include <linux/err.h>
24 #include <linux/kernel.h>
25 #include <linux/bpf.h>
26 #include <linux/btf.h>
27 #include <linux/list.h>
28 #include <linux/limits.h>
29 #include <linux/perf_event.h>
30 #include <linux/ring_buffer.h>
31 #include <sys/stat.h>
32 #include <sys/types.h>
33 #include <sys/vfs.h>
34 #include <tools/libc_compat.h>
35 #include <libelf.h>
36 #include <gelf.h>
37 
38 #include "libbpf.h"
39 #include "bpf.h"
40 #include "btf.h"
41 #include "str_error.h"
42 
/* ELF machine number for BPF; define locally for older elf.h headers. */
#ifndef EM_BPF
#define EM_BPF 247
#endif

/* Superblock magic of the BPF filesystem (bpffs). */
#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC		0xcafe4a11
#endif

/* Let the compiler type-check printf-style format strings/arguments. */
#define __printf(a, b)	__attribute__((format(printf, a, b)))
52 
53 __printf(1, 2)
54 static int __base_pr(const char *format, ...)
55 {
56 	va_list args;
57 	int err;
58 
59 	va_start(args, format);
60 	err = vfprintf(stderr, format, args);
61 	va_end(args);
62 	return err;
63 }
64 
/* Per-level print callbacks; warning and info default to stderr via
 * __base_pr(), debug output is disabled until libbpf_set_print()
 * installs a callback for it.
 */
static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_debug;

/* Emit through @func only if a callback is installed; every message
 * is prefixed with "libbpf: ".
 */
#define __pr(func, fmt, ...)	\
do {				\
	if ((func))		\
		(func)("libbpf: " fmt, ##__VA_ARGS__); \
} while (0)

#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)
78 
/*
 * Install the user's print callbacks for the three verbosity levels.
 * Passing NULL for a level silences it (see __pr()).
 */
void libbpf_set_print(libbpf_print_fn_t warn,
		      libbpf_print_fn_t info,
		      libbpf_print_fn_t debug)
{
	__pr_warning = warn;
	__pr_info = info;
	__pr_debug = debug;
}
87 
/* Buffer size used for libbpf_strerror_r() message formatting. */
#define STRERR_BUFSIZE  128

/* Run @action, store its result in @err, and jump to @out on failure. */
#define CHECK_ERR(action, err, out) do {	\
	err = action;			\
	if (err)			\
		goto out;		\
} while(0)
95 
96 
/* Copied from tools/perf/util/util.h */
#ifndef zfree
/* Free *ptr and NULL it out, guarding against dangling pointers and
 * double free.
 */
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
/* Close fd only if it is valid (>= 0), then reset it to -1; evaluates
 * to the close() result (0 when fd was already invalid).
 */
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

/* Prefer mmap-based ELF reading when libelf supports it. */
#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif
116 
/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	/* Symbol name of the program, or ".text" for the text section. */
	char *name;
	int prog_ifindex;
	char *section_name;
	/* section_name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;
	/* Copy of the instructions from the program's ELF section. */
	struct bpf_insn *insns;
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	/* Relocations collected from the matching SHT_REL section. */
	struct reloc_desc {
		enum {
			RELO_LD64,	/* map load: BPF_LD | BPF_IMM | BPF_DW */
			RELO_CALL,	/* bpf-to-bpf pseudo call into .text */
		} type;
		int insn_idx;
		union {
			int map_idx;	/* valid for RELO_LD64 */
			int text_off;	/* valid for RELO_CALL */
		};
	} *reloc_desc;
	int nr_reloc;

	/* Loaded program fds; nr stays -1 until the program is loaded. */
	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	/* Back pointer to the containing object. */
	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
};
160 
struct bpf_map {
	int fd;				/* -1 until created or reused */
	char *name;			/* symbol name from "maps" section */
	size_t offset;			/* symbol offset in "maps" section */
	int map_ifindex;
	struct bpf_map_def def;
	__u32 btf_key_type_id;		/* BTF type id of key, 0 if no BTF */
	__u32 btf_value_type_id;	/* BTF type id of value, 0 if no BTF */
	void *priv;
	bpf_map_clear_priv_t clear_priv;
};
172 
173 static LIST_HEAD(bpf_objects_list);
174 
struct bpf_object {
	char license[64];		/* NUL-terminated license string */
	__u32 kern_version;		/* from the "version" ELF section */

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;

	bool loaded;
	bool has_pseudo_calls;		/* any RELO_CALL relocation seen */

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		void *obj_buf;		/* caller-owned in-memory ELF image */
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;	/* SHT_SYMTAB data */
		size_t strtabidx;	/* string table section index */
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;		/* collected SHT_REL sections */
		int nr_reloc;
		int maps_shndx;		/* index of "maps" section, -1 if none */
		int text_shndx;		/* index of ".text" section */
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;		/* parsed .BTF section, or NULL */

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	char path[];			/* object file path (flexible array) */
};
/* True while the ELF state in ->efile is open and usable. */
#define obj_elf_valid(o)	((o)->efile.elf)
222 
223 void bpf_program__unload(struct bpf_program *prog)
224 {
225 	int i;
226 
227 	if (!prog)
228 		return;
229 
230 	/*
231 	 * If the object is opened but the program was never loaded,
232 	 * it is possible that prog->instances.nr == -1.
233 	 */
234 	if (prog->instances.nr > 0) {
235 		for (i = 0; i < prog->instances.nr; i++)
236 			zclose(prog->instances.fds[i]);
237 	} else if (prog->instances.nr != -1) {
238 		pr_warning("Internal error: instances.nr is %d\n",
239 			   prog->instances.nr);
240 	}
241 
242 	prog->instances.nr = -1;
243 	zfree(&prog->instances.fds);
244 }
245 
246 static void bpf_program__exit(struct bpf_program *prog)
247 {
248 	if (!prog)
249 		return;
250 
251 	if (prog->clear_priv)
252 		prog->clear_priv(prog, prog->priv);
253 
254 	prog->priv = NULL;
255 	prog->clear_priv = NULL;
256 
257 	bpf_program__unload(prog);
258 	zfree(&prog->name);
259 	zfree(&prog->section_name);
260 	zfree(&prog->pin_name);
261 	zfree(&prog->insns);
262 	zfree(&prog->reloc_desc);
263 
264 	prog->nr_reloc = 0;
265 	prog->insns_cnt = 0;
266 	prog->idx = -1;
267 }
268 
269 static char *__bpf_program__pin_name(struct bpf_program *prog)
270 {
271 	char *name, *p;
272 
273 	name = p = strdup(prog->section_name);
274 	while ((p = strchr(p, '/')))
275 		*p = '_';
276 
277 	return name;
278 }
279 
280 static int
281 bpf_program__init(void *data, size_t size, char *section_name, int idx,
282 		  struct bpf_program *prog)
283 {
284 	if (size < sizeof(struct bpf_insn)) {
285 		pr_warning("corrupted section '%s'\n", section_name);
286 		return -EINVAL;
287 	}
288 
289 	bzero(prog, sizeof(*prog));
290 
291 	prog->section_name = strdup(section_name);
292 	if (!prog->section_name) {
293 		pr_warning("failed to alloc name for prog under section(%d) %s\n",
294 			   idx, section_name);
295 		goto errout;
296 	}
297 
298 	prog->pin_name = __bpf_program__pin_name(prog);
299 	if (!prog->pin_name) {
300 		pr_warning("failed to alloc pin name for prog under section(%d) %s\n",
301 			   idx, section_name);
302 		goto errout;
303 	}
304 
305 	prog->insns = malloc(size);
306 	if (!prog->insns) {
307 		pr_warning("failed to alloc insns for prog under section %s\n",
308 			   section_name);
309 		goto errout;
310 	}
311 	prog->insns_cnt = size / sizeof(struct bpf_insn);
312 	memcpy(prog->insns, data,
313 	       prog->insns_cnt * sizeof(struct bpf_insn));
314 	prog->idx = idx;
315 	prog->instances.fds = NULL;
316 	prog->instances.nr = -1;
317 	prog->type = BPF_PROG_TYPE_KPROBE;
318 
319 	return 0;
320 errout:
321 	bpf_program__exit(prog);
322 	return -ENOMEM;
323 }
324 
325 static int
326 bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
327 			char *section_name, int idx)
328 {
329 	struct bpf_program prog, *progs;
330 	int nr_progs, err;
331 
332 	err = bpf_program__init(data, size, section_name, idx, &prog);
333 	if (err)
334 		return err;
335 
336 	progs = obj->programs;
337 	nr_progs = obj->nr_programs;
338 
339 	progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
340 	if (!progs) {
341 		/*
342 		 * In this case the original obj->programs
343 		 * is still valid, so don't need special treat for
344 		 * bpf_close_object().
345 		 */
346 		pr_warning("failed to alloc a new program under section '%s'\n",
347 			   section_name);
348 		bpf_program__exit(&prog);
349 		return -ENOMEM;
350 	}
351 
352 	pr_debug("found program %s\n", prog.section_name);
353 	obj->programs = progs;
354 	obj->nr_programs = nr_progs + 1;
355 	prog.obj = obj;
356 	progs[nr_progs] = prog;
357 	return 0;
358 }
359 
/*
 * Resolve each program's name from the ELF symbol table: the first
 * STB_GLOBAL symbol whose section index matches the program's section.
 * Programs in .text may legitimately lack such a symbol and fall back
 * to the literal name ".text".
 *
 * Returns 0 on success, negative libbpf/errno code otherwise.
 */
static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		/* Scan symbols until a global one in prog's section is found. */
		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warning("failed to get sym name string for prog %s\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		/* Subprograms in .text have no global symbol of their own. */
		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warning("failed to find sym for prog %s\n",
				   prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warning("failed to allocate memory for prog sym %s\n",
				   name);
			return -ENOMEM;
		}
	}

	return 0;
}
412 
413 static struct bpf_object *bpf_object__new(const char *path,
414 					  void *obj_buf,
415 					  size_t obj_buf_sz)
416 {
417 	struct bpf_object *obj;
418 
419 	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
420 	if (!obj) {
421 		pr_warning("alloc memory failed for %s\n", path);
422 		return ERR_PTR(-ENOMEM);
423 	}
424 
425 	strcpy(obj->path, path);
426 	obj->efile.fd = -1;
427 
428 	/*
429 	 * Caller of this function should also calls
430 	 * bpf_object__elf_finish() after data collection to return
431 	 * obj_buf to user. If not, we should duplicate the buffer to
432 	 * avoid user freeing them before elf finish.
433 	 */
434 	obj->efile.obj_buf = obj_buf;
435 	obj->efile.obj_buf_sz = obj_buf_sz;
436 	obj->efile.maps_shndx = -1;
437 
438 	obj->loaded = false;
439 
440 	INIT_LIST_HEAD(&obj->list);
441 	list_add(&obj->list, &bpf_objects_list);
442 	return obj;
443 }
444 
445 static void bpf_object__elf_finish(struct bpf_object *obj)
446 {
447 	if (!obj_elf_valid(obj))
448 		return;
449 
450 	if (obj->efile.elf) {
451 		elf_end(obj->efile.elf);
452 		obj->efile.elf = NULL;
453 	}
454 	obj->efile.symbols = NULL;
455 
456 	zfree(&obj->efile.reloc);
457 	obj->efile.nr_reloc = 0;
458 	zclose(obj->efile.fd);
459 	obj->efile.obj_buf = NULL;
460 	obj->efile.obj_buf_sz = 0;
461 }
462 
463 static int bpf_object__elf_init(struct bpf_object *obj)
464 {
465 	int err = 0;
466 	GElf_Ehdr *ep;
467 
468 	if (obj_elf_valid(obj)) {
469 		pr_warning("elf init: internal error\n");
470 		return -LIBBPF_ERRNO__LIBELF;
471 	}
472 
473 	if (obj->efile.obj_buf_sz > 0) {
474 		/*
475 		 * obj_buf should have been validated by
476 		 * bpf_object__open_buffer().
477 		 */
478 		obj->efile.elf = elf_memory(obj->efile.obj_buf,
479 					    obj->efile.obj_buf_sz);
480 	} else {
481 		obj->efile.fd = open(obj->path, O_RDONLY);
482 		if (obj->efile.fd < 0) {
483 			char errmsg[STRERR_BUFSIZE];
484 			char *cp = libbpf_strerror_r(errno, errmsg,
485 						     sizeof(errmsg));
486 
487 			pr_warning("failed to open %s: %s\n", obj->path, cp);
488 			return -errno;
489 		}
490 
491 		obj->efile.elf = elf_begin(obj->efile.fd,
492 				LIBBPF_ELF_C_READ_MMAP,
493 				NULL);
494 	}
495 
496 	if (!obj->efile.elf) {
497 		pr_warning("failed to open %s as ELF file\n",
498 				obj->path);
499 		err = -LIBBPF_ERRNO__LIBELF;
500 		goto errout;
501 	}
502 
503 	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
504 		pr_warning("failed to get EHDR from %s\n",
505 				obj->path);
506 		err = -LIBBPF_ERRNO__FORMAT;
507 		goto errout;
508 	}
509 	ep = &obj->efile.ehdr;
510 
511 	/* Old LLVM set e_machine to EM_NONE */
512 	if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
513 		pr_warning("%s is not an eBPF object file\n",
514 			obj->path);
515 		err = -LIBBPF_ERRNO__FORMAT;
516 		goto errout;
517 	}
518 
519 	return 0;
520 errout:
521 	bpf_object__elf_finish(obj);
522 	return err;
523 }
524 
525 static int
526 bpf_object__check_endianness(struct bpf_object *obj)
527 {
528 	static unsigned int const endian = 1;
529 
530 	switch (obj->efile.ehdr.e_ident[EI_DATA]) {
531 	case ELFDATA2LSB:
532 		/* We are big endian, BPF obj is little endian. */
533 		if (*(unsigned char const *)&endian != 1)
534 			goto mismatch;
535 		break;
536 
537 	case ELFDATA2MSB:
538 		/* We are little endian, BPF obj is big endian. */
539 		if (*(unsigned char const *)&endian != 0)
540 			goto mismatch;
541 		break;
542 	default:
543 		return -LIBBPF_ERRNO__ENDIAN;
544 	}
545 
546 	return 0;
547 
548 mismatch:
549 	pr_warning("Error: endianness mismatch.\n");
550 	return -LIBBPF_ERRNO__ENDIAN;
551 }
552 
553 static int
554 bpf_object__init_license(struct bpf_object *obj,
555 			 void *data, size_t size)
556 {
557 	memcpy(obj->license, data,
558 	       min(size, sizeof(obj->license) - 1));
559 	pr_debug("license of %s is %s\n", obj->path, obj->license);
560 	return 0;
561 }
562 
563 static int
564 bpf_object__init_kversion(struct bpf_object *obj,
565 			  void *data, size_t size)
566 {
567 	__u32 kver;
568 
569 	if (size != sizeof(kver)) {
570 		pr_warning("invalid kver section in %s\n", obj->path);
571 		return -LIBBPF_ERRNO__FORMAT;
572 	}
573 	memcpy(&kver, data, sizeof(kver));
574 	obj->kern_version = kver;
575 	pr_debug("kernel version of %s is %x\n", obj->path,
576 		 obj->kern_version);
577 	return 0;
578 }
579 
580 static int compare_bpf_map(const void *_a, const void *_b)
581 {
582 	const struct bpf_map *a = _a;
583 	const struct bpf_map *b = _b;
584 
585 	return a->offset - b->offset;
586 }
587 
588 static int
589 bpf_object__init_maps(struct bpf_object *obj, int flags)
590 {
591 	bool strict = !(flags & MAPS_RELAX_COMPAT);
592 	int i, map_idx, map_def_sz, nr_maps = 0;
593 	Elf_Scn *scn;
594 	Elf_Data *data;
595 	Elf_Data *symbols = obj->efile.symbols;
596 
597 	if (obj->efile.maps_shndx < 0)
598 		return -EINVAL;
599 	if (!symbols)
600 		return -EINVAL;
601 
602 	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
603 	if (scn)
604 		data = elf_getdata(scn, NULL);
605 	if (!scn || !data) {
606 		pr_warning("failed to get Elf_Data from map section %d\n",
607 			   obj->efile.maps_shndx);
608 		return -EINVAL;
609 	}
610 
611 	/*
612 	 * Count number of maps. Each map has a name.
613 	 * Array of maps is not supported: only the first element is
614 	 * considered.
615 	 *
616 	 * TODO: Detect array of map and report error.
617 	 */
618 	for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
619 		GElf_Sym sym;
620 
621 		if (!gelf_getsym(symbols, i, &sym))
622 			continue;
623 		if (sym.st_shndx != obj->efile.maps_shndx)
624 			continue;
625 		nr_maps++;
626 	}
627 
628 	/* Alloc obj->maps and fill nr_maps. */
629 	pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
630 		 nr_maps, data->d_size);
631 
632 	if (!nr_maps)
633 		return 0;
634 
635 	/* Assume equally sized map definitions */
636 	map_def_sz = data->d_size / nr_maps;
637 	if (!data->d_size || (data->d_size % nr_maps) != 0) {
638 		pr_warning("unable to determine map definition size "
639 			   "section %s, %d maps in %zd bytes\n",
640 			   obj->path, nr_maps, data->d_size);
641 		return -EINVAL;
642 	}
643 
644 	obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
645 	if (!obj->maps) {
646 		pr_warning("alloc maps for object failed\n");
647 		return -ENOMEM;
648 	}
649 	obj->nr_maps = nr_maps;
650 
651 	/*
652 	 * fill all fd with -1 so won't close incorrect
653 	 * fd (fd=0 is stdin) when failure (zclose won't close
654 	 * negative fd)).
655 	 */
656 	for (i = 0; i < nr_maps; i++)
657 		obj->maps[i].fd = -1;
658 
659 	/*
660 	 * Fill obj->maps using data in "maps" section.
661 	 */
662 	for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
663 		GElf_Sym sym;
664 		const char *map_name;
665 		struct bpf_map_def *def;
666 
667 		if (!gelf_getsym(symbols, i, &sym))
668 			continue;
669 		if (sym.st_shndx != obj->efile.maps_shndx)
670 			continue;
671 
672 		map_name = elf_strptr(obj->efile.elf,
673 				      obj->efile.strtabidx,
674 				      sym.st_name);
675 		obj->maps[map_idx].offset = sym.st_value;
676 		if (sym.st_value + map_def_sz > data->d_size) {
677 			pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
678 				   obj->path, map_name);
679 			return -EINVAL;
680 		}
681 
682 		obj->maps[map_idx].name = strdup(map_name);
683 		if (!obj->maps[map_idx].name) {
684 			pr_warning("failed to alloc map name\n");
685 			return -ENOMEM;
686 		}
687 		pr_debug("map %d is \"%s\"\n", map_idx,
688 			 obj->maps[map_idx].name);
689 		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
690 		/*
691 		 * If the definition of the map in the object file fits in
692 		 * bpf_map_def, copy it.  Any extra fields in our version
693 		 * of bpf_map_def will default to zero as a result of the
694 		 * calloc above.
695 		 */
696 		if (map_def_sz <= sizeof(struct bpf_map_def)) {
697 			memcpy(&obj->maps[map_idx].def, def, map_def_sz);
698 		} else {
699 			/*
700 			 * Here the map structure being read is bigger than what
701 			 * we expect, truncate if the excess bits are all zero.
702 			 * If they are not zero, reject this map as
703 			 * incompatible.
704 			 */
705 			char *b;
706 			for (b = ((char *)def) + sizeof(struct bpf_map_def);
707 			     b < ((char *)def) + map_def_sz; b++) {
708 				if (*b != 0) {
709 					pr_warning("maps section in %s: \"%s\" "
710 						   "has unrecognized, non-zero "
711 						   "options\n",
712 						   obj->path, map_name);
713 					if (strict)
714 						return -EINVAL;
715 				}
716 			}
717 			memcpy(&obj->maps[map_idx].def, def,
718 			       sizeof(struct bpf_map_def));
719 		}
720 		map_idx++;
721 	}
722 
723 	qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
724 	return 0;
725 }
726 
727 static bool section_have_execinstr(struct bpf_object *obj, int idx)
728 {
729 	Elf_Scn *scn;
730 	GElf_Shdr sh;
731 
732 	scn = elf_getscn(obj->efile.elf, idx);
733 	if (!scn)
734 		return false;
735 
736 	if (gelf_getshdr(scn, &sh) != &sh)
737 		return false;
738 
739 	if (sh.sh_flags & SHF_EXECINSTR)
740 		return true;
741 
742 	return false;
743 }
744 
745 static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
746 {
747 	Elf *elf = obj->efile.elf;
748 	GElf_Ehdr *ep = &obj->efile.ehdr;
749 	Elf_Scn *scn = NULL;
750 	int idx = 0, err = 0;
751 
752 	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
753 	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
754 		pr_warning("failed to get e_shstrndx from %s\n",
755 			   obj->path);
756 		return -LIBBPF_ERRNO__FORMAT;
757 	}
758 
759 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
760 		char *name;
761 		GElf_Shdr sh;
762 		Elf_Data *data;
763 
764 		idx++;
765 		if (gelf_getshdr(scn, &sh) != &sh) {
766 			pr_warning("failed to get section(%d) header from %s\n",
767 				   idx, obj->path);
768 			err = -LIBBPF_ERRNO__FORMAT;
769 			goto out;
770 		}
771 
772 		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
773 		if (!name) {
774 			pr_warning("failed to get section(%d) name from %s\n",
775 				   idx, obj->path);
776 			err = -LIBBPF_ERRNO__FORMAT;
777 			goto out;
778 		}
779 
780 		data = elf_getdata(scn, 0);
781 		if (!data) {
782 			pr_warning("failed to get section(%d) data from %s(%s)\n",
783 				   idx, name, obj->path);
784 			err = -LIBBPF_ERRNO__FORMAT;
785 			goto out;
786 		}
787 		pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
788 			 idx, name, (unsigned long)data->d_size,
789 			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
790 			 (int)sh.sh_type);
791 
792 		if (strcmp(name, "license") == 0)
793 			err = bpf_object__init_license(obj,
794 						       data->d_buf,
795 						       data->d_size);
796 		else if (strcmp(name, "version") == 0)
797 			err = bpf_object__init_kversion(obj,
798 							data->d_buf,
799 							data->d_size);
800 		else if (strcmp(name, "maps") == 0)
801 			obj->efile.maps_shndx = idx;
802 		else if (strcmp(name, BTF_ELF_SEC) == 0) {
803 			obj->btf = btf__new(data->d_buf, data->d_size,
804 					    __pr_debug);
805 			if (IS_ERR(obj->btf)) {
806 				pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
807 					   BTF_ELF_SEC, PTR_ERR(obj->btf));
808 				obj->btf = NULL;
809 			}
810 		} else if (sh.sh_type == SHT_SYMTAB) {
811 			if (obj->efile.symbols) {
812 				pr_warning("bpf: multiple SYMTAB in %s\n",
813 					   obj->path);
814 				err = -LIBBPF_ERRNO__FORMAT;
815 			} else {
816 				obj->efile.symbols = data;
817 				obj->efile.strtabidx = sh.sh_link;
818 			}
819 		} else if ((sh.sh_type == SHT_PROGBITS) &&
820 			   (sh.sh_flags & SHF_EXECINSTR) &&
821 			   (data->d_size > 0)) {
822 			if (strcmp(name, ".text") == 0)
823 				obj->efile.text_shndx = idx;
824 			err = bpf_object__add_program(obj, data->d_buf,
825 						      data->d_size, name, idx);
826 			if (err) {
827 				char errmsg[STRERR_BUFSIZE];
828 				char *cp = libbpf_strerror_r(-err, errmsg,
829 							     sizeof(errmsg));
830 
831 				pr_warning("failed to alloc program %s (%s): %s",
832 					   name, obj->path, cp);
833 			}
834 		} else if (sh.sh_type == SHT_REL) {
835 			void *reloc = obj->efile.reloc;
836 			int nr_reloc = obj->efile.nr_reloc + 1;
837 			int sec = sh.sh_info; /* points to other section */
838 
839 			/* Only do relo for section with exec instructions */
840 			if (!section_have_execinstr(obj, sec)) {
841 				pr_debug("skip relo %s(%d) for section(%d)\n",
842 					 name, idx, sec);
843 				continue;
844 			}
845 
846 			reloc = reallocarray(reloc, nr_reloc,
847 					     sizeof(*obj->efile.reloc));
848 			if (!reloc) {
849 				pr_warning("realloc failed\n");
850 				err = -ENOMEM;
851 			} else {
852 				int n = nr_reloc - 1;
853 
854 				obj->efile.reloc = reloc;
855 				obj->efile.nr_reloc = nr_reloc;
856 
857 				obj->efile.reloc[n].shdr = sh;
858 				obj->efile.reloc[n].data = data;
859 			}
860 		} else {
861 			pr_debug("skip section(%d) %s\n", idx, name);
862 		}
863 		if (err)
864 			goto out;
865 	}
866 
867 	if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
868 		pr_warning("Corrupted ELF file: index of strtab invalid\n");
869 		return LIBBPF_ERRNO__FORMAT;
870 	}
871 	if (obj->efile.maps_shndx >= 0) {
872 		err = bpf_object__init_maps(obj, flags);
873 		if (err)
874 			goto out;
875 	}
876 	err = bpf_object__init_prog_names(obj);
877 out:
878 	return err;
879 }
880 
881 static struct bpf_program *
882 bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
883 {
884 	struct bpf_program *prog;
885 	size_t i;
886 
887 	for (i = 0; i < obj->nr_programs; i++) {
888 		prog = &obj->programs[i];
889 		if (prog->idx == idx)
890 			return prog;
891 	}
892 	return NULL;
893 }
894 
895 struct bpf_program *
896 bpf_object__find_program_by_title(struct bpf_object *obj, const char *title)
897 {
898 	struct bpf_program *pos;
899 
900 	bpf_object__for_each_program(pos, obj) {
901 		if (pos->section_name && !strcmp(pos->section_name, title))
902 			return pos;
903 	}
904 	return NULL;
905 }
906 
907 static int
908 bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
909 			   Elf_Data *data, struct bpf_object *obj)
910 {
911 	Elf_Data *symbols = obj->efile.symbols;
912 	int text_shndx = obj->efile.text_shndx;
913 	int maps_shndx = obj->efile.maps_shndx;
914 	struct bpf_map *maps = obj->maps;
915 	size_t nr_maps = obj->nr_maps;
916 	int i, nrels;
917 
918 	pr_debug("collecting relocating info for: '%s'\n",
919 		 prog->section_name);
920 	nrels = shdr->sh_size / shdr->sh_entsize;
921 
922 	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
923 	if (!prog->reloc_desc) {
924 		pr_warning("failed to alloc memory in relocation\n");
925 		return -ENOMEM;
926 	}
927 	prog->nr_reloc = nrels;
928 
929 	for (i = 0; i < nrels; i++) {
930 		GElf_Sym sym;
931 		GElf_Rel rel;
932 		unsigned int insn_idx;
933 		struct bpf_insn *insns = prog->insns;
934 		size_t map_idx;
935 
936 		if (!gelf_getrel(data, i, &rel)) {
937 			pr_warning("relocation: failed to get %d reloc\n", i);
938 			return -LIBBPF_ERRNO__FORMAT;
939 		}
940 
941 		if (!gelf_getsym(symbols,
942 				 GELF_R_SYM(rel.r_info),
943 				 &sym)) {
944 			pr_warning("relocation: symbol %"PRIx64" not found\n",
945 				   GELF_R_SYM(rel.r_info));
946 			return -LIBBPF_ERRNO__FORMAT;
947 		}
948 		pr_debug("relo for %lld value %lld name %d\n",
949 			 (long long) (rel.r_info >> 32),
950 			 (long long) sym.st_value, sym.st_name);
951 
952 		if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
953 			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
954 				   prog->section_name, sym.st_shndx);
955 			return -LIBBPF_ERRNO__RELOC;
956 		}
957 
958 		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
959 		pr_debug("relocation: insn_idx=%u\n", insn_idx);
960 
961 		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
962 			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
963 				pr_warning("incorrect bpf_call opcode\n");
964 				return -LIBBPF_ERRNO__RELOC;
965 			}
966 			prog->reloc_desc[i].type = RELO_CALL;
967 			prog->reloc_desc[i].insn_idx = insn_idx;
968 			prog->reloc_desc[i].text_off = sym.st_value;
969 			obj->has_pseudo_calls = true;
970 			continue;
971 		}
972 
973 		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
974 			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
975 				   insn_idx, insns[insn_idx].code);
976 			return -LIBBPF_ERRNO__RELOC;
977 		}
978 
979 		/* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
980 		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
981 			if (maps[map_idx].offset == sym.st_value) {
982 				pr_debug("relocation: find map %zd (%s) for insn %u\n",
983 					 map_idx, maps[map_idx].name, insn_idx);
984 				break;
985 			}
986 		}
987 
988 		if (map_idx >= nr_maps) {
989 			pr_warning("bpf relocation: map_idx %d large than %d\n",
990 				   (int)map_idx, (int)nr_maps - 1);
991 			return -LIBBPF_ERRNO__RELOC;
992 		}
993 
994 		prog->reloc_desc[i].type = RELO_LD64;
995 		prog->reloc_desc[i].insn_idx = insn_idx;
996 		prog->reloc_desc[i].map_idx = map_idx;
997 	}
998 	return 0;
999 }
1000 
1001 static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
1002 {
1003 	const struct btf_type *container_type;
1004 	const struct btf_member *key, *value;
1005 	struct bpf_map_def *def = &map->def;
1006 	const size_t max_name = 256;
1007 	char container_name[max_name];
1008 	__s64 key_size, value_size;
1009 	__s32 container_id;
1010 
1011 	if (snprintf(container_name, max_name, "____btf_map_%s", map->name) ==
1012 	    max_name) {
1013 		pr_warning("map:%s length of '____btf_map_%s' is too long\n",
1014 			   map->name, map->name);
1015 		return -EINVAL;
1016 	}
1017 
1018 	container_id = btf__find_by_name(btf, container_name);
1019 	if (container_id < 0) {
1020 		pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
1021 			 map->name, container_name);
1022 		return container_id;
1023 	}
1024 
1025 	container_type = btf__type_by_id(btf, container_id);
1026 	if (!container_type) {
1027 		pr_warning("map:%s cannot find BTF type for container_id:%u\n",
1028 			   map->name, container_id);
1029 		return -EINVAL;
1030 	}
1031 
1032 	if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
1033 	    BTF_INFO_VLEN(container_type->info) < 2) {
1034 		pr_warning("map:%s container_name:%s is an invalid container struct\n",
1035 			   map->name, container_name);
1036 		return -EINVAL;
1037 	}
1038 
1039 	key = (struct btf_member *)(container_type + 1);
1040 	value = key + 1;
1041 
1042 	key_size = btf__resolve_size(btf, key->type);
1043 	if (key_size < 0) {
1044 		pr_warning("map:%s invalid BTF key_type_size\n",
1045 			   map->name);
1046 		return key_size;
1047 	}
1048 
1049 	if (def->key_size != key_size) {
1050 		pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
1051 			   map->name, (__u32)key_size, def->key_size);
1052 		return -EINVAL;
1053 	}
1054 
1055 	value_size = btf__resolve_size(btf, value->type);
1056 	if (value_size < 0) {
1057 		pr_warning("map:%s invalid BTF value_type_size\n", map->name);
1058 		return value_size;
1059 	}
1060 
1061 	if (def->value_size != value_size) {
1062 		pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
1063 			   map->name, (__u32)value_size, def->value_size);
1064 		return -EINVAL;
1065 	}
1066 
1067 	map->btf_key_type_id = key->type;
1068 	map->btf_value_type_id = value->type;
1069 
1070 	return 0;
1071 }
1072 
1073 int bpf_map__reuse_fd(struct bpf_map *map, int fd)
1074 {
1075 	struct bpf_map_info info = {};
1076 	__u32 len = sizeof(info);
1077 	int new_fd, err;
1078 	char *new_name;
1079 
1080 	err = bpf_obj_get_info_by_fd(fd, &info, &len);
1081 	if (err)
1082 		return err;
1083 
1084 	new_name = strdup(info.name);
1085 	if (!new_name)
1086 		return -errno;
1087 
1088 	new_fd = open("/", O_RDONLY | O_CLOEXEC);
1089 	if (new_fd < 0)
1090 		goto err_free_new_name;
1091 
1092 	new_fd = dup3(fd, new_fd, O_CLOEXEC);
1093 	if (new_fd < 0)
1094 		goto err_close_new_fd;
1095 
1096 	err = zclose(map->fd);
1097 	if (err)
1098 		goto err_close_new_fd;
1099 	free(map->name);
1100 
1101 	map->fd = new_fd;
1102 	map->name = new_name;
1103 	map->def.type = info.type;
1104 	map->def.key_size = info.key_size;
1105 	map->def.value_size = info.value_size;
1106 	map->def.max_entries = info.max_entries;
1107 	map->def.map_flags = info.map_flags;
1108 	map->btf_key_type_id = info.btf_key_type_id;
1109 	map->btf_value_type_id = info.btf_value_type_id;
1110 
1111 	return 0;
1112 
1113 err_close_new_fd:
1114 	close(new_fd);
1115 err_free_new_name:
1116 	free(new_name);
1117 	return -errno;
1118 }
1119 
1120 static int
1121 bpf_object__create_maps(struct bpf_object *obj)
1122 {
1123 	struct bpf_create_map_attr create_attr = {};
1124 	unsigned int i;
1125 	int err;
1126 
1127 	for (i = 0; i < obj->nr_maps; i++) {
1128 		struct bpf_map *map = &obj->maps[i];
1129 		struct bpf_map_def *def = &map->def;
1130 		char *cp, errmsg[STRERR_BUFSIZE];
1131 		int *pfd = &map->fd;
1132 
1133 		if (map->fd >= 0) {
1134 			pr_debug("skip map create (preset) %s: fd=%d\n",
1135 				 map->name, map->fd);
1136 			continue;
1137 		}
1138 
1139 		create_attr.name = map->name;
1140 		create_attr.map_ifindex = map->map_ifindex;
1141 		create_attr.map_type = def->type;
1142 		create_attr.map_flags = def->map_flags;
1143 		create_attr.key_size = def->key_size;
1144 		create_attr.value_size = def->value_size;
1145 		create_attr.max_entries = def->max_entries;
1146 		create_attr.btf_fd = 0;
1147 		create_attr.btf_key_type_id = 0;
1148 		create_attr.btf_value_type_id = 0;
1149 
1150 		if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
1151 			create_attr.btf_fd = btf__fd(obj->btf);
1152 			create_attr.btf_key_type_id = map->btf_key_type_id;
1153 			create_attr.btf_value_type_id = map->btf_value_type_id;
1154 		}
1155 
1156 		*pfd = bpf_create_map_xattr(&create_attr);
1157 		if (*pfd < 0 && create_attr.btf_key_type_id) {
1158 			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1159 			pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
1160 				   map->name, cp, errno);
1161 			create_attr.btf_fd = 0;
1162 			create_attr.btf_key_type_id = 0;
1163 			create_attr.btf_value_type_id = 0;
1164 			map->btf_key_type_id = 0;
1165 			map->btf_value_type_id = 0;
1166 			*pfd = bpf_create_map_xattr(&create_attr);
1167 		}
1168 
1169 		if (*pfd < 0) {
1170 			size_t j;
1171 
1172 			err = *pfd;
1173 			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1174 			pr_warning("failed to create map (name: '%s'): %s\n",
1175 				   map->name, cp);
1176 			for (j = 0; j < i; j++)
1177 				zclose(obj->maps[j].fd);
1178 			return err;
1179 		}
1180 		pr_debug("create map %s: fd=%d\n", map->name, *pfd);
1181 	}
1182 
1183 	return 0;
1184 }
1185 
/* Resolve one bpf-to-bpf call relocation in @prog: lazily append the whole
 * .text section (subprogram storage) to the program's instruction array,
 * then bias the call instruction's immediate so it targets the right
 * offset inside the appended copy. Returns 0 or -LIBBPF_ERRNO__*.
 */
static int
bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
			struct reloc_desc *relo)
{
	struct bpf_insn *insn, *new_insn;
	struct bpf_program *text;
	size_t new_cnt;

	if (relo->type != RELO_CALL)
		return -LIBBPF_ERRNO__RELOC;

	/* call relocations inside .text itself are not supported */
	if (prog->idx == obj->efile.text_shndx) {
		pr_warning("relo in .text insn %d into off %d\n",
			   relo->insn_idx, relo->text_off);
		return -LIBBPF_ERRNO__RELOC;
	}

	/* main_prog_cnt == 0 means .text has not been appended yet; do it
	 * exactly once and remember the original program length as the
	 * offset where the appended subprograms begin.
	 */
	if (prog->main_prog_cnt == 0) {
		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
		if (!text) {
			pr_warning("no .text section found yet relo into text exist\n");
			return -LIBBPF_ERRNO__RELOC;
		}
		new_cnt = prog->insns_cnt + text->insns_cnt;
		new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
		if (!new_insn) {
			pr_warning("oom in prog realloc\n");
			return -ENOMEM;
		}
		memcpy(new_insn + prog->insns_cnt, text->insns,
		       text->insns_cnt * sizeof(*insn));
		prog->insns = new_insn;
		prog->main_prog_cnt = prog->insns_cnt;
		prog->insns_cnt = new_cnt;
		pr_debug("added %zd insn from %s to prog %s\n",
			 text->insns_cnt, text->section_name,
			 prog->section_name);
	}
	/* Adjust the call's imm by the distance between the call site and
	 * the start of the appended .text copy (imm is presumably a
	 * next-insn-relative offset — matches kernel pseudo-call ABI).
	 */
	insn = &prog->insns[relo->insn_idx];
	insn->imm += prog->main_prog_cnt - relo->insn_idx;
	return 0;
}
1228 
1229 static int
1230 bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
1231 {
1232 	int i, err;
1233 
1234 	if (!prog || !prog->reloc_desc)
1235 		return 0;
1236 
1237 	for (i = 0; i < prog->nr_reloc; i++) {
1238 		if (prog->reloc_desc[i].type == RELO_LD64) {
1239 			struct bpf_insn *insns = prog->insns;
1240 			int insn_idx, map_idx;
1241 
1242 			insn_idx = prog->reloc_desc[i].insn_idx;
1243 			map_idx = prog->reloc_desc[i].map_idx;
1244 
1245 			if (insn_idx >= (int)prog->insns_cnt) {
1246 				pr_warning("relocation out of range: '%s'\n",
1247 					   prog->section_name);
1248 				return -LIBBPF_ERRNO__RELOC;
1249 			}
1250 			insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
1251 			insns[insn_idx].imm = obj->maps[map_idx].fd;
1252 		} else {
1253 			err = bpf_program__reloc_text(prog, obj,
1254 						      &prog->reloc_desc[i]);
1255 			if (err)
1256 				return err;
1257 		}
1258 	}
1259 
1260 	zfree(&prog->reloc_desc);
1261 	prog->nr_reloc = 0;
1262 	return 0;
1263 }
1264 
1265 
1266 static int
1267 bpf_object__relocate(struct bpf_object *obj)
1268 {
1269 	struct bpf_program *prog;
1270 	size_t i;
1271 	int err;
1272 
1273 	for (i = 0; i < obj->nr_programs; i++) {
1274 		prog = &obj->programs[i];
1275 
1276 		err = bpf_program__relocate(prog, obj);
1277 		if (err) {
1278 			pr_warning("failed to relocate '%s'\n",
1279 				   prog->section_name);
1280 			return err;
1281 		}
1282 	}
1283 	return 0;
1284 }
1285 
/* Walk every relocation section collected from the ELF file and record
 * the relocations on the program owning the section they target
 * (sh_info names the relocated section's index). Requires the ELF
 * handle to still be open.
 */
static int bpf_object__collect_reloc(struct bpf_object *obj)
{
	int i, err;

	if (!obj_elf_valid(obj)) {
		pr_warning("Internal error: elf object is closed\n");
		return -LIBBPF_ERRNO__INTERNAL;
	}

	for (i = 0; i < obj->efile.nr_reloc; i++) {
		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
		Elf_Data *data = obj->efile.reloc[i].data;
		int idx = shdr->sh_info;
		struct bpf_program *prog;

		/* only SHT_REL sections are expected here */
		if (shdr->sh_type != SHT_REL) {
			pr_warning("internal error at %d\n", __LINE__);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog = bpf_object__find_prog_by_idx(obj, idx);
		if (!prog) {
			pr_warning("relocation failed: no section(%d)\n", idx);
			return -LIBBPF_ERRNO__RELOC;
		}

		err = bpf_program__collect_reloc(prog,
						 shdr, data,
						 obj);
		if (err)
			return err;
	}
	return 0;
}
1320 
/* Load a single program image into the kernel via bpf_load_program_xattr().
 * On success stores the new fd in *pfd and returns 0. On failure tries to
 * classify the error into a -LIBBPF_ERRNO__* code:
 *   - non-empty verifier log          -> __VERIFY (log is dumped)
 *   - too many instructions           -> __PROG2BIG
 *   - loads fine as a kprobe program  -> __PROGTYPE (wrong prog type)
 *   - otherwise, with a log buffer    -> __KVER (presumed version mismatch)
 */
static int
load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
	     const char *name, struct bpf_insn *insns, int insns_cnt,
	     char *license, __u32 kern_version, int *pfd, int prog_ifindex)
{
	struct bpf_load_program_attr load_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	char *log_buf;
	int ret;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = type;
	load_attr.expected_attach_type = expected_attach_type;
	load_attr.name = name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;
	load_attr.prog_ifindex = prog_ifindex;

	if (!load_attr.insns || !load_attr.insns_cnt)
		return -EINVAL;

	/* losing the verifier log is not fatal; proceed without it */
	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	ret = -LIBBPF_ERRNO__LOAD;
	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
	pr_warning("load bpf program failed: %s\n", cp);

	if (log_buf && log_buf[0] != '\0') {
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%zu insns), at most %d insns\n",
			   load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? Probe by retrying as a kprobe program
		 * with no expected attach type; if that succeeds, the insns
		 * were fine and the original type was the problem.
		 */
		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
			load_attr.expected_attach_type = 0;
			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		/* empty log + right type: presumably a kernel version issue */
		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}
1392 
/* Load all instances of @prog into the kernel.
 *
 * Without a preprocessor there is exactly one instance, loaded from
 * prog->insns as-is. With a preprocessor (set via bpf_program__set_prep())
 * each instance's instructions are produced by the preprocessor callback;
 * an instance returning no instructions is skipped (fd stays -1).
 * prog->insns is freed afterwards in either case.
 */
int
bpf_program__load(struct bpf_program *prog,
		  char *license, __u32 kern_version)
{
	int err = 0, fd, i;

	/* no instance table yet: set up the default single instance */
	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			/* set_prep() allocates the table, so this can't happen */
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		/* warn but proceed using instance 0 only */
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		/* preprocessor produced nothing: skip this instance */
		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
					i, prog->section_name);
			goto out;
		}

		/* report the fd back through the preprocessor's out-param too */
		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	/* instructions are no longer needed once loaded (or failed) */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
1475 
1476 static bool bpf_program__is_function_storage(struct bpf_program *prog,
1477 					     struct bpf_object *obj)
1478 {
1479 	return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
1480 }
1481 
1482 static int
1483 bpf_object__load_progs(struct bpf_object *obj)
1484 {
1485 	size_t i;
1486 	int err;
1487 
1488 	for (i = 0; i < obj->nr_programs; i++) {
1489 		if (bpf_program__is_function_storage(&obj->programs[i], obj))
1490 			continue;
1491 		err = bpf_program__load(&obj->programs[i],
1492 					obj->license,
1493 					obj->kern_version);
1494 		if (err)
1495 			return err;
1496 	}
1497 	return 0;
1498 }
1499 
/* Whether an object of this program type must carry a kernel version.
 * The listed types load without one; everything else — including
 * kprobe/tracepoint/perf-event style programs, UNSPEC, and any future
 * type — requires it.
 */
static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
{
	static const enum bpf_prog_type no_kver[] = {
		BPF_PROG_TYPE_SOCKET_FILTER,
		BPF_PROG_TYPE_SCHED_CLS,
		BPF_PROG_TYPE_SCHED_ACT,
		BPF_PROG_TYPE_XDP,
		BPF_PROG_TYPE_CGROUP_SKB,
		BPF_PROG_TYPE_CGROUP_SOCK,
		BPF_PROG_TYPE_LWT_IN,
		BPF_PROG_TYPE_LWT_OUT,
		BPF_PROG_TYPE_LWT_XMIT,
		BPF_PROG_TYPE_LWT_SEG6LOCAL,
		BPF_PROG_TYPE_SOCK_OPS,
		BPF_PROG_TYPE_SK_SKB,
		BPF_PROG_TYPE_CGROUP_DEVICE,
		BPF_PROG_TYPE_SK_MSG,
		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		BPF_PROG_TYPE_LIRC_MODE2,
		BPF_PROG_TYPE_SK_REUSEPORT,
		BPF_PROG_TYPE_FLOW_DISSECTOR,
	};
	size_t i;

	for (i = 0; i < sizeof(no_kver) / sizeof(no_kver[0]); i++)
		if (type == no_kver[i])
			return false;
	return true;
}
1531 
1532 static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
1533 {
1534 	if (needs_kver && obj->kern_version == 0) {
1535 		pr_warning("%s doesn't provide kernel version\n",
1536 			   obj->path);
1537 		return -LIBBPF_ERRNO__KVERSION;
1538 	}
1539 	return 0;
1540 }
1541 
/* Open and parse a BPF ELF object, either from @path or from the memory
 * buffer @obj_buf/@obj_buf_sz. Performs ELF parsing, relocation
 * collection and validation, then releases the ELF handle. Returns the
 * object or an ERR_PTR(-LIBBPF_ERRNO__*). On any intermediate failure,
 * CHECK_ERR() stores the error in 'err' and jumps to 'out'.
 */
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
		   bool needs_kver, int flags)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);

	/* ELF data is fully digested; drop the libelf handle early */
	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}
1570 
1571 struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
1572 					    int flags)
1573 {
1574 	/* param validation */
1575 	if (!attr->file)
1576 		return NULL;
1577 
1578 	pr_debug("loading %s\n", attr->file);
1579 
1580 	return __bpf_object__open(attr->file, NULL, 0,
1581 				  bpf_prog_type__needs_kver(attr->prog_type),
1582 				  flags);
1583 }
1584 
/* Public wrapper: open with no extra flags. */
struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
{
	return __bpf_object__open_xattr(attr, /* flags */ 0);
}
1589 
1590 struct bpf_object *bpf_object__open(const char *path)
1591 {
1592 	struct bpf_object_open_attr attr = {
1593 		.file		= path,
1594 		.prog_type	= BPF_PROG_TYPE_UNSPEC,
1595 	};
1596 
1597 	return bpf_object__open_xattr(&attr);
1598 }
1599 
/* Open a BPF object from an in-memory ELF image. If @name is NULL, a
 * name of the form "<addr>-<size>" is synthesized from the buffer.
 * Note: the final 'true, true' passes needs_kver=true and flags=1 to
 * __bpf_object__open() — presumably intentional; confirm against the
 * flag definitions before changing.
 */
struct bpf_object *bpf_object__open_buffer(void *obj_buf,
					   size_t obj_buf_sz,
					   const char *name)
{
	char tmp_name[64];

	/* param validation */
	if (!obj_buf || obj_buf_sz <= 0)
		return NULL;

	if (!name) {
		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
			 (unsigned long)obj_buf,
			 (unsigned long)obj_buf_sz);
		/* snprintf NUL-terminates already; this is belt-and-braces */
		tmp_name[sizeof(tmp_name) - 1] = '\0';
		name = tmp_name;
	}
	pr_debug("loading object '%s' from buffer\n",
		 name);

	return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
}
1622 
1623 int bpf_object__unload(struct bpf_object *obj)
1624 {
1625 	size_t i;
1626 
1627 	if (!obj)
1628 		return -EINVAL;
1629 
1630 	for (i = 0; i < obj->nr_maps; i++)
1631 		zclose(obj->maps[i].fd);
1632 
1633 	for (i = 0; i < obj->nr_programs; i++)
1634 		bpf_program__unload(&obj->programs[i]);
1635 
1636 	return 0;
1637 }
1638 
/* Load a parsed object into the kernel: create maps, apply relocations,
 * then load programs. May be called only once per object. On failure,
 * CHECK_ERR() jumps to 'out' where everything already loaded is torn
 * down again.
 */
int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	/* mark before attempting, so a failed load can't be retried */
	obj->loaded = true;

	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}
1663 
1664 static int check_path(const char *path)
1665 {
1666 	char *cp, errmsg[STRERR_BUFSIZE];
1667 	struct statfs st_fs;
1668 	char *dname, *dir;
1669 	int err = 0;
1670 
1671 	if (path == NULL)
1672 		return -EINVAL;
1673 
1674 	dname = strdup(path);
1675 	if (dname == NULL)
1676 		return -ENOMEM;
1677 
1678 	dir = dirname(dname);
1679 	if (statfs(dir, &st_fs)) {
1680 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1681 		pr_warning("failed to statfs %s: %s\n", dir, cp);
1682 		err = -errno;
1683 	}
1684 	free(dname);
1685 
1686 	if (!err && st_fs.f_type != BPF_FS_MAGIC) {
1687 		pr_warning("specified path %s is not on BPF FS\n", path);
1688 		err = -EINVAL;
1689 	}
1690 
1691 	return err;
1692 }
1693 
1694 int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
1695 			      int instance)
1696 {
1697 	char *cp, errmsg[STRERR_BUFSIZE];
1698 	int err;
1699 
1700 	err = check_path(path);
1701 	if (err)
1702 		return err;
1703 
1704 	if (prog == NULL) {
1705 		pr_warning("invalid program pointer\n");
1706 		return -EINVAL;
1707 	}
1708 
1709 	if (instance < 0 || instance >= prog->instances.nr) {
1710 		pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1711 			   instance, prog->section_name, prog->instances.nr);
1712 		return -EINVAL;
1713 	}
1714 
1715 	if (bpf_obj_pin(prog->instances.fds[instance], path)) {
1716 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1717 		pr_warning("failed to pin program: %s\n", cp);
1718 		return -errno;
1719 	}
1720 	pr_debug("pinned program '%s'\n", path);
1721 
1722 	return 0;
1723 }
1724 
1725 int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
1726 				int instance)
1727 {
1728 	int err;
1729 
1730 	err = check_path(path);
1731 	if (err)
1732 		return err;
1733 
1734 	if (prog == NULL) {
1735 		pr_warning("invalid program pointer\n");
1736 		return -EINVAL;
1737 	}
1738 
1739 	if (instance < 0 || instance >= prog->instances.nr) {
1740 		pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1741 			   instance, prog->section_name, prog->instances.nr);
1742 		return -EINVAL;
1743 	}
1744 
1745 	err = unlink(path);
1746 	if (err != 0)
1747 		return -errno;
1748 	pr_debug("unpinned program '%s'\n", path);
1749 
1750 	return 0;
1751 }
1752 
1753 static int make_dir(const char *path)
1754 {
1755 	char *cp, errmsg[STRERR_BUFSIZE];
1756 	int err = 0;
1757 
1758 	if (mkdir(path, 0700) && errno != EEXIST)
1759 		err = -errno;
1760 
1761 	if (err) {
1762 		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
1763 		pr_warning("failed to mkdir %s: %s\n", path, cp);
1764 	}
1765 	return err;
1766 }
1767 
/* Pin all instances of @prog. A single-instance program is pinned
 * directly at @path; a multi-instance program gets a directory at @path
 * with one entry per instance ("<path>/<instance>"). On partial failure
 * every instance pinned so far is unpinned and the directory removed.
 */
int bpf_program__pin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warning("invalid program pointer\n");
		return -EINVAL;
	}

	if (prog->instances.nr <= 0) {
		pr_warning("no instances of prog %s to pin\n",
			   prog->section_name);
		return -EINVAL;
	}

	if (prog->instances.nr == 1) {
		/* don't create subdirs when pinning single instance */
		return bpf_program__pin_instance(prog, path, 0);
	}

	err = make_dir(path);
	if (err)
		return err;

	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin;
		}

		err = bpf_program__pin_instance(prog, buf, i);
		if (err)
			goto err_unpin;
	}

	return 0;

err_unpin:
	/* roll back: unpin instances [0, i) in reverse order; truncated
	 * paths are skipped here exactly as they were never pinned above
	 */
	for (i = i - 1; i >= 0; i--) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin_instance(prog, buf, i);
	}

	/* best-effort cleanup of the directory we created */
	rmdir(path);

	return err;
}
1834 
1835 int bpf_program__unpin(struct bpf_program *prog, const char *path)
1836 {
1837 	int i, err;
1838 
1839 	err = check_path(path);
1840 	if (err)
1841 		return err;
1842 
1843 	if (prog == NULL) {
1844 		pr_warning("invalid program pointer\n");
1845 		return -EINVAL;
1846 	}
1847 
1848 	if (prog->instances.nr <= 0) {
1849 		pr_warning("no instances of prog %s to pin\n",
1850 			   prog->section_name);
1851 		return -EINVAL;
1852 	}
1853 
1854 	if (prog->instances.nr == 1) {
1855 		/* don't create subdirs when pinning single instance */
1856 		return bpf_program__unpin_instance(prog, path, 0);
1857 	}
1858 
1859 	for (i = 0; i < prog->instances.nr; i++) {
1860 		char buf[PATH_MAX];
1861 		int len;
1862 
1863 		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
1864 		if (len < 0)
1865 			return -EINVAL;
1866 		else if (len >= PATH_MAX)
1867 			return -ENAMETOOLONG;
1868 
1869 		err = bpf_program__unpin_instance(prog, buf, i);
1870 		if (err)
1871 			return err;
1872 	}
1873 
1874 	err = rmdir(path);
1875 	if (err)
1876 		return -errno;
1877 
1878 	return 0;
1879 }
1880 
1881 int bpf_map__pin(struct bpf_map *map, const char *path)
1882 {
1883 	char *cp, errmsg[STRERR_BUFSIZE];
1884 	int err;
1885 
1886 	err = check_path(path);
1887 	if (err)
1888 		return err;
1889 
1890 	if (map == NULL) {
1891 		pr_warning("invalid map pointer\n");
1892 		return -EINVAL;
1893 	}
1894 
1895 	if (bpf_obj_pin(map->fd, path)) {
1896 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1897 		pr_warning("failed to pin map: %s\n", cp);
1898 		return -errno;
1899 	}
1900 
1901 	pr_debug("pinned map '%s'\n", path);
1902 
1903 	return 0;
1904 }
1905 
/* Remove @map's pin at @path. */
int bpf_map__unpin(struct bpf_map *map, const char *path)
{
	int ret;

	ret = check_path(path);
	if (ret)
		return ret;

	if (map == NULL) {
		pr_warning("invalid map pointer\n");
		return -EINVAL;
	}

	if (unlink(path) != 0)
		return -errno;
	pr_debug("unpinned map '%s'\n", path);

	return 0;
}
1926 
/* Pin every map of a loaded object under "<path>/<map name>". On partial
 * failure, everything pinned so far is unpinned again.
 */
int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
{
	struct bpf_map *map;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warning("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	err = make_dir(path);
	if (err)
		return err;

	bpf_map__for_each(map, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       bpf_map__name(map));
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_maps;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin_maps;
		}

		err = bpf_map__pin(map, buf);
		if (err)
			goto err_unpin_maps;
	}

	return 0;

err_unpin_maps:
	/* 'map' still points at the entry that failed; walk backwards from
	 * it, unpinning every map that was successfully pinned above
	 */
	while ((map = bpf_map__prev(map, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       bpf_map__name(map));
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_map__unpin(map, buf);
	}

	return err;
}
1982 
1983 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
1984 {
1985 	struct bpf_map *map;
1986 	int err;
1987 
1988 	if (!obj)
1989 		return -ENOENT;
1990 
1991 	bpf_map__for_each(map, obj) {
1992 		char buf[PATH_MAX];
1993 		int len;
1994 
1995 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
1996 			       bpf_map__name(map));
1997 		if (len < 0)
1998 			return -EINVAL;
1999 		else if (len >= PATH_MAX)
2000 			return -ENAMETOOLONG;
2001 
2002 		err = bpf_map__unpin(map, buf);
2003 		if (err)
2004 			return err;
2005 	}
2006 
2007 	return 0;
2008 }
2009 
/* Pin every program of a loaded object under "<path>/<pin name>". On
 * partial failure, everything pinned so far is unpinned again.
 */
int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warning("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	err = make_dir(path);
	if (err)
		return err;

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_programs;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin_programs;
		}

		err = bpf_program__pin(prog, buf);
		if (err)
			goto err_unpin_programs;
	}

	return 0;

err_unpin_programs:
	/* 'prog' still points at the entry that failed; walk backwards
	 * from it, unpinning every program pinned above
	 */
	while ((prog = bpf_program__prev(prog, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin(prog, buf);
	}

	return err;
}
2065 
2066 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
2067 {
2068 	struct bpf_program *prog;
2069 	int err;
2070 
2071 	if (!obj)
2072 		return -ENOENT;
2073 
2074 	bpf_object__for_each_program(prog, obj) {
2075 		char buf[PATH_MAX];
2076 		int len;
2077 
2078 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
2079 			       prog->pin_name);
2080 		if (len < 0)
2081 			return -EINVAL;
2082 		else if (len >= PATH_MAX)
2083 			return -ENAMETOOLONG;
2084 
2085 		err = bpf_program__unpin(prog, buf);
2086 		if (err)
2087 			return err;
2088 	}
2089 
2090 	return 0;
2091 }
2092 
/* Pin maps and programs of @obj under @path. If program pinning fails,
 * the already-pinned maps are rolled back.
 */
int bpf_object__pin(struct bpf_object *obj, const char *path)
{
	int ret;

	ret = bpf_object__pin_maps(obj, path);
	if (ret)
		return ret;

	ret = bpf_object__pin_programs(obj, path);
	if (ret)
		bpf_object__unpin_maps(obj, path);

	return ret;
}
2109 
/* Destroy @obj: run the private-data destructor, release ELF and kernel
 * resources, free BTF, per-map and per-program state, unlink the object
 * from the global list, and free the object itself.
 */
void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	/* user destructor first, while the object is still intact */
	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);
	btf__free(obj->btf);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		/* per-map private data destructors */
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}
	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	/* remove from the global bpf_objects_list before freeing */
	list_del(&obj->list);
	free(obj);
}
2144 
2145 struct bpf_object *
2146 bpf_object__next(struct bpf_object *prev)
2147 {
2148 	struct bpf_object *next;
2149 
2150 	if (!prev)
2151 		next = list_first_entry(&bpf_objects_list,
2152 					struct bpf_object,
2153 					list);
2154 	else
2155 		next = list_next_entry(prev, list);
2156 
2157 	/* Empty list is noticed here so don't need checking on entry. */
2158 	if (&next->list == &bpf_objects_list)
2159 		return NULL;
2160 
2161 	return next;
2162 }
2163 
2164 const char *bpf_object__name(struct bpf_object *obj)
2165 {
2166 	return obj ? obj->path : ERR_PTR(-EINVAL);
2167 }
2168 
2169 unsigned int bpf_object__kversion(struct bpf_object *obj)
2170 {
2171 	return obj ? obj->kern_version : 0;
2172 }
2173 
2174 int bpf_object__btf_fd(const struct bpf_object *obj)
2175 {
2176 	return obj->btf ? btf__fd(obj->btf) : -1;
2177 }
2178 
2179 int bpf_object__set_priv(struct bpf_object *obj, void *priv,
2180 			 bpf_object_clear_priv_t clear_priv)
2181 {
2182 	if (obj->priv && obj->clear_priv)
2183 		obj->clear_priv(obj, obj->priv);
2184 
2185 	obj->priv = priv;
2186 	obj->clear_priv = clear_priv;
2187 	return 0;
2188 }
2189 
2190 void *bpf_object__priv(struct bpf_object *obj)
2191 {
2192 	return obj ? obj->priv : ERR_PTR(-EINVAL);
2193 }
2194 
2195 static struct bpf_program *
2196 __bpf_program__iter(struct bpf_program *p, struct bpf_object *obj, int i)
2197 {
2198 	ssize_t idx;
2199 
2200 	if (!obj->programs)
2201 		return NULL;
2202 
2203 	if (p->obj != obj) {
2204 		pr_warning("error: program handler doesn't match object\n");
2205 		return NULL;
2206 	}
2207 
2208 	idx = (p - obj->programs) + i;
2209 	if (idx >= obj->nr_programs || idx < 0)
2210 		return NULL;
2211 	return &obj->programs[idx];
2212 }
2213 
2214 struct bpf_program *
2215 bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
2216 {
2217 	struct bpf_program *prog = prev;
2218 
2219 	if (prev == NULL)
2220 		return obj->programs;
2221 
2222 	do {
2223 		prog = __bpf_program__iter(prog, obj, 1);
2224 	} while (prog && bpf_program__is_function_storage(prog, obj));
2225 
2226 	return prog;
2227 }
2228 
2229 struct bpf_program *
2230 bpf_program__prev(struct bpf_program *next, struct bpf_object *obj)
2231 {
2232 	struct bpf_program *prog = next;
2233 
2234 	if (next == NULL) {
2235 		if (!obj->nr_programs)
2236 			return NULL;
2237 		return obj->programs + obj->nr_programs - 1;
2238 	}
2239 
2240 	do {
2241 		prog = __bpf_program__iter(prog, obj, -1);
2242 	} while (prog && bpf_program__is_function_storage(prog, obj));
2243 
2244 	return prog;
2245 }
2246 
2247 int bpf_program__set_priv(struct bpf_program *prog, void *priv,
2248 			  bpf_program_clear_priv_t clear_priv)
2249 {
2250 	if (prog->priv && prog->clear_priv)
2251 		prog->clear_priv(prog, prog->priv);
2252 
2253 	prog->priv = priv;
2254 	prog->clear_priv = clear_priv;
2255 	return 0;
2256 }
2257 
2258 void *bpf_program__priv(struct bpf_program *prog)
2259 {
2260 	return prog ? prog->priv : ERR_PTR(-EINVAL);
2261 }
2262 
2263 void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
2264 {
2265 	prog->prog_ifindex = ifindex;
2266 }
2267 
2268 const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
2269 {
2270 	const char *title;
2271 
2272 	title = prog->section_name;
2273 	if (needs_copy) {
2274 		title = strdup(title);
2275 		if (!title) {
2276 			pr_warning("failed to strdup program title\n");
2277 			return ERR_PTR(-ENOMEM);
2278 		}
2279 	}
2280 
2281 	return title;
2282 }
2283 
/* fd of the program's first (default) instance. */
int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, /* n */ 0);
}
2288 
2289 int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
2290 			  bpf_program_prep_t prep)
2291 {
2292 	int *instances_fds;
2293 
2294 	if (nr_instances <= 0 || !prep)
2295 		return -EINVAL;
2296 
2297 	if (prog->instances.nr > 0 || prog->instances.fds) {
2298 		pr_warning("Can't set pre-processor after loading\n");
2299 		return -EINVAL;
2300 	}
2301 
2302 	instances_fds = malloc(sizeof(int) * nr_instances);
2303 	if (!instances_fds) {
2304 		pr_warning("alloc memory failed for fds\n");
2305 		return -ENOMEM;
2306 	}
2307 
2308 	/* fill all fd with -1 */
2309 	memset(instances_fds, -1, sizeof(int) * nr_instances);
2310 
2311 	prog->instances.nr = nr_instances;
2312 	prog->instances.fds = instances_fds;
2313 	prog->preprocessor = prep;
2314 	return 0;
2315 }
2316 
2317 int bpf_program__nth_fd(struct bpf_program *prog, int n)
2318 {
2319 	int fd;
2320 
2321 	if (!prog)
2322 		return -EINVAL;
2323 
2324 	if (n >= prog->instances.nr || n < 0) {
2325 		pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
2326 			   n, prog->section_name, prog->instances.nr);
2327 		return -EINVAL;
2328 	}
2329 
2330 	fd = prog->instances.fds[n];
2331 	if (fd < 0) {
2332 		pr_warning("%dth instance of program '%s' is invalid\n",
2333 			   n, prog->section_name);
2334 		return -ENOENT;
2335 	}
2336 
2337 	return fd;
2338 }
2339 
2340 void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
2341 {
2342 	prog->type = type;
2343 }
2344 
2345 static bool bpf_program__is_type(struct bpf_program *prog,
2346 				 enum bpf_prog_type type)
2347 {
2348 	return prog ? (prog->type == type) : false;
2349 }
2350 
/* Generate the public bpf_program__set_<NAME>() / bpf_program__is_<NAME>()
 * pair for one program type: the setter stamps TYPE onto the program
 * (rejecting NULL), the predicate tests for exactly TYPE.
 */
#define BPF_PROG_TYPE_FNS(NAME, TYPE)			\
int bpf_program__set_##NAME(struct bpf_program *prog)	\
{							\
	if (!prog)					\
		return -EINVAL;				\
	bpf_program__set_type(prog, TYPE);		\
	return 0;					\
}							\
							\
bool bpf_program__is_##NAME(struct bpf_program *prog)	\
{							\
	return bpf_program__is_type(prog, TYPE);	\
}							\

/* One accessor pair per supported program type */
BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
2373 
2374 void bpf_program__set_expected_attach_type(struct bpf_program *prog,
2375 					   enum bpf_attach_type type)
2376 {
2377 	prog->expected_attach_type = type;
2378 }
2379 
#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \
	{ string, sizeof(string) - 1, ptype, eatype, is_attachable, atype }

/* Programs that can NOT be attached. */
#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0)

/* Programs that can be attached. */
#define BPF_APROG_SEC(string, ptype, atype) \
	BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype)

/* Programs that must specify expected attach type at load time. */
#define BPF_EAPROG_SEC(string, ptype, eatype) \
	BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype)

/* Programs that can be attached but attach type can't be identified by section
 * name. Kept for backward compatibility.
 */
#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)

/* Maps ELF section name prefixes to program/attach types.
 *
 * Lookups (libbpf_prog_type_by_name(), libbpf_attach_type_by_name()) do a
 * prefix match over .len and take the FIRST hit, so a more specific entry
 * (e.g. "sk_skb/stream_parser") must appear before its shorter prefix
 * (e.g. "sk_skb").
 */
static const struct {
	const char *sec;	/* section name prefix to match */
	size_t len;		/* strlen(sec), precomputed by the macros */
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	int is_attachable;	/* non-zero iff attach_type below is valid */
	enum bpf_attach_type attach_type;
} section_names[] = {
	BPF_PROG_SEC("socket",			BPF_PROG_TYPE_SOCKET_FILTER),
	BPF_PROG_SEC("kprobe/",			BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("kretprobe/",		BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("classifier",		BPF_PROG_TYPE_SCHED_CLS),
	BPF_PROG_SEC("action",			BPF_PROG_TYPE_SCHED_ACT),
	BPF_PROG_SEC("tracepoint/",		BPF_PROG_TYPE_TRACEPOINT),
	BPF_PROG_SEC("raw_tracepoint/",		BPF_PROG_TYPE_RAW_TRACEPOINT),
	BPF_PROG_SEC("xdp",			BPF_PROG_TYPE_XDP),
	BPF_PROG_SEC("perf_event",		BPF_PROG_TYPE_PERF_EVENT),
	BPF_PROG_SEC("lwt_in",			BPF_PROG_TYPE_LWT_IN),
	BPF_PROG_SEC("lwt_out",			BPF_PROG_TYPE_LWT_OUT),
	BPF_PROG_SEC("lwt_xmit",		BPF_PROG_TYPE_LWT_XMIT),
	BPF_PROG_SEC("lwt_seg6local",		BPF_PROG_TYPE_LWT_SEG6LOCAL),
	BPF_APROG_SEC("cgroup_skb/ingress",	BPF_PROG_TYPE_CGROUP_SKB,
						BPF_CGROUP_INET_INGRESS),
	BPF_APROG_SEC("cgroup_skb/egress",	BPF_PROG_TYPE_CGROUP_SKB,
						BPF_CGROUP_INET_EGRESS),
	BPF_APROG_COMPAT("cgroup/skb",		BPF_PROG_TYPE_CGROUP_SKB),
	BPF_APROG_SEC("cgroup/sock",		BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET_SOCK_CREATE),
	BPF_EAPROG_SEC("cgroup/post_bind4",	BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET4_POST_BIND),
	BPF_EAPROG_SEC("cgroup/post_bind6",	BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET6_POST_BIND),
	BPF_APROG_SEC("cgroup/dev",		BPF_PROG_TYPE_CGROUP_DEVICE,
						BPF_CGROUP_DEVICE),
	BPF_APROG_SEC("sockops",		BPF_PROG_TYPE_SOCK_OPS,
						BPF_CGROUP_SOCK_OPS),
	BPF_APROG_SEC("sk_skb/stream_parser",	BPF_PROG_TYPE_SK_SKB,
						BPF_SK_SKB_STREAM_PARSER),
	BPF_APROG_SEC("sk_skb/stream_verdict",	BPF_PROG_TYPE_SK_SKB,
						BPF_SK_SKB_STREAM_VERDICT),
	BPF_APROG_COMPAT("sk_skb",		BPF_PROG_TYPE_SK_SKB),
	BPF_APROG_SEC("sk_msg",			BPF_PROG_TYPE_SK_MSG,
						BPF_SK_MSG_VERDICT),
	BPF_APROG_SEC("lirc_mode2",		BPF_PROG_TYPE_LIRC_MODE2,
						BPF_LIRC_MODE2),
	BPF_APROG_SEC("flow_dissector",		BPF_PROG_TYPE_FLOW_DISSECTOR,
						BPF_FLOW_DISSECTOR),
	BPF_EAPROG_SEC("cgroup/bind4",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_BIND),
	BPF_EAPROG_SEC("cgroup/bind6",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_BIND),
	BPF_EAPROG_SEC("cgroup/connect4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_CONNECT),
	BPF_EAPROG_SEC("cgroup/connect6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_CONNECT),
	BPF_EAPROG_SEC("cgroup/sendmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP4_SENDMSG),
	BPF_EAPROG_SEC("cgroup/sendmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP6_SENDMSG),
};

#undef BPF_PROG_SEC_IMPL
#undef BPF_PROG_SEC
#undef BPF_APROG_SEC
#undef BPF_EAPROG_SEC
#undef BPF_APROG_COMPAT
2465 
2466 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
2467 			     enum bpf_attach_type *expected_attach_type)
2468 {
2469 	int i;
2470 
2471 	if (!name)
2472 		return -EINVAL;
2473 
2474 	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2475 		if (strncmp(name, section_names[i].sec, section_names[i].len))
2476 			continue;
2477 		*prog_type = section_names[i].prog_type;
2478 		*expected_attach_type = section_names[i].expected_attach_type;
2479 		return 0;
2480 	}
2481 	return -EINVAL;
2482 }
2483 
2484 int libbpf_attach_type_by_name(const char *name,
2485 			       enum bpf_attach_type *attach_type)
2486 {
2487 	int i;
2488 
2489 	if (!name)
2490 		return -EINVAL;
2491 
2492 	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2493 		if (strncmp(name, section_names[i].sec, section_names[i].len))
2494 			continue;
2495 		if (!section_names[i].is_attachable)
2496 			return -EINVAL;
2497 		*attach_type = section_names[i].attach_type;
2498 		return 0;
2499 	}
2500 	return -EINVAL;
2501 }
2502 
/* Guess @prog's program type and expected attach type from its ELF
 * section name.  Thin wrapper around libbpf_prog_type_by_name().
 */
static int
bpf_program__identify_section(struct bpf_program *prog,
			      enum bpf_prog_type *prog_type,
			      enum bpf_attach_type *expected_attach_type)
{
	return libbpf_prog_type_by_name(prog->section_name, prog_type,
					expected_attach_type);
}
2511 
2512 int bpf_map__fd(struct bpf_map *map)
2513 {
2514 	return map ? map->fd : -EINVAL;
2515 }
2516 
2517 const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
2518 {
2519 	return map ? &map->def : ERR_PTR(-EINVAL);
2520 }
2521 
2522 const char *bpf_map__name(struct bpf_map *map)
2523 {
2524 	return map ? map->name : NULL;
2525 }
2526 
2527 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
2528 {
2529 	return map ? map->btf_key_type_id : 0;
2530 }
2531 
2532 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
2533 {
2534 	return map ? map->btf_value_type_id : 0;
2535 }
2536 
2537 int bpf_map__set_priv(struct bpf_map *map, void *priv,
2538 		     bpf_map_clear_priv_t clear_priv)
2539 {
2540 	if (!map)
2541 		return -EINVAL;
2542 
2543 	if (map->priv) {
2544 		if (map->clear_priv)
2545 			map->clear_priv(map, map->priv);
2546 	}
2547 
2548 	map->priv = priv;
2549 	map->clear_priv = clear_priv;
2550 	return 0;
2551 }
2552 
2553 void *bpf_map__priv(struct bpf_map *map)
2554 {
2555 	return map ? map->priv : ERR_PTR(-EINVAL);
2556 }
2557 
2558 bool bpf_map__is_offload_neutral(struct bpf_map *map)
2559 {
2560 	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
2561 }
2562 
2563 void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
2564 {
2565 	map->map_ifindex = ifindex;
2566 }
2567 
2568 static struct bpf_map *
2569 __bpf_map__iter(struct bpf_map *m, struct bpf_object *obj, int i)
2570 {
2571 	ssize_t idx;
2572 	struct bpf_map *s, *e;
2573 
2574 	if (!obj || !obj->maps)
2575 		return NULL;
2576 
2577 	s = obj->maps;
2578 	e = obj->maps + obj->nr_maps;
2579 
2580 	if ((m < s) || (m >= e)) {
2581 		pr_warning("error in %s: map handler doesn't belong to object\n",
2582 			   __func__);
2583 		return NULL;
2584 	}
2585 
2586 	idx = (m - obj->maps) + i;
2587 	if (idx >= obj->nr_maps || idx < 0)
2588 		return NULL;
2589 	return &obj->maps[idx];
2590 }
2591 
2592 struct bpf_map *
2593 bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
2594 {
2595 	if (prev == NULL)
2596 		return obj->maps;
2597 
2598 	return __bpf_map__iter(prev, obj, 1);
2599 }
2600 
2601 struct bpf_map *
2602 bpf_map__prev(struct bpf_map *next, struct bpf_object *obj)
2603 {
2604 	if (next == NULL) {
2605 		if (!obj->nr_maps)
2606 			return NULL;
2607 		return obj->maps + obj->nr_maps - 1;
2608 	}
2609 
2610 	return __bpf_map__iter(next, obj, -1);
2611 }
2612 
2613 struct bpf_map *
2614 bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
2615 {
2616 	struct bpf_map *pos;
2617 
2618 	bpf_map__for_each(pos, obj) {
2619 		if (pos->name && !strcmp(pos->name, name))
2620 			return pos;
2621 	}
2622 	return NULL;
2623 }
2624 
2625 struct bpf_map *
2626 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
2627 {
2628 	int i;
2629 
2630 	for (i = 0; i < obj->nr_maps; i++) {
2631 		if (obj->maps[i].offset == offset)
2632 			return &obj->maps[i];
2633 	}
2634 	return ERR_PTR(-ENOENT);
2635 }
2636 
/* Decode a pointer that may carry an ERR_PTR-encoded error: return the
 * negative error value if it does, 0 for an ordinary (valid) pointer.
 */
long libbpf_get_error(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}
2643 
2644 int bpf_prog_load(const char *file, enum bpf_prog_type type,
2645 		  struct bpf_object **pobj, int *prog_fd)
2646 {
2647 	struct bpf_prog_load_attr attr;
2648 
2649 	memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
2650 	attr.file = file;
2651 	attr.prog_type = type;
2652 	attr.expected_attach_type = 0;
2653 
2654 	return bpf_prog_load_xattr(&attr, pobj, prog_fd);
2655 }
2656 
2657 int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
2658 			struct bpf_object **pobj, int *prog_fd)
2659 {
2660 	struct bpf_object_open_attr open_attr = {
2661 		.file		= attr->file,
2662 		.prog_type	= attr->prog_type,
2663 	};
2664 	struct bpf_program *prog, *first_prog = NULL;
2665 	enum bpf_attach_type expected_attach_type;
2666 	enum bpf_prog_type prog_type;
2667 	struct bpf_object *obj;
2668 	struct bpf_map *map;
2669 	int err;
2670 
2671 	if (!attr)
2672 		return -EINVAL;
2673 	if (!attr->file)
2674 		return -EINVAL;
2675 
2676 	obj = bpf_object__open_xattr(&open_attr);
2677 	if (IS_ERR_OR_NULL(obj))
2678 		return -ENOENT;
2679 
2680 	bpf_object__for_each_program(prog, obj) {
2681 		/*
2682 		 * If type is not specified, try to guess it based on
2683 		 * section name.
2684 		 */
2685 		prog_type = attr->prog_type;
2686 		prog->prog_ifindex = attr->ifindex;
2687 		expected_attach_type = attr->expected_attach_type;
2688 		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
2689 			err = bpf_program__identify_section(prog, &prog_type,
2690 							    &expected_attach_type);
2691 			if (err < 0) {
2692 				pr_warning("failed to guess program type based on section name %s\n",
2693 					   prog->section_name);
2694 				bpf_object__close(obj);
2695 				return -EINVAL;
2696 			}
2697 		}
2698 
2699 		bpf_program__set_type(prog, prog_type);
2700 		bpf_program__set_expected_attach_type(prog,
2701 						      expected_attach_type);
2702 
2703 		if (!first_prog)
2704 			first_prog = prog;
2705 	}
2706 
2707 	bpf_map__for_each(map, obj) {
2708 		if (!bpf_map__is_offload_neutral(map))
2709 			map->map_ifindex = attr->ifindex;
2710 	}
2711 
2712 	if (!first_prog) {
2713 		pr_warning("object file doesn't contain bpf program\n");
2714 		bpf_object__close(obj);
2715 		return -ENOENT;
2716 	}
2717 
2718 	err = bpf_object__load(obj);
2719 	if (err) {
2720 		bpf_object__close(obj);
2721 		return -EINVAL;
2722 	}
2723 
2724 	*pobj = obj;
2725 	*prog_fd = bpf_program__fd(first_prog);
2726 	return 0;
2727 }
2728 
/* Drain all pending records from a perf ring buffer mapped at @mmap_mem
 * (one header page followed by @mmap_size bytes of data), invoking @fn on
 * each record until the ring is empty or @fn returns something other than
 * LIBBPF_PERF_EVENT_CONT.
 *
 * Records that wrap around the end of the ring are linearized into
 * *copy_mem, which is grown with malloc() as needed; *copy_size tracks its
 * capacity and the caller owns (and eventually frees) the buffer.
 *
 * NOTE(review): the index mask below assumes @mmap_size is a power of
 * two; this is not checked here — confirm at the call site.
 */
enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data)
{
	struct perf_event_mmap_page *header = mmap_mem;
	__u64 data_head = ring_buffer_read_head(header);
	__u64 data_tail = header->data_tail;
	void *base = ((__u8 *)header) + page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	/* Consume every record between tail and the head snapshot. */
	while (data_head != data_tail) {
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		/* Record wraps past the end of the ring: copy its two
		 * pieces into *copy_mem so @fn sees contiguous bytes.
		 */
		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_secnd = ehdr_size - len_first;

			/* Grow the spill buffer when the record is larger
			 * than its current capacity.
			 */
			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_secnd);
			ehdr = *copy_mem;
		}

		/* The record is acknowledged (tail advanced) even when @fn
		 * requests a stop, so it is not redelivered next call.
		 */
		ret = fn(ehdr, private_data);
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	/* Publish the final tail position back to the ring header. */
	ring_buffer_write_tail(header, data_tail);
	return ret;
}
2776