xref: /linux/tools/lib/bpf/libbpf.c (revision 036b9e7caeb09598afb297a6d4fb36b477a4f6b2)
1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 
3 /*
4  * Common eBPF ELF object loading operations.
5  *
6  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8  * Copyright (C) 2015 Huawei Inc.
9  * Copyright (C) 2017 Nicira, Inc.
10  */
11 
12 #ifndef _GNU_SOURCE
13 #define _GNU_SOURCE
14 #endif
15 #include <stdlib.h>
16 #include <stdio.h>
17 #include <stdarg.h>
18 #include <libgen.h>
19 #include <inttypes.h>
20 #include <string.h>
21 #include <unistd.h>
22 #include <fcntl.h>
23 #include <errno.h>
24 #include <asm/unistd.h>
25 #include <linux/err.h>
26 #include <linux/kernel.h>
27 #include <linux/bpf.h>
28 #include <linux/btf.h>
29 #include <linux/filter.h>
30 #include <linux/list.h>
31 #include <linux/limits.h>
32 #include <linux/perf_event.h>
33 #include <linux/ring_buffer.h>
34 #include <sys/stat.h>
35 #include <sys/types.h>
36 #include <sys/vfs.h>
37 #include <tools/libc_compat.h>
38 #include <libelf.h>
39 #include <gelf.h>
40 
41 #include "libbpf.h"
42 #include "bpf.h"
43 #include "btf.h"
44 #include "str_error.h"
45 
46 #ifndef EM_BPF
47 #define EM_BPF 247
48 #endif
49 
50 #ifndef BPF_FS_MAGIC
51 #define BPF_FS_MAGIC		0xcafe4a11
52 #endif
53 
#define __printf(a, b)	__attribute__((format(printf, a, b)))

/*
 * Default print hook: forward the formatted message to stderr and
 * return vfprintf()'s result (chars written, or negative on error).
 */
__printf(1, 2)
static int __base_pr(const char *format, ...)
{
	int ret;
	va_list ap;

	va_start(ap, format);
	ret = vfprintf(stderr, format, ap);
	va_end(ap);

	return ret;
}
67 
/* Print hooks: warnings and info default to stderr; debug is off (NULL). */
static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_debug;

/* Invoke hook @func only when it is non-NULL, prefixing "libbpf: ". */
#define __pr(func, fmt, ...)	\
do {				\
	if ((func))		\
		(func)("libbpf: " fmt, ##__VA_ARGS__); \
} while (0)

#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)
81 
82 void libbpf_set_print(libbpf_print_fn_t warn,
83 		      libbpf_print_fn_t info,
84 		      libbpf_print_fn_t debug)
85 {
86 	__pr_warning = warn;
87 	__pr_info = info;
88 	__pr_debug = debug;
89 }
90 
/* Scratch-buffer size used with libbpf_strerror_r(). */
#define STRERR_BUFSIZE  128

/* Run @action, capture its result in @err, jump to @out on failure. */
#define CHECK_ERR(action, err, out) do {	\
	err = action;			\
	if (err)			\
		goto out;		\
} while(0)


/* Copied from tools/perf/util/util.h */
#ifndef zfree
/* free(*ptr) and NULL the pointer so double-frees become no-ops. */
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
/* Close @fd only if valid (>= 0), then reset it to -1; yields the
 * close() result (0 when nothing was open).
 */
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

/* Prefer mmap-backed ELF reading when libelf provides it. */
#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif
119 
/* Kernel feature probe results, filled in by bpf_object__probe_caps(). */
struct bpf_capabilities {
	/* v4.14: kernel support for program & map names. */
	__u32 name:1;
};
124 
/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	/* Symbol name of the program (".text" for the text section). */
	char *name;
	/* presumably the ifindex for device offload — TODO confirm */
	int prog_ifindex;
	char *section_name;
	/* section_name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;
	/* Private copy of the section's instructions. */
	struct bpf_insn *insns;
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	/* Relocations collected from the matching SHT_REL section. */
	struct reloc_desc {
		enum {
			RELO_LD64,	/* ld_imm64 referencing a map */
			RELO_CALL,	/* bpf-to-bpf pseudo call */
		} type;
		int insn_idx;
		union {
			int map_idx;	/* RELO_LD64: index into obj->maps */
			int text_off;	/* RELO_CALL: symbol value in .text */
		};
	} *reloc_desc;
	int nr_reloc;

	/* Per-instance prog fds; nr == -1 means "never loaded". */
	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	/* Back pointer to the owning object. */
	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
	int btf_fd;
	/* BTF func info blob; freed by bpf_program__unload(). */
	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	/* Shared capability probe results, owned by the bpf_object. */
	struct bpf_capabilities *caps;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
};
178 
struct bpf_map {
	/* fd of the created/reused kernel map; -1 until created. */
	int fd;
	char *name;
	/* Symbol value (byte offset) inside the "maps" ELF section;
	 * used to match ld_imm64 relocations to this map.
	 */
	size_t offset;
	int map_ifindex;
	/* fd of the inner map for map-in-map types; -1 when unused. */
	int inner_map_fd;
	struct bpf_map_def def;
	/* BTF type ids of key/value (0 when no BTF info was found). */
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
};
191 
/* Global list of every opened bpf_object, linked via bpf_object::list. */
static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	/* License string from the "license" section; NUL-terminated
	 * because the copy is capped at sizeof() - 1 over zeroed memory.
	 */
	char license[64];
	/* Kernel version from the "version" section (0 if absent). */
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	/* Maps from the "maps" section, sorted by section offset. */
	struct bpf_map *maps;
	size_t nr_maps;

	bool loaded;
	/* Set when any program contains a BPF_PSEUDO_CALL relocation. */
	bool has_pseudo_calls;

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		/* Caller-owned in-memory ELF image (see bpf_object__new). */
		void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		size_t strtabidx;
		/* One entry per SHT_REL section over executable code. */
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;
		int nr_reloc;
		int maps_shndx;	/* "maps" section index, -1 if none */
		int text_shndx;	/* ".text" section index */
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;
	struct btf_ext *btf_ext;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	/* Kernel feature probes, shared with each program via caps. */
	struct bpf_capabilities caps;

	/* Object path; allocated together with the struct itself. */
	char path[];
};
/* Non-zero while the ELF parsing state in efile is live. */
#define obj_elf_valid(o)	((o)->efile.elf)
244 
245 void bpf_program__unload(struct bpf_program *prog)
246 {
247 	int i;
248 
249 	if (!prog)
250 		return;
251 
252 	/*
253 	 * If the object is opened but the program was never loaded,
254 	 * it is possible that prog->instances.nr == -1.
255 	 */
256 	if (prog->instances.nr > 0) {
257 		for (i = 0; i < prog->instances.nr; i++)
258 			zclose(prog->instances.fds[i]);
259 	} else if (prog->instances.nr != -1) {
260 		pr_warning("Internal error: instances.nr is %d\n",
261 			   prog->instances.nr);
262 	}
263 
264 	prog->instances.nr = -1;
265 	zfree(&prog->instances.fds);
266 
267 	zclose(prog->btf_fd);
268 	zfree(&prog->func_info);
269 }
270 
271 static void bpf_program__exit(struct bpf_program *prog)
272 {
273 	if (!prog)
274 		return;
275 
276 	if (prog->clear_priv)
277 		prog->clear_priv(prog, prog->priv);
278 
279 	prog->priv = NULL;
280 	prog->clear_priv = NULL;
281 
282 	bpf_program__unload(prog);
283 	zfree(&prog->name);
284 	zfree(&prog->section_name);
285 	zfree(&prog->pin_name);
286 	zfree(&prog->insns);
287 	zfree(&prog->reloc_desc);
288 
289 	prog->nr_reloc = 0;
290 	prog->insns_cnt = 0;
291 	prog->idx = -1;
292 }
293 
294 static char *__bpf_program__pin_name(struct bpf_program *prog)
295 {
296 	char *name, *p;
297 
298 	name = p = strdup(prog->section_name);
299 	while ((p = strchr(p, '/')))
300 		*p = '_';
301 
302 	return name;
303 }
304 
305 static int
306 bpf_program__init(void *data, size_t size, char *section_name, int idx,
307 		  struct bpf_program *prog)
308 {
309 	if (size < sizeof(struct bpf_insn)) {
310 		pr_warning("corrupted section '%s'\n", section_name);
311 		return -EINVAL;
312 	}
313 
314 	bzero(prog, sizeof(*prog));
315 
316 	prog->section_name = strdup(section_name);
317 	if (!prog->section_name) {
318 		pr_warning("failed to alloc name for prog under section(%d) %s\n",
319 			   idx, section_name);
320 		goto errout;
321 	}
322 
323 	prog->pin_name = __bpf_program__pin_name(prog);
324 	if (!prog->pin_name) {
325 		pr_warning("failed to alloc pin name for prog under section(%d) %s\n",
326 			   idx, section_name);
327 		goto errout;
328 	}
329 
330 	prog->insns = malloc(size);
331 	if (!prog->insns) {
332 		pr_warning("failed to alloc insns for prog under section %s\n",
333 			   section_name);
334 		goto errout;
335 	}
336 	prog->insns_cnt = size / sizeof(struct bpf_insn);
337 	memcpy(prog->insns, data,
338 	       prog->insns_cnt * sizeof(struct bpf_insn));
339 	prog->idx = idx;
340 	prog->instances.fds = NULL;
341 	prog->instances.nr = -1;
342 	prog->type = BPF_PROG_TYPE_UNSPEC;
343 	prog->btf_fd = -1;
344 
345 	return 0;
346 errout:
347 	bpf_program__exit(prog);
348 	return -ENOMEM;
349 }
350 
351 static int
352 bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
353 			char *section_name, int idx)
354 {
355 	struct bpf_program prog, *progs;
356 	int nr_progs, err;
357 
358 	err = bpf_program__init(data, size, section_name, idx, &prog);
359 	if (err)
360 		return err;
361 
362 	prog.caps = &obj->caps;
363 	progs = obj->programs;
364 	nr_progs = obj->nr_programs;
365 
366 	progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
367 	if (!progs) {
368 		/*
369 		 * In this case the original obj->programs
370 		 * is still valid, so don't need special treat for
371 		 * bpf_close_object().
372 		 */
373 		pr_warning("failed to alloc a new program under section '%s'\n",
374 			   section_name);
375 		bpf_program__exit(&prog);
376 		return -ENOMEM;
377 	}
378 
379 	pr_debug("found program %s\n", prog.section_name);
380 	obj->programs = progs;
381 	obj->nr_programs = nr_progs + 1;
382 	prog.obj = obj;
383 	progs[nr_progs] = prog;
384 	return 0;
385 }
386 
/*
 * Give every program in @obj a name by scanning the ELF symbol table
 * for a global symbol defined in the program's section.  The .text
 * section falls back to the literal name ".text" when it has no global
 * symbol.  Returns 0 on success or a negative libbpf error code.
 */
static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		/* First global symbol in the program's section wins. */
		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warning("failed to get sym name string for prog %s\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warning("failed to find sym for prog %s\n",
				   prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warning("failed to allocate memory for prog sym %s\n",
				   name);
			return -ENOMEM;
		}
	}

	return 0;
}
439 
440 static struct bpf_object *bpf_object__new(const char *path,
441 					  void *obj_buf,
442 					  size_t obj_buf_sz)
443 {
444 	struct bpf_object *obj;
445 
446 	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
447 	if (!obj) {
448 		pr_warning("alloc memory failed for %s\n", path);
449 		return ERR_PTR(-ENOMEM);
450 	}
451 
452 	strcpy(obj->path, path);
453 	obj->efile.fd = -1;
454 
455 	/*
456 	 * Caller of this function should also calls
457 	 * bpf_object__elf_finish() after data collection to return
458 	 * obj_buf to user. If not, we should duplicate the buffer to
459 	 * avoid user freeing them before elf finish.
460 	 */
461 	obj->efile.obj_buf = obj_buf;
462 	obj->efile.obj_buf_sz = obj_buf_sz;
463 	obj->efile.maps_shndx = -1;
464 
465 	obj->loaded = false;
466 
467 	INIT_LIST_HEAD(&obj->list);
468 	list_add(&obj->list, &bpf_objects_list);
469 	return obj;
470 }
471 
472 static void bpf_object__elf_finish(struct bpf_object *obj)
473 {
474 	if (!obj_elf_valid(obj))
475 		return;
476 
477 	if (obj->efile.elf) {
478 		elf_end(obj->efile.elf);
479 		obj->efile.elf = NULL;
480 	}
481 	obj->efile.symbols = NULL;
482 
483 	zfree(&obj->efile.reloc);
484 	obj->efile.nr_reloc = 0;
485 	zclose(obj->efile.fd);
486 	obj->efile.obj_buf = NULL;
487 	obj->efile.obj_buf_sz = 0;
488 }
489 
490 static int bpf_object__elf_init(struct bpf_object *obj)
491 {
492 	int err = 0;
493 	GElf_Ehdr *ep;
494 
495 	if (obj_elf_valid(obj)) {
496 		pr_warning("elf init: internal error\n");
497 		return -LIBBPF_ERRNO__LIBELF;
498 	}
499 
500 	if (obj->efile.obj_buf_sz > 0) {
501 		/*
502 		 * obj_buf should have been validated by
503 		 * bpf_object__open_buffer().
504 		 */
505 		obj->efile.elf = elf_memory(obj->efile.obj_buf,
506 					    obj->efile.obj_buf_sz);
507 	} else {
508 		obj->efile.fd = open(obj->path, O_RDONLY);
509 		if (obj->efile.fd < 0) {
510 			char errmsg[STRERR_BUFSIZE];
511 			char *cp = libbpf_strerror_r(errno, errmsg,
512 						     sizeof(errmsg));
513 
514 			pr_warning("failed to open %s: %s\n", obj->path, cp);
515 			return -errno;
516 		}
517 
518 		obj->efile.elf = elf_begin(obj->efile.fd,
519 				LIBBPF_ELF_C_READ_MMAP,
520 				NULL);
521 	}
522 
523 	if (!obj->efile.elf) {
524 		pr_warning("failed to open %s as ELF file\n",
525 				obj->path);
526 		err = -LIBBPF_ERRNO__LIBELF;
527 		goto errout;
528 	}
529 
530 	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
531 		pr_warning("failed to get EHDR from %s\n",
532 				obj->path);
533 		err = -LIBBPF_ERRNO__FORMAT;
534 		goto errout;
535 	}
536 	ep = &obj->efile.ehdr;
537 
538 	/* Old LLVM set e_machine to EM_NONE */
539 	if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
540 		pr_warning("%s is not an eBPF object file\n",
541 			obj->path);
542 		err = -LIBBPF_ERRNO__FORMAT;
543 		goto errout;
544 	}
545 
546 	return 0;
547 errout:
548 	bpf_object__elf_finish(obj);
549 	return err;
550 }
551 
552 static int
553 bpf_object__check_endianness(struct bpf_object *obj)
554 {
555 	static unsigned int const endian = 1;
556 
557 	switch (obj->efile.ehdr.e_ident[EI_DATA]) {
558 	case ELFDATA2LSB:
559 		/* We are big endian, BPF obj is little endian. */
560 		if (*(unsigned char const *)&endian != 1)
561 			goto mismatch;
562 		break;
563 
564 	case ELFDATA2MSB:
565 		/* We are little endian, BPF obj is big endian. */
566 		if (*(unsigned char const *)&endian != 0)
567 			goto mismatch;
568 		break;
569 	default:
570 		return -LIBBPF_ERRNO__ENDIAN;
571 	}
572 
573 	return 0;
574 
575 mismatch:
576 	pr_warning("Error: endianness mismatch.\n");
577 	return -LIBBPF_ERRNO__ENDIAN;
578 }
579 
580 static int
581 bpf_object__init_license(struct bpf_object *obj,
582 			 void *data, size_t size)
583 {
584 	memcpy(obj->license, data,
585 	       min(size, sizeof(obj->license) - 1));
586 	pr_debug("license of %s is %s\n", obj->path, obj->license);
587 	return 0;
588 }
589 
590 static int
591 bpf_object__init_kversion(struct bpf_object *obj,
592 			  void *data, size_t size)
593 {
594 	__u32 kver;
595 
596 	if (size != sizeof(kver)) {
597 		pr_warning("invalid kver section in %s\n", obj->path);
598 		return -LIBBPF_ERRNO__FORMAT;
599 	}
600 	memcpy(&kver, data, sizeof(kver));
601 	obj->kern_version = kver;
602 	pr_debug("kernel version of %s is %x\n", obj->path,
603 		 obj->kern_version);
604 	return 0;
605 }
606 
607 static int compare_bpf_map(const void *_a, const void *_b)
608 {
609 	const struct bpf_map *a = _a;
610 	const struct bpf_map *b = _b;
611 
612 	return a->offset - b->offset;
613 }
614 
/*
 * Report whether @type is one of the map-in-map types, i.e. its values
 * are themselves maps.
 */
static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	return type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	       type == BPF_MAP_TYPE_HASH_OF_MAPS;
}
622 
623 static int
624 bpf_object__init_maps(struct bpf_object *obj, int flags)
625 {
626 	bool strict = !(flags & MAPS_RELAX_COMPAT);
627 	int i, map_idx, map_def_sz, nr_maps = 0;
628 	Elf_Scn *scn;
629 	Elf_Data *data;
630 	Elf_Data *symbols = obj->efile.symbols;
631 
632 	if (obj->efile.maps_shndx < 0)
633 		return -EINVAL;
634 	if (!symbols)
635 		return -EINVAL;
636 
637 	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
638 	if (scn)
639 		data = elf_getdata(scn, NULL);
640 	if (!scn || !data) {
641 		pr_warning("failed to get Elf_Data from map section %d\n",
642 			   obj->efile.maps_shndx);
643 		return -EINVAL;
644 	}
645 
646 	/*
647 	 * Count number of maps. Each map has a name.
648 	 * Array of maps is not supported: only the first element is
649 	 * considered.
650 	 *
651 	 * TODO: Detect array of map and report error.
652 	 */
653 	for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
654 		GElf_Sym sym;
655 
656 		if (!gelf_getsym(symbols, i, &sym))
657 			continue;
658 		if (sym.st_shndx != obj->efile.maps_shndx)
659 			continue;
660 		nr_maps++;
661 	}
662 
663 	/* Alloc obj->maps and fill nr_maps. */
664 	pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
665 		 nr_maps, data->d_size);
666 
667 	if (!nr_maps)
668 		return 0;
669 
670 	/* Assume equally sized map definitions */
671 	map_def_sz = data->d_size / nr_maps;
672 	if (!data->d_size || (data->d_size % nr_maps) != 0) {
673 		pr_warning("unable to determine map definition size "
674 			   "section %s, %d maps in %zd bytes\n",
675 			   obj->path, nr_maps, data->d_size);
676 		return -EINVAL;
677 	}
678 
679 	obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
680 	if (!obj->maps) {
681 		pr_warning("alloc maps for object failed\n");
682 		return -ENOMEM;
683 	}
684 	obj->nr_maps = nr_maps;
685 
686 	for (i = 0; i < nr_maps; i++) {
687 		/*
688 		 * fill all fd with -1 so won't close incorrect
689 		 * fd (fd=0 is stdin) when failure (zclose won't close
690 		 * negative fd)).
691 		 */
692 		obj->maps[i].fd = -1;
693 		obj->maps[i].inner_map_fd = -1;
694 	}
695 
696 	/*
697 	 * Fill obj->maps using data in "maps" section.
698 	 */
699 	for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
700 		GElf_Sym sym;
701 		const char *map_name;
702 		struct bpf_map_def *def;
703 
704 		if (!gelf_getsym(symbols, i, &sym))
705 			continue;
706 		if (sym.st_shndx != obj->efile.maps_shndx)
707 			continue;
708 
709 		map_name = elf_strptr(obj->efile.elf,
710 				      obj->efile.strtabidx,
711 				      sym.st_name);
712 		obj->maps[map_idx].offset = sym.st_value;
713 		if (sym.st_value + map_def_sz > data->d_size) {
714 			pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
715 				   obj->path, map_name);
716 			return -EINVAL;
717 		}
718 
719 		obj->maps[map_idx].name = strdup(map_name);
720 		if (!obj->maps[map_idx].name) {
721 			pr_warning("failed to alloc map name\n");
722 			return -ENOMEM;
723 		}
724 		pr_debug("map %d is \"%s\"\n", map_idx,
725 			 obj->maps[map_idx].name);
726 		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
727 		/*
728 		 * If the definition of the map in the object file fits in
729 		 * bpf_map_def, copy it.  Any extra fields in our version
730 		 * of bpf_map_def will default to zero as a result of the
731 		 * calloc above.
732 		 */
733 		if (map_def_sz <= sizeof(struct bpf_map_def)) {
734 			memcpy(&obj->maps[map_idx].def, def, map_def_sz);
735 		} else {
736 			/*
737 			 * Here the map structure being read is bigger than what
738 			 * we expect, truncate if the excess bits are all zero.
739 			 * If they are not zero, reject this map as
740 			 * incompatible.
741 			 */
742 			char *b;
743 			for (b = ((char *)def) + sizeof(struct bpf_map_def);
744 			     b < ((char *)def) + map_def_sz; b++) {
745 				if (*b != 0) {
746 					pr_warning("maps section in %s: \"%s\" "
747 						   "has unrecognized, non-zero "
748 						   "options\n",
749 						   obj->path, map_name);
750 					if (strict)
751 						return -EINVAL;
752 				}
753 			}
754 			memcpy(&obj->maps[map_idx].def, def,
755 			       sizeof(struct bpf_map_def));
756 		}
757 		map_idx++;
758 	}
759 
760 	qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
761 	return 0;
762 }
763 
764 static bool section_have_execinstr(struct bpf_object *obj, int idx)
765 {
766 	Elf_Scn *scn;
767 	GElf_Shdr sh;
768 
769 	scn = elf_getscn(obj->efile.elf, idx);
770 	if (!scn)
771 		return false;
772 
773 	if (gelf_getshdr(scn, &sh) != &sh)
774 		return false;
775 
776 	if (sh.sh_flags & SHF_EXECINSTR)
777 		return true;
778 
779 	return false;
780 }
781 
782 static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
783 {
784 	Elf *elf = obj->efile.elf;
785 	GElf_Ehdr *ep = &obj->efile.ehdr;
786 	Elf_Data *btf_ext_data = NULL;
787 	Elf_Scn *scn = NULL;
788 	int idx = 0, err = 0;
789 
790 	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
791 	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
792 		pr_warning("failed to get e_shstrndx from %s\n",
793 			   obj->path);
794 		return -LIBBPF_ERRNO__FORMAT;
795 	}
796 
797 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
798 		char *name;
799 		GElf_Shdr sh;
800 		Elf_Data *data;
801 
802 		idx++;
803 		if (gelf_getshdr(scn, &sh) != &sh) {
804 			pr_warning("failed to get section(%d) header from %s\n",
805 				   idx, obj->path);
806 			err = -LIBBPF_ERRNO__FORMAT;
807 			goto out;
808 		}
809 
810 		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
811 		if (!name) {
812 			pr_warning("failed to get section(%d) name from %s\n",
813 				   idx, obj->path);
814 			err = -LIBBPF_ERRNO__FORMAT;
815 			goto out;
816 		}
817 
818 		data = elf_getdata(scn, 0);
819 		if (!data) {
820 			pr_warning("failed to get section(%d) data from %s(%s)\n",
821 				   idx, name, obj->path);
822 			err = -LIBBPF_ERRNO__FORMAT;
823 			goto out;
824 		}
825 		pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
826 			 idx, name, (unsigned long)data->d_size,
827 			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
828 			 (int)sh.sh_type);
829 
830 		if (strcmp(name, "license") == 0)
831 			err = bpf_object__init_license(obj,
832 						       data->d_buf,
833 						       data->d_size);
834 		else if (strcmp(name, "version") == 0)
835 			err = bpf_object__init_kversion(obj,
836 							data->d_buf,
837 							data->d_size);
838 		else if (strcmp(name, "maps") == 0)
839 			obj->efile.maps_shndx = idx;
840 		else if (strcmp(name, BTF_ELF_SEC) == 0) {
841 			obj->btf = btf__new(data->d_buf, data->d_size,
842 					    __pr_debug);
843 			if (IS_ERR(obj->btf)) {
844 				pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
845 					   BTF_ELF_SEC, PTR_ERR(obj->btf));
846 				obj->btf = NULL;
847 			}
848 		} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
849 			btf_ext_data = data;
850 		} else if (sh.sh_type == SHT_SYMTAB) {
851 			if (obj->efile.symbols) {
852 				pr_warning("bpf: multiple SYMTAB in %s\n",
853 					   obj->path);
854 				err = -LIBBPF_ERRNO__FORMAT;
855 			} else {
856 				obj->efile.symbols = data;
857 				obj->efile.strtabidx = sh.sh_link;
858 			}
859 		} else if ((sh.sh_type == SHT_PROGBITS) &&
860 			   (sh.sh_flags & SHF_EXECINSTR) &&
861 			   (data->d_size > 0)) {
862 			if (strcmp(name, ".text") == 0)
863 				obj->efile.text_shndx = idx;
864 			err = bpf_object__add_program(obj, data->d_buf,
865 						      data->d_size, name, idx);
866 			if (err) {
867 				char errmsg[STRERR_BUFSIZE];
868 				char *cp = libbpf_strerror_r(-err, errmsg,
869 							     sizeof(errmsg));
870 
871 				pr_warning("failed to alloc program %s (%s): %s",
872 					   name, obj->path, cp);
873 			}
874 		} else if (sh.sh_type == SHT_REL) {
875 			void *reloc = obj->efile.reloc;
876 			int nr_reloc = obj->efile.nr_reloc + 1;
877 			int sec = sh.sh_info; /* points to other section */
878 
879 			/* Only do relo for section with exec instructions */
880 			if (!section_have_execinstr(obj, sec)) {
881 				pr_debug("skip relo %s(%d) for section(%d)\n",
882 					 name, idx, sec);
883 				continue;
884 			}
885 
886 			reloc = reallocarray(reloc, nr_reloc,
887 					     sizeof(*obj->efile.reloc));
888 			if (!reloc) {
889 				pr_warning("realloc failed\n");
890 				err = -ENOMEM;
891 			} else {
892 				int n = nr_reloc - 1;
893 
894 				obj->efile.reloc = reloc;
895 				obj->efile.nr_reloc = nr_reloc;
896 
897 				obj->efile.reloc[n].shdr = sh;
898 				obj->efile.reloc[n].data = data;
899 			}
900 		} else {
901 			pr_debug("skip section(%d) %s\n", idx, name);
902 		}
903 		if (err)
904 			goto out;
905 	}
906 
907 	if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
908 		pr_warning("Corrupted ELF file: index of strtab invalid\n");
909 		return LIBBPF_ERRNO__FORMAT;
910 	}
911 	if (btf_ext_data) {
912 		if (!obj->btf) {
913 			pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
914 				 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
915 		} else {
916 			obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
917 						    btf_ext_data->d_size,
918 						    __pr_debug);
919 			if (IS_ERR(obj->btf_ext)) {
920 				pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
921 					   BTF_EXT_ELF_SEC,
922 					   PTR_ERR(obj->btf_ext));
923 				obj->btf_ext = NULL;
924 			}
925 		}
926 	}
927 	if (obj->efile.maps_shndx >= 0) {
928 		err = bpf_object__init_maps(obj, flags);
929 		if (err)
930 			goto out;
931 	}
932 	err = bpf_object__init_prog_names(obj);
933 out:
934 	return err;
935 }
936 
937 static struct bpf_program *
938 bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
939 {
940 	struct bpf_program *prog;
941 	size_t i;
942 
943 	for (i = 0; i < obj->nr_programs; i++) {
944 		prog = &obj->programs[i];
945 		if (prog->idx == idx)
946 			return prog;
947 	}
948 	return NULL;
949 }
950 
/*
 * Find a program by its ELF section name ("title").  Returns the first
 * match, or NULL when no program in @obj came from a section named
 * @title.
 */
struct bpf_program *
bpf_object__find_program_by_title(struct bpf_object *obj, const char *title)
{
	bpf_object__for_each_program(pos, obj) {
		if (pos->section_name && !strcmp(pos->section_name, title))
			return pos;
	}
	return NULL;
}
962 
963 static int
964 bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
965 			   Elf_Data *data, struct bpf_object *obj)
966 {
967 	Elf_Data *symbols = obj->efile.symbols;
968 	int text_shndx = obj->efile.text_shndx;
969 	int maps_shndx = obj->efile.maps_shndx;
970 	struct bpf_map *maps = obj->maps;
971 	size_t nr_maps = obj->nr_maps;
972 	int i, nrels;
973 
974 	pr_debug("collecting relocating info for: '%s'\n",
975 		 prog->section_name);
976 	nrels = shdr->sh_size / shdr->sh_entsize;
977 
978 	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
979 	if (!prog->reloc_desc) {
980 		pr_warning("failed to alloc memory in relocation\n");
981 		return -ENOMEM;
982 	}
983 	prog->nr_reloc = nrels;
984 
985 	for (i = 0; i < nrels; i++) {
986 		GElf_Sym sym;
987 		GElf_Rel rel;
988 		unsigned int insn_idx;
989 		struct bpf_insn *insns = prog->insns;
990 		size_t map_idx;
991 
992 		if (!gelf_getrel(data, i, &rel)) {
993 			pr_warning("relocation: failed to get %d reloc\n", i);
994 			return -LIBBPF_ERRNO__FORMAT;
995 		}
996 
997 		if (!gelf_getsym(symbols,
998 				 GELF_R_SYM(rel.r_info),
999 				 &sym)) {
1000 			pr_warning("relocation: symbol %"PRIx64" not found\n",
1001 				   GELF_R_SYM(rel.r_info));
1002 			return -LIBBPF_ERRNO__FORMAT;
1003 		}
1004 		pr_debug("relo for %lld value %lld name %d\n",
1005 			 (long long) (rel.r_info >> 32),
1006 			 (long long) sym.st_value, sym.st_name);
1007 
1008 		if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
1009 			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
1010 				   prog->section_name, sym.st_shndx);
1011 			return -LIBBPF_ERRNO__RELOC;
1012 		}
1013 
1014 		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
1015 		pr_debug("relocation: insn_idx=%u\n", insn_idx);
1016 
1017 		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
1018 			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
1019 				pr_warning("incorrect bpf_call opcode\n");
1020 				return -LIBBPF_ERRNO__RELOC;
1021 			}
1022 			prog->reloc_desc[i].type = RELO_CALL;
1023 			prog->reloc_desc[i].insn_idx = insn_idx;
1024 			prog->reloc_desc[i].text_off = sym.st_value;
1025 			obj->has_pseudo_calls = true;
1026 			continue;
1027 		}
1028 
1029 		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
1030 			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
1031 				   insn_idx, insns[insn_idx].code);
1032 			return -LIBBPF_ERRNO__RELOC;
1033 		}
1034 
1035 		/* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
1036 		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
1037 			if (maps[map_idx].offset == sym.st_value) {
1038 				pr_debug("relocation: find map %zd (%s) for insn %u\n",
1039 					 map_idx, maps[map_idx].name, insn_idx);
1040 				break;
1041 			}
1042 		}
1043 
1044 		if (map_idx >= nr_maps) {
1045 			pr_warning("bpf relocation: map_idx %d large than %d\n",
1046 				   (int)map_idx, (int)nr_maps - 1);
1047 			return -LIBBPF_ERRNO__RELOC;
1048 		}
1049 
1050 		prog->reloc_desc[i].type = RELO_LD64;
1051 		prog->reloc_desc[i].insn_idx = insn_idx;
1052 		prog->reloc_desc[i].map_idx = map_idx;
1053 	}
1054 	return 0;
1055 }
1056 
1057 static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
1058 {
1059 	const struct btf_type *container_type;
1060 	const struct btf_member *key, *value;
1061 	struct bpf_map_def *def = &map->def;
1062 	const size_t max_name = 256;
1063 	char container_name[max_name];
1064 	__s64 key_size, value_size;
1065 	__s32 container_id;
1066 
1067 	if (snprintf(container_name, max_name, "____btf_map_%s", map->name) ==
1068 	    max_name) {
1069 		pr_warning("map:%s length of '____btf_map_%s' is too long\n",
1070 			   map->name, map->name);
1071 		return -EINVAL;
1072 	}
1073 
1074 	container_id = btf__find_by_name(btf, container_name);
1075 	if (container_id < 0) {
1076 		pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
1077 			 map->name, container_name);
1078 		return container_id;
1079 	}
1080 
1081 	container_type = btf__type_by_id(btf, container_id);
1082 	if (!container_type) {
1083 		pr_warning("map:%s cannot find BTF type for container_id:%u\n",
1084 			   map->name, container_id);
1085 		return -EINVAL;
1086 	}
1087 
1088 	if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
1089 	    BTF_INFO_VLEN(container_type->info) < 2) {
1090 		pr_warning("map:%s container_name:%s is an invalid container struct\n",
1091 			   map->name, container_name);
1092 		return -EINVAL;
1093 	}
1094 
1095 	key = (struct btf_member *)(container_type + 1);
1096 	value = key + 1;
1097 
1098 	key_size = btf__resolve_size(btf, key->type);
1099 	if (key_size < 0) {
1100 		pr_warning("map:%s invalid BTF key_type_size\n",
1101 			   map->name);
1102 		return key_size;
1103 	}
1104 
1105 	if (def->key_size != key_size) {
1106 		pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
1107 			   map->name, (__u32)key_size, def->key_size);
1108 		return -EINVAL;
1109 	}
1110 
1111 	value_size = btf__resolve_size(btf, value->type);
1112 	if (value_size < 0) {
1113 		pr_warning("map:%s invalid BTF value_type_size\n", map->name);
1114 		return value_size;
1115 	}
1116 
1117 	if (def->value_size != value_size) {
1118 		pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
1119 			   map->name, (__u32)value_size, def->value_size);
1120 		return -EINVAL;
1121 	}
1122 
1123 	map->btf_key_type_id = key->type;
1124 	map->btf_value_type_id = value->type;
1125 
1126 	return 0;
1127 }
1128 
/*
 * Replace @map's fd with a dup of @fd (an already-created map) and
 * refresh the cached name and definition from the kernel so later
 * operations see the reused map's attributes.
 *
 * The dup is routed through a freshly opened O_CLOEXEC descriptor so
 * the resulting fd has close-on-exec set via dup3().  Returns 0 on
 * success or a negative errno-style error; on failure @map is left
 * unchanged.
 */
int bpf_map__reuse_fd(struct bpf_map *map, int fd)
{
	struct bpf_map_info info = {};
	__u32 len = sizeof(info);
	int new_fd, err;
	char *new_name;

	err = bpf_obj_get_info_by_fd(fd, &info, &len);
	if (err)
		return err;

	new_name = strdup(info.name);
	if (!new_name)
		return -errno;

	/* Anchor fd, used only as a dup3() target to obtain O_CLOEXEC. */
	new_fd = open("/", O_RDONLY | O_CLOEXEC);
	if (new_fd < 0)
		goto err_free_new_name;

	new_fd = dup3(fd, new_fd, O_CLOEXEC);
	if (new_fd < 0)
		goto err_close_new_fd;

	/* Only after the dup succeeded do we drop the old fd and name. */
	err = zclose(map->fd);
	if (err)
		goto err_close_new_fd;
	free(map->name);

	map->fd = new_fd;
	map->name = new_name;
	map->def.type = info.type;
	map->def.key_size = info.key_size;
	map->def.value_size = info.value_size;
	map->def.max_entries = info.max_entries;
	map->def.map_flags = info.map_flags;
	map->btf_key_type_id = info.btf_key_type_id;
	map->btf_value_type_id = info.btf_value_type_id;

	return 0;

err_close_new_fd:
	close(new_fd);
err_free_new_name:
	free(new_name);
	return -errno;
}
1175 
1176 static int
1177 bpf_object__probe_name(struct bpf_object *obj)
1178 {
1179 	struct bpf_load_program_attr attr;
1180 	char *cp, errmsg[STRERR_BUFSIZE];
1181 	struct bpf_insn insns[] = {
1182 		BPF_MOV64_IMM(BPF_REG_0, 0),
1183 		BPF_EXIT_INSN(),
1184 	};
1185 	int ret;
1186 
1187 	/* make sure basic loading works */
1188 
1189 	memset(&attr, 0, sizeof(attr));
1190 	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
1191 	attr.insns = insns;
1192 	attr.insns_cnt = ARRAY_SIZE(insns);
1193 	attr.license = "GPL";
1194 
1195 	ret = bpf_load_program_xattr(&attr, NULL, 0);
1196 	if (ret < 0) {
1197 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1198 		pr_warning("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
1199 			   __func__, cp, errno);
1200 		return -errno;
1201 	}
1202 	close(ret);
1203 
1204 	/* now try the same program, but with the name */
1205 
1206 	attr.name = "test";
1207 	ret = bpf_load_program_xattr(&attr, NULL, 0);
1208 	if (ret >= 0) {
1209 		obj->caps.name = 1;
1210 		close(ret);
1211 	}
1212 
1213 	return 0;
1214 }
1215 
/*
 * Probe kernel capabilities relevant to object loading.  Currently the
 * only probed capability is whether program names are accepted at load
 * time (see bpf_object__probe_name()).
 */
static int
bpf_object__probe_caps(struct bpf_object *obj)
{
	return bpf_object__probe_name(obj);
}
1221 
1222 static int
1223 bpf_object__create_maps(struct bpf_object *obj)
1224 {
1225 	struct bpf_create_map_attr create_attr = {};
1226 	unsigned int i;
1227 	int err;
1228 
1229 	for (i = 0; i < obj->nr_maps; i++) {
1230 		struct bpf_map *map = &obj->maps[i];
1231 		struct bpf_map_def *def = &map->def;
1232 		char *cp, errmsg[STRERR_BUFSIZE];
1233 		int *pfd = &map->fd;
1234 
1235 		if (map->fd >= 0) {
1236 			pr_debug("skip map create (preset) %s: fd=%d\n",
1237 				 map->name, map->fd);
1238 			continue;
1239 		}
1240 
1241 		if (obj->caps.name)
1242 			create_attr.name = map->name;
1243 		create_attr.map_ifindex = map->map_ifindex;
1244 		create_attr.map_type = def->type;
1245 		create_attr.map_flags = def->map_flags;
1246 		create_attr.key_size = def->key_size;
1247 		create_attr.value_size = def->value_size;
1248 		create_attr.max_entries = def->max_entries;
1249 		create_attr.btf_fd = 0;
1250 		create_attr.btf_key_type_id = 0;
1251 		create_attr.btf_value_type_id = 0;
1252 		if (bpf_map_type__is_map_in_map(def->type) &&
1253 		    map->inner_map_fd >= 0)
1254 			create_attr.inner_map_fd = map->inner_map_fd;
1255 
1256 		if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
1257 			create_attr.btf_fd = btf__fd(obj->btf);
1258 			create_attr.btf_key_type_id = map->btf_key_type_id;
1259 			create_attr.btf_value_type_id = map->btf_value_type_id;
1260 		}
1261 
1262 		*pfd = bpf_create_map_xattr(&create_attr);
1263 		if (*pfd < 0 && create_attr.btf_key_type_id) {
1264 			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1265 			pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
1266 				   map->name, cp, errno);
1267 			create_attr.btf_fd = 0;
1268 			create_attr.btf_key_type_id = 0;
1269 			create_attr.btf_value_type_id = 0;
1270 			map->btf_key_type_id = 0;
1271 			map->btf_value_type_id = 0;
1272 			*pfd = bpf_create_map_xattr(&create_attr);
1273 		}
1274 
1275 		if (*pfd < 0) {
1276 			size_t j;
1277 
1278 			err = *pfd;
1279 			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1280 			pr_warning("failed to create map (name: '%s'): %s\n",
1281 				   map->name, cp);
1282 			for (j = 0; j < i; j++)
1283 				zclose(obj->maps[j].fd);
1284 			return err;
1285 		}
1286 		pr_debug("create map %s: fd=%d\n", map->name, *pfd);
1287 	}
1288 
1289 	return 0;
1290 }
1291 
1292 static int
1293 check_btf_ext_reloc_err(struct bpf_program *prog, int err,
1294 			void *btf_prog_info, const char *info_name)
1295 {
1296 	if (err != -ENOENT) {
1297 		pr_warning("Error in loading %s for sec %s.\n",
1298 			   info_name, prog->section_name);
1299 		return err;
1300 	}
1301 
1302 	/* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */
1303 
1304 	if (btf_prog_info) {
1305 		/*
1306 		 * Some info has already been found but has problem
1307 		 * in the last btf_ext reloc.  Must have to error
1308 		 * out.
1309 		 */
1310 		pr_warning("Error in relocating %s for sec %s.\n",
1311 			   info_name, prog->section_name);
1312 		return err;
1313 	}
1314 
1315 	/*
1316 	 * Have problem loading the very first info.  Ignore
1317 	 * the rest.
1318 	 */
1319 	pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n",
1320 		   info_name, prog->section_name, info_name);
1321 	return 0;
1322 }
1323 
/*
 * Load/relocate func_info and line_info from .BTF.ext for one program.
 * insn_offset == 0 denotes the main program; a non-zero offset denotes a
 * subprogram being appended at that instruction offset, whose info is
 * relocated relative to the main program's.
 */
static int
bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
			  const char *section_name,  __u32 insn_offset)
{
	int err;

	if (!insn_offset || prog->func_info) {
		/*
		 * !insn_offset => main program
		 *
		 * For sub prog, the main program's func_info has to
		 * be loaded first (i.e. prog->func_info != NULL)
		 */
		err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
					       section_name, insn_offset,
					       &prog->func_info,
					       &prog->func_info_cnt);
		if (err)
			return check_btf_ext_reloc_err(prog, err,
						       prog->func_info,
						       "bpf_func_info");

		prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
	}

	if (!insn_offset || prog->line_info) {
		/* same gating logic as for func_info above */
		err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
					       section_name, insn_offset,
					       &prog->line_info,
					       &prog->line_info_cnt);
		if (err)
			return check_btf_ext_reloc_err(prog, err,
						       prog->line_info,
						       "bpf_line_info");

		prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
	}

	/* only the main program records the BTF fd used at load time */
	if (!insn_offset)
		prog->btf_fd = btf__fd(obj->btf);

	return 0;
}
1367 
1368 static int
1369 bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
1370 			struct reloc_desc *relo)
1371 {
1372 	struct bpf_insn *insn, *new_insn;
1373 	struct bpf_program *text;
1374 	size_t new_cnt;
1375 	int err;
1376 
1377 	if (relo->type != RELO_CALL)
1378 		return -LIBBPF_ERRNO__RELOC;
1379 
1380 	if (prog->idx == obj->efile.text_shndx) {
1381 		pr_warning("relo in .text insn %d into off %d\n",
1382 			   relo->insn_idx, relo->text_off);
1383 		return -LIBBPF_ERRNO__RELOC;
1384 	}
1385 
1386 	if (prog->main_prog_cnt == 0) {
1387 		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
1388 		if (!text) {
1389 			pr_warning("no .text section found yet relo into text exist\n");
1390 			return -LIBBPF_ERRNO__RELOC;
1391 		}
1392 		new_cnt = prog->insns_cnt + text->insns_cnt;
1393 		new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
1394 		if (!new_insn) {
1395 			pr_warning("oom in prog realloc\n");
1396 			return -ENOMEM;
1397 		}
1398 
1399 		if (obj->btf_ext) {
1400 			err = bpf_program_reloc_btf_ext(prog, obj,
1401 							text->section_name,
1402 							prog->insns_cnt);
1403 			if (err)
1404 				return err;
1405 		}
1406 
1407 		memcpy(new_insn + prog->insns_cnt, text->insns,
1408 		       text->insns_cnt * sizeof(*insn));
1409 		prog->insns = new_insn;
1410 		prog->main_prog_cnt = prog->insns_cnt;
1411 		prog->insns_cnt = new_cnt;
1412 		pr_debug("added %zd insn from %s to prog %s\n",
1413 			 text->insns_cnt, text->section_name,
1414 			 prog->section_name);
1415 	}
1416 	insn = &prog->insns[relo->insn_idx];
1417 	insn->imm += prog->main_prog_cnt - relo->insn_idx;
1418 	return 0;
1419 }
1420 
/*
 * Apply all recorded relocations to one program: LD64 relocations patch
 * map fds into ld_imm64 instructions, everything else is handled as a
 * bpf-to-bpf call relocation.  The relocation descriptors are freed once
 * applied.
 */
static int
bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
	int i, err;

	if (!prog)
		return 0;

	/* relocate the main program's BTF ext info first */
	if (obj->btf_ext) {
		err = bpf_program_reloc_btf_ext(prog, obj,
						prog->section_name, 0);
		if (err)
			return err;
	}

	if (!prog->reloc_desc)
		return 0;

	for (i = 0; i < prog->nr_reloc; i++) {
		if (prog->reloc_desc[i].type == RELO_LD64) {
			struct bpf_insn *insns = prog->insns;
			int insn_idx, map_idx;

			insn_idx = prog->reloc_desc[i].insn_idx;
			map_idx = prog->reloc_desc[i].map_idx;

			if (insn_idx >= (int)prog->insns_cnt) {
				pr_warning("relocation out of range: '%s'\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__RELOC;
			}
			/* turn the insn into a proper map fd reference */
			insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
			insns[insn_idx].imm = obj->maps[map_idx].fd;
		} else {
			err = bpf_program__reloc_text(prog, obj,
						      &prog->reloc_desc[i]);
			if (err)
				return err;
		}
	}

	zfree(&prog->reloc_desc);
	prog->nr_reloc = 0;
	return 0;
}
1466 
1467 
1468 static int
1469 bpf_object__relocate(struct bpf_object *obj)
1470 {
1471 	struct bpf_program *prog;
1472 	size_t i;
1473 	int err;
1474 
1475 	for (i = 0; i < obj->nr_programs; i++) {
1476 		prog = &obj->programs[i];
1477 
1478 		err = bpf_program__relocate(prog, obj);
1479 		if (err) {
1480 			pr_warning("failed to relocate '%s'\n",
1481 				   prog->section_name);
1482 			return err;
1483 		}
1484 	}
1485 	return 0;
1486 }
1487 
/*
 * Walk all relocation sections collected from the ELF file and hand each
 * one to the program it applies to (matched via the section's sh_info).
 * Requires the libelf handles to still be open.
 */
static int bpf_object__collect_reloc(struct bpf_object *obj)
{
	int i, err;

	if (!obj_elf_valid(obj)) {
		pr_warning("Internal error: elf object is closed\n");
		return -LIBBPF_ERRNO__INTERNAL;
	}

	for (i = 0; i < obj->efile.nr_reloc; i++) {
		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
		Elf_Data *data = obj->efile.reloc[i].data;
		/* sh_info of a REL section is the target section index */
		int idx = shdr->sh_info;
		struct bpf_program *prog;

		if (shdr->sh_type != SHT_REL) {
			pr_warning("internal error at %d\n", __LINE__);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog = bpf_object__find_prog_by_idx(obj, idx);
		if (!prog) {
			pr_warning("relocation failed: no section(%d)\n", idx);
			return -LIBBPF_ERRNO__RELOC;
		}

		err = bpf_program__collect_reloc(prog,
						 shdr, data,
						 obj);
		if (err)
			return err;
	}
	return 0;
}
1522 
/*
 * Load one program instance via bpf_load_program_xattr().  On success
 * *pfd receives the program fd and 0 is returned; on failure a negative
 * libbpf error code is returned and the verifier log (if any) is dumped
 * via pr_warning().
 */
static int
load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
	     char *license, __u32 kern_version, int *pfd)
{
	struct bpf_load_program_attr load_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	char *log_buf;
	int ret;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = prog->type;
	load_attr.expected_attach_type = prog->expected_attach_type;
	/* names are only passed if the kernel was probed to support them */
	if (prog->caps->name)
		load_attr.name = prog->name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;
	load_attr.prog_ifindex = prog->prog_ifindex;
	/* btf_fd < 0 means no BTF was loaded for this object */
	load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0;
	load_attr.func_info = prog->func_info;
	load_attr.func_info_rec_size = prog->func_info_rec_size;
	load_attr.func_info_cnt = prog->func_info_cnt;
	load_attr.line_info = prog->line_info;
	load_attr.line_info_rec_size = prog->line_info_rec_size;
	load_attr.line_info_cnt = prog->line_info_cnt;
	if (!load_attr.insns || !load_attr.insns_cnt)
		return -EINVAL;

	/* the log buffer is best-effort; loading proceeds without it */
	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	/* load failed: try to classify the error for the caller */
	ret = -LIBBPF_ERRNO__LOAD;
	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
	pr_warning("load bpf program failed: %s\n", cp);

	if (log_buf && log_buf[0] != '\0') {
		/* a non-empty log means the verifier rejected the program */
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%zu insns), at most %d insns\n",
			   load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? */
		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			/* if it loads as a kprobe, the type was wrong */
			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
			load_attr.expected_attach_type = 0;
			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		/* otherwise blame a missing/incorrect kernel version */
		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}
1600 
/*
 * Load all instances of one program.  Without a preprocessor there is a
 * single instance built from prog->insns; with one, each instance is
 * produced by the preprocessor callback and loaded separately.  The raw
 * instructions are freed afterwards in either case.
 */
int
bpf_program__load(struct bpf_program *prog,
		  char *license, __u32 kern_version)
{
	int err = 0, fd, i;

	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			/* a preprocessor requires pre-sized instances */
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		/* lazily set up the default single-instance layout */
		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		/* warn (but proceed) if instance bookkeeping is off */
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			/* empty result means "skip this instance" */
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
					i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	/* instructions are no longer needed once loading was attempted */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
1679 
1680 static bool bpf_program__is_function_storage(struct bpf_program *prog,
1681 					     struct bpf_object *obj)
1682 {
1683 	return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
1684 }
1685 
1686 static int
1687 bpf_object__load_progs(struct bpf_object *obj)
1688 {
1689 	size_t i;
1690 	int err;
1691 
1692 	for (i = 0; i < obj->nr_programs; i++) {
1693 		if (bpf_program__is_function_storage(&obj->programs[i], obj))
1694 			continue;
1695 		err = bpf_program__load(&obj->programs[i],
1696 					obj->license,
1697 					obj->kern_version);
1698 		if (err)
1699 			return err;
1700 	}
1701 	return 0;
1702 }
1703 
/*
 * Whether loading a program of this type requires a non-zero kernel
 * version attribute.  Kprobes do; every explicitly listed type does not,
 * and unknown/future types conservatively default to requiring one.
 */
static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
		return false;
	case BPF_PROG_TYPE_KPROBE:
	default:
		return true;
	}
}
1735 
1736 static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
1737 {
1738 	if (needs_kver && obj->kern_version == 0) {
1739 		pr_warning("%s doesn't provide kernel version\n",
1740 			   obj->path);
1741 		return -LIBBPF_ERRNO__KVERSION;
1742 	}
1743 	return 0;
1744 }
1745 
/*
 * Core open path: parse the ELF image (from a file or a memory buffer),
 * collect sections and relocations, and validate.  The libelf handles are
 * released before returning; on error the partially built object is
 * destroyed and an ERR_PTR is returned.
 */
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
		   bool needs_kver, int flags)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	/* CHECK_ERR() jumps to the out label with err set on failure */
	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);

	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}
1774 
1775 struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
1776 					    int flags)
1777 {
1778 	/* param validation */
1779 	if (!attr->file)
1780 		return NULL;
1781 
1782 	pr_debug("loading %s\n", attr->file);
1783 
1784 	return __bpf_object__open(attr->file, NULL, 0,
1785 				  bpf_prog_type__needs_kver(attr->prog_type),
1786 				  flags);
1787 }
1788 
/* Public wrapper around __bpf_object__open_xattr() with no extra flags. */
struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
{
	return __bpf_object__open_xattr(attr, 0);
}
1793 
1794 struct bpf_object *bpf_object__open(const char *path)
1795 {
1796 	struct bpf_object_open_attr attr = {
1797 		.file		= path,
1798 		.prog_type	= BPF_PROG_TYPE_UNSPEC,
1799 	};
1800 
1801 	return bpf_object__open_xattr(&attr);
1802 }
1803 
/*
 * Open a BPF object from an in-memory ELF image.  If no name is given, a
 * "<address>-<size>" name is synthesized so the object stays identifiable
 * in log messages.
 */
struct bpf_object *bpf_object__open_buffer(void *obj_buf,
					   size_t obj_buf_sz,
					   const char *name)
{
	char tmp_name[64];

	/* param validation */
	if (!obj_buf || obj_buf_sz <= 0)
		return NULL;

	if (!name) {
		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
			 (unsigned long)obj_buf,
			 (unsigned long)obj_buf_sz);
		/* redundant with snprintf()'s own termination; harmless */
		tmp_name[sizeof(tmp_name) - 1] = '\0';
		name = tmp_name;
	}
	pr_debug("loading object '%s' from buffer\n",
		 name);

	/*
	 * NOTE(review): the last argument is the int 'flags' parameter but
	 * receives 'true' here - verify against the flag values accepted
	 * by bpf_object__elf_collect().
	 */
	return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
}
1826 
1827 int bpf_object__unload(struct bpf_object *obj)
1828 {
1829 	size_t i;
1830 
1831 	if (!obj)
1832 		return -EINVAL;
1833 
1834 	for (i = 0; i < obj->nr_maps; i++)
1835 		zclose(obj->maps[i].fd);
1836 
1837 	for (i = 0; i < obj->nr_programs; i++)
1838 		bpf_program__unload(&obj->programs[i]);
1839 
1840 	return 0;
1841 }
1842 
/*
 * Load a previously opened object into the kernel: probe capabilities,
 * create maps, apply relocations, then load every program.  An object
 * can only be loaded once; on any failure everything loaded so far is
 * torn down again.
 */
int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	obj->loaded = true;

	/* CHECK_ERR() jumps to the out label with err set on failure */
	CHECK_ERR(bpf_object__probe_caps(obj), err, out);
	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}
1868 
1869 static int check_path(const char *path)
1870 {
1871 	char *cp, errmsg[STRERR_BUFSIZE];
1872 	struct statfs st_fs;
1873 	char *dname, *dir;
1874 	int err = 0;
1875 
1876 	if (path == NULL)
1877 		return -EINVAL;
1878 
1879 	dname = strdup(path);
1880 	if (dname == NULL)
1881 		return -ENOMEM;
1882 
1883 	dir = dirname(dname);
1884 	if (statfs(dir, &st_fs)) {
1885 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1886 		pr_warning("failed to statfs %s: %s\n", dir, cp);
1887 		err = -errno;
1888 	}
1889 	free(dname);
1890 
1891 	if (!err && st_fs.f_type != BPF_FS_MAGIC) {
1892 		pr_warning("specified path %s is not on BPF FS\n", path);
1893 		err = -EINVAL;
1894 	}
1895 
1896 	return err;
1897 }
1898 
1899 int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
1900 			      int instance)
1901 {
1902 	char *cp, errmsg[STRERR_BUFSIZE];
1903 	int err;
1904 
1905 	err = check_path(path);
1906 	if (err)
1907 		return err;
1908 
1909 	if (prog == NULL) {
1910 		pr_warning("invalid program pointer\n");
1911 		return -EINVAL;
1912 	}
1913 
1914 	if (instance < 0 || instance >= prog->instances.nr) {
1915 		pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1916 			   instance, prog->section_name, prog->instances.nr);
1917 		return -EINVAL;
1918 	}
1919 
1920 	if (bpf_obj_pin(prog->instances.fds[instance], path)) {
1921 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1922 		pr_warning("failed to pin program: %s\n", cp);
1923 		return -errno;
1924 	}
1925 	pr_debug("pinned program '%s'\n", path);
1926 
1927 	return 0;
1928 }
1929 
1930 int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
1931 				int instance)
1932 {
1933 	int err;
1934 
1935 	err = check_path(path);
1936 	if (err)
1937 		return err;
1938 
1939 	if (prog == NULL) {
1940 		pr_warning("invalid program pointer\n");
1941 		return -EINVAL;
1942 	}
1943 
1944 	if (instance < 0 || instance >= prog->instances.nr) {
1945 		pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1946 			   instance, prog->section_name, prog->instances.nr);
1947 		return -EINVAL;
1948 	}
1949 
1950 	err = unlink(path);
1951 	if (err != 0)
1952 		return -errno;
1953 	pr_debug("unpinned program '%s'\n", path);
1954 
1955 	return 0;
1956 }
1957 
1958 static int make_dir(const char *path)
1959 {
1960 	char *cp, errmsg[STRERR_BUFSIZE];
1961 	int err = 0;
1962 
1963 	if (mkdir(path, 0700) && errno != EEXIST)
1964 		err = -errno;
1965 
1966 	if (err) {
1967 		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
1968 		pr_warning("failed to mkdir %s: %s\n", path, cp);
1969 	}
1970 	return err;
1971 }
1972 
/*
 * Pin a program to BPF FS.  A single-instance program is pinned directly
 * at @path; a multi-instance program gets a directory at @path with one
 * numbered entry per instance.  On failure, instances pinned so far are
 * unpinned again and the directory is removed.
 */
int bpf_program__pin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warning("invalid program pointer\n");
		return -EINVAL;
	}

	if (prog->instances.nr <= 0) {
		pr_warning("no instances of prog %s to pin\n",
			   prog->section_name);
		return -EINVAL;
	}

	if (prog->instances.nr == 1) {
		/* don't create subdirs when pinning single instance */
		return bpf_program__pin_instance(prog, path, 0);
	}

	err = make_dir(path);
	if (err)
		return err;

	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin;
		}

		err = bpf_program__pin_instance(prog, buf, i);
		if (err)
			goto err_unpin;
	}

	return 0;

err_unpin:
	/* roll back only the instances pinned before the failure */
	for (i = i - 1; i >= 0; i--) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin_instance(prog, buf, i);
	}

	rmdir(path);

	return err;
}
2039 
2040 int bpf_program__unpin(struct bpf_program *prog, const char *path)
2041 {
2042 	int i, err;
2043 
2044 	err = check_path(path);
2045 	if (err)
2046 		return err;
2047 
2048 	if (prog == NULL) {
2049 		pr_warning("invalid program pointer\n");
2050 		return -EINVAL;
2051 	}
2052 
2053 	if (prog->instances.nr <= 0) {
2054 		pr_warning("no instances of prog %s to pin\n",
2055 			   prog->section_name);
2056 		return -EINVAL;
2057 	}
2058 
2059 	if (prog->instances.nr == 1) {
2060 		/* don't create subdirs when pinning single instance */
2061 		return bpf_program__unpin_instance(prog, path, 0);
2062 	}
2063 
2064 	for (i = 0; i < prog->instances.nr; i++) {
2065 		char buf[PATH_MAX];
2066 		int len;
2067 
2068 		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
2069 		if (len < 0)
2070 			return -EINVAL;
2071 		else if (len >= PATH_MAX)
2072 			return -ENAMETOOLONG;
2073 
2074 		err = bpf_program__unpin_instance(prog, buf, i);
2075 		if (err)
2076 			return err;
2077 	}
2078 
2079 	err = rmdir(path);
2080 	if (err)
2081 		return -errno;
2082 
2083 	return 0;
2084 }
2085 
2086 int bpf_map__pin(struct bpf_map *map, const char *path)
2087 {
2088 	char *cp, errmsg[STRERR_BUFSIZE];
2089 	int err;
2090 
2091 	err = check_path(path);
2092 	if (err)
2093 		return err;
2094 
2095 	if (map == NULL) {
2096 		pr_warning("invalid map pointer\n");
2097 		return -EINVAL;
2098 	}
2099 
2100 	if (bpf_obj_pin(map->fd, path)) {
2101 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
2102 		pr_warning("failed to pin map: %s\n", cp);
2103 		return -errno;
2104 	}
2105 
2106 	pr_debug("pinned map '%s'\n", path);
2107 
2108 	return 0;
2109 }
2110 
/* Remove a previously pinned map entry from @path on BPF FS. */
int bpf_map__unpin(struct bpf_map *map, const char *path)
{
	int err;

	err = check_path(path);
	if (err)
		return err;

	if (!map) {
		pr_warning("invalid map pointer\n");
		return -EINVAL;
	}

	if (unlink(path))
		return -errno;
	pr_debug("unpinned map '%s'\n", path);

	return 0;
}
2131 
/*
 * Pin all maps of a loaded object under directory @path, one entry per
 * map named after the map.  On failure, maps pinned earlier in the
 * iteration are unpinned again by walking backwards from the failing map.
 */
int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
{
	struct bpf_map *map;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warning("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	err = make_dir(path);
	if (err)
		return err;

	bpf_map__for_each(map, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       bpf_map__name(map));
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_maps;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin_maps;
		}

		err = bpf_map__pin(map, buf);
		if (err)
			goto err_unpin_maps;
	}

	return 0;

err_unpin_maps:
	/* walk backwards from the failing map, unpinning what succeeded */
	while ((map = bpf_map__prev(map, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       bpf_map__name(map));
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_map__unpin(map, buf);
	}

	return err;
}
2187 
2188 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
2189 {
2190 	struct bpf_map *map;
2191 	int err;
2192 
2193 	if (!obj)
2194 		return -ENOENT;
2195 
2196 	bpf_map__for_each(map, obj) {
2197 		char buf[PATH_MAX];
2198 		int len;
2199 
2200 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
2201 			       bpf_map__name(map));
2202 		if (len < 0)
2203 			return -EINVAL;
2204 		else if (len >= PATH_MAX)
2205 			return -ENAMETOOLONG;
2206 
2207 		err = bpf_map__unpin(map, buf);
2208 		if (err)
2209 			return err;
2210 	}
2211 
2212 	return 0;
2213 }
2214 
/*
 * Pin all programs of a loaded object under directory @path, one entry
 * (or per-instance subdirectory) per program named after its pin_name.
 * On failure, programs pinned earlier in the iteration are unpinned
 * again by walking backwards from the failing program.
 */
int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warning("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	err = make_dir(path);
	if (err)
		return err;

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_programs;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin_programs;
		}

		err = bpf_program__pin(prog, buf);
		if (err)
			goto err_unpin_programs;
	}

	return 0;

err_unpin_programs:
	/* walk backwards from the failure, unpinning what succeeded */
	while ((prog = bpf_program__prev(prog, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin(prog, buf);
	}

	return err;
}
2270 
2271 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
2272 {
2273 	struct bpf_program *prog;
2274 	int err;
2275 
2276 	if (!obj)
2277 		return -ENOENT;
2278 
2279 	bpf_object__for_each_program(prog, obj) {
2280 		char buf[PATH_MAX];
2281 		int len;
2282 
2283 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
2284 			       prog->pin_name);
2285 		if (len < 0)
2286 			return -EINVAL;
2287 		else if (len >= PATH_MAX)
2288 			return -ENAMETOOLONG;
2289 
2290 		err = bpf_program__unpin(prog, buf);
2291 		if (err)
2292 			return err;
2293 	}
2294 
2295 	return 0;
2296 }
2297 
int bpf_object__pin(struct bpf_object *obj, const char *path)
{
	int err;

	/* pin maps first; if pinning programs fails, roll the maps back */
	err = bpf_object__pin_maps(obj, path);
	if (err)
		return err;

	err = bpf_object__pin_programs(obj, path);
	if (err)
		bpf_object__unpin_maps(obj, path);

	return err;
}
2314 
/*
 * Destroy an object: run the user's clear_priv callbacks, release ELF
 * and kernel resources (fds), free BTF data, maps and programs, and
 * finally unlink the object from the global object list.  A NULL obj is
 * tolerated.
 */
void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);
	btf__free(obj->btf);
	btf_ext__free(obj->btf_ext);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		/* give per-map private data a chance to clean up */
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}
	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	list_del(&obj->list);
	free(obj);
}
2350 
2351 struct bpf_object *
2352 bpf_object__next(struct bpf_object *prev)
2353 {
2354 	struct bpf_object *next;
2355 
2356 	if (!prev)
2357 		next = list_first_entry(&bpf_objects_list,
2358 					struct bpf_object,
2359 					list);
2360 	else
2361 		next = list_next_entry(prev, list);
2362 
2363 	/* Empty list is noticed here so don't need checking on entry. */
2364 	if (&next->list == &bpf_objects_list)
2365 		return NULL;
2366 
2367 	return next;
2368 }
2369 
2370 const char *bpf_object__name(struct bpf_object *obj)
2371 {
2372 	return obj ? obj->path : ERR_PTR(-EINVAL);
2373 }
2374 
2375 unsigned int bpf_object__kversion(struct bpf_object *obj)
2376 {
2377 	return obj ? obj->kern_version : 0;
2378 }
2379 
2380 int bpf_object__btf_fd(const struct bpf_object *obj)
2381 {
2382 	return obj->btf ? btf__fd(obj->btf) : -1;
2383 }
2384 
2385 int bpf_object__set_priv(struct bpf_object *obj, void *priv,
2386 			 bpf_object_clear_priv_t clear_priv)
2387 {
2388 	if (obj->priv && obj->clear_priv)
2389 		obj->clear_priv(obj, obj->priv);
2390 
2391 	obj->priv = priv;
2392 	obj->clear_priv = clear_priv;
2393 	return 0;
2394 }
2395 
2396 void *bpf_object__priv(struct bpf_object *obj)
2397 {
2398 	return obj ? obj->priv : ERR_PTR(-EINVAL);
2399 }
2400 
2401 static struct bpf_program *
2402 __bpf_program__iter(struct bpf_program *p, struct bpf_object *obj, bool forward)
2403 {
2404 	size_t nr_programs = obj->nr_programs;
2405 	ssize_t idx;
2406 
2407 	if (!nr_programs)
2408 		return NULL;
2409 
2410 	if (!p)
2411 		/* Iter from the beginning */
2412 		return forward ? &obj->programs[0] :
2413 			&obj->programs[nr_programs - 1];
2414 
2415 	if (p->obj != obj) {
2416 		pr_warning("error: program handler doesn't match object\n");
2417 		return NULL;
2418 	}
2419 
2420 	idx = (p - obj->programs) + (forward ? 1 : -1);
2421 	if (idx >= obj->nr_programs || idx < 0)
2422 		return NULL;
2423 	return &obj->programs[idx];
2424 }
2425 
2426 struct bpf_program *
2427 bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
2428 {
2429 	struct bpf_program *prog = prev;
2430 
2431 	do {
2432 		prog = __bpf_program__iter(prog, obj, true);
2433 	} while (prog && bpf_program__is_function_storage(prog, obj));
2434 
2435 	return prog;
2436 }
2437 
2438 struct bpf_program *
2439 bpf_program__prev(struct bpf_program *next, struct bpf_object *obj)
2440 {
2441 	struct bpf_program *prog = next;
2442 
2443 	do {
2444 		prog = __bpf_program__iter(prog, obj, false);
2445 	} while (prog && bpf_program__is_function_storage(prog, obj));
2446 
2447 	return prog;
2448 }
2449 
2450 int bpf_program__set_priv(struct bpf_program *prog, void *priv,
2451 			  bpf_program_clear_priv_t clear_priv)
2452 {
2453 	if (prog->priv && prog->clear_priv)
2454 		prog->clear_priv(prog, prog->priv);
2455 
2456 	prog->priv = priv;
2457 	prog->clear_priv = clear_priv;
2458 	return 0;
2459 }
2460 
2461 void *bpf_program__priv(struct bpf_program *prog)
2462 {
2463 	return prog ? prog->priv : ERR_PTR(-EINVAL);
2464 }
2465 
/* Set the network device ifindex used for offloading this program. */
void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
{
	prog->prog_ifindex = ifindex;
}
2470 
2471 const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
2472 {
2473 	const char *title;
2474 
2475 	title = prog->section_name;
2476 	if (needs_copy) {
2477 		title = strdup(title);
2478 		if (!title) {
2479 			pr_warning("failed to strdup program title\n");
2480 			return ERR_PTR(-ENOMEM);
2481 		}
2482 	}
2483 
2484 	return title;
2485 }
2486 
/* Return the fd of the program's first (default) instance. */
int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}
2491 
2492 int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
2493 			  bpf_program_prep_t prep)
2494 {
2495 	int *instances_fds;
2496 
2497 	if (nr_instances <= 0 || !prep)
2498 		return -EINVAL;
2499 
2500 	if (prog->instances.nr > 0 || prog->instances.fds) {
2501 		pr_warning("Can't set pre-processor after loading\n");
2502 		return -EINVAL;
2503 	}
2504 
2505 	instances_fds = malloc(sizeof(int) * nr_instances);
2506 	if (!instances_fds) {
2507 		pr_warning("alloc memory failed for fds\n");
2508 		return -ENOMEM;
2509 	}
2510 
2511 	/* fill all fd with -1 */
2512 	memset(instances_fds, -1, sizeof(int) * nr_instances);
2513 
2514 	prog->instances.nr = nr_instances;
2515 	prog->instances.fds = instances_fds;
2516 	prog->preprocessor = prep;
2517 	return 0;
2518 }
2519 
2520 int bpf_program__nth_fd(struct bpf_program *prog, int n)
2521 {
2522 	int fd;
2523 
2524 	if (!prog)
2525 		return -EINVAL;
2526 
2527 	if (n >= prog->instances.nr || n < 0) {
2528 		pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
2529 			   n, prog->section_name, prog->instances.nr);
2530 		return -EINVAL;
2531 	}
2532 
2533 	fd = prog->instances.fds[n];
2534 	if (fd < 0) {
2535 		pr_warning("%dth instance of program '%s' is invalid\n",
2536 			   n, prog->section_name);
2537 		return -ENOENT;
2538 	}
2539 
2540 	return fd;
2541 }
2542 
/* Force the BPF program type used when loading @prog. */
void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
	prog->type = type;
}
2547 
2548 static bool bpf_program__is_type(struct bpf_program *prog,
2549 				 enum bpf_prog_type type)
2550 {
2551 	return prog ? (prog->type == type) : false;
2552 }
2553 
/* Generate the public typed accessor pair for each program type:
 * bpf_program__set_<NAME>() forces the program to TYPE (-EINVAL on
 * NULL prog) and bpf_program__is_<NAME>() reports whether it currently
 * has TYPE. Instantiated once per supported type below.
 */
#define BPF_PROG_TYPE_FNS(NAME, TYPE)			\
int bpf_program__set_##NAME(struct bpf_program *prog)	\
{							\
	if (!prog)					\
		return -EINVAL;				\
	bpf_program__set_type(prog, TYPE);		\
	return 0;					\
}							\
							\
bool bpf_program__is_##NAME(struct bpf_program *prog)	\
{							\
	return bpf_program__is_type(prog, TYPE);	\
}							\

BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
2576 
/* Set the expected attach type passed to the kernel at load time. */
void bpf_program__set_expected_attach_type(struct bpf_program *prog,
					   enum bpf_attach_type type)
{
	prog->expected_attach_type = type;
}
2582 
/* Table entry layout: section-name prefix, its length (so strncmp can do
 * prefix matching), program type, expected attach type, whether the
 * program can be attached, and the attach type used when attaching.
 */
#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \
	{ string, sizeof(string) - 1, ptype, eatype, is_attachable, atype }

/* Programs that can NOT be attached. */
#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0)

/* Programs that can be attached. */
#define BPF_APROG_SEC(string, ptype, atype) \
	BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype)

/* Programs that must specify expected attach type at load time. */
#define BPF_EAPROG_SEC(string, ptype, eatype) \
	BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype)

/* Programs that can be attached but attach type can't be identified by section
 * name. Kept for backward compatibility.
 */
#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)

/* Maps ELF section-name prefixes to BPF program/attach types; consulted
 * by libbpf_prog_type_by_name() and libbpf_attach_type_by_name().
 * Matching is prefix-based and first-match-wins, so more specific
 * prefixes (e.g. "cgroup_skb/ingress") must precede generic ones.
 */
static const struct {
	const char *sec;
	size_t len;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	int is_attachable;
	enum bpf_attach_type attach_type;
} section_names[] = {
	BPF_PROG_SEC("socket",			BPF_PROG_TYPE_SOCKET_FILTER),
	BPF_PROG_SEC("kprobe/",			BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("kretprobe/",		BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("classifier",		BPF_PROG_TYPE_SCHED_CLS),
	BPF_PROG_SEC("action",			BPF_PROG_TYPE_SCHED_ACT),
	BPF_PROG_SEC("tracepoint/",		BPF_PROG_TYPE_TRACEPOINT),
	BPF_PROG_SEC("raw_tracepoint/",		BPF_PROG_TYPE_RAW_TRACEPOINT),
	BPF_PROG_SEC("xdp",			BPF_PROG_TYPE_XDP),
	BPF_PROG_SEC("perf_event",		BPF_PROG_TYPE_PERF_EVENT),
	BPF_PROG_SEC("lwt_in",			BPF_PROG_TYPE_LWT_IN),
	BPF_PROG_SEC("lwt_out",			BPF_PROG_TYPE_LWT_OUT),
	BPF_PROG_SEC("lwt_xmit",		BPF_PROG_TYPE_LWT_XMIT),
	BPF_PROG_SEC("lwt_seg6local",		BPF_PROG_TYPE_LWT_SEG6LOCAL),
	BPF_APROG_SEC("cgroup_skb/ingress",	BPF_PROG_TYPE_CGROUP_SKB,
						BPF_CGROUP_INET_INGRESS),
	BPF_APROG_SEC("cgroup_skb/egress",	BPF_PROG_TYPE_CGROUP_SKB,
						BPF_CGROUP_INET_EGRESS),
	BPF_APROG_COMPAT("cgroup/skb",		BPF_PROG_TYPE_CGROUP_SKB),
	BPF_APROG_SEC("cgroup/sock",		BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET_SOCK_CREATE),
	BPF_EAPROG_SEC("cgroup/post_bind4",	BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET4_POST_BIND),
	BPF_EAPROG_SEC("cgroup/post_bind6",	BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET6_POST_BIND),
	BPF_APROG_SEC("cgroup/dev",		BPF_PROG_TYPE_CGROUP_DEVICE,
						BPF_CGROUP_DEVICE),
	BPF_APROG_SEC("sockops",		BPF_PROG_TYPE_SOCK_OPS,
						BPF_CGROUP_SOCK_OPS),
	BPF_APROG_SEC("sk_skb/stream_parser",	BPF_PROG_TYPE_SK_SKB,
						BPF_SK_SKB_STREAM_PARSER),
	BPF_APROG_SEC("sk_skb/stream_verdict",	BPF_PROG_TYPE_SK_SKB,
						BPF_SK_SKB_STREAM_VERDICT),
	BPF_APROG_COMPAT("sk_skb",		BPF_PROG_TYPE_SK_SKB),
	BPF_APROG_SEC("sk_msg",			BPF_PROG_TYPE_SK_MSG,
						BPF_SK_MSG_VERDICT),
	BPF_APROG_SEC("lirc_mode2",		BPF_PROG_TYPE_LIRC_MODE2,
						BPF_LIRC_MODE2),
	BPF_APROG_SEC("flow_dissector",		BPF_PROG_TYPE_FLOW_DISSECTOR,
						BPF_FLOW_DISSECTOR),
	BPF_EAPROG_SEC("cgroup/bind4",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_BIND),
	BPF_EAPROG_SEC("cgroup/bind6",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_BIND),
	BPF_EAPROG_SEC("cgroup/connect4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_CONNECT),
	BPF_EAPROG_SEC("cgroup/connect6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_CONNECT),
	BPF_EAPROG_SEC("cgroup/sendmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP4_SENDMSG),
	BPF_EAPROG_SEC("cgroup/sendmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP6_SENDMSG),
};

/* Helper macros are table-construction only; keep them out of scope. */
#undef BPF_PROG_SEC_IMPL
#undef BPF_PROG_SEC
#undef BPF_APROG_SEC
#undef BPF_EAPROG_SEC
#undef BPF_APROG_COMPAT
2668 
2669 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
2670 			     enum bpf_attach_type *expected_attach_type)
2671 {
2672 	int i;
2673 
2674 	if (!name)
2675 		return -EINVAL;
2676 
2677 	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2678 		if (strncmp(name, section_names[i].sec, section_names[i].len))
2679 			continue;
2680 		*prog_type = section_names[i].prog_type;
2681 		*expected_attach_type = section_names[i].expected_attach_type;
2682 		return 0;
2683 	}
2684 	return -EINVAL;
2685 }
2686 
2687 int libbpf_attach_type_by_name(const char *name,
2688 			       enum bpf_attach_type *attach_type)
2689 {
2690 	int i;
2691 
2692 	if (!name)
2693 		return -EINVAL;
2694 
2695 	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2696 		if (strncmp(name, section_names[i].sec, section_names[i].len))
2697 			continue;
2698 		if (!section_names[i].is_attachable)
2699 			return -EINVAL;
2700 		*attach_type = section_names[i].attach_type;
2701 		return 0;
2702 	}
2703 	return -EINVAL;
2704 }
2705 
/* Guess @prog's program/attach types from its ELF section name.
 * Thin wrapper around libbpf_prog_type_by_name().
 */
static int
bpf_program__identify_section(struct bpf_program *prog,
			      enum bpf_prog_type *prog_type,
			      enum bpf_attach_type *expected_attach_type)
{
	return libbpf_prog_type_by_name(prog->section_name, prog_type,
					expected_attach_type);
}
2714 
2715 int bpf_map__fd(struct bpf_map *map)
2716 {
2717 	return map ? map->fd : -EINVAL;
2718 }
2719 
2720 const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
2721 {
2722 	return map ? &map->def : ERR_PTR(-EINVAL);
2723 }
2724 
2725 const char *bpf_map__name(struct bpf_map *map)
2726 {
2727 	return map ? map->name : NULL;
2728 }
2729 
2730 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
2731 {
2732 	return map ? map->btf_key_type_id : 0;
2733 }
2734 
2735 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
2736 {
2737 	return map ? map->btf_value_type_id : 0;
2738 }
2739 
2740 int bpf_map__set_priv(struct bpf_map *map, void *priv,
2741 		     bpf_map_clear_priv_t clear_priv)
2742 {
2743 	if (!map)
2744 		return -EINVAL;
2745 
2746 	if (map->priv) {
2747 		if (map->clear_priv)
2748 			map->clear_priv(map, map->priv);
2749 	}
2750 
2751 	map->priv = priv;
2752 	map->clear_priv = clear_priv;
2753 	return 0;
2754 }
2755 
2756 void *bpf_map__priv(struct bpf_map *map)
2757 {
2758 	return map ? map->priv : ERR_PTR(-EINVAL);
2759 }
2760 
/* Perf event arrays are never offloaded to a device, so they ignore
 * any requested ifindex (see bpf_prog_load_xattr()).
 */
bool bpf_map__is_offload_neutral(struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}
2765 
/* Set the network device ifindex used for offloading this map. */
void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
	map->map_ifindex = ifindex;
}
2770 
2771 int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
2772 {
2773 	if (!bpf_map_type__is_map_in_map(map->def.type)) {
2774 		pr_warning("error: unsupported map type\n");
2775 		return -EINVAL;
2776 	}
2777 	if (map->inner_map_fd != -1) {
2778 		pr_warning("error: inner_map_fd already specified\n");
2779 		return -EINVAL;
2780 	}
2781 	map->inner_map_fd = fd;
2782 	return 0;
2783 }
2784 
2785 static struct bpf_map *
2786 __bpf_map__iter(struct bpf_map *m, struct bpf_object *obj, int i)
2787 {
2788 	ssize_t idx;
2789 	struct bpf_map *s, *e;
2790 
2791 	if (!obj || !obj->maps)
2792 		return NULL;
2793 
2794 	s = obj->maps;
2795 	e = obj->maps + obj->nr_maps;
2796 
2797 	if ((m < s) || (m >= e)) {
2798 		pr_warning("error in %s: map handler doesn't belong to object\n",
2799 			   __func__);
2800 		return NULL;
2801 	}
2802 
2803 	idx = (m - obj->maps) + i;
2804 	if (idx >= obj->nr_maps || idx < 0)
2805 		return NULL;
2806 	return &obj->maps[idx];
2807 }
2808 
2809 struct bpf_map *
2810 bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
2811 {
2812 	if (prev == NULL)
2813 		return obj->maps;
2814 
2815 	return __bpf_map__iter(prev, obj, 1);
2816 }
2817 
2818 struct bpf_map *
2819 bpf_map__prev(struct bpf_map *next, struct bpf_object *obj)
2820 {
2821 	if (next == NULL) {
2822 		if (!obj->nr_maps)
2823 			return NULL;
2824 		return obj->maps + obj->nr_maps - 1;
2825 	}
2826 
2827 	return __bpf_map__iter(next, obj, -1);
2828 }
2829 
2830 struct bpf_map *
2831 bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
2832 {
2833 	struct bpf_map *pos;
2834 
2835 	bpf_map__for_each(pos, obj) {
2836 		if (pos->name && !strcmp(pos->name, name))
2837 			return pos;
2838 	}
2839 	return NULL;
2840 }
2841 
2842 struct bpf_map *
2843 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
2844 {
2845 	int i;
2846 
2847 	for (i = 0; i < obj->nr_maps; i++) {
2848 		if (obj->maps[i].offset == offset)
2849 			return &obj->maps[i];
2850 	}
2851 	return ERR_PTR(-ENOENT);
2852 }
2853 
/* Extract the errno-style error encoded in an ERR_PTR-style pointer,
 * or 0 when @ptr is a valid (non-error) pointer.
 */
long libbpf_get_error(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}
2860 
2861 int bpf_prog_load(const char *file, enum bpf_prog_type type,
2862 		  struct bpf_object **pobj, int *prog_fd)
2863 {
2864 	struct bpf_prog_load_attr attr;
2865 
2866 	memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
2867 	attr.file = file;
2868 	attr.prog_type = type;
2869 	attr.expected_attach_type = 0;
2870 
2871 	return bpf_prog_load_xattr(&attr, pobj, prog_fd);
2872 }
2873 
2874 int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
2875 			struct bpf_object **pobj, int *prog_fd)
2876 {
2877 	struct bpf_object_open_attr open_attr = {
2878 		.file		= attr->file,
2879 		.prog_type	= attr->prog_type,
2880 	};
2881 	struct bpf_program *prog, *first_prog = NULL;
2882 	enum bpf_attach_type expected_attach_type;
2883 	enum bpf_prog_type prog_type;
2884 	struct bpf_object *obj;
2885 	struct bpf_map *map;
2886 	int err;
2887 
2888 	if (!attr)
2889 		return -EINVAL;
2890 	if (!attr->file)
2891 		return -EINVAL;
2892 
2893 	obj = bpf_object__open_xattr(&open_attr);
2894 	if (IS_ERR_OR_NULL(obj))
2895 		return -ENOENT;
2896 
2897 	bpf_object__for_each_program(prog, obj) {
2898 		/*
2899 		 * If type is not specified, try to guess it based on
2900 		 * section name.
2901 		 */
2902 		prog_type = attr->prog_type;
2903 		prog->prog_ifindex = attr->ifindex;
2904 		expected_attach_type = attr->expected_attach_type;
2905 		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
2906 			err = bpf_program__identify_section(prog, &prog_type,
2907 							    &expected_attach_type);
2908 			if (err < 0) {
2909 				pr_warning("failed to guess program type based on section name %s\n",
2910 					   prog->section_name);
2911 				bpf_object__close(obj);
2912 				return -EINVAL;
2913 			}
2914 		}
2915 
2916 		bpf_program__set_type(prog, prog_type);
2917 		bpf_program__set_expected_attach_type(prog,
2918 						      expected_attach_type);
2919 
2920 		if (!first_prog)
2921 			first_prog = prog;
2922 	}
2923 
2924 	bpf_map__for_each(map, obj) {
2925 		if (!bpf_map__is_offload_neutral(map))
2926 			map->map_ifindex = attr->ifindex;
2927 	}
2928 
2929 	if (!first_prog) {
2930 		pr_warning("object file doesn't contain bpf program\n");
2931 		bpf_object__close(obj);
2932 		return -ENOENT;
2933 	}
2934 
2935 	err = bpf_object__load(obj);
2936 	if (err) {
2937 		bpf_object__close(obj);
2938 		return -EINVAL;
2939 	}
2940 
2941 	*pobj = obj;
2942 	*prog_fd = bpf_program__fd(first_prog);
2943 	return 0;
2944 }
2945 
/* Drain pending records from a perf event ring buffer mmap'ed at
 * @mmap_mem (@mmap_size data bytes after one @page_size control page),
 * invoking @fn on each record until the buffer is empty or @fn returns
 * something other than LIBBPF_PERF_EVENT_CONT. Records that wrap around
 * the end of the ring are linearized into *@copy_mem (grown on demand;
 * caller owns/frees it, *@copy_size tracks its capacity).
 * NOTE(review): assumes mmap_size is a power of two — the masking below
 * relies on it; confirm against callers.
 */
enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data)
{
	struct perf_event_mmap_page *header = mmap_mem;
	/* Acquire-load of the kernel-written producer position. */
	__u64 data_head = ring_buffer_read_head(header);
	__u64 data_tail = header->data_tail;
	void *base = ((__u8 *)header) + page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	/* Consume every record between our tail and the kernel's head. */
	while (data_head != data_tail) {
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		/* Record wraps past the end of the ring: stitch the two
		 * pieces together in the heap buffer so @fn sees it
		 * contiguously.
		 */
		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_secnd = ehdr_size - len_first;

			/* Grow the scratch buffer if this record doesn't fit. */
			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_secnd);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	/* Release-store the new consumer position for the kernel. */
	ring_buffer_write_tail(header, data_tail);
	return ret;
}
2993