xref: /linux/tools/lib/bpf/libbpf.c (revision a5d9265e017f081f0dc133c0e2f45103d027b874)
1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 
3 /*
4  * Common eBPF ELF object loading operations.
5  *
6  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8  * Copyright (C) 2015 Huawei Inc.
9  * Copyright (C) 2017 Nicira, Inc.
10  */
11 
12 #ifndef _GNU_SOURCE
13 #define _GNU_SOURCE
14 #endif
15 #include <stdlib.h>
16 #include <stdio.h>
17 #include <stdarg.h>
18 #include <libgen.h>
19 #include <inttypes.h>
20 #include <string.h>
21 #include <unistd.h>
22 #include <fcntl.h>
23 #include <errno.h>
24 #include <asm/unistd.h>
25 #include <linux/err.h>
26 #include <linux/kernel.h>
27 #include <linux/bpf.h>
28 #include <linux/btf.h>
29 #include <linux/filter.h>
30 #include <linux/list.h>
31 #include <linux/limits.h>
32 #include <linux/perf_event.h>
33 #include <linux/ring_buffer.h>
34 #include <sys/stat.h>
35 #include <sys/types.h>
36 #include <sys/vfs.h>
37 #include <tools/libc_compat.h>
38 #include <libelf.h>
39 #include <gelf.h>
40 
41 #include "libbpf.h"
42 #include "bpf.h"
43 #include "btf.h"
44 #include "str_error.h"
45 #include "libbpf_util.h"
46 
47 #ifndef EM_BPF
48 #define EM_BPF 247
49 #endif
50 
51 #ifndef BPF_FS_MAGIC
52 #define BPF_FS_MAGIC		0xcafe4a11
53 #endif
54 
55 #define __printf(a, b)	__attribute__((format(printf, a, b)))
56 
57 static int __base_pr(enum libbpf_print_level level, const char *format,
58 		     va_list args)
59 {
60 	if (level == LIBBPF_DEBUG)
61 		return 0;
62 
63 	return vfprintf(stderr, format, args);
64 }
65 
66 static libbpf_print_fn_t __libbpf_pr = __base_pr;
67 
/* Install @fn as the library-wide print callback.  Passing NULL
 * silences all libbpf output (see libbpf_print()).
 */
void libbpf_set_print(libbpf_print_fn_t fn)
{
	__libbpf_pr = fn;
}
72 
__printf(2, 3)
/* printf-style logging entry point; forwards @format/varargs to the
 * user-settable callback, or drops the message when the callback was
 * cleared with libbpf_set_print(NULL).
 */
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;

	if (!__libbpf_pr)
		return;

	va_start(args, format);
	__libbpf_pr(level, format, args);
	va_end(args);
}
85 
/* Buffer size for libbpf_strerror_r() scratch space. */
#define STRERR_BUFSIZE  128

/* Evaluate @action, store its result in @err, jump to @out on failure. */
#define CHECK_ERR(action, err, out) do {	\
	err = action;			\
	if (err)			\
		goto out;		\
} while(0)


/* Copied from tools/perf/util/util.h */
#ifndef zfree
/* Free *ptr and poison it with NULL to guard against double free. */
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
/* Close fd only if it is valid (>= 0), reset it to -1 either way, and
 * yield close()'s return value (0 when fd was already invalid).
 */
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

/* Prefer mmap-based ELF reading when libelf supports it. */
#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif
114 
/* Kernel feature probes, filled in by bpf_object__probe_caps(). */
struct bpf_capabilities {
	/* v4.14: kernel support for program & map names. */
	__u32 name:1;
};
119 
/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	/* Symbol name found for this program (or ".text"). */
	char *name;
	int prog_ifindex;
	/* Name of the ELF section the program came from. */
	char *section_name;
	/* section_name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;
	/* Heap copy of the section's instructions, owned here. */
	struct bpf_insn *insns;
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	/* One entry per relocation record collected by
	 * bpf_program__collect_reloc().
	 */
	struct reloc_desc {
		enum {
			RELO_LD64,	/* map reference (BPF_LD|BPF_IMM|BPF_DW) */
			RELO_CALL,	/* bpf-to-bpf pseudo call */
		} type;
		int insn_idx;
		union {
			int map_idx;	/* valid for RELO_LD64 */
			int text_off;	/* valid for RELO_CALL */
		};
	} *reloc_desc;
	int nr_reloc;

	/* Loaded instance fds; nr == -1 means "never loaded". */
	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	/* Back-pointer to the owning object. */
	struct bpf_object *obj;
	/* Caller-attached private data and its destructor. */
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
	/* BTF fd plus func/line info; btf_fd == -1 when absent. */
	int btf_fd;
	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	/* Points at the owning object's caps (set in add_program). */
	struct bpf_capabilities *caps;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
};
173 
/* In-memory representation of one map from the "maps" ELF section. */
struct bpf_map {
	int fd;			/* -1 until created or reused */
	char *name;		/* heap copy of the map symbol's name */
	size_t offset;		/* symbol offset inside the maps section */
	int map_ifindex;
	int inner_map_fd;	/* template fd for map-in-map types, or -1 */
	struct bpf_map_def def;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	void *priv;		/* caller-attached data + destructor */
	bpf_map_clear_priv_t clear_priv;
};
186 
/* Global list linking every open bpf_object (see bpf_object__new()). */
static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char license[64];	/* NUL-terminated license string */
	__u32 kern_version;	/* from the "version" section, if present */

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;

	bool loaded;
	bool has_pseudo_calls;	/* any program uses bpf-to-bpf calls */

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		void *obj_buf;		/* caller's buffer, not owned here */
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;	/* SYMTAB data, owned by libelf */
		size_t strtabidx;
		/* Collected SHT_REL sections for exec sections. */
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;
		int nr_reloc;
		int maps_shndx;		/* index of "maps" section, or -1 */
		int text_shndx;		/* index of ".text" section */
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;
	struct btf_ext *btf_ext;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	struct bpf_capabilities caps;

	/* Object file path; flexible array member, sized at alloc time. */
	char path[];
};
/* True while the ELF handle is open (between elf_init and elf_finish). */
#define obj_elf_valid(o)	((o)->efile.elf)
239 
240 void bpf_program__unload(struct bpf_program *prog)
241 {
242 	int i;
243 
244 	if (!prog)
245 		return;
246 
247 	/*
248 	 * If the object is opened but the program was never loaded,
249 	 * it is possible that prog->instances.nr == -1.
250 	 */
251 	if (prog->instances.nr > 0) {
252 		for (i = 0; i < prog->instances.nr; i++)
253 			zclose(prog->instances.fds[i]);
254 	} else if (prog->instances.nr != -1) {
255 		pr_warning("Internal error: instances.nr is %d\n",
256 			   prog->instances.nr);
257 	}
258 
259 	prog->instances.nr = -1;
260 	zfree(&prog->instances.fds);
261 
262 	zclose(prog->btf_fd);
263 	zfree(&prog->func_info);
264 	zfree(&prog->line_info);
265 }
266 
267 static void bpf_program__exit(struct bpf_program *prog)
268 {
269 	if (!prog)
270 		return;
271 
272 	if (prog->clear_priv)
273 		prog->clear_priv(prog, prog->priv);
274 
275 	prog->priv = NULL;
276 	prog->clear_priv = NULL;
277 
278 	bpf_program__unload(prog);
279 	zfree(&prog->name);
280 	zfree(&prog->section_name);
281 	zfree(&prog->pin_name);
282 	zfree(&prog->insns);
283 	zfree(&prog->reloc_desc);
284 
285 	prog->nr_reloc = 0;
286 	prog->insns_cnt = 0;
287 	prog->idx = -1;
288 }
289 
290 static char *__bpf_program__pin_name(struct bpf_program *prog)
291 {
292 	char *name, *p;
293 
294 	name = p = strdup(prog->section_name);
295 	while ((p = strchr(p, '/')))
296 		*p = '_';
297 
298 	return name;
299 }
300 
/*
 * Initialize @prog from @size raw bytes of section @section_name:
 * duplicate the names and copy the instructions.  On failure, anything
 * allocated so far is released via bpf_program__exit() and -ENOMEM is
 * returned (-EINVAL for a section too small to hold one instruction).
 */
static int
bpf_program__init(void *data, size_t size, char *section_name, int idx,
		  struct bpf_program *prog)
{
	/* A program must contain at least one full instruction. */
	if (size < sizeof(struct bpf_insn)) {
		pr_warning("corrupted section '%s'\n", section_name);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));

	prog->section_name = strdup(section_name);
	if (!prog->section_name) {
		pr_warning("failed to alloc name for prog under section(%d) %s\n",
			   idx, section_name);
		goto errout;
	}

	prog->pin_name = __bpf_program__pin_name(prog);
	if (!prog->pin_name) {
		pr_warning("failed to alloc pin name for prog under section(%d) %s\n",
			   idx, section_name);
		goto errout;
	}

	prog->insns = malloc(size);
	if (!prog->insns) {
		pr_warning("failed to alloc insns for prog under section %s\n",
			   section_name);
		goto errout;
	}
	/* Any trailing partial-instruction bytes are silently dropped. */
	prog->insns_cnt = size / sizeof(struct bpf_insn);
	memcpy(prog->insns, data,
	       prog->insns_cnt * sizeof(struct bpf_insn));
	prog->idx = idx;
	prog->instances.fds = NULL;
	prog->instances.nr = -1;	/* marks "never loaded" */
	prog->type = BPF_PROG_TYPE_UNSPEC;
	prog->btf_fd = -1;

	return 0;
errout:
	bpf_program__exit(prog);
	return -ENOMEM;
}
346 
/*
 * Parse one executable section into a bpf_program and append it to
 * obj->programs, growing the array by one slot.  The program is built
 * in a local and struct-copied into the new slot on success.
 */
static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *section_name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, section_name, idx, &prog);
	if (err)
		return err;

	prog.caps = &obj->caps;
	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
	if (!progs) {
		/*
		 * In this case the original obj->programs
		 * is still valid, so don't need special treat for
		 * bpf_close_object().
		 */
		pr_warning("failed to alloc a new program under section '%s'\n",
			   section_name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	/* Struct copy: ownership of prog's allocations moves to the array. */
	progs[nr_progs] = prog;
	return 0;
}
382 
/*
 * Give every program a name by scanning the symbol table for a GLOBAL
 * symbol defined in that program's section.  The ".text" section may
 * legitimately lack one (sub-programs) and falls back to the literal
 * name ".text"; any other nameless program is an error.
 */
static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		/* First matching global symbol wins (loop exits via !name). */
		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warning("failed to get sym name string for prog %s\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warning("failed to find sym for prog %s\n",
				   prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warning("failed to allocate memory for prog sym %s\n",
				   name);
			return -ENOMEM;
		}
	}

	return 0;
}
435 
/*
 * Allocate a zeroed bpf_object with @path stored in the trailing
 * flexible array, record the optional in-memory ELF buffer, and link
 * the object onto the global bpf_objects_list.
 * Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct bpf_object *bpf_object__new(const char *path,
					  void *obj_buf,
					  size_t obj_buf_sz)
{
	struct bpf_object *obj;

	/* One allocation covers the struct plus the path string. */
	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warning("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	obj->efile.fd = -1;

	/*
	 * Caller of this function should also calls
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to user. If not, we should duplicate the buffer to
	 * avoid user freeing them before elf finish.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;	/* no "maps" section seen yet */

	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}
467 
468 static void bpf_object__elf_finish(struct bpf_object *obj)
469 {
470 	if (!obj_elf_valid(obj))
471 		return;
472 
473 	if (obj->efile.elf) {
474 		elf_end(obj->efile.elf);
475 		obj->efile.elf = NULL;
476 	}
477 	obj->efile.symbols = NULL;
478 
479 	zfree(&obj->efile.reloc);
480 	obj->efile.nr_reloc = 0;
481 	zclose(obj->efile.fd);
482 	obj->efile.obj_buf = NULL;
483 	obj->efile.obj_buf_sz = 0;
484 }
485 
486 static int bpf_object__elf_init(struct bpf_object *obj)
487 {
488 	int err = 0;
489 	GElf_Ehdr *ep;
490 
491 	if (obj_elf_valid(obj)) {
492 		pr_warning("elf init: internal error\n");
493 		return -LIBBPF_ERRNO__LIBELF;
494 	}
495 
496 	if (obj->efile.obj_buf_sz > 0) {
497 		/*
498 		 * obj_buf should have been validated by
499 		 * bpf_object__open_buffer().
500 		 */
501 		obj->efile.elf = elf_memory(obj->efile.obj_buf,
502 					    obj->efile.obj_buf_sz);
503 	} else {
504 		obj->efile.fd = open(obj->path, O_RDONLY);
505 		if (obj->efile.fd < 0) {
506 			char errmsg[STRERR_BUFSIZE];
507 			char *cp = libbpf_strerror_r(errno, errmsg,
508 						     sizeof(errmsg));
509 
510 			pr_warning("failed to open %s: %s\n", obj->path, cp);
511 			return -errno;
512 		}
513 
514 		obj->efile.elf = elf_begin(obj->efile.fd,
515 				LIBBPF_ELF_C_READ_MMAP,
516 				NULL);
517 	}
518 
519 	if (!obj->efile.elf) {
520 		pr_warning("failed to open %s as ELF file\n",
521 				obj->path);
522 		err = -LIBBPF_ERRNO__LIBELF;
523 		goto errout;
524 	}
525 
526 	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
527 		pr_warning("failed to get EHDR from %s\n",
528 				obj->path);
529 		err = -LIBBPF_ERRNO__FORMAT;
530 		goto errout;
531 	}
532 	ep = &obj->efile.ehdr;
533 
534 	/* Old LLVM set e_machine to EM_NONE */
535 	if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
536 		pr_warning("%s is not an eBPF object file\n",
537 			obj->path);
538 		err = -LIBBPF_ERRNO__FORMAT;
539 		goto errout;
540 	}
541 
542 	return 0;
543 errout:
544 	bpf_object__elf_finish(obj);
545 	return err;
546 }
547 
548 static int
549 bpf_object__check_endianness(struct bpf_object *obj)
550 {
551 	static unsigned int const endian = 1;
552 
553 	switch (obj->efile.ehdr.e_ident[EI_DATA]) {
554 	case ELFDATA2LSB:
555 		/* We are big endian, BPF obj is little endian. */
556 		if (*(unsigned char const *)&endian != 1)
557 			goto mismatch;
558 		break;
559 
560 	case ELFDATA2MSB:
561 		/* We are little endian, BPF obj is big endian. */
562 		if (*(unsigned char const *)&endian != 0)
563 			goto mismatch;
564 		break;
565 	default:
566 		return -LIBBPF_ERRNO__ENDIAN;
567 	}
568 
569 	return 0;
570 
571 mismatch:
572 	pr_warning("Error: endianness mismatch.\n");
573 	return -LIBBPF_ERRNO__ENDIAN;
574 }
575 
576 static int
577 bpf_object__init_license(struct bpf_object *obj,
578 			 void *data, size_t size)
579 {
580 	memcpy(obj->license, data,
581 	       min(size, sizeof(obj->license) - 1));
582 	pr_debug("license of %s is %s\n", obj->path, obj->license);
583 	return 0;
584 }
585 
/*
 * Record the kernel version from the "version" section, which must be
 * exactly one __u32.  memcpy is used because the section data may not
 * be suitably aligned for a direct load.
 */
static int
bpf_object__init_kversion(struct bpf_object *obj,
			  void *data, size_t size)
{
	__u32 kver;

	if (size != sizeof(kver)) {
		pr_warning("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path,
		 obj->kern_version);
	return 0;
}
602 
603 static int compare_bpf_map(const void *_a, const void *_b)
604 {
605 	const struct bpf_map *a = _a;
606 	const struct bpf_map *b = _b;
607 
608 	return a->offset - b->offset;
609 }
610 
611 static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
612 {
613 	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
614 	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
615 		return true;
616 	return false;
617 }
618 
/*
 * Parse the "maps" ELF section into obj->maps.  Each symbol defined in
 * that section names one map; the map definitions are assumed to be
 * equally sized, which lets newer/older struct bpf_map_def layouts be
 * handled: smaller-than-ours definitions are zero-extended, larger ones
 * are accepted only if the extra bytes are all zero (or when
 * MAPS_RELAX_COMPAT was passed in @flags).  The resulting array is
 * sorted by section offset.
 */
static int
bpf_object__init_maps(struct bpf_object *obj, int flags)
{
	bool strict = !(flags & MAPS_RELAX_COMPAT);
	int i, map_idx, map_def_sz, nr_maps = 0;
	Elf_Scn *scn;
	Elf_Data *data;
	Elf_Data *symbols = obj->efile.symbols;

	if (obj->efile.maps_shndx < 0)
		return -EINVAL;
	if (!symbols)
		return -EINVAL;

	/* NB: short-circuit keeps 'data' from being read when !scn. */
	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
	if (scn)
		data = elf_getdata(scn, NULL);
	if (!scn || !data) {
		pr_warning("failed to get Elf_Data from map section %d\n",
			   obj->efile.maps_shndx);
		return -EINVAL;
	}

	/*
	 * Count number of maps. Each map has a name.
	 * Array of maps is not supported: only the first element is
	 * considered.
	 *
	 * TODO: Detect array of map and report error.
	 */
	for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;
		nr_maps++;
	}

	/* Alloc obj->maps and fill nr_maps. */
	pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
		 nr_maps, data->d_size);

	if (!nr_maps)
		return 0;

	/* Assume equally sized map definitions */
	map_def_sz = data->d_size / nr_maps;
	if (!data->d_size || (data->d_size % nr_maps) != 0) {
		pr_warning("unable to determine map definition size "
			   "section %s, %d maps in %zd bytes\n",
			   obj->path, nr_maps, data->d_size);
		return -EINVAL;
	}

	obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
	if (!obj->maps) {
		pr_warning("alloc maps for object failed\n");
		return -ENOMEM;
	}
	obj->nr_maps = nr_maps;

	for (i = 0; i < nr_maps; i++) {
		/*
		 * fill all fd with -1 so won't close incorrect
		 * fd (fd=0 is stdin) when failure (zclose won't close
		 * negative fd)).
		 */
		obj->maps[i].fd = -1;
		obj->maps[i].inner_map_fd = -1;
	}

	/*
	 * Fill obj->maps using data in "maps" section.
	 */
	for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		GElf_Sym sym;
		const char *map_name;
		struct bpf_map_def *def;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;

		map_name = elf_strptr(obj->efile.elf,
				      obj->efile.strtabidx,
				      sym.st_name);
		obj->maps[map_idx].offset = sym.st_value;
		/* The definition must lie entirely inside the section. */
		if (sym.st_value + map_def_sz > data->d_size) {
			pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
				   obj->path, map_name);
			return -EINVAL;
		}

		obj->maps[map_idx].name = strdup(map_name);
		if (!obj->maps[map_idx].name) {
			pr_warning("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", map_idx,
			 obj->maps[map_idx].name);
		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
		/*
		 * If the definition of the map in the object file fits in
		 * bpf_map_def, copy it.  Any extra fields in our version
		 * of bpf_map_def will default to zero as a result of the
		 * calloc above.
		 */
		if (map_def_sz <= sizeof(struct bpf_map_def)) {
			memcpy(&obj->maps[map_idx].def, def, map_def_sz);
		} else {
			/*
			 * Here the map structure being read is bigger than what
			 * we expect, truncate if the excess bits are all zero.
			 * If they are not zero, reject this map as
			 * incompatible.
			 */
			char *b;
			for (b = ((char *)def) + sizeof(struct bpf_map_def);
			     b < ((char *)def) + map_def_sz; b++) {
				if (*b != 0) {
					pr_warning("maps section in %s: \"%s\" "
						   "has unrecognized, non-zero "
						   "options\n",
						   obj->path, map_name);
					if (strict)
						return -EINVAL;
				}
			}
			memcpy(&obj->maps[map_idx].def, def,
			       sizeof(struct bpf_map_def));
		}
		map_idx++;
	}

	/* Keep maps ordered by section offset for later relocation lookup. */
	qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
	return 0;
}
759 
760 static bool section_have_execinstr(struct bpf_object *obj, int idx)
761 {
762 	Elf_Scn *scn;
763 	GElf_Shdr sh;
764 
765 	scn = elf_getscn(obj->efile.elf, idx);
766 	if (!scn)
767 		return false;
768 
769 	if (gelf_getshdr(scn, &sh) != &sh)
770 		return false;
771 
772 	if (sh.sh_flags & SHF_EXECINSTR)
773 		return true;
774 
775 	return false;
776 }
777 
778 static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
779 {
780 	Elf *elf = obj->efile.elf;
781 	GElf_Ehdr *ep = &obj->efile.ehdr;
782 	Elf_Data *btf_ext_data = NULL;
783 	Elf_Scn *scn = NULL;
784 	int idx = 0, err = 0;
785 
786 	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
787 	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
788 		pr_warning("failed to get e_shstrndx from %s\n",
789 			   obj->path);
790 		return -LIBBPF_ERRNO__FORMAT;
791 	}
792 
793 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
794 		char *name;
795 		GElf_Shdr sh;
796 		Elf_Data *data;
797 
798 		idx++;
799 		if (gelf_getshdr(scn, &sh) != &sh) {
800 			pr_warning("failed to get section(%d) header from %s\n",
801 				   idx, obj->path);
802 			err = -LIBBPF_ERRNO__FORMAT;
803 			goto out;
804 		}
805 
806 		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
807 		if (!name) {
808 			pr_warning("failed to get section(%d) name from %s\n",
809 				   idx, obj->path);
810 			err = -LIBBPF_ERRNO__FORMAT;
811 			goto out;
812 		}
813 
814 		data = elf_getdata(scn, 0);
815 		if (!data) {
816 			pr_warning("failed to get section(%d) data from %s(%s)\n",
817 				   idx, name, obj->path);
818 			err = -LIBBPF_ERRNO__FORMAT;
819 			goto out;
820 		}
821 		pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
822 			 idx, name, (unsigned long)data->d_size,
823 			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
824 			 (int)sh.sh_type);
825 
826 		if (strcmp(name, "license") == 0)
827 			err = bpf_object__init_license(obj,
828 						       data->d_buf,
829 						       data->d_size);
830 		else if (strcmp(name, "version") == 0)
831 			err = bpf_object__init_kversion(obj,
832 							data->d_buf,
833 							data->d_size);
834 		else if (strcmp(name, "maps") == 0)
835 			obj->efile.maps_shndx = idx;
836 		else if (strcmp(name, BTF_ELF_SEC) == 0) {
837 			obj->btf = btf__new(data->d_buf, data->d_size);
838 			if (IS_ERR(obj->btf) || btf__load(obj->btf)) {
839 				pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
840 					   BTF_ELF_SEC, PTR_ERR(obj->btf));
841 				obj->btf = NULL;
842 			}
843 		} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
844 			btf_ext_data = data;
845 		} else if (sh.sh_type == SHT_SYMTAB) {
846 			if (obj->efile.symbols) {
847 				pr_warning("bpf: multiple SYMTAB in %s\n",
848 					   obj->path);
849 				err = -LIBBPF_ERRNO__FORMAT;
850 			} else {
851 				obj->efile.symbols = data;
852 				obj->efile.strtabidx = sh.sh_link;
853 			}
854 		} else if ((sh.sh_type == SHT_PROGBITS) &&
855 			   (sh.sh_flags & SHF_EXECINSTR) &&
856 			   (data->d_size > 0)) {
857 			if (strcmp(name, ".text") == 0)
858 				obj->efile.text_shndx = idx;
859 			err = bpf_object__add_program(obj, data->d_buf,
860 						      data->d_size, name, idx);
861 			if (err) {
862 				char errmsg[STRERR_BUFSIZE];
863 				char *cp = libbpf_strerror_r(-err, errmsg,
864 							     sizeof(errmsg));
865 
866 				pr_warning("failed to alloc program %s (%s): %s",
867 					   name, obj->path, cp);
868 			}
869 		} else if (sh.sh_type == SHT_REL) {
870 			void *reloc = obj->efile.reloc;
871 			int nr_reloc = obj->efile.nr_reloc + 1;
872 			int sec = sh.sh_info; /* points to other section */
873 
874 			/* Only do relo for section with exec instructions */
875 			if (!section_have_execinstr(obj, sec)) {
876 				pr_debug("skip relo %s(%d) for section(%d)\n",
877 					 name, idx, sec);
878 				continue;
879 			}
880 
881 			reloc = reallocarray(reloc, nr_reloc,
882 					     sizeof(*obj->efile.reloc));
883 			if (!reloc) {
884 				pr_warning("realloc failed\n");
885 				err = -ENOMEM;
886 			} else {
887 				int n = nr_reloc - 1;
888 
889 				obj->efile.reloc = reloc;
890 				obj->efile.nr_reloc = nr_reloc;
891 
892 				obj->efile.reloc[n].shdr = sh;
893 				obj->efile.reloc[n].data = data;
894 			}
895 		} else {
896 			pr_debug("skip section(%d) %s\n", idx, name);
897 		}
898 		if (err)
899 			goto out;
900 	}
901 
902 	if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
903 		pr_warning("Corrupted ELF file: index of strtab invalid\n");
904 		return LIBBPF_ERRNO__FORMAT;
905 	}
906 	if (btf_ext_data) {
907 		if (!obj->btf) {
908 			pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
909 				 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
910 		} else {
911 			obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
912 						    btf_ext_data->d_size);
913 			if (IS_ERR(obj->btf_ext)) {
914 				pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
915 					   BTF_EXT_ELF_SEC,
916 					   PTR_ERR(obj->btf_ext));
917 				obj->btf_ext = NULL;
918 			}
919 		}
920 	}
921 	if (obj->efile.maps_shndx >= 0) {
922 		err = bpf_object__init_maps(obj, flags);
923 		if (err)
924 			goto out;
925 	}
926 	err = bpf_object__init_prog_names(obj);
927 out:
928 	return err;
929 }
930 
931 static struct bpf_program *
932 bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
933 {
934 	struct bpf_program *prog;
935 	size_t i;
936 
937 	for (i = 0; i < obj->nr_programs; i++) {
938 		prog = &obj->programs[i];
939 		if (prog->idx == idx)
940 			return prog;
941 	}
942 	return NULL;
943 }
944 
/*
 * Return the first program whose ELF section name equals @title, or
 * NULL if none matches.
 */
struct bpf_program *
bpf_object__find_program_by_title(struct bpf_object *obj, const char *title)
{
	struct bpf_program *pos;

	bpf_object__for_each_program(pos, obj) {
		if (pos->section_name && !strcmp(pos->section_name, title))
			return pos;
	}
	return NULL;
}
956 
/*
 * Parse one SHT_REL section belonging to @prog into prog->reloc_desc.
 * Each relocation becomes either a RELO_CALL (bpf-to-bpf pseudo call,
 * symbol in .text) or a RELO_LD64 (map reference on a
 * BPF_LD|BPF_IMM|BPF_DW instruction, symbol in the maps section,
 * matched to obj->maps by section offset).
 */
static int
bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
			   Elf_Data *data, struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	int text_shndx = obj->efile.text_shndx;
	int maps_shndx = obj->efile.maps_shndx;
	struct bpf_map *maps = obj->maps;
	size_t nr_maps = obj->nr_maps;
	int i, nrels;

	pr_debug("collecting relocating info for: '%s'\n",
		 prog->section_name);
	nrels = shdr->sh_size / shdr->sh_entsize;

	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warning("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		struct bpf_insn *insns = prog->insns;
		size_t map_idx;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warning("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (!gelf_getsym(symbols,
				 GELF_R_SYM(rel.r_info),
				 &sym)) {
			pr_warning("relocation: symbol %"PRIx64" not found\n",
				   GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}
		pr_debug("relo for %lld value %lld name %d\n",
			 (long long) (rel.r_info >> 32),
			 (long long) sym.st_value, sym.st_name);

		/* Only map and .text references are relocatable. */
		if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
				   prog->section_name, sym.st_shndx);
			return -LIBBPF_ERRNO__RELOC;
		}

		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
		pr_debug("relocation: insn_idx=%u\n", insn_idx);

		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
				pr_warning("incorrect bpf_call opcode\n");
				return -LIBBPF_ERRNO__RELOC;
			}
			prog->reloc_desc[i].type = RELO_CALL;
			prog->reloc_desc[i].insn_idx = insn_idx;
			prog->reloc_desc[i].text_off = sym.st_value;
			obj->has_pseudo_calls = true;
			continue;
		}

		/* Map loads must target a 64-bit immediate-load insn. */
		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
				   insn_idx, insns[insn_idx].code);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
			if (maps[map_idx].offset == sym.st_value) {
				pr_debug("relocation: find map %zd (%s) for insn %u\n",
					 map_idx, maps[map_idx].name, insn_idx);
				break;
			}
		}

		if (map_idx >= nr_maps) {
			pr_warning("bpf relocation: map_idx %d large than %d\n",
				   (int)map_idx, (int)nr_maps - 1);
			return -LIBBPF_ERRNO__RELOC;
		}

		prog->reloc_desc[i].type = RELO_LD64;
		prog->reloc_desc[i].insn_idx = insn_idx;
		prog->reloc_desc[i].map_idx = map_idx;
	}
	return 0;
}
1050 
1051 static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
1052 {
1053 	struct bpf_map_def *def = &map->def;
1054 	__u32 key_type_id, value_type_id;
1055 	int ret;
1056 
1057 	ret = btf__get_map_kv_tids(btf, map->name, def->key_size,
1058 				   def->value_size, &key_type_id,
1059 				   &value_type_id);
1060 	if (ret)
1061 		return ret;
1062 
1063 	map->btf_key_type_id = key_type_id;
1064 	map->btf_value_type_id = value_type_id;
1065 
1066 	return 0;
1067 }
1068 
/*
 * Make @map point at the already-existing kernel map behind @fd:
 * duplicate the fd (dup3 so the copy stays valid independently of the
 * caller's fd), close the old one, and take over all attributes
 * reported by the kernel.  On success @map's previous name/fd are
 * released; on failure @map is left untouched.
 *
 * NOTE(review): the error paths return -errno, which assumes errno is
 * still meaningful at that point (e.g. that strdup/open/dup3 set it
 * and nothing clobbered it since) — verify, otherwise 0 could leak out
 * as "success".
 */
int bpf_map__reuse_fd(struct bpf_map *map, int fd)
{
	struct bpf_map_info info = {};
	__u32 len = sizeof(info);
	int new_fd, err;
	char *new_name;

	err = bpf_obj_get_info_by_fd(fd, &info, &len);
	if (err)
		return err;

	new_name = strdup(info.name);
	if (!new_name)
		return -errno;

	/* Open "/" only to reserve an fd number, then overwrite it. */
	new_fd = open("/", O_RDONLY | O_CLOEXEC);
	if (new_fd < 0)
		goto err_free_new_name;

	new_fd = dup3(fd, new_fd, O_CLOEXEC);
	if (new_fd < 0)
		goto err_close_new_fd;

	err = zclose(map->fd);
	if (err)
		goto err_close_new_fd;
	free(map->name);

	/* Point of no return: adopt the duplicated fd and kernel state. */
	map->fd = new_fd;
	map->name = new_name;
	map->def.type = info.type;
	map->def.key_size = info.key_size;
	map->def.value_size = info.value_size;
	map->def.max_entries = info.max_entries;
	map->def.map_flags = info.map_flags;
	map->btf_key_type_id = info.btf_key_type_id;
	map->btf_value_type_id = info.btf_value_type_id;

	return 0;

err_close_new_fd:
	close(new_fd);
err_free_new_name:
	free(new_name);
	return -errno;
}
1115 
1116 int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
1117 {
1118 	if (!map || !max_entries)
1119 		return -EINVAL;
1120 
1121 	/* If map already created, its attributes can't be changed. */
1122 	if (map->fd >= 0)
1123 		return -EBUSY;
1124 
1125 	map->def.max_entries = max_entries;
1126 
1127 	return 0;
1128 }
1129 
1130 static int
1131 bpf_object__probe_name(struct bpf_object *obj)
1132 {
1133 	struct bpf_load_program_attr attr;
1134 	char *cp, errmsg[STRERR_BUFSIZE];
1135 	struct bpf_insn insns[] = {
1136 		BPF_MOV64_IMM(BPF_REG_0, 0),
1137 		BPF_EXIT_INSN(),
1138 	};
1139 	int ret;
1140 
1141 	/* make sure basic loading works */
1142 
1143 	memset(&attr, 0, sizeof(attr));
1144 	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
1145 	attr.insns = insns;
1146 	attr.insns_cnt = ARRAY_SIZE(insns);
1147 	attr.license = "GPL";
1148 
1149 	ret = bpf_load_program_xattr(&attr, NULL, 0);
1150 	if (ret < 0) {
1151 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1152 		pr_warning("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
1153 			   __func__, cp, errno);
1154 		return -errno;
1155 	}
1156 	close(ret);
1157 
1158 	/* now try the same program, but with the name */
1159 
1160 	attr.name = "test";
1161 	ret = bpf_load_program_xattr(&attr, NULL, 0);
1162 	if (ret >= 0) {
1163 		obj->caps.name = 1;
1164 		close(ret);
1165 	}
1166 
1167 	return 0;
1168 }
1169 
/* Run all kernel feature probes for @obj (currently just name support). */
static int
bpf_object__probe_caps(struct bpf_object *obj)
{
	return bpf_object__probe_name(obj);
}
1175 
1176 static int
1177 bpf_object__create_maps(struct bpf_object *obj)
1178 {
1179 	struct bpf_create_map_attr create_attr = {};
1180 	unsigned int i;
1181 	int err;
1182 
1183 	for (i = 0; i < obj->nr_maps; i++) {
1184 		struct bpf_map *map = &obj->maps[i];
1185 		struct bpf_map_def *def = &map->def;
1186 		char *cp, errmsg[STRERR_BUFSIZE];
1187 		int *pfd = &map->fd;
1188 
1189 		if (map->fd >= 0) {
1190 			pr_debug("skip map create (preset) %s: fd=%d\n",
1191 				 map->name, map->fd);
1192 			continue;
1193 		}
1194 
1195 		if (obj->caps.name)
1196 			create_attr.name = map->name;
1197 		create_attr.map_ifindex = map->map_ifindex;
1198 		create_attr.map_type = def->type;
1199 		create_attr.map_flags = def->map_flags;
1200 		create_attr.key_size = def->key_size;
1201 		create_attr.value_size = def->value_size;
1202 		create_attr.max_entries = def->max_entries;
1203 		create_attr.btf_fd = 0;
1204 		create_attr.btf_key_type_id = 0;
1205 		create_attr.btf_value_type_id = 0;
1206 		if (bpf_map_type__is_map_in_map(def->type) &&
1207 		    map->inner_map_fd >= 0)
1208 			create_attr.inner_map_fd = map->inner_map_fd;
1209 
1210 		if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
1211 			create_attr.btf_fd = btf__fd(obj->btf);
1212 			create_attr.btf_key_type_id = map->btf_key_type_id;
1213 			create_attr.btf_value_type_id = map->btf_value_type_id;
1214 		}
1215 
1216 		*pfd = bpf_create_map_xattr(&create_attr);
1217 		if (*pfd < 0 && create_attr.btf_key_type_id) {
1218 			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1219 			pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
1220 				   map->name, cp, errno);
1221 			create_attr.btf_fd = 0;
1222 			create_attr.btf_key_type_id = 0;
1223 			create_attr.btf_value_type_id = 0;
1224 			map->btf_key_type_id = 0;
1225 			map->btf_value_type_id = 0;
1226 			*pfd = bpf_create_map_xattr(&create_attr);
1227 		}
1228 
1229 		if (*pfd < 0) {
1230 			size_t j;
1231 
1232 			err = *pfd;
1233 			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1234 			pr_warning("failed to create map (name: '%s'): %s\n",
1235 				   map->name, cp);
1236 			for (j = 0; j < i; j++)
1237 				zclose(obj->maps[j].fd);
1238 			return err;
1239 		}
1240 		pr_debug("create map %s: fd=%d\n", map->name, *pfd);
1241 	}
1242 
1243 	return 0;
1244 }
1245 
1246 static int
1247 check_btf_ext_reloc_err(struct bpf_program *prog, int err,
1248 			void *btf_prog_info, const char *info_name)
1249 {
1250 	if (err != -ENOENT) {
1251 		pr_warning("Error in loading %s for sec %s.\n",
1252 			   info_name, prog->section_name);
1253 		return err;
1254 	}
1255 
1256 	/* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */
1257 
1258 	if (btf_prog_info) {
1259 		/*
1260 		 * Some info has already been found but has problem
1261 		 * in the last btf_ext reloc.  Must have to error
1262 		 * out.
1263 		 */
1264 		pr_warning("Error in relocating %s for sec %s.\n",
1265 			   info_name, prog->section_name);
1266 		return err;
1267 	}
1268 
1269 	/*
1270 	 * Have problem loading the very first info.  Ignore
1271 	 * the rest.
1272 	 */
1273 	pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n",
1274 		   info_name, prog->section_name, info_name);
1275 	return 0;
1276 }
1277 
/* Relocate .BTF.ext func_info/line_info for one (sub)program.
 *
 * @insn_offset == 0 identifies the main program; a non-zero offset is
 * a subprogram appended at that instruction offset.  Lookup failures
 * are filtered through check_btf_ext_reloc_err(), which turns "info
 * entirely absent" into success.
 */
static int
bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
			  const char *section_name,  __u32 insn_offset)
{
	int err;

	if (!insn_offset || prog->func_info) {
		/*
		 * !insn_offset => main program
		 *
		 * For sub prog, the main program's func_info has to
		 * be loaded first (i.e. prog->func_info != NULL)
		 */
		err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
					       section_name, insn_offset,
					       &prog->func_info,
					       &prog->func_info_cnt);
		if (err)
			return check_btf_ext_reloc_err(prog, err,
						       prog->func_info,
						       "bpf_func_info");

		prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
	}

	/* Same rules for line_info as for func_info above. */
	if (!insn_offset || prog->line_info) {
		err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
					       section_name, insn_offset,
					       &prog->line_info,
					       &prog->line_info_cnt);
		if (err)
			return check_btf_ext_reloc_err(prog, err,
						       prog->line_info,
						       "bpf_line_info");

		prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
	}

	/* Only the main program records the BTF fd used at load time. */
	if (!insn_offset)
		prog->btf_fd = btf__fd(obj->btf);

	return 0;
}
1321 
/* Resolve a bpf-to-bpf call relocation for @prog.
 *
 * On the first call relocation, the whole .text section (subprogram
 * bodies) is appended to @prog's instruction array; subsequent call
 * relocs reuse that copy.  The call insn's imm is then adjusted to
 * point into the appended code relative to the call site.
 */
static int
bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
			struct reloc_desc *relo)
{
	struct bpf_insn *insn, *new_insn;
	struct bpf_program *text;
	size_t new_cnt;
	int err;

	if (relo->type != RELO_CALL)
		return -LIBBPF_ERRNO__RELOC;

	/* .text itself must not carry call relocations into .text. */
	if (prog->idx == obj->efile.text_shndx) {
		pr_warning("relo in .text insn %d into off %d\n",
			   relo->insn_idx, relo->text_off);
		return -LIBBPF_ERRNO__RELOC;
	}

	/* main_prog_cnt == 0 means .text has not been appended yet. */
	if (prog->main_prog_cnt == 0) {
		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
		if (!text) {
			pr_warning("no .text section found yet relo into text exist\n");
			return -LIBBPF_ERRNO__RELOC;
		}
		new_cnt = prog->insns_cnt + text->insns_cnt;
		new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
		if (!new_insn) {
			pr_warning("oom in prog realloc\n");
			return -ENOMEM;
		}

		/* Pull in .text's func/line info at the append offset. */
		if (obj->btf_ext) {
			err = bpf_program_reloc_btf_ext(prog, obj,
							text->section_name,
							prog->insns_cnt);
			if (err)
				return err;
		}

		memcpy(new_insn + prog->insns_cnt, text->insns,
		       text->insns_cnt * sizeof(*insn));
		prog->insns = new_insn;
		/* Remember where the appended .text block starts. */
		prog->main_prog_cnt = prog->insns_cnt;
		prog->insns_cnt = new_cnt;
		pr_debug("added %zd insn from %s to prog %s\n",
			 text->insns_cnt, text->section_name,
			 prog->section_name);
	}
	/* Make imm the pc-relative distance from the call insn to the
	 * start of the appended .text block (plus the original imm).
	 */
	insn = &prog->insns[relo->insn_idx];
	insn->imm += prog->main_prog_cnt - relo->insn_idx;
	return 0;
}
1374 
/* Apply all recorded relocations for @prog: BTF ext info first, then
 * map-fd (RELO_LD64) and bpf-to-bpf call relocations.  Frees the
 * relocation descriptors on success.
 */
static int
bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
	int i, err;

	if (!prog)
		return 0;

	if (obj->btf_ext) {
		err = bpf_program_reloc_btf_ext(prog, obj,
						prog->section_name, 0);
		if (err)
			return err;
	}

	if (!prog->reloc_desc)
		return 0;

	for (i = 0; i < prog->nr_reloc; i++) {
		if (prog->reloc_desc[i].type == RELO_LD64) {
			struct bpf_insn *insns = prog->insns;
			int insn_idx, map_idx;

			insn_idx = prog->reloc_desc[i].insn_idx;
			map_idx = prog->reloc_desc[i].map_idx;

			if (insn_idx >= (int)prog->insns_cnt) {
				pr_warning("relocation out of range: '%s'\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__RELOC;
			}
			/* Patch the ld_imm64 insn to carry the map fd. */
			insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
			insns[insn_idx].imm = obj->maps[map_idx].fd;
		} else {
			err = bpf_program__reloc_text(prog, obj,
						      &prog->reloc_desc[i]);
			if (err)
				return err;
		}
	}

	/* Relocations are single-use; drop them once applied. */
	zfree(&prog->reloc_desc);
	prog->nr_reloc = 0;
	return 0;
}
1420 
1421 
1422 static int
1423 bpf_object__relocate(struct bpf_object *obj)
1424 {
1425 	struct bpf_program *prog;
1426 	size_t i;
1427 	int err;
1428 
1429 	for (i = 0; i < obj->nr_programs; i++) {
1430 		prog = &obj->programs[i];
1431 
1432 		err = bpf_program__relocate(prog, obj);
1433 		if (err) {
1434 			pr_warning("failed to relocate '%s'\n",
1435 				   prog->section_name);
1436 			return err;
1437 		}
1438 	}
1439 	return 0;
1440 }
1441 
/* Walk every SHT_REL section gathered during ELF parsing and record
 * its relocations on the program the section applies to (matched via
 * the section header's sh_info index).
 */
static int bpf_object__collect_reloc(struct bpf_object *obj)
{
	int i, err;

	if (!obj_elf_valid(obj)) {
		pr_warning("Internal error: elf object is closed\n");
		return -LIBBPF_ERRNO__INTERNAL;
	}

	for (i = 0; i < obj->efile.nr_reloc; i++) {
		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
		Elf_Data *data = obj->efile.reloc[i].data;
		int idx = shdr->sh_info; /* section the relocs apply to */
		struct bpf_program *prog;

		if (shdr->sh_type != SHT_REL) {
			pr_warning("internal error at %d\n", __LINE__);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog = bpf_object__find_prog_by_idx(obj, idx);
		if (!prog) {
			pr_warning("relocation failed: no section(%d)\n", idx);
			return -LIBBPF_ERRNO__RELOC;
		}

		err = bpf_program__collect_reloc(prog,
						 shdr, data,
						 obj);
		if (err)
			return err;
	}
	return 0;
}
1476 
/* Load one program instance into the kernel via BPF_PROG_LOAD.
 *
 * On success stores the new fd in *pfd and returns 0.  On failure
 * returns a negative LIBBPF_ERRNO__* code, trying to classify the
 * error from the verifier log and retry probes: verifier rejection,
 * program too large, wrong program type, or kernel version mismatch.
 */
static int
load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
	     char *license, __u32 kern_version, int *pfd)
{
	struct bpf_load_program_attr load_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	char *log_buf;
	int ret;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = prog->type;
	load_attr.expected_attach_type = prog->expected_attach_type;
	/* Only pass a name if the kernel was probed to accept one. */
	if (prog->caps->name)
		load_attr.name = prog->name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;
	load_attr.prog_ifindex = prog->prog_ifindex;
	load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0;
	load_attr.func_info = prog->func_info;
	load_attr.func_info_rec_size = prog->func_info_rec_size;
	load_attr.func_info_cnt = prog->func_info_cnt;
	load_attr.line_info = prog->line_info;
	load_attr.line_info_rec_size = prog->line_info_rec_size;
	load_attr.line_info_cnt = prog->line_info_cnt;
	if (!load_attr.insns || !load_attr.insns_cnt)
		return -EINVAL;

	/* A missing log buffer is not fatal; we just lose diagnostics. */
	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	ret = -LIBBPF_ERRNO__LOAD;
	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
	pr_warning("load bpf program failed: %s\n", cp);

	if (log_buf && log_buf[0] != '\0') {
		/* Verifier produced output => program was rejected. */
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%zu insns), at most %d insns\n",
			   load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? */
		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			/* Retry as a kprobe: if that succeeds, the
			 * original program type was the problem.
			 */
			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
			load_attr.expected_attach_type = 0;
			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}
1554 
/* Load every instance of @prog into the kernel.
 *
 * Without a preprocessor there is exactly one instance built from
 * prog->insns.  With a preprocessor, each instance's instructions are
 * produced by the callback; an instance producing no instructions is
 * skipped (its fd set to -1).  The raw instruction array is freed
 * afterwards in either case.
 */
int
bpf_program__load(struct bpf_program *prog,
		  char *license, __u32 kern_version)
{
	int err = 0, fd, i;

	if (prog->instances.nr < 0 || !prog->instances.fds) {
		/* A preprocessor must have set up instances beforehand. */
		if (prog->preprocessor) {
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		/* Default: a single instance. */
		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		memset(&result, 0, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		/* No instructions produced: skip this instance. */
		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
					i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	/* Instructions are no longer needed once loading was attempted. */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
1633 
1634 static bool bpf_program__is_function_storage(struct bpf_program *prog,
1635 					     struct bpf_object *obj)
1636 {
1637 	return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
1638 }
1639 
1640 static int
1641 bpf_object__load_progs(struct bpf_object *obj)
1642 {
1643 	size_t i;
1644 	int err;
1645 
1646 	for (i = 0; i < obj->nr_programs; i++) {
1647 		if (bpf_program__is_function_storage(&obj->programs[i], obj))
1648 			continue;
1649 		err = bpf_program__load(&obj->programs[i],
1650 					obj->license,
1651 					obj->kern_version);
1652 		if (err)
1653 			return err;
1654 	}
1655 	return 0;
1656 }
1657 
/* Whether loading a program of @type requires the object to carry a
 * kernel version.  Every explicitly listed type does not; kprobes and
 * (conservatively) any unknown future type do.
 */
static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
		return false;
	case BPF_PROG_TYPE_KPROBE:
	default:
		return true;
	}
}
1689 
1690 static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
1691 {
1692 	if (needs_kver && obj->kern_version == 0) {
1693 		pr_warning("%s doesn't provide kernel version\n",
1694 			   obj->path);
1695 		return -LIBBPF_ERRNO__KVERSION;
1696 	}
1697 	return 0;
1698 }
1699 
/* Core open path: parse the ELF at @path (or from @obj_buf when
 * non-NULL), collect sections and relocations, and validate.  The ELF
 * handle is released before returning; loading into the kernel happens
 * separately via bpf_object__load().  Returns the new object or an
 * ERR_PTR() code.
 */
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
		   bool needs_kver, int flags)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	/* CHECK_ERR() jumps to "out" with err set on failure. */
	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);

	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}
1728 
1729 struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
1730 					    int flags)
1731 {
1732 	/* param validation */
1733 	if (!attr->file)
1734 		return NULL;
1735 
1736 	pr_debug("loading %s\n", attr->file);
1737 
1738 	return __bpf_object__open(attr->file, NULL, 0,
1739 				  bpf_prog_type__needs_kver(attr->prog_type),
1740 				  flags);
1741 }
1742 
/* Public variant of __bpf_object__open_xattr() with no extra flags. */
struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
{
	return __bpf_object__open_xattr(attr, 0);
}
1747 
1748 struct bpf_object *bpf_object__open(const char *path)
1749 {
1750 	struct bpf_object_open_attr attr = {
1751 		.file		= path,
1752 		.prog_type	= BPF_PROG_TYPE_UNSPEC,
1753 	};
1754 
1755 	return bpf_object__open_xattr(&attr);
1756 }
1757 
/* Open a BPF object from an in-memory ELF image.  When @name is NULL,
 * a name is synthesized from the buffer address and size.  Returns
 * NULL on invalid arguments (other open paths use ERR_PTR codes).
 */
struct bpf_object *bpf_object__open_buffer(void *obj_buf,
					   size_t obj_buf_sz,
					   const char *name)
{
	char tmp_name[64];

	/* param validation */
	if (!obj_buf || obj_buf_sz <= 0)
		return NULL;

	if (!name) {
		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
			 (unsigned long)obj_buf,
			 (unsigned long)obj_buf_sz);
		tmp_name[sizeof(tmp_name) - 1] = '\0';
		name = tmp_name;
	}
	pr_debug("loading object '%s' from buffer\n",
		 name);

	/* NOTE(review): the final parameter is the int 'flags', yet it
	 * is passed the boolean 'true' (i.e. 1) here — verify this is
	 * the intended flag value for buffer-based opens.
	 */
	return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
}
1780 
1781 int bpf_object__unload(struct bpf_object *obj)
1782 {
1783 	size_t i;
1784 
1785 	if (!obj)
1786 		return -EINVAL;
1787 
1788 	for (i = 0; i < obj->nr_maps; i++)
1789 		zclose(obj->maps[i].fd);
1790 
1791 	for (i = 0; i < obj->nr_programs; i++)
1792 		bpf_program__unload(&obj->programs[i]);
1793 
1794 	return 0;
1795 }
1796 
/* Load the (already opened) object into the kernel: probe kernel
 * capabilities, create maps, apply relocations, then load programs.
 * May be called at most once per object; on any failure everything
 * loaded so far is torn down again.
 */
int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	obj->loaded = true;

	/* CHECK_ERR() jumps to "out" with err set on failure. */
	CHECK_ERR(bpf_object__probe_caps(obj), err, out);
	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}
1822 
1823 static int check_path(const char *path)
1824 {
1825 	char *cp, errmsg[STRERR_BUFSIZE];
1826 	struct statfs st_fs;
1827 	char *dname, *dir;
1828 	int err = 0;
1829 
1830 	if (path == NULL)
1831 		return -EINVAL;
1832 
1833 	dname = strdup(path);
1834 	if (dname == NULL)
1835 		return -ENOMEM;
1836 
1837 	dir = dirname(dname);
1838 	if (statfs(dir, &st_fs)) {
1839 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1840 		pr_warning("failed to statfs %s: %s\n", dir, cp);
1841 		err = -errno;
1842 	}
1843 	free(dname);
1844 
1845 	if (!err && st_fs.f_type != BPF_FS_MAGIC) {
1846 		pr_warning("specified path %s is not on BPF FS\n", path);
1847 		err = -EINVAL;
1848 	}
1849 
1850 	return err;
1851 }
1852 
1853 int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
1854 			      int instance)
1855 {
1856 	char *cp, errmsg[STRERR_BUFSIZE];
1857 	int err;
1858 
1859 	err = check_path(path);
1860 	if (err)
1861 		return err;
1862 
1863 	if (prog == NULL) {
1864 		pr_warning("invalid program pointer\n");
1865 		return -EINVAL;
1866 	}
1867 
1868 	if (instance < 0 || instance >= prog->instances.nr) {
1869 		pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1870 			   instance, prog->section_name, prog->instances.nr);
1871 		return -EINVAL;
1872 	}
1873 
1874 	if (bpf_obj_pin(prog->instances.fds[instance], path)) {
1875 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1876 		pr_warning("failed to pin program: %s\n", cp);
1877 		return -errno;
1878 	}
1879 	pr_debug("pinned program '%s'\n", path);
1880 
1881 	return 0;
1882 }
1883 
1884 int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
1885 				int instance)
1886 {
1887 	int err;
1888 
1889 	err = check_path(path);
1890 	if (err)
1891 		return err;
1892 
1893 	if (prog == NULL) {
1894 		pr_warning("invalid program pointer\n");
1895 		return -EINVAL;
1896 	}
1897 
1898 	if (instance < 0 || instance >= prog->instances.nr) {
1899 		pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1900 			   instance, prog->section_name, prog->instances.nr);
1901 		return -EINVAL;
1902 	}
1903 
1904 	err = unlink(path);
1905 	if (err != 0)
1906 		return -errno;
1907 	pr_debug("unpinned program '%s'\n", path);
1908 
1909 	return 0;
1910 }
1911 
1912 static int make_dir(const char *path)
1913 {
1914 	char *cp, errmsg[STRERR_BUFSIZE];
1915 	int err = 0;
1916 
1917 	if (mkdir(path, 0700) && errno != EEXIST)
1918 		err = -errno;
1919 
1920 	if (err) {
1921 		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
1922 		pr_warning("failed to mkdir %s: %s\n", path, cp);
1923 	}
1924 	return err;
1925 }
1926 
/* Pin @prog at @path.  A single-instance program is pinned directly at
 * @path; a multi-instance program gets a directory @path containing
 * one pin file per instance ("<path>/<i>").  On a mid-way failure the
 * already-pinned instances are unpinned and the directory is removed.
 */
int bpf_program__pin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warning("invalid program pointer\n");
		return -EINVAL;
	}

	if (prog->instances.nr <= 0) {
		pr_warning("no instances of prog %s to pin\n",
			   prog->section_name);
		return -EINVAL;
	}

	if (prog->instances.nr == 1) {
		/* don't create subdirs when pinning single instance */
		return bpf_program__pin_instance(prog, path, 0);
	}

	err = make_dir(path);
	if (err)
		return err;

	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin;
		}

		err = bpf_program__pin_instance(prog, buf, i);
		if (err)
			goto err_unpin;
	}

	return 0;

err_unpin:
	/* Roll back: unpin the instances [0, i) pinned so far. */
	for (i = i - 1; i >= 0; i--) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin_instance(prog, buf, i);
	}

	rmdir(path);

	return err;
}
1993 
/* Reverse of bpf_program__pin(): unlink the single pin file, or every
 * per-instance pin file plus the containing directory for a
 * multi-instance program.
 */
int bpf_program__unpin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warning("invalid program pointer\n");
		return -EINVAL;
	}

	if (prog->instances.nr <= 0) {
		/* NOTE(review): message says "to pin" — copied from the
		 * pin path; should probably read "to unpin".
		 */
		pr_warning("no instances of prog %s to pin\n",
			   prog->section_name);
		return -EINVAL;
	}

	if (prog->instances.nr == 1) {
		/* don't create subdirs when pinning single instance */
		return bpf_program__unpin_instance(prog, path, 0);
	}

	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			return -EINVAL;
		else if (len >= PATH_MAX)
			return -ENAMETOOLONG;

		err = bpf_program__unpin_instance(prog, buf, i);
		if (err)
			return err;
	}

	err = rmdir(path);
	if (err)
		return -errno;

	return 0;
}
2039 
2040 int bpf_map__pin(struct bpf_map *map, const char *path)
2041 {
2042 	char *cp, errmsg[STRERR_BUFSIZE];
2043 	int err;
2044 
2045 	err = check_path(path);
2046 	if (err)
2047 		return err;
2048 
2049 	if (map == NULL) {
2050 		pr_warning("invalid map pointer\n");
2051 		return -EINVAL;
2052 	}
2053 
2054 	if (bpf_obj_pin(map->fd, path)) {
2055 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
2056 		pr_warning("failed to pin map: %s\n", cp);
2057 		return -errno;
2058 	}
2059 
2060 	pr_debug("pinned map '%s'\n", path);
2061 
2062 	return 0;
2063 }
2064 
/* Remove the pin file of @map at @path.  Returns 0 on success,
 * negative error code otherwise.
 */
int bpf_map__unpin(struct bpf_map *map, const char *path)
{
	int err = check_path(path);

	if (err)
		return err;

	if (!map) {
		pr_warning("invalid map pointer\n");
		return -EINVAL;
	}

	if (unlink(path))
		return -errno;
	pr_debug("unpinned map '%s'\n", path);

	return 0;
}
2085 
/* Pin every map of a loaded object under directory @path, one file per
 * map named after the map.  On failure, maps pinned earlier in the
 * iteration are unpinned again by walking backwards from the failing
 * map via bpf_map__prev().
 */
int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
{
	struct bpf_map *map;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warning("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	err = make_dir(path);
	if (err)
		return err;

	bpf_map__for_each(map, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       bpf_map__name(map));
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_maps;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin_maps;
		}

		err = bpf_map__pin(map, buf);
		if (err)
			goto err_unpin_maps;
	}

	return 0;

err_unpin_maps:
	/* Roll back every map pinned before the failing one. */
	while ((map = bpf_map__prev(map, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       bpf_map__name(map));
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_map__unpin(map, buf);
	}

	return err;
}
2141 
2142 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
2143 {
2144 	struct bpf_map *map;
2145 	int err;
2146 
2147 	if (!obj)
2148 		return -ENOENT;
2149 
2150 	bpf_map__for_each(map, obj) {
2151 		char buf[PATH_MAX];
2152 		int len;
2153 
2154 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
2155 			       bpf_map__name(map));
2156 		if (len < 0)
2157 			return -EINVAL;
2158 		else if (len >= PATH_MAX)
2159 			return -ENAMETOOLONG;
2160 
2161 		err = bpf_map__unpin(map, buf);
2162 		if (err)
2163 			return err;
2164 	}
2165 
2166 	return 0;
2167 }
2168 
/* Pin every program of a loaded object under directory @path, using
 * each program's pin_name.  On failure, programs pinned earlier are
 * unpinned again by walking backwards via bpf_program__prev().
 */
int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warning("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	err = make_dir(path);
	if (err)
		return err;

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_programs;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin_programs;
		}

		err = bpf_program__pin(prog, buf);
		if (err)
			goto err_unpin_programs;
	}

	return 0;

err_unpin_programs:
	/* Roll back every program pinned before the failing one. */
	while ((prog = bpf_program__prev(prog, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin(prog, buf);
	}

	return err;
}
2224 
2225 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
2226 {
2227 	struct bpf_program *prog;
2228 	int err;
2229 
2230 	if (!obj)
2231 		return -ENOENT;
2232 
2233 	bpf_object__for_each_program(prog, obj) {
2234 		char buf[PATH_MAX];
2235 		int len;
2236 
2237 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
2238 			       prog->pin_name);
2239 		if (len < 0)
2240 			return -EINVAL;
2241 		else if (len >= PATH_MAX)
2242 			return -ENAMETOOLONG;
2243 
2244 		err = bpf_program__unpin(prog, buf);
2245 		if (err)
2246 			return err;
2247 	}
2248 
2249 	return 0;
2250 }
2251 
/* Pin all maps and all programs of @obj under directory @path.  If
 * program pinning fails, the maps pinned in the first step are
 * unpinned again before returning the error.
 */
int bpf_object__pin(struct bpf_object *obj, const char *path)
{
	int err = bpf_object__pin_maps(obj, path);

	if (err)
		return err;

	err = bpf_object__pin_programs(obj, path);
	if (err) {
		bpf_object__unpin_maps(obj, path);
		return err;
	}

	return 0;
}
2268 
/* Destroy @obj: run the user's clear_priv callback, release ELF
 * resources, unload maps/programs from the kernel, free BTF data and
 * all per-map/per-program allocations, unlink the object from the
 * global object list, and free it.  A NULL @obj is ignored.
 */
void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);
	btf__free(obj->btf);
	btf_ext__free(obj->btf_ext);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		/* Give each map's private data a chance to clean up. */
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}
	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	list_del(&obj->list);
	free(obj);
}
2304 
/* Iterate the global list of opened objects.  Pass NULL as @prev to
 * get the first object; returns NULL once @prev was the last object
 * (or the list is empty).
 */
struct bpf_object *
bpf_object__next(struct bpf_object *prev)
{
	struct bpf_object *next;

	if (!prev)
		next = list_first_entry(&bpf_objects_list,
					struct bpf_object,
					list);
	else
		next = list_next_entry(prev, list);

	/* Empty list is noticed here so don't need checking on entry. */
	if (&next->list == &bpf_objects_list)
		return NULL;

	return next;
}
2323 
2324 const char *bpf_object__name(struct bpf_object *obj)
2325 {
2326 	return obj ? obj->path : ERR_PTR(-EINVAL);
2327 }
2328 
2329 unsigned int bpf_object__kversion(struct bpf_object *obj)
2330 {
2331 	return obj ? obj->kern_version : 0;
2332 }
2333 
2334 struct btf *bpf_object__btf(struct bpf_object *obj)
2335 {
2336 	return obj ? obj->btf : NULL;
2337 }
2338 
2339 int bpf_object__btf_fd(const struct bpf_object *obj)
2340 {
2341 	return obj->btf ? btf__fd(obj->btf) : -1;
2342 }
2343 
2344 int bpf_object__set_priv(struct bpf_object *obj, void *priv,
2345 			 bpf_object_clear_priv_t clear_priv)
2346 {
2347 	if (obj->priv && obj->clear_priv)
2348 		obj->clear_priv(obj, obj->priv);
2349 
2350 	obj->priv = priv;
2351 	obj->clear_priv = clear_priv;
2352 	return 0;
2353 }
2354 
2355 void *bpf_object__priv(struct bpf_object *obj)
2356 {
2357 	return obj ? obj->priv : ERR_PTR(-EINVAL);
2358 }
2359 
2360 static struct bpf_program *
2361 __bpf_program__iter(struct bpf_program *p, struct bpf_object *obj, bool forward)
2362 {
2363 	size_t nr_programs = obj->nr_programs;
2364 	ssize_t idx;
2365 
2366 	if (!nr_programs)
2367 		return NULL;
2368 
2369 	if (!p)
2370 		/* Iter from the beginning */
2371 		return forward ? &obj->programs[0] :
2372 			&obj->programs[nr_programs - 1];
2373 
2374 	if (p->obj != obj) {
2375 		pr_warning("error: program handler doesn't match object\n");
2376 		return NULL;
2377 	}
2378 
2379 	idx = (p - obj->programs) + (forward ? 1 : -1);
2380 	if (idx >= obj->nr_programs || idx < 0)
2381 		return NULL;
2382 	return &obj->programs[idx];
2383 }
2384 
2385 struct bpf_program *
2386 bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
2387 {
2388 	struct bpf_program *prog = prev;
2389 
2390 	do {
2391 		prog = __bpf_program__iter(prog, obj, true);
2392 	} while (prog && bpf_program__is_function_storage(prog, obj));
2393 
2394 	return prog;
2395 }
2396 
2397 struct bpf_program *
2398 bpf_program__prev(struct bpf_program *next, struct bpf_object *obj)
2399 {
2400 	struct bpf_program *prog = next;
2401 
2402 	do {
2403 		prog = __bpf_program__iter(prog, obj, false);
2404 	} while (prog && bpf_program__is_function_storage(prog, obj));
2405 
2406 	return prog;
2407 }
2408 
2409 int bpf_program__set_priv(struct bpf_program *prog, void *priv,
2410 			  bpf_program_clear_priv_t clear_priv)
2411 {
2412 	if (prog->priv && prog->clear_priv)
2413 		prog->clear_priv(prog, prog->priv);
2414 
2415 	prog->priv = priv;
2416 	prog->clear_priv = clear_priv;
2417 	return 0;
2418 }
2419 
2420 void *bpf_program__priv(struct bpf_program *prog)
2421 {
2422 	return prog ? prog->priv : ERR_PTR(-EINVAL);
2423 }
2424 
/* Record a network device ifindex on @prog; used at load time
 * (NOTE(review): presumably for device-bound/offloaded programs —
 * confirm against bpf_object__load callers).  No NULL check on @prog.
 */
void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
{
	prog->prog_ifindex = ifindex;
}
2429 
2430 const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
2431 {
2432 	const char *title;
2433 
2434 	title = prog->section_name;
2435 	if (needs_copy) {
2436 		title = strdup(title);
2437 		if (!title) {
2438 			pr_warning("failed to strdup program title\n");
2439 			return ERR_PTR(-ENOMEM);
2440 		}
2441 	}
2442 
2443 	return title;
2444 }
2445 
/* Return the fd of @prog's first (0th) instance, or a negative errno
 * (see bpf_program__nth_fd).
 */
int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}
2450 
2451 int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
2452 			  bpf_program_prep_t prep)
2453 {
2454 	int *instances_fds;
2455 
2456 	if (nr_instances <= 0 || !prep)
2457 		return -EINVAL;
2458 
2459 	if (prog->instances.nr > 0 || prog->instances.fds) {
2460 		pr_warning("Can't set pre-processor after loading\n");
2461 		return -EINVAL;
2462 	}
2463 
2464 	instances_fds = malloc(sizeof(int) * nr_instances);
2465 	if (!instances_fds) {
2466 		pr_warning("alloc memory failed for fds\n");
2467 		return -ENOMEM;
2468 	}
2469 
2470 	/* fill all fd with -1 */
2471 	memset(instances_fds, -1, sizeof(int) * nr_instances);
2472 
2473 	prog->instances.nr = nr_instances;
2474 	prog->instances.fds = instances_fds;
2475 	prog->preprocessor = prep;
2476 	return 0;
2477 }
2478 
2479 int bpf_program__nth_fd(struct bpf_program *prog, int n)
2480 {
2481 	int fd;
2482 
2483 	if (!prog)
2484 		return -EINVAL;
2485 
2486 	if (n >= prog->instances.nr || n < 0) {
2487 		pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
2488 			   n, prog->section_name, prog->instances.nr);
2489 		return -EINVAL;
2490 	}
2491 
2492 	fd = prog->instances.fds[n];
2493 	if (fd < 0) {
2494 		pr_warning("%dth instance of program '%s' is invalid\n",
2495 			   n, prog->section_name);
2496 		return -ENOENT;
2497 	}
2498 
2499 	return fd;
2500 }
2501 
/* Set the BPF program type used when @prog is loaded.  No NULL check;
 * callers must pass a valid program.
 */
void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
	prog->type = type;
}
2506 
2507 static bool bpf_program__is_type(struct bpf_program *prog,
2508 				 enum bpf_prog_type type)
2509 {
2510 	return prog ? (prog->type == type) : false;
2511 }
2512 
/* Generate the public bpf_program__set_<NAME>() / bpf_program__is_<NAME>()
 * pair for a given program TYPE: the setter returns -EINVAL on a NULL
 * program, the predicate delegates to bpf_program__is_type().
 */
#define BPF_PROG_TYPE_FNS(NAME, TYPE)			\
int bpf_program__set_##NAME(struct bpf_program *prog)	\
{							\
	if (!prog)					\
		return -EINVAL;				\
	bpf_program__set_type(prog, TYPE);		\
	return 0;					\
}							\
							\
bool bpf_program__is_##NAME(struct bpf_program *prog)	\
{							\
	return bpf_program__is_type(prog, TYPE);	\
}							\

BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
2535 
/* Set the expected attach type passed to the kernel when @prog is
 * loaded.  No NULL check; callers must pass a valid program.
 */
void bpf_program__set_expected_attach_type(struct bpf_program *prog,
					   enum bpf_attach_type type)
{
	prog->expected_attach_type = type;
}
2541 
/* Initializer for one section_names[] entry: section-name prefix, its
 * length (used for prefix matching), program type, expected attach type
 * at load time, attachability flag, and the attach type to use.
 */
#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \
	{ string, sizeof(string) - 1, ptype, eatype, is_attachable, atype }

/* Programs that can NOT be attached. */
#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0)

/* Programs that can be attached. */
#define BPF_APROG_SEC(string, ptype, atype) \
	BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype)

/* Programs that must specify expected attach type at load time. */
#define BPF_EAPROG_SEC(string, ptype, eatype) \
	BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype)

/* Programs that can be attached but attach type can't be identified by section
 * name. Kept for backward compatibility.
 */
#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
2560 
/* Map from ELF section-name prefix to program/attach type.  Lookups in
 * libbpf_prog_type_by_name()/libbpf_attach_type_by_name() match by
 * prefix (strncmp with .len), hence the trailing '/' on entries whose
 * sections carry an extra event name suffix.
 */
static const struct {
	const char *sec;
	size_t len;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	int is_attachable;
	enum bpf_attach_type attach_type;
} section_names[] = {
	BPF_PROG_SEC("socket",			BPF_PROG_TYPE_SOCKET_FILTER),
	BPF_PROG_SEC("kprobe/",			BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("kretprobe/",		BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("classifier",		BPF_PROG_TYPE_SCHED_CLS),
	BPF_PROG_SEC("action",			BPF_PROG_TYPE_SCHED_ACT),
	BPF_PROG_SEC("tracepoint/",		BPF_PROG_TYPE_TRACEPOINT),
	BPF_PROG_SEC("raw_tracepoint/",		BPF_PROG_TYPE_RAW_TRACEPOINT),
	BPF_PROG_SEC("xdp",			BPF_PROG_TYPE_XDP),
	BPF_PROG_SEC("perf_event",		BPF_PROG_TYPE_PERF_EVENT),
	BPF_PROG_SEC("lwt_in",			BPF_PROG_TYPE_LWT_IN),
	BPF_PROG_SEC("lwt_out",			BPF_PROG_TYPE_LWT_OUT),
	BPF_PROG_SEC("lwt_xmit",		BPF_PROG_TYPE_LWT_XMIT),
	BPF_PROG_SEC("lwt_seg6local",		BPF_PROG_TYPE_LWT_SEG6LOCAL),
	BPF_APROG_SEC("cgroup_skb/ingress",	BPF_PROG_TYPE_CGROUP_SKB,
						BPF_CGROUP_INET_INGRESS),
	BPF_APROG_SEC("cgroup_skb/egress",	BPF_PROG_TYPE_CGROUP_SKB,
						BPF_CGROUP_INET_EGRESS),
	BPF_APROG_COMPAT("cgroup/skb",		BPF_PROG_TYPE_CGROUP_SKB),
	BPF_APROG_SEC("cgroup/sock",		BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET_SOCK_CREATE),
	BPF_EAPROG_SEC("cgroup/post_bind4",	BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET4_POST_BIND),
	BPF_EAPROG_SEC("cgroup/post_bind6",	BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET6_POST_BIND),
	BPF_APROG_SEC("cgroup/dev",		BPF_PROG_TYPE_CGROUP_DEVICE,
						BPF_CGROUP_DEVICE),
	BPF_APROG_SEC("sockops",		BPF_PROG_TYPE_SOCK_OPS,
						BPF_CGROUP_SOCK_OPS),
	BPF_APROG_SEC("sk_skb/stream_parser",	BPF_PROG_TYPE_SK_SKB,
						BPF_SK_SKB_STREAM_PARSER),
	BPF_APROG_SEC("sk_skb/stream_verdict",	BPF_PROG_TYPE_SK_SKB,
						BPF_SK_SKB_STREAM_VERDICT),
	BPF_APROG_COMPAT("sk_skb",		BPF_PROG_TYPE_SK_SKB),
	BPF_APROG_SEC("sk_msg",			BPF_PROG_TYPE_SK_MSG,
						BPF_SK_MSG_VERDICT),
	BPF_APROG_SEC("lirc_mode2",		BPF_PROG_TYPE_LIRC_MODE2,
						BPF_LIRC_MODE2),
	BPF_APROG_SEC("flow_dissector",		BPF_PROG_TYPE_FLOW_DISSECTOR,
						BPF_FLOW_DISSECTOR),
	BPF_EAPROG_SEC("cgroup/bind4",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_BIND),
	BPF_EAPROG_SEC("cgroup/bind6",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_BIND),
	BPF_EAPROG_SEC("cgroup/connect4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_CONNECT),
	BPF_EAPROG_SEC("cgroup/connect6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_CONNECT),
	BPF_EAPROG_SEC("cgroup/sendmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP4_SENDMSG),
	BPF_EAPROG_SEC("cgroup/sendmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP6_SENDMSG),
};

#undef BPF_PROG_SEC_IMPL
#undef BPF_PROG_SEC
#undef BPF_APROG_SEC
#undef BPF_EAPROG_SEC
#undef BPF_APROG_COMPAT
2627 
2628 #define MAX_TYPE_NAME_SIZE 32
2629 
2630 static char *libbpf_get_type_names(bool attach_type)
2631 {
2632 	int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE;
2633 	char *buf;
2634 
2635 	buf = malloc(len);
2636 	if (!buf)
2637 		return NULL;
2638 
2639 	buf[0] = '\0';
2640 	/* Forge string buf with all available names */
2641 	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2642 		if (attach_type && !section_names[i].is_attachable)
2643 			continue;
2644 
2645 		if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) {
2646 			free(buf);
2647 			return NULL;
2648 		}
2649 		strcat(buf, " ");
2650 		strcat(buf, section_names[i].sec);
2651 	}
2652 
2653 	return buf;
2654 }
2655 
2656 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
2657 			     enum bpf_attach_type *expected_attach_type)
2658 {
2659 	char *type_names;
2660 	int i;
2661 
2662 	if (!name)
2663 		return -EINVAL;
2664 
2665 	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2666 		if (strncmp(name, section_names[i].sec, section_names[i].len))
2667 			continue;
2668 		*prog_type = section_names[i].prog_type;
2669 		*expected_attach_type = section_names[i].expected_attach_type;
2670 		return 0;
2671 	}
2672 	pr_warning("failed to guess program type based on ELF section name '%s'\n", name);
2673 	type_names = libbpf_get_type_names(false);
2674 	if (type_names != NULL) {
2675 		pr_info("supported section(type) names are:%s\n", type_names);
2676 		free(type_names);
2677 	}
2678 
2679 	return -EINVAL;
2680 }
2681 
2682 int libbpf_attach_type_by_name(const char *name,
2683 			       enum bpf_attach_type *attach_type)
2684 {
2685 	char *type_names;
2686 	int i;
2687 
2688 	if (!name)
2689 		return -EINVAL;
2690 
2691 	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2692 		if (strncmp(name, section_names[i].sec, section_names[i].len))
2693 			continue;
2694 		if (!section_names[i].is_attachable)
2695 			return -EINVAL;
2696 		*attach_type = section_names[i].attach_type;
2697 		return 0;
2698 	}
2699 	pr_warning("failed to guess attach type based on ELF section name '%s'\n", name);
2700 	type_names = libbpf_get_type_names(true);
2701 	if (type_names != NULL) {
2702 		pr_info("attachable section(type) names are:%s\n", type_names);
2703 		free(type_names);
2704 	}
2705 
2706 	return -EINVAL;
2707 }
2708 
/* Derive @prog's program type and expected attach type from its ELF
 * section name.  Returns 0 on success, -EINVAL when the section name
 * matches no known prefix (see libbpf_prog_type_by_name).
 */
static int
bpf_program__identify_section(struct bpf_program *prog,
			      enum bpf_prog_type *prog_type,
			      enum bpf_attach_type *expected_attach_type)
{
	return libbpf_prog_type_by_name(prog->section_name, prog_type,
					expected_attach_type);
}
2717 
2718 int bpf_map__fd(struct bpf_map *map)
2719 {
2720 	return map ? map->fd : -EINVAL;
2721 }
2722 
2723 const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
2724 {
2725 	return map ? &map->def : ERR_PTR(-EINVAL);
2726 }
2727 
2728 const char *bpf_map__name(struct bpf_map *map)
2729 {
2730 	return map ? map->name : NULL;
2731 }
2732 
2733 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
2734 {
2735 	return map ? map->btf_key_type_id : 0;
2736 }
2737 
2738 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
2739 {
2740 	return map ? map->btf_value_type_id : 0;
2741 }
2742 
2743 int bpf_map__set_priv(struct bpf_map *map, void *priv,
2744 		     bpf_map_clear_priv_t clear_priv)
2745 {
2746 	if (!map)
2747 		return -EINVAL;
2748 
2749 	if (map->priv) {
2750 		if (map->clear_priv)
2751 			map->clear_priv(map, map->priv);
2752 	}
2753 
2754 	map->priv = priv;
2755 	map->clear_priv = clear_priv;
2756 	return 0;
2757 }
2758 
2759 void *bpf_map__priv(struct bpf_map *map)
2760 {
2761 	return map ? map->priv : ERR_PTR(-EINVAL);
2762 }
2763 
/* PERF_EVENT_ARRAY maps are treated as "offload neutral": callers such
 * as bpf_prog_load_xattr() skip assigning a device ifindex to them.
 * No NULL check on @map.
 */
bool bpf_map__is_offload_neutral(struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}
2768 
/* Record a network device ifindex for @map, used at map creation time
 * (NOTE(review): presumably for device-bound/offloaded maps — confirm
 * against map-creation code).  No NULL check on @map.
 */
void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
	map->map_ifindex = ifindex;
}
2773 
/* Set the fd of the inner map template for a map-in-map (e.g.
 * array/hash of maps).  Fails with -EINVAL if @map is not a
 * map-in-map type or an inner fd was already set (-1 means unset).
 */
int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
{
	if (!bpf_map_type__is_map_in_map(map->def.type)) {
		pr_warning("error: unsupported map type\n");
		return -EINVAL;
	}
	if (map->inner_map_fd != -1) {
		pr_warning("error: inner_map_fd already specified\n");
		return -EINVAL;
	}
	map->inner_map_fd = fd;
	return 0;
}
2787 
2788 static struct bpf_map *
2789 __bpf_map__iter(struct bpf_map *m, struct bpf_object *obj, int i)
2790 {
2791 	ssize_t idx;
2792 	struct bpf_map *s, *e;
2793 
2794 	if (!obj || !obj->maps)
2795 		return NULL;
2796 
2797 	s = obj->maps;
2798 	e = obj->maps + obj->nr_maps;
2799 
2800 	if ((m < s) || (m >= e)) {
2801 		pr_warning("error in %s: map handler doesn't belong to object\n",
2802 			   __func__);
2803 		return NULL;
2804 	}
2805 
2806 	idx = (m - obj->maps) + i;
2807 	if (idx >= obj->nr_maps || idx < 0)
2808 		return NULL;
2809 	return &obj->maps[idx];
2810 }
2811 
2812 struct bpf_map *
2813 bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
2814 {
2815 	if (prev == NULL)
2816 		return obj->maps;
2817 
2818 	return __bpf_map__iter(prev, obj, 1);
2819 }
2820 
2821 struct bpf_map *
2822 bpf_map__prev(struct bpf_map *next, struct bpf_object *obj)
2823 {
2824 	if (next == NULL) {
2825 		if (!obj->nr_maps)
2826 			return NULL;
2827 		return obj->maps + obj->nr_maps - 1;
2828 	}
2829 
2830 	return __bpf_map__iter(next, obj, -1);
2831 }
2832 
2833 struct bpf_map *
2834 bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
2835 {
2836 	struct bpf_map *pos;
2837 
2838 	bpf_map__for_each(pos, obj) {
2839 		if (pos->name && !strcmp(pos->name, name))
2840 			return pos;
2841 	}
2842 	return NULL;
2843 }
2844 
/* Look up a map of @obj by name and return its fd; -EINVAL when no such
 * map exists (bpf_map__fd(NULL) yields -EINVAL).
 */
int
bpf_object__find_map_fd_by_name(struct bpf_object *obj, const char *name)
{
	return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
}
2850 
2851 struct bpf_map *
2852 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
2853 {
2854 	int i;
2855 
2856 	for (i = 0; i < obj->nr_maps; i++) {
2857 		if (obj->maps[i].offset == offset)
2858 			return &obj->maps[i];
2859 	}
2860 	return ERR_PTR(-ENOENT);
2861 }
2862 
/* Convert an ERR_PTR-style pointer into an error code: the negative
 * errno embedded in @ptr, or 0 for a valid pointer.
 */
long libbpf_get_error(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}
2869 
2870 int bpf_prog_load(const char *file, enum bpf_prog_type type,
2871 		  struct bpf_object **pobj, int *prog_fd)
2872 {
2873 	struct bpf_prog_load_attr attr;
2874 
2875 	memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
2876 	attr.file = file;
2877 	attr.prog_type = type;
2878 	attr.expected_attach_type = 0;
2879 
2880 	return bpf_prog_load_xattr(&attr, pobj, prog_fd);
2881 }
2882 
2883 int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
2884 			struct bpf_object **pobj, int *prog_fd)
2885 {
2886 	struct bpf_object_open_attr open_attr = {
2887 		.file		= attr->file,
2888 		.prog_type	= attr->prog_type,
2889 	};
2890 	struct bpf_program *prog, *first_prog = NULL;
2891 	enum bpf_attach_type expected_attach_type;
2892 	enum bpf_prog_type prog_type;
2893 	struct bpf_object *obj;
2894 	struct bpf_map *map;
2895 	int err;
2896 
2897 	if (!attr)
2898 		return -EINVAL;
2899 	if (!attr->file)
2900 		return -EINVAL;
2901 
2902 	obj = bpf_object__open_xattr(&open_attr);
2903 	if (IS_ERR_OR_NULL(obj))
2904 		return -ENOENT;
2905 
2906 	bpf_object__for_each_program(prog, obj) {
2907 		/*
2908 		 * If type is not specified, try to guess it based on
2909 		 * section name.
2910 		 */
2911 		prog_type = attr->prog_type;
2912 		prog->prog_ifindex = attr->ifindex;
2913 		expected_attach_type = attr->expected_attach_type;
2914 		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
2915 			err = bpf_program__identify_section(prog, &prog_type,
2916 							    &expected_attach_type);
2917 			if (err < 0) {
2918 				bpf_object__close(obj);
2919 				return -EINVAL;
2920 			}
2921 		}
2922 
2923 		bpf_program__set_type(prog, prog_type);
2924 		bpf_program__set_expected_attach_type(prog,
2925 						      expected_attach_type);
2926 
2927 		if (!first_prog)
2928 			first_prog = prog;
2929 	}
2930 
2931 	bpf_map__for_each(map, obj) {
2932 		if (!bpf_map__is_offload_neutral(map))
2933 			map->map_ifindex = attr->ifindex;
2934 	}
2935 
2936 	if (!first_prog) {
2937 		pr_warning("object file doesn't contain bpf program\n");
2938 		bpf_object__close(obj);
2939 		return -ENOENT;
2940 	}
2941 
2942 	err = bpf_object__load(obj);
2943 	if (err) {
2944 		bpf_object__close(obj);
2945 		return -EINVAL;
2946 	}
2947 
2948 	*pobj = obj;
2949 	*prog_fd = bpf_program__fd(first_prog);
2950 	return 0;
2951 }
2952 
/*
 * Drain all pending records from a perf ring buffer mmap'ed at
 * @mmap_mem: a control page (struct perf_event_mmap_page) of @page_size
 * bytes followed by a data area of @mmap_size bytes (the index masking
 * below assumes @mmap_size is a power of two).
 *
 * Each record is handed to @fn together with @private_data; draining
 * stops early when @fn returns anything other than LIBBPF_PERF_EVENT_CONT.
 *
 * *copy_mem / *copy_size form a caller-owned scratch buffer, grown here
 * on demand to linearize records that wrap past the end of the ring;
 * the caller is responsible for eventually freeing *copy_mem.
 */
enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data)
{
	struct perf_event_mmap_page *header = mmap_mem;
	__u64 data_head = ring_buffer_read_head(header);
	__u64 data_tail = header->data_tail;
	void *base = ((__u8 *)header) + page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	while (data_head != data_tail) {
		/* Tail is monotonically increasing; mask maps it into the
		 * ring's data area.
		 */
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		/* Record wraps past the end of the ring: copy its two
		 * pieces into the (possibly grown) scratch buffer so @fn
		 * sees one contiguous record.
		 */
		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_secnd = ehdr_size - len_first;

			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_secnd);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	/* Publish the new tail so the kernel may reuse consumed space. */
	ring_buffer_write_tail(header, data_tail);
	return ret;
}
3000