xref: /linux/tools/lib/bpf/libbpf.c (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 
3 /*
4  * Common eBPF ELF object loading operations.
5  *
6  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8  * Copyright (C) 2015 Huawei Inc.
9  * Copyright (C) 2017 Nicira, Inc.
10  * Copyright (C) 2019 Isovalent, Inc.
11  */
12 
13 #ifndef _GNU_SOURCE
14 #define _GNU_SOURCE
15 #endif
16 #include <stdlib.h>
17 #include <stdio.h>
18 #include <stdarg.h>
19 #include <libgen.h>
20 #include <inttypes.h>
21 #include <limits.h>
22 #include <string.h>
23 #include <unistd.h>
24 #include <endian.h>
25 #include <fcntl.h>
26 #include <errno.h>
27 #include <ctype.h>
28 #include <asm/unistd.h>
29 #include <linux/err.h>
30 #include <linux/kernel.h>
31 #include <linux/bpf.h>
32 #include <linux/btf.h>
33 #include <linux/filter.h>
34 #include <linux/limits.h>
35 #include <linux/perf_event.h>
36 #include <linux/bpf_perf_event.h>
37 #include <linux/ring_buffer.h>
38 #include <sys/epoll.h>
39 #include <sys/ioctl.h>
40 #include <sys/mman.h>
41 #include <sys/stat.h>
42 #include <sys/types.h>
43 #include <sys/vfs.h>
44 #include <sys/utsname.h>
45 #include <sys/resource.h>
46 #include <libelf.h>
47 #include <gelf.h>
48 #include <zlib.h>
49 
50 #include "libbpf.h"
51 #include "bpf.h"
52 #include "btf.h"
53 #include "libbpf_internal.h"
54 #include "hashmap.h"
55 #include "bpf_gen_internal.h"
56 #include "zip.h"
57 
58 #ifndef BPF_FS_MAGIC
59 #define BPF_FS_MAGIC		0xcafe4a11
60 #endif
61 
62 #define MAX_EVENT_NAME_LEN	64
63 
64 #define BPF_FS_DEFAULT_PATH "/sys/fs/bpf"
65 
66 #define BPF_INSN_SZ (sizeof(struct bpf_insn))
67 
68 /* vfprintf() in __base_pr() uses a nonliteral format string. It may break
69  * compilation if the user enables the corresponding warning. Disable it explicitly.
70  */
71 #pragma GCC diagnostic ignored "-Wformat-nonliteral"
72 
73 #define __printf(a, b)	__attribute__((format(printf, a, b)))
74 
75 static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
76 static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
77 static int map_set_def_max_entries(struct bpf_map *map);
78 
79 static const char * const attach_type_name[] = {
80 	[BPF_CGROUP_INET_INGRESS]	= "cgroup_inet_ingress",
81 	[BPF_CGROUP_INET_EGRESS]	= "cgroup_inet_egress",
82 	[BPF_CGROUP_INET_SOCK_CREATE]	= "cgroup_inet_sock_create",
83 	[BPF_CGROUP_INET_SOCK_RELEASE]	= "cgroup_inet_sock_release",
84 	[BPF_CGROUP_SOCK_OPS]		= "cgroup_sock_ops",
85 	[BPF_CGROUP_DEVICE]		= "cgroup_device",
86 	[BPF_CGROUP_INET4_BIND]		= "cgroup_inet4_bind",
87 	[BPF_CGROUP_INET6_BIND]		= "cgroup_inet6_bind",
88 	[BPF_CGROUP_INET4_CONNECT]	= "cgroup_inet4_connect",
89 	[BPF_CGROUP_INET6_CONNECT]	= "cgroup_inet6_connect",
90 	[BPF_CGROUP_UNIX_CONNECT]       = "cgroup_unix_connect",
91 	[BPF_CGROUP_INET4_POST_BIND]	= "cgroup_inet4_post_bind",
92 	[BPF_CGROUP_INET6_POST_BIND]	= "cgroup_inet6_post_bind",
93 	[BPF_CGROUP_INET4_GETPEERNAME]	= "cgroup_inet4_getpeername",
94 	[BPF_CGROUP_INET6_GETPEERNAME]	= "cgroup_inet6_getpeername",
95 	[BPF_CGROUP_UNIX_GETPEERNAME]	= "cgroup_unix_getpeername",
96 	[BPF_CGROUP_INET4_GETSOCKNAME]	= "cgroup_inet4_getsockname",
97 	[BPF_CGROUP_INET6_GETSOCKNAME]	= "cgroup_inet6_getsockname",
98 	[BPF_CGROUP_UNIX_GETSOCKNAME]	= "cgroup_unix_getsockname",
99 	[BPF_CGROUP_UDP4_SENDMSG]	= "cgroup_udp4_sendmsg",
100 	[BPF_CGROUP_UDP6_SENDMSG]	= "cgroup_udp6_sendmsg",
101 	[BPF_CGROUP_UNIX_SENDMSG]	= "cgroup_unix_sendmsg",
102 	[BPF_CGROUP_SYSCTL]		= "cgroup_sysctl",
103 	[BPF_CGROUP_UDP4_RECVMSG]	= "cgroup_udp4_recvmsg",
104 	[BPF_CGROUP_UDP6_RECVMSG]	= "cgroup_udp6_recvmsg",
105 	[BPF_CGROUP_UNIX_RECVMSG]	= "cgroup_unix_recvmsg",
106 	[BPF_CGROUP_GETSOCKOPT]		= "cgroup_getsockopt",
107 	[BPF_CGROUP_SETSOCKOPT]		= "cgroup_setsockopt",
108 	[BPF_SK_SKB_STREAM_PARSER]	= "sk_skb_stream_parser",
109 	[BPF_SK_SKB_STREAM_VERDICT]	= "sk_skb_stream_verdict",
110 	[BPF_SK_SKB_VERDICT]		= "sk_skb_verdict",
111 	[BPF_SK_MSG_VERDICT]		= "sk_msg_verdict",
112 	[BPF_LIRC_MODE2]		= "lirc_mode2",
113 	[BPF_FLOW_DISSECTOR]		= "flow_dissector",
114 	[BPF_TRACE_RAW_TP]		= "trace_raw_tp",
115 	[BPF_TRACE_FENTRY]		= "trace_fentry",
116 	[BPF_TRACE_FEXIT]		= "trace_fexit",
117 	[BPF_MODIFY_RETURN]		= "modify_return",
118 	[BPF_TRACE_FSESSION]		= "trace_fsession",
119 	[BPF_LSM_MAC]			= "lsm_mac",
120 	[BPF_LSM_CGROUP]		= "lsm_cgroup",
121 	[BPF_SK_LOOKUP]			= "sk_lookup",
122 	[BPF_TRACE_ITER]		= "trace_iter",
123 	[BPF_XDP_DEVMAP]		= "xdp_devmap",
124 	[BPF_XDP_CPUMAP]		= "xdp_cpumap",
125 	[BPF_XDP]			= "xdp",
126 	[BPF_SK_REUSEPORT_SELECT]	= "sk_reuseport_select",
127 	[BPF_SK_REUSEPORT_SELECT_OR_MIGRATE]	= "sk_reuseport_select_or_migrate",
128 	[BPF_PERF_EVENT]		= "perf_event",
129 	[BPF_TRACE_KPROBE_MULTI]	= "trace_kprobe_multi",
130 	[BPF_STRUCT_OPS]		= "struct_ops",
131 	[BPF_NETFILTER]			= "netfilter",
132 	[BPF_TCX_INGRESS]		= "tcx_ingress",
133 	[BPF_TCX_EGRESS]		= "tcx_egress",
134 	[BPF_TRACE_UPROBE_MULTI]	= "trace_uprobe_multi",
135 	[BPF_NETKIT_PRIMARY]		= "netkit_primary",
136 	[BPF_NETKIT_PEER]		= "netkit_peer",
137 	[BPF_TRACE_KPROBE_SESSION]	= "trace_kprobe_session",
138 	[BPF_TRACE_UPROBE_SESSION]	= "trace_uprobe_session",
139 };
140 
141 static const char * const link_type_name[] = {
142 	[BPF_LINK_TYPE_UNSPEC]			= "unspec",
143 	[BPF_LINK_TYPE_RAW_TRACEPOINT]		= "raw_tracepoint",
144 	[BPF_LINK_TYPE_TRACING]			= "tracing",
145 	[BPF_LINK_TYPE_CGROUP]			= "cgroup",
146 	[BPF_LINK_TYPE_ITER]			= "iter",
147 	[BPF_LINK_TYPE_NETNS]			= "netns",
148 	[BPF_LINK_TYPE_XDP]			= "xdp",
149 	[BPF_LINK_TYPE_PERF_EVENT]		= "perf_event",
150 	[BPF_LINK_TYPE_KPROBE_MULTI]		= "kprobe_multi",
151 	[BPF_LINK_TYPE_STRUCT_OPS]		= "struct_ops",
152 	[BPF_LINK_TYPE_NETFILTER]		= "netfilter",
153 	[BPF_LINK_TYPE_TCX]			= "tcx",
154 	[BPF_LINK_TYPE_UPROBE_MULTI]		= "uprobe_multi",
155 	[BPF_LINK_TYPE_NETKIT]			= "netkit",
156 	[BPF_LINK_TYPE_SOCKMAP]			= "sockmap",
157 };
158 
159 static const char * const map_type_name[] = {
160 	[BPF_MAP_TYPE_UNSPEC]			= "unspec",
161 	[BPF_MAP_TYPE_HASH]			= "hash",
162 	[BPF_MAP_TYPE_ARRAY]			= "array",
163 	[BPF_MAP_TYPE_PROG_ARRAY]		= "prog_array",
164 	[BPF_MAP_TYPE_PERF_EVENT_ARRAY]		= "perf_event_array",
165 	[BPF_MAP_TYPE_PERCPU_HASH]		= "percpu_hash",
166 	[BPF_MAP_TYPE_PERCPU_ARRAY]		= "percpu_array",
167 	[BPF_MAP_TYPE_STACK_TRACE]		= "stack_trace",
168 	[BPF_MAP_TYPE_CGROUP_ARRAY]		= "cgroup_array",
169 	[BPF_MAP_TYPE_LRU_HASH]			= "lru_hash",
170 	[BPF_MAP_TYPE_LRU_PERCPU_HASH]		= "lru_percpu_hash",
171 	[BPF_MAP_TYPE_LPM_TRIE]			= "lpm_trie",
172 	[BPF_MAP_TYPE_ARRAY_OF_MAPS]		= "array_of_maps",
173 	[BPF_MAP_TYPE_HASH_OF_MAPS]		= "hash_of_maps",
174 	[BPF_MAP_TYPE_DEVMAP]			= "devmap",
175 	[BPF_MAP_TYPE_DEVMAP_HASH]		= "devmap_hash",
176 	[BPF_MAP_TYPE_SOCKMAP]			= "sockmap",
177 	[BPF_MAP_TYPE_CPUMAP]			= "cpumap",
178 	[BPF_MAP_TYPE_XSKMAP]			= "xskmap",
179 	[BPF_MAP_TYPE_SOCKHASH]			= "sockhash",
180 	[BPF_MAP_TYPE_CGROUP_STORAGE]		= "cgroup_storage",
181 	[BPF_MAP_TYPE_REUSEPORT_SOCKARRAY]	= "reuseport_sockarray",
182 	[BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE]	= "percpu_cgroup_storage",
183 	[BPF_MAP_TYPE_QUEUE]			= "queue",
184 	[BPF_MAP_TYPE_STACK]			= "stack",
185 	[BPF_MAP_TYPE_SK_STORAGE]		= "sk_storage",
186 	[BPF_MAP_TYPE_STRUCT_OPS]		= "struct_ops",
187 	[BPF_MAP_TYPE_RINGBUF]			= "ringbuf",
188 	[BPF_MAP_TYPE_INODE_STORAGE]		= "inode_storage",
189 	[BPF_MAP_TYPE_TASK_STORAGE]		= "task_storage",
190 	[BPF_MAP_TYPE_BLOOM_FILTER]		= "bloom_filter",
191 	[BPF_MAP_TYPE_USER_RINGBUF]             = "user_ringbuf",
192 	[BPF_MAP_TYPE_CGRP_STORAGE]		= "cgrp_storage",
193 	[BPF_MAP_TYPE_ARENA]			= "arena",
194 	[BPF_MAP_TYPE_INSN_ARRAY]		= "insn_array",
195 };
196 
197 static const char * const prog_type_name[] = {
198 	[BPF_PROG_TYPE_UNSPEC]			= "unspec",
199 	[BPF_PROG_TYPE_SOCKET_FILTER]		= "socket_filter",
200 	[BPF_PROG_TYPE_KPROBE]			= "kprobe",
201 	[BPF_PROG_TYPE_SCHED_CLS]		= "sched_cls",
202 	[BPF_PROG_TYPE_SCHED_ACT]		= "sched_act",
203 	[BPF_PROG_TYPE_TRACEPOINT]		= "tracepoint",
204 	[BPF_PROG_TYPE_XDP]			= "xdp",
205 	[BPF_PROG_TYPE_PERF_EVENT]		= "perf_event",
206 	[BPF_PROG_TYPE_CGROUP_SKB]		= "cgroup_skb",
207 	[BPF_PROG_TYPE_CGROUP_SOCK]		= "cgroup_sock",
208 	[BPF_PROG_TYPE_LWT_IN]			= "lwt_in",
209 	[BPF_PROG_TYPE_LWT_OUT]			= "lwt_out",
210 	[BPF_PROG_TYPE_LWT_XMIT]		= "lwt_xmit",
211 	[BPF_PROG_TYPE_SOCK_OPS]		= "sock_ops",
212 	[BPF_PROG_TYPE_SK_SKB]			= "sk_skb",
213 	[BPF_PROG_TYPE_CGROUP_DEVICE]		= "cgroup_device",
214 	[BPF_PROG_TYPE_SK_MSG]			= "sk_msg",
215 	[BPF_PROG_TYPE_RAW_TRACEPOINT]		= "raw_tracepoint",
216 	[BPF_PROG_TYPE_CGROUP_SOCK_ADDR]	= "cgroup_sock_addr",
217 	[BPF_PROG_TYPE_LWT_SEG6LOCAL]		= "lwt_seg6local",
218 	[BPF_PROG_TYPE_LIRC_MODE2]		= "lirc_mode2",
219 	[BPF_PROG_TYPE_SK_REUSEPORT]		= "sk_reuseport",
220 	[BPF_PROG_TYPE_FLOW_DISSECTOR]		= "flow_dissector",
221 	[BPF_PROG_TYPE_CGROUP_SYSCTL]		= "cgroup_sysctl",
222 	[BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE]	= "raw_tracepoint_writable",
223 	[BPF_PROG_TYPE_CGROUP_SOCKOPT]		= "cgroup_sockopt",
224 	[BPF_PROG_TYPE_TRACING]			= "tracing",
225 	[BPF_PROG_TYPE_STRUCT_OPS]		= "struct_ops",
226 	[BPF_PROG_TYPE_EXT]			= "ext",
227 	[BPF_PROG_TYPE_LSM]			= "lsm",
228 	[BPF_PROG_TYPE_SK_LOOKUP]		= "sk_lookup",
229 	[BPF_PROG_TYPE_SYSCALL]			= "syscall",
230 	[BPF_PROG_TYPE_NETFILTER]		= "netfilter",
231 };
232 
233 static int __base_pr(enum libbpf_print_level level, const char *format,
234 		     va_list args)
235 {
236 	const char *env_var = "LIBBPF_LOG_LEVEL";
237 	static enum libbpf_print_level min_level = LIBBPF_INFO;
238 	static bool initialized;
239 
240 	if (!initialized) {
241 		char *verbosity;
242 
243 		initialized = true;
244 		verbosity = getenv(env_var);
245 		if (verbosity) {
246 			if (strcasecmp(verbosity, "warn") == 0)
247 				min_level = LIBBPF_WARN;
248 			else if (strcasecmp(verbosity, "debug") == 0)
249 				min_level = LIBBPF_DEBUG;
250 			else if (strcasecmp(verbosity, "info") == 0)
251 				min_level = LIBBPF_INFO;
252 			else
253 				fprintf(stderr, "libbpf: unrecognized '%s' envvar value: '%s', should be one of 'warn', 'debug', or 'info'.\n",
254 					env_var, verbosity);
255 		}
256 	}
257 
258 	/* if too verbose, skip logging  */
259 	if (level > min_level)
260 		return 0;
261 
262 	return vfprintf(stderr, format, args);
263 }
264 
265 static libbpf_print_fn_t __libbpf_pr = __base_pr;
266 
267 libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
268 {
269 	libbpf_print_fn_t old_print_fn;
270 
271 	old_print_fn = __atomic_exchange_n(&__libbpf_pr, fn, __ATOMIC_RELAXED);
272 
273 	return old_print_fn;
274 }
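
/* Usage sketch (illustrative, not part of libbpf itself): a caller can
 * install a custom print callback to filter or redirect libbpf logs. The
 * callback name below is hypothetical:
 *
 *	static int my_print(enum libbpf_print_level level,
 *			    const char *format, va_list args)
 *	{
 *		if (level == LIBBPF_DEBUG)
 *			return 0;
 *		return vfprintf(stderr, format, args);
 *	}
 *
 *	libbpf_set_print(my_print);   (install the callback)
 *	libbpf_set_print(NULL);       (disable all libbpf logging)
 */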
275 
276 __printf(2, 3)
277 void libbpf_print(enum libbpf_print_level level, const char *format, ...)
278 {
279 	va_list args;
280 	int old_errno;
281 	libbpf_print_fn_t print_fn;
282 
283 	print_fn = __atomic_load_n(&__libbpf_pr, __ATOMIC_RELAXED);
284 	if (!print_fn)
285 		return;
286 
287 	old_errno = errno;
288 
289 	va_start(args, format);
290 	print_fn(level, format, args);
291 	va_end(args);
292 
293 	errno = old_errno;
294 }
295 
296 static void pr_perm_msg(int err)
297 {
298 	struct rlimit limit;
299 	char buf[100];
300 
301 	if (err != -EPERM || geteuid() != 0)
302 		return;
303 
304 	err = getrlimit(RLIMIT_MEMLOCK, &limit);
305 	if (err)
306 		return;
307 
308 	if (limit.rlim_cur == RLIM_INFINITY)
309 		return;
310 
311 	if (limit.rlim_cur < 1024)
312 		snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
313 	else if (limit.rlim_cur < 1024*1024)
314 		snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
315 	else
316 		snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));
317 
318 	pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
319 		buf);
320 }
321 
322 /* Copied from tools/perf/util/util.h */
323 #ifndef zfree
324 # define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
325 #endif
326 
327 #ifndef zclose
328 # define zclose(fd) ({			\
329 	int ___err = 0;			\
330 	if ((fd) >= 0)			\
331 		___err = close((fd));	\
332 	fd = -1;			\
333 	___err; })
334 #endif
335 
336 static inline __u64 ptr_to_u64(const void *ptr)
337 {
338 	return (__u64) (unsigned long) ptr;
339 }
340 
341 int libbpf_set_strict_mode(enum libbpf_strict_mode mode)
342 {
343 	/* as of v1.0 libbpf_set_strict_mode() is a no-op */
344 	return 0;
345 }
346 
347 __u32 libbpf_major_version(void)
348 {
349 	return LIBBPF_MAJOR_VERSION;
350 }
351 
352 __u32 libbpf_minor_version(void)
353 {
354 	return LIBBPF_MINOR_VERSION;
355 }
356 
357 const char *libbpf_version_string(void)
358 {
359 #define __S(X) #X
360 #define _S(X) __S(X)
361 	return  "v" _S(LIBBPF_MAJOR_VERSION) "." _S(LIBBPF_MINOR_VERSION);
362 #undef _S
363 #undef __S
364 }
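
/* A worked example of the two-level stringification above, assuming
 * LIBBPF_MAJOR_VERSION == 1 and LIBBPF_MINOR_VERSION == 6: the _S()
 * indirection forces the argument to be macro-expanded before it is
 * stringified:
 *
 *	__S(LIBBPF_MAJOR_VERSION)  ->  "LIBBPF_MAJOR_VERSION"
 *	_S(LIBBPF_MAJOR_VERSION)   ->  __S(1)  ->  "1"
 *
 * so the function returns "v1.6" rather than
 * "vLIBBPF_MAJOR_VERSION.LIBBPF_MINOR_VERSION".
 */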
365 
366 enum reloc_type {
367 	RELO_LD64,
368 	RELO_CALL,
369 	RELO_DATA,
370 	RELO_EXTERN_LD64,
371 	RELO_EXTERN_CALL,
372 	RELO_SUBPROG_ADDR,
373 	RELO_CORE,
374 	RELO_INSN_ARRAY,
375 };
376 
377 struct reloc_desc {
378 	enum reloc_type type;
379 	int insn_idx;
380 	union {
381 		const struct bpf_core_relo *core_relo; /* used when type == RELO_CORE */
382 		struct {
383 			int map_idx;
384 			unsigned int sym_off;
385 			/*
386 			 * The following two fields can be unionized, as the
387 			 * ext_idx field is used for extern symbols, and the
388 			 * sym_size is used for jump tables, which are never
389 			 * extern
390 			 */
391 			union {
392 				int ext_idx;
393 				int sym_size;
394 			};
395 		};
396 	};
397 };
398 
399 /* stored as sec_def->cookie for all libbpf-supported SEC()s */
400 enum sec_def_flags {
401 	SEC_NONE = 0,
402 	/* expected_attach_type is optional and can be omitted if the kernel doesn't support it */
403 	SEC_EXP_ATTACH_OPT = 1,
404 	/* legacy, only used by libbpf_get_type_names() and
405 	 * libbpf_attach_type_by_name(), not used by libbpf itself at all.
406 	 * This used to be associated with cgroup (and a few other) BPF
407 	 * programs that were attachable through the BPF_PROG_ATTACH command.
408 	 * Pretty meaningless nowadays, though.
409 	 */
410 	SEC_ATTACHABLE = 2,
411 	SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT,
412 	/* attachment target is specified through BTF ID in either kernel or
413 	 * other BPF program's BTF object
414 	 */
415 	SEC_ATTACH_BTF = 4,
416 	/* BPF program type allows sleeping/blocking in kernel */
417 	SEC_SLEEPABLE = 8,
418 	/* BPF program supports non-linear XDP buffers */
419 	SEC_XDP_FRAGS = 16,
420 	/* Set up the proper attach type for USDT probes. */
421 	SEC_USDT = 32,
422 };
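
/* A sketch of how these flags are typically combined; the authoritative
 * mapping is libbpf's section definitions table. For example:
 *
 *	SEC("xdp.frags")     ->  SEC_XDP_FRAGS
 *	SEC("fentry/...")    ->  SEC_ATTACH_BTF
 *	SEC("fentry.s/...")  ->  SEC_ATTACH_BTF | SEC_SLEEPABLE
 */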
423 
424 struct bpf_sec_def {
425 	char *sec;
426 	enum bpf_prog_type prog_type;
427 	enum bpf_attach_type expected_attach_type;
428 	long cookie;
429 	int handler_id;
430 
431 	libbpf_prog_setup_fn_t prog_setup_fn;
432 	libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
433 	libbpf_prog_attach_fn_t prog_attach_fn;
434 };
435 
436 struct bpf_light_subprog {
437 	__u32 sec_insn_off;
438 	__u32 sub_insn_off;
439 };
440 
441 /*
442  * bpf_prog would be a better name, but it is already used in
443  * linux/filter.h.
444  */
445 struct bpf_program {
446 	char *name;
447 	char *sec_name;
448 	size_t sec_idx;
449 	const struct bpf_sec_def *sec_def;
450 	/* this program's instruction offset (in number of instructions)
451 	 * within its containing ELF section
452 	 */
453 	size_t sec_insn_off;
454 	/* number of original instructions in the ELF section belonging to
455 	 * this program, not taking into account subprogram instructions
456 	 * possibly appended later during relocation
457 	 */
458 	size_t sec_insn_cnt;
459 	/* Offset (in number of instructions) of the start of instructions
460 	 * belonging to this BPF program within its containing main BPF
461 	 * program. For the entry-point (main) BPF program, this is always
462 	 * zero. For a sub-program, this gets reset before each main BPF
463 	 * program is processed and relocated, and is used to determine
464 	 * whether the sub-program was already appended to the main program
465 	 * and, if so, at which instruction offset.
466 	 */
467 	size_t sub_insn_off;
468 
469 	/* instructions that belong to BPF program; insns[0] is located at
470 	 * sec_insn_off instruction within its ELF section in ELF file, so
471 	 * when mapping ELF file instruction index to the local instruction,
472 	 * one needs to subtract sec_insn_off; and vice versa.
473 	 */
474 	struct bpf_insn *insns;
475 	/* actual number of instructions in this BPF program's image; for
476 	 * entry-point BPF programs this includes the size of the main
477 	 * program itself plus all the used sub-programs, appended at the end
478 	 */
479 	size_t insns_cnt;
480 
481 	struct reloc_desc *reloc_desc;
482 	int nr_reloc;
483 
484 	/* BPF verifier log settings */
485 	char *log_buf;
486 	size_t log_size;
487 	__u32 log_level;
488 
489 	struct bpf_object *obj;
490 
491 	int fd;
492 	bool autoload;
493 	bool autoattach;
494 	bool sym_global;
495 	bool mark_btf_static;
496 	enum bpf_prog_type type;
497 	enum bpf_attach_type expected_attach_type;
498 	int exception_cb_idx;
499 
500 	int prog_ifindex;
501 	__u32 attach_btf_obj_fd;
502 	__u32 attach_btf_id;
503 	__u32 attach_prog_fd;
504 
505 	void *func_info;
506 	__u32 func_info_rec_size;
507 	__u32 func_info_cnt;
508 
509 	void *line_info;
510 	__u32 line_info_rec_size;
511 	__u32 line_info_cnt;
512 	__u32 prog_flags;
513 	__u8  hash[SHA256_DIGEST_LENGTH];
514 
515 	struct bpf_light_subprog *subprogs;
516 	__u32 subprog_cnt;
517 };
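
/* A worked example of the offset bookkeeping above (hypothetical numbers):
 * if a program's function symbol starts at byte offset 128 within its ELF
 * section, sec_insn_off = 128 / BPF_INSN_SZ = 16, and ELF-file instruction
 * index i inside this program maps to local index i - sec_insn_off. If,
 * during relocation, this program is appended as a sub-program to a main
 * program at instruction 42, its sub_insn_off is set to 42 for the duration
 * of that main program's processing.
 */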
518 
519 struct bpf_struct_ops {
520 	struct bpf_program **progs;
521 	__u32 *kern_func_off;
522 	/* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
523 	void *data;
524 	/* e.g. struct bpf_struct_ops_tcp_congestion_ops in
525 	 *      btf_vmlinux's format.
526 	 * struct bpf_struct_ops_tcp_congestion_ops {
527 	 *	[... some other kernel fields ...]
528 	 *	struct tcp_congestion_ops data;
529 	 * }
530 	 * kern_vdata size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
531 	 * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
532 	 * from "data".
533 	 */
534 	void *kern_vdata;
535 	__u32 type_id;
536 };
537 
538 #define DATA_SEC ".data"
539 #define BSS_SEC ".bss"
540 #define RODATA_SEC ".rodata"
541 #define KCONFIG_SEC ".kconfig"
542 #define KSYMS_SEC ".ksyms"
543 #define STRUCT_OPS_SEC ".struct_ops"
544 #define STRUCT_OPS_LINK_SEC ".struct_ops.link"
545 #define ARENA_SEC ".addr_space.1"
546 
547 enum libbpf_map_type {
548 	LIBBPF_MAP_UNSPEC,
549 	LIBBPF_MAP_DATA,
550 	LIBBPF_MAP_BSS,
551 	LIBBPF_MAP_RODATA,
552 	LIBBPF_MAP_KCONFIG,
553 };
554 
555 struct bpf_map_def {
556 	unsigned int type;
557 	unsigned int key_size;
558 	unsigned int value_size;
559 	unsigned int max_entries;
560 	unsigned int map_flags;
561 };
562 
563 struct bpf_map {
564 	struct bpf_object *obj;
565 	char *name;
566 	/* real_name is defined for special internal maps (.rodata*,
567 	 * .data*, .bss, .kconfig) and preserves their original ELF section
568 	 * name. This is important to be able to find corresponding BTF
569 	 * DATASEC information.
570 	 */
571 	char *real_name;
572 	int fd;
573 	int sec_idx;
574 	size_t sec_offset;
575 	int map_ifindex;
576 	int inner_map_fd;
577 	struct bpf_map_def def;
578 	__u32 numa_node;
579 	__u32 btf_var_idx;
580 	int mod_btf_fd;
581 	__u32 btf_key_type_id;
582 	__u32 btf_value_type_id;
583 	__u32 btf_vmlinux_value_type_id;
584 	enum libbpf_map_type libbpf_type;
585 	void *mmaped;
586 	struct bpf_struct_ops *st_ops;
587 	struct bpf_map *inner_map;
588 	void **init_slots;
589 	int init_slots_sz;
590 	char *pin_path;
591 	bool pinned;
592 	bool reused;
593 	bool autocreate;
594 	bool autoattach;
595 	__u64 map_extra;
596 	struct bpf_program *excl_prog;
597 };
598 
599 enum extern_type {
600 	EXT_UNKNOWN,
601 	EXT_KCFG,
602 	EXT_KSYM,
603 };
604 
605 enum kcfg_type {
606 	KCFG_UNKNOWN,
607 	KCFG_CHAR,
608 	KCFG_BOOL,
609 	KCFG_INT,
610 	KCFG_TRISTATE,
611 	KCFG_CHAR_ARR,
612 };
613 
614 struct extern_desc {
615 	enum extern_type type;
616 	int sym_idx;
617 	int btf_id;
618 	int sec_btf_id;
619 	char *name;
620 	char *essent_name;
621 	bool is_set;
622 	bool is_weak;
623 	union {
624 		struct {
625 			enum kcfg_type type;
626 			int sz;
627 			int align;
628 			int data_off;
629 			bool is_signed;
630 		} kcfg;
631 		struct {
632 			unsigned long long addr;
633 
634 			/* target btf_id of the corresponding kernel var. */
635 			int kernel_btf_obj_fd;
636 			int kernel_btf_id;
637 
638 			/* local btf_id of the ksym extern's type. */
639 			__u32 type_id;
640 			/* BTF fd index to be patched in for insn->off, this is
641 			 * 0 for vmlinux BTF, index in obj->fd_array for module
642 			 * BTF
643 			 */
644 			__s16 btf_fd_idx;
645 		} ksym;
646 	};
647 };
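
/* For reference, externs of the two kinds above come from BPF-side C code
 * such as the following sketch (__kconfig and __ksym are the usual
 * conventions from bpf_helpers.h):
 *
 *	extern unsigned int CONFIG_HZ __kconfig;          (EXT_KCFG)
 *	extern const struct rq runqueues __ksym;          (EXT_KSYM variable)
 *	extern void bpf_rcu_read_lock(void) __ksym;       (EXT_KSYM function)
 */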
648 
649 struct module_btf {
650 	struct btf *btf;
651 	char *name;
652 	__u32 id;
653 	int fd;
654 	int fd_array_idx;
655 };
656 
657 enum sec_type {
658 	SEC_UNUSED = 0,
659 	SEC_RELO,
660 	SEC_BSS,
661 	SEC_DATA,
662 	SEC_RODATA,
663 	SEC_ST_OPS,
664 };
665 
666 struct elf_sec_desc {
667 	enum sec_type sec_type;
668 	Elf64_Shdr *shdr;
669 	Elf_Data *data;
670 };
671 
672 struct elf_state {
673 	int fd;
674 	const void *obj_buf;
675 	size_t obj_buf_sz;
676 	Elf *elf;
677 	Elf64_Ehdr *ehdr;
678 	Elf_Data *symbols;
679 	Elf_Data *arena_data;
680 	size_t shstrndx; /* section index for section name strings */
681 	size_t strtabidx;
682 	struct elf_sec_desc *secs;
683 	size_t sec_cnt;
684 	int btf_maps_shndx;
685 	__u32 btf_maps_sec_btf_id;
686 	int text_shndx;
687 	int symbols_shndx;
688 	bool has_st_ops;
689 	int arena_data_shndx;
690 	int jumptables_data_shndx;
691 };
692 
693 struct usdt_manager;
694 
695 enum bpf_object_state {
696 	OBJ_OPEN,
697 	OBJ_PREPARED,
698 	OBJ_LOADED,
699 };
700 
701 struct bpf_object {
702 	char name[BPF_OBJ_NAME_LEN];
703 	char license[64];
704 	__u32 kern_version;
705 
706 	enum bpf_object_state state;
707 	struct bpf_program *programs;
708 	size_t nr_programs;
709 	struct bpf_map *maps;
710 	size_t nr_maps;
711 	size_t maps_cap;
712 
713 	char *kconfig;
714 	struct extern_desc *externs;
715 	int nr_extern;
716 	int kconfig_map_idx;
717 
718 	bool has_subcalls;
719 	bool has_rodata;
720 
721 	struct bpf_gen *gen_loader;
722 
723 	/* Information used when doing ELF-related work. Only valid if efile.elf is not NULL */
724 	struct elf_state efile;
725 
726 	unsigned char byteorder;
727 
728 	struct btf *btf;
729 	struct btf_ext *btf_ext;
730 
731 	/* Parse and load BTF vmlinux if any of the programs in the object need
732 	 * it at load time.
733 	 */
734 	struct btf *btf_vmlinux;
735 	/* Path to the custom BTF to be used for BPF CO-RE relocations as an
736 	 * override for vmlinux BTF.
737 	 */
738 	char *btf_custom_path;
739 	/* vmlinux BTF override for CO-RE relocations */
740 	struct btf *btf_vmlinux_override;
741 	/* Lazily initialized kernel module BTFs */
742 	struct module_btf *btf_modules;
743 	bool btf_modules_loaded;
744 	size_t btf_module_cnt;
745 	size_t btf_module_cap;
746 
747 	/* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */
748 	char *log_buf;
749 	size_t log_size;
750 	__u32 log_level;
751 
752 	int *fd_array;
753 	size_t fd_array_cap;
754 	size_t fd_array_cnt;
755 
756 	struct usdt_manager *usdt_man;
757 
758 	int arena_map_idx;
759 	void *arena_data;
760 	size_t arena_data_sz;
761 	size_t arena_data_off;
762 
763 	void *jumptables_data;
764 	size_t jumptables_data_sz;
765 
766 	struct {
767 		struct bpf_program *prog;
768 		unsigned int sym_off;
769 		int fd;
770 	} *jumptable_maps;
771 	size_t jumptable_map_cnt;
772 
773 	struct kern_feature_cache *feat_cache;
774 	char *token_path;
775 	int token_fd;
776 
777 	char path[];
778 };
779 
780 static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
781 static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
782 static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
783 static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
784 static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn);
785 static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
786 static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
787 static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx);
788 static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx);
789 
790 void bpf_program__unload(struct bpf_program *prog)
791 {
792 	if (!prog)
793 		return;
794 
795 	zclose(prog->fd);
796 
797 	zfree(&prog->func_info);
798 	zfree(&prog->line_info);
799 	zfree(&prog->subprogs);
800 }
801 
802 static void bpf_program__exit(struct bpf_program *prog)
803 {
804 	if (!prog)
805 		return;
806 
807 	bpf_program__unload(prog);
808 	zfree(&prog->name);
809 	zfree(&prog->sec_name);
810 	zfree(&prog->insns);
811 	zfree(&prog->reloc_desc);
812 
813 	prog->nr_reloc = 0;
814 	prog->insns_cnt = 0;
815 	prog->sec_idx = -1;
816 }
817 
818 static bool insn_is_subprog_call(const struct bpf_insn *insn)
819 {
820 	return BPF_CLASS(insn->code) == BPF_JMP &&
821 	       BPF_OP(insn->code) == BPF_CALL &&
822 	       BPF_SRC(insn->code) == BPF_K &&
823 	       insn->src_reg == BPF_PSEUDO_CALL &&
824 	       insn->dst_reg == 0 &&
825 	       insn->off == 0;
826 }
827 
828 static bool is_call_insn(const struct bpf_insn *insn)
829 {
830 	return insn->code == (BPF_JMP | BPF_CALL);
831 }
832 
833 static bool insn_is_pseudo_func(struct bpf_insn *insn)
834 {
835 	return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
836 }
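
/* For orientation, the encodings matched by the three helpers above,
 * expressed as a sketch in terms of kernel UAPI constants:
 *
 *	bpf-to-bpf call:  code = BPF_JMP | BPF_CALL, src_reg = BPF_PSEUDO_CALL,
 *	                  imm = insn delta to callee (target = pc + imm + 1)
 *	helper call:      code = BPF_JMP | BPF_CALL, src_reg = 0,
 *	                  imm = helper ID (e.g. BPF_FUNC_map_lookup_elem)
 *	subprog address:  ld_imm64 with src_reg = BPF_PSEUDO_FUNC
 */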
837 
838 static int
839 bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
840 		      const char *name, size_t sec_idx, const char *sec_name,
841 		      size_t sec_off, void *insn_data, size_t insn_data_sz)
842 {
843 	if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
844 		pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
845 			sec_name, name, sec_off, insn_data_sz);
846 		return -EINVAL;
847 	}
848 
849 	memset(prog, 0, sizeof(*prog));
850 	prog->obj = obj;
851 
852 	prog->sec_idx = sec_idx;
853 	prog->sec_insn_off = sec_off / BPF_INSN_SZ;
854 	prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
855 	/* insns_cnt can later be increased by appending used subprograms */
856 	prog->insns_cnt = prog->sec_insn_cnt;
857 
858 	prog->type = BPF_PROG_TYPE_UNSPEC;
859 	prog->fd = -1;
860 	prog->exception_cb_idx = -1;
861 
862 	/* libbpf's convention for SEC("?abc...") is that it's just like
863 	 * SEC("abc...") but the corresponding bpf_program starts out with
864 	 * autoload set to false.
865 	 */
866 	if (sec_name[0] == '?') {
867 		prog->autoload = false;
868 		/* from now on forget there was ? in section name */
869 		sec_name++;
870 	} else {
871 		prog->autoload = true;
872 	}
873 
874 	prog->autoattach = true;
875 
876 	/* inherit object's log_level */
877 	prog->log_level = obj->log_level;
878 
879 	prog->sec_name = strdup(sec_name);
880 	if (!prog->sec_name)
881 		goto errout;
882 
883 	prog->name = strdup(name);
884 	if (!prog->name)
885 		goto errout;
886 
887 	prog->insns = malloc(insn_data_sz);
888 	if (!prog->insns)
889 		goto errout;
890 	memcpy(prog->insns, insn_data, insn_data_sz);
891 
892 	return 0;
893 errout:
894 	pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
895 	bpf_program__exit(prog);
896 	return -ENOMEM;
897 }
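
/* Example of the '?' convention handled above (illustrative BPF-side code):
 *
 *	SEC("?kprobe/do_sys_openat2")
 *	int BPF_KPROBE(opt_probe) { ... }
 *
 * opt_probe is opened with autoload disabled; the user can opt in later
 * with bpf_program__set_autoload(prog, true) before bpf_object__load().
 */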
898 
899 static int
900 bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
901 			 const char *sec_name, int sec_idx)
902 {
903 	Elf_Data *symbols = obj->efile.symbols;
904 	struct bpf_program *prog, *progs;
905 	void *data = sec_data->d_buf;
906 	size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
907 	int nr_progs, err, i;
908 	const char *name;
909 	Elf64_Sym *sym;
910 
911 	progs = obj->programs;
912 	nr_progs = obj->nr_programs;
913 	nr_syms = symbols->d_size / sizeof(Elf64_Sym);
914 
915 	for (i = 0; i < nr_syms; i++) {
916 		sym = elf_sym_by_idx(obj, i);
917 
918 		if (sym->st_shndx != sec_idx)
919 			continue;
920 		if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
921 			continue;
922 
923 		prog_sz = sym->st_size;
924 		sec_off = sym->st_value;
925 
926 		name = elf_sym_str(obj, sym->st_name);
927 		if (!name) {
928 			pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
929 				sec_name, sec_off);
930 			return -LIBBPF_ERRNO__FORMAT;
931 		}
932 
933 		if (sec_off + prog_sz > sec_sz || sec_off + prog_sz < sec_off) {
934 			pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
935 				sec_name, sec_off);
936 			return -LIBBPF_ERRNO__FORMAT;
937 		}
938 
939 		if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
940 			pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
941 			return -ENOTSUP;
942 		}
943 
944 		pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
945 			 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);
946 
947 		progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
948 		if (!progs) {
949 			/*
950 			 * In this case the original obj->programs
951 			 * is still valid, so no special treatment is needed
952 			 * in bpf_object__close().
953 			 */
954 			pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
955 				sec_name, name);
956 			return -ENOMEM;
957 		}
958 		obj->programs = progs;
959 
960 		prog = &progs[nr_progs];
961 
962 		err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
963 					    sec_off, data + sec_off, prog_sz);
964 		if (err)
965 			return err;
966 
967 		if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL)
968 			prog->sym_global = true;
969 
970 		/* if the function is a global/weak symbol but has restricted
971 		 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
972 		 * as static to enable a more permissive BPF verification mode,
973 		 * with more outside context available to the BPF verifier
974 		 */
975 		if (prog->sym_global && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
976 		    || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
977 			prog->mark_btf_static = true;
978 
979 		nr_progs++;
980 		obj->nr_programs = nr_progs;
981 	}
982 
983 	return 0;
984 }
985 
986 static void bpf_object_bswap_progs(struct bpf_object *obj)
987 {
988 	struct bpf_program *prog = obj->programs;
989 	struct bpf_insn *insn;
990 	int p, i;
991 
992 	for (p = 0; p < obj->nr_programs; p++, prog++) {
993 		insn = prog->insns;
994 		for (i = 0; i < prog->insns_cnt; i++, insn++)
995 			bpf_insn_bswap(insn);
996 	}
997 	pr_debug("converted %zu BPF programs to native byte order\n", obj->nr_programs);
998 }
999 
1000 static const struct btf_member *
1001 find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
1002 {
1003 	struct btf_member *m;
1004 	int i;
1005 
1006 	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
1007 		if (btf_member_bit_offset(t, i) == bit_offset)
1008 			return m;
1009 	}
1010 
1011 	return NULL;
1012 }
1013 
1014 static const struct btf_member *
1015 find_member_by_name(const struct btf *btf, const struct btf_type *t,
1016 		    const char *name)
1017 {
1018 	struct btf_member *m;
1019 	int i;
1020 
1021 	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
1022 		if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
1023 			return m;
1024 	}
1025 
1026 	return NULL;
1027 }
1028 
1029 static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
1030 			    __u16 kind, struct btf **res_btf,
1031 			    struct module_btf **res_mod_btf);
1032 
1033 #define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
1034 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
1035 				   const char *name, __u32 kind);
1036 
1037 static int
1038 find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
1039 			   struct module_btf **mod_btf,
1040 			   const struct btf_type **type, __u32 *type_id,
1041 			   const struct btf_type **vtype, __u32 *vtype_id,
1042 			   const struct btf_member **data_member)
1043 {
1044 	const struct btf_type *kern_type, *kern_vtype;
1045 	const struct btf_member *kern_data_member;
1046 	struct btf *btf = NULL;
1047 	__s32 kern_vtype_id, kern_type_id;
1048 	char tname[192], stname[256];
1049 	__u32 i;
1050 
1051 	snprintf(tname, sizeof(tname), "%.*s",
1052 		 (int)bpf_core_essential_name_len(tname_raw), tname_raw);
1053 
1054 	snprintf(stname, sizeof(stname), "%s%s", STRUCT_OPS_VALUE_PREFIX, tname);
1055 
1056 	/* Look for the corresponding "map_value" type that will be used
1057 	 * in map_update(BPF_MAP_TYPE_STRUCT_OPS) first, and figure out
1058 	 * the btf and the mod_btf.
1059 	 * For example, find "struct bpf_struct_ops_tcp_congestion_ops".
1060 	 */
1061 	kern_vtype_id = find_ksym_btf_id(obj, stname, BTF_KIND_STRUCT, &btf, mod_btf);
1062 	if (kern_vtype_id < 0) {
1063 		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", stname);
1064 		return kern_vtype_id;
1065 	}
1066 	kern_vtype = btf__type_by_id(btf, kern_vtype_id);
1067 
1068 	kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
1069 	if (kern_type_id < 0) {
1070 		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", tname);
1071 		return kern_type_id;
1072 	}
1073 	kern_type = btf__type_by_id(btf, kern_type_id);
1074 
1075 	/* Find "struct tcp_congestion_ops" from
1076 	 * struct bpf_struct_ops_tcp_congestion_ops {
1077 	 *	[ ... ]
1078 	 *	struct tcp_congestion_ops data;
1079 	 * }
1080 	 */
1081 	kern_data_member = btf_members(kern_vtype);
1082 	for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
1083 		if (kern_data_member->type == kern_type_id)
1084 			break;
1085 	}
1086 	if (i == btf_vlen(kern_vtype)) {
1087 		pr_warn("struct_ops init_kern: struct %s data is not found in struct %s\n",
1088 			tname, stname);
1089 		return -EINVAL;
1090 	}
1091 
1092 	*type = kern_type;
1093 	*type_id = kern_type_id;
1094 	*vtype = kern_vtype;
1095 	*vtype_id = kern_vtype_id;
1096 	*data_member = kern_data_member;
1097 
1098 	return 0;
1099 }
1100 
1101 static bool bpf_map__is_struct_ops(const struct bpf_map *map)
1102 {
1103 	return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
1104 }
1105 
1106 static bool is_valid_st_ops_program(struct bpf_object *obj,
1107 				    const struct bpf_program *prog)
1108 {
1109 	int i;
1110 
1111 	for (i = 0; i < obj->nr_programs; i++) {
1112 		if (&obj->programs[i] == prog)
1113 			return prog->type == BPF_PROG_TYPE_STRUCT_OPS;
1114 	}
1115 
1116 	return false;
1117 }
1118 
1119 /* For each struct_ops program P, referenced from some struct_ops map M,
1120  * enable P.autoload if there are Ms for which M.autocreate is true,
1121  * disable P.autoload if for all Ms M.autocreate is false.
1122  * Don't change P.autoload for programs that are not referenced from any maps.
1123  */
1124 static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj)
1125 {
1126 	struct bpf_program *prog, *slot_prog;
1127 	struct bpf_map *map;
1128 	int i, j, k, vlen;
1129 
1130 	for (i = 0; i < obj->nr_programs; ++i) {
1131 		int should_load = false;
1132 		int use_cnt = 0;
1133 
1134 		prog = &obj->programs[i];
1135 		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS)
1136 			continue;
1137 
1138 		for (j = 0; j < obj->nr_maps; ++j) {
1139 			const struct btf_type *type;
1140 
1141 			map = &obj->maps[j];
1142 			if (!bpf_map__is_struct_ops(map))
1143 				continue;
1144 
1145 			type = btf__type_by_id(obj->btf, map->st_ops->type_id);
1146 			vlen = btf_vlen(type);
1147 			for (k = 0; k < vlen; ++k) {
1148 				slot_prog = map->st_ops->progs[k];
1149 				if (prog != slot_prog)
1150 					continue;
1151 
1152 				use_cnt++;
1153 				if (map->autocreate)
1154 					should_load = true;
1155 			}
1156 		}
1157 		if (use_cnt)
1158 			prog->autoload = should_load;
1159 	}
1160 
1161 	return 0;
1162 }
1163 
1164 /* Init the map's fields that depend on kern_btf */
1165 static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
1166 {
1167 	const struct btf_member *member, *kern_member, *kern_data_member;
1168 	const struct btf_type *type, *kern_type, *kern_vtype;
1169 	__u32 i, kern_type_id, kern_vtype_id, kern_data_off;
1170 	struct bpf_object *obj = map->obj;
1171 	const struct btf *btf = obj->btf;
1172 	struct bpf_struct_ops *st_ops;
1173 	const struct btf *kern_btf;
1174 	struct module_btf *mod_btf = NULL;
1175 	void *data, *kern_data;
1176 	const char *tname;
1177 	int err;
1178 
1179 	st_ops = map->st_ops;
1180 	type = btf__type_by_id(btf, st_ops->type_id);
1181 	tname = btf__name_by_offset(btf, type->name_off);
1182 	err = find_struct_ops_kern_types(obj, tname, &mod_btf,
1183 					 &kern_type, &kern_type_id,
1184 					 &kern_vtype, &kern_vtype_id,
1185 					 &kern_data_member);
1186 	if (err)
1187 		return err;
1188 
1189 	kern_btf = mod_btf ? mod_btf->btf : obj->btf_vmlinux;
1190 
1191 	pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
1192 		 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);
1193 
1194 	map->mod_btf_fd = mod_btf ? mod_btf->fd : -1;
1195 	map->def.value_size = kern_vtype->size;
1196 	map->btf_vmlinux_value_type_id = kern_vtype_id;
1197 
1198 	st_ops->kern_vdata = calloc(1, kern_vtype->size);
1199 	if (!st_ops->kern_vdata)
1200 		return -ENOMEM;
1201 
1202 	data = st_ops->data;
1203 	kern_data_off = kern_data_member->offset / 8;
1204 	kern_data = st_ops->kern_vdata + kern_data_off;
1205 
1206 	member = btf_members(type);
1207 	for (i = 0; i < btf_vlen(type); i++, member++) {
1208 		const struct btf_type *mtype, *kern_mtype;
1209 		__u32 mtype_id, kern_mtype_id;
1210 		void *mdata, *kern_mdata;
1211 		struct bpf_program *prog;
1212 		__s64 msize, kern_msize;
1213 		__u32 moff, kern_moff;
1214 		__u32 kern_member_idx;
1215 		const char *mname;
1216 
1217 		mname = btf__name_by_offset(btf, member->name_off);
1218 		moff = member->offset / 8;
1219 		mdata = data + moff;
1220 		msize = btf__resolve_size(btf, member->type);
1221 		if (msize < 0) {
1222 			pr_warn("struct_ops init_kern %s: failed to resolve the size of member %s\n",
1223 				map->name, mname);
1224 			return msize;
1225 		}
1226 
1227 		kern_member = find_member_by_name(kern_btf, kern_type, mname);
1228 		if (!kern_member) {
1229 			if (!libbpf_is_mem_zeroed(mdata, msize)) {
1230 				pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
1231 					map->name, mname);
1232 				return -ENOTSUP;
1233 			}
1234 
1235 			if (st_ops->progs[i]) {
1236 				/* If we had a declaratively set struct_ops callback, we need to
1237 				 * force its autoload to false, because it has no chance
1238 				 * of succeeding from the POV of the current struct_ops map.
1239 				 * If this program is still referenced somewhere else, though,
1240 				 * then bpf_object_adjust_struct_ops_autoload() will update its
1241 				 * autoload accordingly.
1242 				 */
1243 				st_ops->progs[i]->autoload = false;
1244 				st_ops->progs[i] = NULL;
1245 			}
1246 
1247 			/* Skip all-zero/NULL fields if they are not present in the kernel BTF */
1248 			pr_info("struct_ops %s: member %s not found in kernel, skipping it as it's set to zero\n",
1249 				map->name, mname);
1250 			continue;
1251 		}
1252 
1253 		kern_member_idx = kern_member - btf_members(kern_type);
1254 		if (btf_member_bitfield_size(type, i) ||
1255 		    btf_member_bitfield_size(kern_type, kern_member_idx)) {
1256 			pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
1257 				map->name, mname);
1258 			return -ENOTSUP;
1259 		}
1260 
1261 		kern_moff = kern_member->offset / 8;
1262 		kern_mdata = kern_data + kern_moff;
1263 
1264 		mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
1265 		kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
1266 						    &kern_mtype_id);
1267 		if (BTF_INFO_KIND(mtype->info) !=
1268 		    BTF_INFO_KIND(kern_mtype->info)) {
1269 			pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
1270 				map->name, mname, BTF_INFO_KIND(mtype->info),
1271 				BTF_INFO_KIND(kern_mtype->info));
1272 			return -ENOTSUP;
1273 		}
1274 
1275 		if (btf_is_ptr(mtype)) {
1276 			prog = *(void **)mdata;
1277 			/* just like for the !kern_member case above, reset a declaratively
1278 			 * set (at compile time) program's autoload to false,
1279 			 * if the user replaced it with another program or NULL
1280 			 */
1281 			if (st_ops->progs[i] && st_ops->progs[i] != prog)
1282 				st_ops->progs[i]->autoload = false;
1283 
1284 			/* Update the value from the shadow type */
1285 			st_ops->progs[i] = prog;
1286 			if (!prog)
1287 				continue;
1288 
1289 			if (!is_valid_st_ops_program(obj, prog)) {
1290 				pr_warn("struct_ops init_kern %s: member %s is not a struct_ops program\n",
1291 					map->name, mname);
1292 				return -ENOTSUP;
1293 			}
1294 
1295 			kern_mtype = skip_mods_and_typedefs(kern_btf,
1296 							    kern_mtype->type,
1297 							    &kern_mtype_id);
1298 
1299 			/* mtype->type must be a func_proto which was
1300 			 * guaranteed in bpf_object__collect_st_ops_relos(),
1301 			 * so only check kern_mtype for func_proto here.
1302 			 */
1303 			if (!btf_is_func_proto(kern_mtype)) {
1304 				pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
1305 					map->name, mname);
1306 				return -ENOTSUP;
1307 			}
1308 
1309 			if (mod_btf)
1310 				prog->attach_btf_obj_fd = mod_btf->fd;
1311 
1312 			/* if we haven't yet processed this BPF program, record proper
1313 			 * attach_btf_id and member_idx
1314 			 */
1315 			if (!prog->attach_btf_id) {
1316 				prog->attach_btf_id = kern_type_id;
1317 				prog->expected_attach_type = kern_member_idx;
1318 			}
1319 
1320 			/* struct_ops BPF prog can be re-used between multiple
1321 			 * .struct_ops & .struct_ops.link as long as it's the
1322 			 * same struct_ops struct definition and the same
1323 			 * function pointer field
1324 			 */
1325 			if (prog->attach_btf_id != kern_type_id) {
1326 				pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: attach_btf_id %u != kern_type_id %u\n",
1327 					map->name, mname, prog->name, prog->sec_name, prog->type,
1328 					prog->attach_btf_id, kern_type_id);
1329 				return -EINVAL;
1330 			}
1331 			if (prog->expected_attach_type != kern_member_idx) {
1332 				pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: expected_attach_type %u != kern_member_idx %u\n",
1333 					map->name, mname, prog->name, prog->sec_name, prog->type,
1334 					prog->expected_attach_type, kern_member_idx);
1335 				return -EINVAL;
1336 			}
1337 
1338 			st_ops->kern_func_off[i] = kern_data_off + kern_moff;
1339 
1340 			pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
1341 				 map->name, mname, prog->name, moff,
1342 				 kern_moff);
1343 
1344 			continue;
1345 		}
1346 
1347 		kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
1348 		if (kern_msize < 0 || msize != kern_msize) {
1349 			pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
1350 				map->name, mname, (ssize_t)msize,
1351 				(ssize_t)kern_msize);
1352 			return -ENOTSUP;
1353 		}
1354 
1355 		pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
1356 			 map->name, mname, (unsigned int)msize,
1357 			 moff, kern_moff);
1358 		memcpy(kern_mdata, mdata, msize);
1359 	}
1360 
1361 	return 0;
1362 }
1363 
1364 static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
1365 {
1366 	struct bpf_map *map;
1367 	size_t i;
1368 	int err;
1369 
1370 	for (i = 0; i < obj->nr_maps; i++) {
1371 		map = &obj->maps[i];
1372 
1373 		if (!bpf_map__is_struct_ops(map))
1374 			continue;
1375 
1376 		if (!map->autocreate)
1377 			continue;
1378 
1379 		err = bpf_map__init_kern_struct_ops(map);
1380 		if (err)
1381 			return err;
1382 	}
1383 
1384 	return 0;
1385 }
1386 
1387 static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
1388 				int shndx, Elf_Data *data)
1389 {
1390 	const struct btf_type *type, *datasec;
1391 	const struct btf_var_secinfo *vsi;
1392 	struct bpf_struct_ops *st_ops;
1393 	const char *tname, *var_name;
1394 	__s32 type_id, datasec_id;
1395 	const struct btf *btf;
1396 	struct bpf_map *map;
1397 	__u32 i;
1398 
1399 	if (shndx == -1)
1400 		return 0;
1401 
1402 	btf = obj->btf;
1403 	datasec_id = btf__find_by_name_kind(btf, sec_name,
1404 					    BTF_KIND_DATASEC);
1405 	if (datasec_id < 0) {
1406 		pr_warn("struct_ops init: DATASEC %s not found\n",
1407 			sec_name);
1408 		return -EINVAL;
1409 	}
1410 
1411 	datasec = btf__type_by_id(btf, datasec_id);
1412 	vsi = btf_var_secinfos(datasec);
1413 	for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
1414 		type = btf__type_by_id(obj->btf, vsi->type);
1415 		var_name = btf__name_by_offset(obj->btf, type->name_off);
1416 
1417 		type_id = btf__resolve_type(obj->btf, vsi->type);
1418 		if (type_id < 0) {
1419 			pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
1420 				vsi->type, sec_name);
1421 			return -EINVAL;
1422 		}
1423 
1424 		type = btf__type_by_id(obj->btf, type_id);
1425 		tname = btf__name_by_offset(obj->btf, type->name_off);
1426 		if (!tname[0]) {
1427 			pr_warn("struct_ops init: anonymous type is not supported\n");
1428 			return -ENOTSUP;
1429 		}
1430 		if (!btf_is_struct(type)) {
1431 			pr_warn("struct_ops init: %s is not a struct\n", tname);
1432 			return -EINVAL;
1433 		}
1434 
1435 		map = bpf_object__add_map(obj);
1436 		if (IS_ERR(map))
1437 			return PTR_ERR(map);
1438 
1439 		map->sec_idx = shndx;
1440 		map->sec_offset = vsi->offset;
1441 		map->name = strdup(var_name);
1442 		if (!map->name)
1443 			return -ENOMEM;
1444 		map->btf_value_type_id = type_id;
1445 
1446 		/* Follow same convention as for programs autoload:
1447 		 * SEC("?.struct_ops") means map is not created by default.
1448 		 */
1449 		if (sec_name[0] == '?') {
1450 			map->autocreate = false;
1451 			/* from now on forget there was ? in section name */
1452 			sec_name++;
1453 		}
1454 
1455 		map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
1456 		map->def.key_size = sizeof(int);
1457 		map->def.value_size = type->size;
1458 		map->def.max_entries = 1;
1459 		map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0;
1460 		map->autoattach = true;
1461 
1462 		map->st_ops = calloc(1, sizeof(*map->st_ops));
1463 		if (!map->st_ops)
1464 			return -ENOMEM;
1465 		st_ops = map->st_ops;
1466 		st_ops->data = malloc(type->size);
1467 		st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
1468 		st_ops->kern_func_off = malloc(btf_vlen(type) *
1469 					       sizeof(*st_ops->kern_func_off));
1470 		if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
1471 			return -ENOMEM;
1472 
1473 		if (vsi->offset + type->size > data->d_size) {
1474 			pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
1475 				var_name, sec_name);
1476 			return -EINVAL;
1477 		}
1478 
1479 		memcpy(st_ops->data,
1480 		       data->d_buf + vsi->offset,
1481 		       type->size);
1482 		st_ops->type_id = type_id;
1483 
1484 		pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
1485 			 tname, type_id, var_name, vsi->offset);
1486 	}
1487 
1488 	return 0;
1489 }
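
/* For reference, the kind of BPF-side definition this function parses,
 * a sketch modeled on the usual tcp_congestion_ops example:
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops dctcp = {
 *		.init		= (void *)dctcp_init,
 *		.ssthresh	= (void *)dctcp_ssthresh,
 *		.name		= "bpf_dctcp",
 *	};
 *
 * Each such variable in the DATASEC becomes one BPF_MAP_TYPE_STRUCT_OPS map
 * named after the variable ('dctcp' here), with value type
 * 'struct tcp_congestion_ops'.
 */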
1490 
1491 static int bpf_object_init_struct_ops(struct bpf_object *obj)
1492 {
1493 	const char *sec_name;
1494 	int sec_idx, err;
1495 
1496 	for (sec_idx = 0; sec_idx < obj->efile.sec_cnt; ++sec_idx) {
1497 		struct elf_sec_desc *desc = &obj->efile.secs[sec_idx];
1498 
1499 		if (desc->sec_type != SEC_ST_OPS)
1500 			continue;
1501 
1502 		sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1503 		if (!sec_name)
1504 			return -LIBBPF_ERRNO__FORMAT;
1505 
1506 		err = init_struct_ops_maps(obj, sec_name, sec_idx, desc->data);
1507 		if (err)
1508 			return err;
1509 	}
1510 
1511 	return 0;
1512 }
1513 
1514 static struct bpf_object *bpf_object__new(const char *path,
1515 					  const void *obj_buf,
1516 					  size_t obj_buf_sz,
1517 					  const char *obj_name)
1518 {
1519 	struct bpf_object *obj;
1520 	char *end;
1521 
1522 	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
1523 	if (!obj) {
1524 		pr_warn("alloc memory failed for %s\n", path);
1525 		return ERR_PTR(-ENOMEM);
1526 	}
1527 
1528 	strcpy(obj->path, path);
1529 	if (obj_name) {
1530 		libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name));
1531 	} else {
1532 		/* Use the GNU version of basename(), which doesn't modify its arg. */
1533 		libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name));
1534 		end = strchr(obj->name, '.');
1535 		if (end)
1536 			*end = 0;
1537 	}
1538 
1539 	obj->efile.fd = -1;
1540 	/*
1541 	 * The caller of this function should also call
1542 	 * bpf_object__elf_finish() after data collection to return
1543 	 * obj_buf to the user. Otherwise, we would have to duplicate the
1544 	 * buffer to avoid the user freeing it before ELF processing finishes.
1545 	 */
1546 	obj->efile.obj_buf = obj_buf;
1547 	obj->efile.obj_buf_sz = obj_buf_sz;
1548 	obj->efile.btf_maps_shndx = -1;
1549 	obj->kconfig_map_idx = -1;
1550 	obj->arena_map_idx = -1;
1551 
1552 	obj->kern_version = get_kernel_version();
1553 	obj->state  = OBJ_OPEN;
1554 
1555 	return obj;
1556 }
1557 
1558 static void bpf_object__elf_finish(struct bpf_object *obj)
1559 {
1560 	if (!obj->efile.elf)
1561 		return;
1562 
1563 	elf_end(obj->efile.elf);
1564 	obj->efile.elf = NULL;
1565 	obj->efile.ehdr = NULL;
1566 	obj->efile.symbols = NULL;
1567 	obj->efile.arena_data = NULL;
1568 
1569 	zfree(&obj->efile.secs);
1570 	obj->efile.sec_cnt = 0;
1571 	zclose(obj->efile.fd);
1572 	obj->efile.obj_buf = NULL;
1573 	obj->efile.obj_buf_sz = 0;
1574 }
1575 
1576 static int bpf_object__elf_init(struct bpf_object *obj)
1577 {
1578 	Elf64_Ehdr *ehdr;
1579 	int err = 0;
1580 	Elf *elf;
1581 
1582 	if (obj->efile.elf) {
1583 		pr_warn("elf: init internal error\n");
1584 		return -LIBBPF_ERRNO__LIBELF;
1585 	}
1586 
1587 	if (obj->efile.obj_buf_sz > 0) {
1588 		/* obj_buf should have been validated by bpf_object__open_mem(). */
1589 		elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
1590 	} else {
1591 		obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC);
1592 		if (obj->efile.fd < 0) {
1593 			err = -errno;
1594 			pr_warn("elf: failed to open %s: %s\n", obj->path, errstr(err));
1595 			return err;
1596 		}
1597 
1598 		elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
1599 	}
1600 
1601 	if (!elf) {
1602 		pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
1603 		err = -LIBBPF_ERRNO__LIBELF;
1604 		goto errout;
1605 	}
1606 
1607 	obj->efile.elf = elf;
1608 
1609 	if (elf_kind(elf) != ELF_K_ELF) {
1610 		err = -LIBBPF_ERRNO__FORMAT;
1611 		pr_warn("elf: '%s' is not a proper ELF object\n", obj->path);
1612 		goto errout;
1613 	}
1614 
1615 	if (gelf_getclass(elf) != ELFCLASS64) {
1616 		err = -LIBBPF_ERRNO__FORMAT;
1617 		pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path);
1618 		goto errout;
1619 	}
1620 
1621 	obj->efile.ehdr = ehdr = elf64_getehdr(elf);
1622 	if (!obj->efile.ehdr) {
1623 		pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
1624 		err = -LIBBPF_ERRNO__FORMAT;
1625 		goto errout;
1626 	}
1627 
1628 	/* Validate ELF object endianness... */
1629 	if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB &&
1630 	    ehdr->e_ident[EI_DATA] != ELFDATA2MSB) {
1631 		err = -LIBBPF_ERRNO__ENDIAN;
1632 		pr_warn("elf: '%s' has unknown byte order\n", obj->path);
1633 		goto errout;
1634 	}
1635 	/* and save after bpf_object_open() frees ELF data */
1636 	obj->byteorder = ehdr->e_ident[EI_DATA];
1637 
1638 	if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
1639 		pr_warn("elf: failed to get section names section index for %s: %s\n",
1640 			obj->path, elf_errmsg(-1));
1641 		err = -LIBBPF_ERRNO__FORMAT;
1642 		goto errout;
1643 	}
1644 
1645 	/* ELF is corrupted/truncated, avoid calling elf_strptr. */
1646 	if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) {
1647 		pr_warn("elf: failed to get section names strings from %s: %s\n",
1648 			obj->path, elf_errmsg(-1));
1649 		err = -LIBBPF_ERRNO__FORMAT;
1650 		goto errout;
1651 	}
1652 
1653 	/* Old LLVM set e_machine to EM_NONE */
1654 	if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) {
1655 		pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
1656 		err = -LIBBPF_ERRNO__FORMAT;
1657 		goto errout;
1658 	}
1659 
1660 	return 0;
1661 errout:
1662 	bpf_object__elf_finish(obj);
1663 	return err;
1664 }
1665 
1666 static bool is_native_endianness(struct bpf_object *obj)
1667 {
1668 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1669 	return obj->byteorder == ELFDATA2LSB;
1670 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1671 	return obj->byteorder == ELFDATA2MSB;
1672 #else
1673 # error "Unrecognized __BYTE_ORDER__"
1674 #endif
1675 }
1676 
1677 static int
1678 bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
1679 {
1680 	if (!data) {
1681 		pr_warn("invalid license section in %s\n", obj->path);
1682 		return -LIBBPF_ERRNO__FORMAT;
1683 	}
1684 	/* libbpf_strlcpy() only copies first N - 1 bytes, so size + 1 won't
1685 	/* libbpf_strlcpy() only copies the first N - 1 bytes, so size + 1
1686 	 * won't go past the allowed ELF data section buffer
1687 	libbpf_strlcpy(obj->license, data, min(size + 1, sizeof(obj->license)));
1688 	pr_debug("license of %s is %s\n", obj->path, obj->license);
1689 	return 0;
1690 }
1691 
1692 static int
1693 bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
1694 {
1695 	__u32 kver;
1696 
1697 	if (!data || size != sizeof(kver)) {
1698 		pr_warn("invalid kver section in %s\n", obj->path);
1699 		return -LIBBPF_ERRNO__FORMAT;
1700 	}
1701 	memcpy(&kver, data, sizeof(kver));
1702 	obj->kern_version = kver;
1703 	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
1704 	return 0;
1705 }
1706 
1707 static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
1708 {
1709 	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
1710 	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
1711 		return true;
1712 	return false;
1713 }
1714 
1715 static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size)
1716 {
1717 	Elf_Data *data;
1718 	Elf_Scn *scn;
1719 
1720 	if (!name)
1721 		return -EINVAL;
1722 
1723 	scn = elf_sec_by_name(obj, name);
1724 	data = elf_sec_data(obj, scn);
1725 	if (data) {
1726 		*size = data->d_size;
1727 		return 0; /* found it */
1728 	}
1729 
1730 	return -ENOENT;
1731 }
1732 
1733 static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *name)
1734 {
1735 	Elf_Data *symbols = obj->efile.symbols;
1736 	const char *sname;
1737 	size_t si;
1738 
1739 	for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
1740 		Elf64_Sym *sym = elf_sym_by_idx(obj, si);
1741 
1742 		if (ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
1743 			continue;
1744 
1745 		if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
1746 		    ELF64_ST_BIND(sym->st_info) != STB_WEAK)
1747 			continue;
1748 
1749 		sname = elf_sym_str(obj, sym->st_name);
1750 		if (!sname) {
1751 			pr_warn("failed to get sym name string for var %s\n", name);
1752 			return ERR_PTR(-EIO);
1753 		}
1754 		if (strcmp(name, sname) == 0)
1755 			return sym;
1756 	}
1757 
1758 	return ERR_PTR(-ENOENT);
1759 }
1760 
1761 #ifndef MFD_CLOEXEC
1762 #define MFD_CLOEXEC 0x0001U
1763 #endif
1764 #ifndef MFD_NOEXEC_SEAL
1765 #define MFD_NOEXEC_SEAL 0x0008U
1766 #endif
1767 
1768 static int create_placeholder_fd(void)
1769 {
1770 	unsigned int flags = MFD_CLOEXEC | MFD_NOEXEC_SEAL;
1771 	const char *name = "libbpf-placeholder-fd";
1772 	int fd;
1773 
1774 	fd = ensure_good_fd(sys_memfd_create(name, flags));
1775 	if (fd >= 0)
1776 		return fd;
1777 	else if (errno != EINVAL)
1778 		return -errno;
1779 
1780 	/* Possibly running on kernel without MFD_NOEXEC_SEAL */
1781 	fd = ensure_good_fd(sys_memfd_create(name, flags & ~MFD_NOEXEC_SEAL));
1782 	if (fd < 0)
1783 		return -errno;
1784 	return fd;
1785 }
1786 
1787 static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
1788 {
1789 	struct bpf_map *map;
1790 	int err;
1791 
1792 	err = libbpf_ensure_mem((void **)&obj->maps, &obj->maps_cap,
1793 				sizeof(*obj->maps), obj->nr_maps + 1);
1794 	if (err)
1795 		return ERR_PTR(err);
1796 
1797 	map = &obj->maps[obj->nr_maps++];
1798 	map->obj = obj;
1799 	/* Preallocate map FD without actually creating BPF map just yet.
1800 	 * These map FD "placeholders" will be reused later without changing
1801 	 * FD value when map is actually created in the kernel.
1802 	 *
1803 	 * This is useful to be able to perform BPF program relocations
1804 	 * without having to create BPF maps before that step. This allows us
1805 	 * to finalize and load BTF very late in BPF object's loading phase,
1806 	 * right before BPF maps have to be created and BPF programs have to
1807 	 * be loaded. By having these map FD placeholders we can perform all
1808 	 * the sanitizations, relocations, and any other adjustments before we
1809 	 * start creating actual BPF kernel objects (BTF, maps, progs).
1810 	 */
1811 	map->fd = create_placeholder_fd();
1812 	if (map->fd < 0)
1813 		return ERR_PTR(map->fd);
1814 	map->inner_map_fd = -1;
1815 	map->autocreate = true;
1816 
1817 	return map;
1818 }
1819 
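/* Size of the memory needed to mmap() an ARRAY map's contents: each
 * value is padded to a multiple of 8 bytes and the total is rounded up
 * to whole pages. E.g., with value_sz = 6, max_entries = 1000, and 4KB
 * pages: roundup(6, 8) * 1000 = 8000 bytes, rounded up to 8192.
 */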
1820 static size_t array_map_mmap_sz(unsigned int value_sz, unsigned int max_entries)
1821 {
1822 	const long page_sz = sysconf(_SC_PAGE_SIZE);
1823 	size_t map_sz;
1824 
1825 	map_sz = (size_t)roundup(value_sz, 8) * max_entries;
1826 	map_sz = roundup(map_sz, page_sz);
1827 	return map_sz;
1828 }
1829 
1830 static size_t bpf_map_mmap_sz(const struct bpf_map *map)
1831 {
1832 	const long page_sz = sysconf(_SC_PAGE_SIZE);
1833 
1834 	switch (map->def.type) {
1835 	case BPF_MAP_TYPE_ARRAY:
1836 		return array_map_mmap_sz(map->def.value_size, map->def.max_entries);
1837 	case BPF_MAP_TYPE_ARENA:
1838 		return page_sz * map->def.max_entries;
1839 	default:
1840 		return 0; /* not supported */
1841 	}
1842 }
1843 
1844 static int bpf_map_mmap_resize(struct bpf_map *map, size_t old_sz, size_t new_sz)
1845 {
1846 	void *mmaped;
1847 
1848 	if (!map->mmaped)
1849 		return -EINVAL;
1850 
1851 	if (old_sz == new_sz)
1852 		return 0;
1853 
1854 	mmaped = mmap(NULL, new_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1855 	if (mmaped == MAP_FAILED)
1856 		return -errno;
1857 
1858 	memcpy(mmaped, map->mmaped, min(old_sz, new_sz));
1859 	munmap(map->mmaped, old_sz);
1860 	map->mmaped = mmaped;
1861 	return 0;
1862 }
1863 
1864 static char *internal_map_name(struct bpf_object *obj, const char *real_name)
1865 {
1866 	char map_name[BPF_OBJ_NAME_LEN], *p;
1867 	int pfx_len, sfx_len = max((size_t)7, strlen(real_name));
1868 
1869 	/* This is one of the more confusing parts of libbpf for various
1870 	 * reasons, some of which are historical. The original idea for naming
1871 	 * internal maps was to include as much of the BPF object name prefix as
1872 	 * possible, so that they can be distinguished from similar internal
1873 	 * maps of a different BPF object.
1874 	 * As an example, let's say we have bpf_object named 'my_object_name'
1875 	 * and internal map corresponding to '.rodata' ELF section. The final
1876 	 * map name advertised to user and to the kernel will be
1877 	 * 'my_objec.rodata', taking first 8 characters of object name and
1878 	 * entire 7 characters of '.rodata'.
1879 	 * Somewhat confusingly, if internal map ELF section name is shorter
1880 	 * than 7 characters, e.g., '.bss', we still reserve 7 characters
1881 	 * for the suffix, even though we only have 4 actual characters, and
1882 	 * resulting map will be called 'my_objec.bss', not even using all 15
1883 	 * characters allowed by the kernel. Oh well, at least the truncated
1884 	 * object name is somewhat consistent in this case. But if the map
1885 	 * name is '.kconfig', we'll still have entirety of '.kconfig' added
1886 	 * (8 chars) and thus will be left with only first 7 characters of the
1887 	 * object name ('my_obje'). Happy guessing, user, that the final map
1888 	 * name will be "my_obje.kconfig".
1889 	 * Now, with libbpf starting to support arbitrarily named .rodata.*
1890 	 * and .data.* data sections, it's possible that ELF section name is
1891 	 * longer than allowed 15 chars, so we now need to be careful to take
1892 	 * only up to 15 first characters of ELF name, taking no BPF object
1893 	 * name characters at all. So '.rodata.abracadabra' will result in
1894 	 * '.rodata.abracad' kernel and user-visible name.
1895 	 * We need to keep this convoluted logic intact for .data, .bss and
1896 	 * .rodata maps, but for new custom .data.custom and .rodata.custom
1897 	 * maps we use their ELF names as is, not prepending bpf_object name
1898 	 * in front. We still need to truncate them to 15 characters for the
1899 	 * kernel. Full name can be recovered for such maps by using DATASEC
1900 	 * BTF type associated with such map's value type, though.
1901 	 */
1902 	if (sfx_len >= BPF_OBJ_NAME_LEN)
1903 		sfx_len = BPF_OBJ_NAME_LEN - 1;
1904 
1905 	/* if there are two or more dots in map name, it's a custom dot map */
1906 	if (strchr(real_name + 1, '.') != NULL)
1907 		pfx_len = 0;
1908 	else
1909 		pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name));
1910 
1911 	snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
1912 		 sfx_len, real_name);
1913 
1914 	/* sanitize map name to characters allowed by kernel */
1915 	for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
1916 		if (!isalnum(*p) && *p != '_' && *p != '.')
1917 			*p = '_';
1918 
1919 	return strdup(map_name);
1920 }
1921 
1922 static int
1923 map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map);
1924 
1925 /* Internal BPF map is mmap()'able only if at least one of corresponding
1926  * DATASEC's VARs are to be exposed through BPF skeleton. I.e., it's a GLOBAL
1927  * variable and it's not marked as __hidden (which turns it into, effectively,
1928  * a STATIC variable).
1929  */
1930 static bool map_is_mmapable(struct bpf_object *obj, struct bpf_map *map)
1931 {
1932 	const struct btf_type *t, *vt;
1933 	struct btf_var_secinfo *vsi;
1934 	int i, n;
1935 
1936 	if (!map->btf_value_type_id)
1937 		return false;
1938 
1939 	t = btf__type_by_id(obj->btf, map->btf_value_type_id);
1940 	if (!btf_is_datasec(t))
1941 		return false;
1942 
1943 	vsi = btf_var_secinfos(t);
1944 	for (i = 0, n = btf_vlen(t); i < n; i++, vsi++) {
1945 		vt = btf__type_by_id(obj->btf, vsi->type);
1946 		if (!btf_is_var(vt))
1947 			continue;
1948 
1949 		if (btf_var(vt)->linkage != BTF_VAR_STATIC)
1950 			return true;
1951 	}
1952 
1953 	return false;
1954 }
1955 
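/* Set up a libbpf-internal map backing a global data section (.data*,
 * .rodata*, .bss, .kconfig): a single-entry ARRAY map whose value spans
 * the entire section. Section contents are staged in an anonymous
 * mmap()'ed buffer until the kernel map is actually created.
 */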
1956 static int
1957 bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
1958 			      const char *real_name, int sec_idx, void *data, size_t data_sz)
1959 {
1960 	struct bpf_map_def *def;
1961 	struct bpf_map *map;
1962 	size_t mmap_sz;
1963 	int err;
1964 
1965 	map = bpf_object__add_map(obj);
1966 	if (IS_ERR(map))
1967 		return PTR_ERR(map);
1968 
1969 	map->libbpf_type = type;
1970 	map->sec_idx = sec_idx;
1971 	map->sec_offset = 0;
1972 	map->real_name = strdup(real_name);
1973 	map->name = internal_map_name(obj, real_name);
1974 	if (!map->real_name || !map->name) {
1975 		zfree(&map->real_name);
1976 		zfree(&map->name);
1977 		return -ENOMEM;
1978 	}
1979 
1980 	def = &map->def;
1981 	def->type = BPF_MAP_TYPE_ARRAY;
1982 	def->key_size = sizeof(int);
1983 	def->value_size = data_sz;
1984 	def->max_entries = 1;
1985 	def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
1986 		? BPF_F_RDONLY_PROG : 0;
1987 
1988 	/* failures are fine: sections like .rodata.str1.1 have no BTF describing them */
1989 	(void) map_fill_btf_type_info(obj, map);
1990 
1991 	if (map_is_mmapable(obj, map))
1992 		def->map_flags |= BPF_F_MMAPABLE;
1993 
1994 	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
1995 		 map->name, map->sec_idx, map->sec_offset, def->map_flags);
1996 
1997 	mmap_sz = bpf_map_mmap_sz(map);
1998 	map->mmaped = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
1999 			   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2000 	if (map->mmaped == MAP_FAILED) {
2001 		err = -errno;
2002 		map->mmaped = NULL;
2003 		pr_warn("failed to alloc map '%s' content buffer: %s\n", map->name, errstr(err));
2004 		zfree(&map->real_name);
2005 		zfree(&map->name);
2006 		return err;
2007 	}
2008 
2009 	if (data)
2010 		memcpy(map->mmaped, data, data_sz);
2011 
2012 	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
2013 	return 0;
2014 }
2015 
2016 static int bpf_object__init_global_data_maps(struct bpf_object *obj)
2017 {
2018 	struct elf_sec_desc *sec_desc;
2019 	const char *sec_name;
2020 	int err = 0, sec_idx;
2021 
2022 	/*
2023 	 * Populate obj->maps with libbpf internal maps.
2024 	 */
2025 	for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) {
2026 		sec_desc = &obj->efile.secs[sec_idx];
2027 
2028 		/* Skip recognized sections with size 0. */
2029 		if (!sec_desc->data || sec_desc->data->d_size == 0)
2030 			continue;
2031 
2032 		switch (sec_desc->sec_type) {
2033 		case SEC_DATA:
2034 			sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
2035 			err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
2036 							    sec_name, sec_idx,
2037 							    sec_desc->data->d_buf,
2038 							    sec_desc->data->d_size);
2039 			break;
2040 		case SEC_RODATA:
2041 			obj->has_rodata = true;
2042 			sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
2043 			err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
2044 							    sec_name, sec_idx,
2045 							    sec_desc->data->d_buf,
2046 							    sec_desc->data->d_size);
2047 			break;
2048 		case SEC_BSS:
2049 			sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
2050 			err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
2051 							    sec_name, sec_idx,
2052 							    NULL,
2053 							    sec_desc->data->d_size);
2054 			break;
2055 		default:
2056 			/* skip */
2057 			break;
2058 		}
2059 		if (err)
2060 			return err;
2061 	}
2062 	return 0;
2063 }
2064 
2065 
2066 static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
2067 					       const void *name)
2068 {
2069 	int i;
2070 
2071 	for (i = 0; i < obj->nr_extern; i++) {
2072 		if (strcmp(obj->externs[i].name, name) == 0)
2073 			return &obj->externs[i];
2074 	}
2075 	return NULL;
2076 }
2077 
2078 static struct extern_desc *find_extern_by_name_with_len(const struct bpf_object *obj,
2079 							const void *name, int len)
2080 {
2081 	const char *ext_name;
2082 	int i;
2083 
2084 	for (i = 0; i < obj->nr_extern; i++) {
2085 		ext_name = obj->externs[i].name;
2086 		if (strlen(ext_name) == len && strncmp(ext_name, name, len) == 0)
2087 			return &obj->externs[i];
2088 	}
2089 	return NULL;
2090 }
2091 
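/* Set a bool/tristate/char kcfg extern from a one-character Kconfig
 * value. E.g., CONFIG_MODULES=y sets a bool extern to true, while 'm'
 * is only meaningful for tristate and char externs.
 */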
2092 static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
2093 			      char value)
2094 {
2095 	switch (ext->kcfg.type) {
2096 	case KCFG_BOOL:
2097 		if (value == 'm') {
2098 			pr_warn("extern (kcfg) '%s': value '%c' implies tristate or char type\n",
2099 				ext->name, value);
2100 			return -EINVAL;
2101 		}
2102 		*(bool *)ext_val = value == 'y';
2103 		break;
2104 	case KCFG_TRISTATE:
2105 		if (value == 'y')
2106 			*(enum libbpf_tristate *)ext_val = TRI_YES;
2107 		else if (value == 'm')
2108 			*(enum libbpf_tristate *)ext_val = TRI_MODULE;
2109 		else /* value == 'n' */
2110 			*(enum libbpf_tristate *)ext_val = TRI_NO;
2111 		break;
2112 	case KCFG_CHAR:
2113 		*(char *)ext_val = value;
2114 		break;
2115 	case KCFG_UNKNOWN:
2116 	case KCFG_INT:
2117 	case KCFG_CHAR_ARR:
2118 	default:
2119 		pr_warn("extern (kcfg) '%s': value '%c' implies bool, tristate, or char type\n",
2120 			ext->name, value);
2121 		return -EINVAL;
2122 	}
2123 	ext->is_set = true;
2124 	return 0;
2125 }
2126 
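/* Set a char-array kcfg extern from a quoted Kconfig string. E.g.,
 * CONFIG_DEFAULT_HOSTNAME="(none)" stores "(none)" with the quotes
 * stripped; overlong values are truncated to leave room for the
 * terminating NUL byte.
 */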
2127 static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
2128 			      const char *value)
2129 {
2130 	size_t len;
2131 
2132 	if (ext->kcfg.type != KCFG_CHAR_ARR) {
2133 		pr_warn("extern (kcfg) '%s': value '%s' implies char array type\n",
2134 			ext->name, value);
2135 		return -EINVAL;
2136 	}
2137 
2138 	len = strlen(value);
2139 	if (len < 2 || value[len - 1] != '"') {
2140 		pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
2141 			ext->name, value);
2142 		return -EINVAL;
2143 	}
2144 
2145 	/* strip quotes */
2146 	len -= 2;
2147 	if (len >= ext->kcfg.sz) {
2148 		pr_warn("extern (kcfg) '%s': long string '%s' (%zu bytes) truncated to %d bytes\n",
2149 			ext->name, value, len, ext->kcfg.sz - 1);
2150 		len = ext->kcfg.sz - 1;
2151 	}
2152 	memcpy(ext_val, value + 1, len);
2153 	ext_val[len] = '\0';
2154 	ext->is_set = true;
2155 	return 0;
2156 }
2157 
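/* Parse an entire string as an unsigned 64-bit integer. Base 0 lets
 * strtoull() auto-detect the radix, so "42", "0x2a", and "052" all
 * parse to 42; trailing garbage makes the parse fail with -EINVAL.
 */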
2158 static int parse_u64(const char *value, __u64 *res)
2159 {
2160 	char *value_end;
2161 	int err;
2162 
2163 	errno = 0;
2164 	*res = strtoull(value, &value_end, 0);
2165 	if (errno) {
2166 		err = -errno;
2167 		pr_warn("failed to parse '%s': %s\n", value, errstr(err));
2168 		return err;
2169 	}
2170 	if (*value_end) {
2171 		pr_warn("failed to parse '%s' as integer completely\n", value);
2172 		return -EINVAL;
2173 	}
2174 	return 0;
2175 }
2176 
2177 static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
2178 {
2179 	int bit_sz = ext->kcfg.sz * 8;
2180 
2181 	if (ext->kcfg.sz == 8)
2182 		return true;
2183 
2184 	/* Validate that the value stored in u64 fits in an integer of
2185 	 * `ext->kcfg.sz` bytes without any loss of information. If the target integer
2186 	 * is signed, we rely on the following limits of integer type of
2187 	 * Y bits and subsequent transformation:
2188 	 *
2189 	 *     -2^(Y-1) <= X           <= 2^(Y-1) - 1
2190 	 *            0 <= X + 2^(Y-1) <= 2^Y - 1
2191 	 *            0 <= X + 2^(Y-1) <  2^Y
2192 	 *
2193 	 *  For unsigned target integer, check that all the (64 - Y) bits are
2194 	 *  zero.
2195 	 */
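	/* E.g., for a signed 1-byte target (bit_sz = 8): v = (__u64)-128
	 * wraps to 0 after adding 2^7, and 0 < 2^8, so -128 fits, while
	 * v = 128 yields 128 + 2^7 = 256, which doesn't.
	 */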
2196 	if (ext->kcfg.is_signed)
2197 		return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
2198 	else
2199 		return (v >> bit_sz) == 0;
2200 }
2201 
2202 static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
2203 			      __u64 value)
2204 {
2205 	if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR &&
2206 	    ext->kcfg.type != KCFG_BOOL) {
2207 		pr_warn("extern (kcfg) '%s': value '%llu' implies integer, char, or boolean type\n",
2208 			ext->name, (unsigned long long)value);
2209 		return -EINVAL;
2210 	}
2211 	if (ext->kcfg.type == KCFG_BOOL && value > 1) {
2212 		pr_warn("extern (kcfg) '%s': value '%llu' isn't boolean compatible\n",
2213 			ext->name, (unsigned long long)value);
2214 		return -EINVAL;
2216 	}
2217 	if (!is_kcfg_value_in_range(ext, value)) {
2218 		pr_warn("extern (kcfg) '%s': value '%llu' doesn't fit in %d bytes\n",
2219 			ext->name, (unsigned long long)value, ext->kcfg.sz);
2220 		return -ERANGE;
2221 	}
2222 	switch (ext->kcfg.sz) {
2223 	case 1:
2224 		*(__u8 *)ext_val = value;
2225 		break;
2226 	case 2:
2227 		*(__u16 *)ext_val = value;
2228 		break;
2229 	case 4:
2230 		*(__u32 *)ext_val = value;
2231 		break;
2232 	case 8:
2233 		*(__u64 *)ext_val = value;
2234 		break;
2235 	default:
2236 		return -EINVAL;
2237 	}
2238 	ext->is_set = true;
2239 	return 0;
2240 }
2241 
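/* Process one line of Kconfig data, e.g. "CONFIG_BPF_JIT=y" or
 * CONFIG_DEFAULT_HOSTNAME="(none)", and assign the value to the
 * matching kcfg extern, if one exists and wasn't already set.
 */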
2242 static int bpf_object__process_kconfig_line(struct bpf_object *obj,
2243 					    char *buf, void *data)
2244 {
2245 	struct extern_desc *ext;
2246 	char *sep, *value;
2247 	int len, err = 0;
2248 	void *ext_val;
2249 	__u64 num;
2250 
2251 	if (!str_has_pfx(buf, "CONFIG_"))
2252 		return 0;
2253 
2254 	sep = strchr(buf, '=');
2255 	if (!sep) {
2256 		pr_warn("failed to parse '%s': no separator\n", buf);
2257 		return -EINVAL;
2258 	}
2259 
2260 	/* Trim ending '\n' */
2261 	len = strlen(buf);
2262 	if (buf[len - 1] == '\n')
2263 		buf[len - 1] = '\0';
2264 	/* Split on '=' and ensure that a value is present. */
2265 	*sep = '\0';
2266 	if (!sep[1]) {
2267 		*sep = '=';
2268 		pr_warn("failed to parse '%s': no value\n", buf);
2269 		return -EINVAL;
2270 	}
2271 
2272 	ext = find_extern_by_name(obj, buf);
2273 	if (!ext || ext->is_set)
2274 		return 0;
2275 
2276 	ext_val = data + ext->kcfg.data_off;
2277 	value = sep + 1;
2278 
2279 	switch (*value) {
2280 	case 'y': case 'n': case 'm':
2281 		err = set_kcfg_value_tri(ext, ext_val, *value);
2282 		break;
2283 	case '"':
2284 		err = set_kcfg_value_str(ext, ext_val, value);
2285 		break;
2286 	default:
2287 		/* assume integer */
2288 		err = parse_u64(value, &num);
2289 		if (err) {
2290 			pr_warn("extern (kcfg) '%s': value '%s' isn't a valid integer\n", ext->name, value);
2291 			return err;
2292 		}
2293 		if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
2294 			pr_warn("extern (kcfg) '%s': value '%s' implies integer type\n", ext->name, value);
2295 			return -EINVAL;
2296 		}
2297 		err = set_kcfg_value_num(ext, ext_val, num);
2298 		break;
2299 	}
2300 	if (err)
2301 		return err;
2302 	pr_debug("extern (kcfg) '%s': set to %s\n", ext->name, value);
2303 	return 0;
2304 }
2305 
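/* Fill in kcfg extern values from the running kernel's configuration,
 * trying /boot/config-$(uname -r) first and falling back to
 * /proc/config.gz; zlib transparently handles both compressed and
 * plain-text files.
 */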
2306 static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
2307 {
2308 	char buf[PATH_MAX];
2309 	struct utsname uts;
2310 	int len, err = 0;
2311 	gzFile file;
2312 
2313 	uname(&uts);
2314 	len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
2315 	if (len < 0)
2316 		return -EINVAL;
2317 	else if (len >= PATH_MAX)
2318 		return -ENAMETOOLONG;
2319 
2320 	/* gzopen also accepts uncompressed files. */
2321 	file = gzopen(buf, "re");
2322 	if (!file)
2323 		file = gzopen("/proc/config.gz", "re");
2324 
2325 	if (!file) {
2326 		pr_warn("failed to open system Kconfig\n");
2327 		return -ENOENT;
2328 	}
2329 
2330 	while (gzgets(file, buf, sizeof(buf))) {
2331 		err = bpf_object__process_kconfig_line(obj, buf, data);
2332 		if (err) {
2333 			pr_warn("error parsing system Kconfig line '%s': %s\n",
2334 				buf, errstr(err));
2335 			goto out;
2336 		}
2337 	}
2338 
2339 out:
2340 	gzclose(file);
2341 	return err;
2342 }
2343 
2344 static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
2345 					const char *config, void *data)
2346 {
2347 	char buf[PATH_MAX];
2348 	int err = 0;
2349 	FILE *file;
2350 
2351 	file = fmemopen((void *)config, strlen(config), "r");
2352 	if (!file) {
2353 		err = -errno;
2354 		pr_warn("failed to open in-memory Kconfig: %s\n", errstr(err));
2355 		return err;
2356 	}
2357 
2358 	while (fgets(buf, sizeof(buf), file)) {
2359 		err = bpf_object__process_kconfig_line(obj, buf, data);
2360 		if (err) {
2361 			pr_warn("error parsing in-memory Kconfig line '%s': %s\n",
2362 				buf, errstr(err));
2363 			break;
2364 		}
2365 	}
2366 
2367 	fclose(file);
2368 	return err;
2369 }
2370 
2371 static int bpf_object__init_kconfig_map(struct bpf_object *obj)
2372 {
2373 	struct extern_desc *last_ext = NULL, *ext;
2374 	size_t map_sz;
2375 	int i, err;
2376 
2377 	for (i = 0; i < obj->nr_extern; i++) {
2378 		ext = &obj->externs[i];
2379 		if (ext->type == EXT_KCFG)
2380 			last_ext = ext;
2381 	}
2382 
2383 	if (!last_ext)
2384 		return 0;
2385 
2386 	map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
2387 	err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
2388 					    ".kconfig", obj->efile.symbols_shndx,
2389 					    NULL, map_sz);
2390 	if (err)
2391 		return err;
2392 
2393 	obj->kconfig_map_idx = obj->nr_maps - 1;
2394 
2395 	return 0;
2396 }
2397 
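/* Resolve a BTF type ID to its underlying type, skipping typedefs and
 * const/volatile/restrict modifiers. E.g., for an illustrative
 * "typedef const volatile int cvi_t;" this resolves cvi_t all the way
 * down to the plain INT. If res_id is non-NULL, it receives the ID of
 * the returned type.
 */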
2398 const struct btf_type *
2399 skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
2400 {
2401 	const struct btf_type *t = btf__type_by_id(btf, id);
2402 
2403 	if (res_id)
2404 		*res_id = id;
2405 
2406 	while (btf_is_mod(t) || btf_is_typedef(t)) {
2407 		if (res_id)
2408 			*res_id = t->type;
2409 		t = btf__type_by_id(btf, t->type);
2410 	}
2411 
2412 	return t;
2413 }
2414 
2415 static const struct btf_type *
2416 resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
2417 {
2418 	const struct btf_type *t;
2419 
2420 	t = skip_mods_and_typedefs(btf, id, NULL);
2421 	if (!btf_is_ptr(t))
2422 		return NULL;
2423 
2424 	t = skip_mods_and_typedefs(btf, t->type, res_id);
2425 
2426 	return btf_is_func_proto(t) ? t : NULL;
2427 }
2428 
2429 static const char *__btf_kind_str(__u16 kind)
2430 {
2431 	switch (kind) {
2432 	case BTF_KIND_UNKN: return "void";
2433 	case BTF_KIND_INT: return "int";
2434 	case BTF_KIND_PTR: return "ptr";
2435 	case BTF_KIND_ARRAY: return "array";
2436 	case BTF_KIND_STRUCT: return "struct";
2437 	case BTF_KIND_UNION: return "union";
2438 	case BTF_KIND_ENUM: return "enum";
2439 	case BTF_KIND_FWD: return "fwd";
2440 	case BTF_KIND_TYPEDEF: return "typedef";
2441 	case BTF_KIND_VOLATILE: return "volatile";
2442 	case BTF_KIND_CONST: return "const";
2443 	case BTF_KIND_RESTRICT: return "restrict";
2444 	case BTF_KIND_FUNC: return "func";
2445 	case BTF_KIND_FUNC_PROTO: return "func_proto";
2446 	case BTF_KIND_VAR: return "var";
2447 	case BTF_KIND_DATASEC: return "datasec";
2448 	case BTF_KIND_FLOAT: return "float";
2449 	case BTF_KIND_DECL_TAG: return "decl_tag";
2450 	case BTF_KIND_TYPE_TAG: return "type_tag";
2451 	case BTF_KIND_ENUM64: return "enum64";
2452 	default: return "unknown";
2453 	}
2454 }
2455 
2456 const char *btf_kind_str(const struct btf_type *t)
2457 {
2458 	return __btf_kind_str(btf_kind(t));
2459 }
2460 
2461 /*
2462  * Fetch integer attribute of BTF map definition. Such attributes are
2463  * represented using a pointer to an array, in which dimensionality of array
2464  * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
2465  * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
2466  * type definition, while using only sizeof(void *) space in ELF data section.
2467  */
2468 static bool get_map_field_int(const char *map_name, const struct btf *btf,
2469 			      const struct btf_member *m, __u32 *res)
2470 {
2471 	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
2472 	const char *name = btf__name_by_offset(btf, m->name_off);
2473 	const struct btf_array *arr_info;
2474 	const struct btf_type *arr_t;
2475 
2476 	if (!btf_is_ptr(t)) {
2477 		pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
2478 			map_name, name, btf_kind_str(t));
2479 		return false;
2480 	}
2481 
2482 	arr_t = btf__type_by_id(btf, t->type);
2483 	if (!arr_t) {
2484 		pr_warn("map '%s': attr '%s': type [%u] not found.\n",
2485 			map_name, name, t->type);
2486 		return false;
2487 	}
2488 	if (!btf_is_array(arr_t)) {
2489 		pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
2490 			map_name, name, btf_kind_str(arr_t));
2491 		return false;
2492 	}
2493 	arr_info = btf_array(arr_t);
2494 	*res = arr_info->nelems;
2495 	return true;
2496 }
2497 
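/* Fetch a 64-bit attribute of a BTF map definition. Values that don't
 * fit in an array dimension are encoded as a single-value ENUM or
 * ENUM64 (this is what the __ulong() macro emits); 32-bit
 * pointer-to-array encodings are still accepted and widened.
 */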
2498 static bool get_map_field_long(const char *map_name, const struct btf *btf,
2499 			       const struct btf_member *m, __u64 *res)
2500 {
2501 	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
2502 	const char *name = btf__name_by_offset(btf, m->name_off);
2503 
2504 	if (btf_is_ptr(t)) {
2505 		__u32 res32;
2506 		bool ret;
2507 
2508 		ret = get_map_field_int(map_name, btf, m, &res32);
2509 		if (ret)
2510 			*res = (__u64)res32;
2511 		return ret;
2512 	}
2513 
2514 	if (!btf_is_enum(t) && !btf_is_enum64(t)) {
2515 		pr_warn("map '%s': attr '%s': expected ENUM or ENUM64, got %s.\n",
2516 			map_name, name, btf_kind_str(t));
2517 		return false;
2518 	}
2519 
2520 	if (btf_vlen(t) != 1) {
2521 		pr_warn("map '%s': attr '%s': invalid __ulong\n",
2522 			map_name, name);
2523 		return false;
2524 	}
2525 
2526 	if (btf_is_enum(t)) {
2527 		const struct btf_enum *e = btf_enum(t);
2528 
2529 		*res = e->val;
2530 	} else {
2531 		const struct btf_enum64 *e = btf_enum64(t);
2532 
2533 		*res = btf_enum64_value(e);
2534 	}
2535 	return true;
2536 }
2537 
2538 static int pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name)
2539 {
2540 	int len;
2541 
2542 	len = snprintf(buf, buf_sz, "%s/%s", path, name);
2543 	if (len < 0)
2544 		return -EINVAL;
2545 	if (len >= buf_sz)
2546 		return -ENAMETOOLONG;
2547 
2548 	return 0;
2549 }
2550 
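/* Derive the default pin path for a map by appending the map name to
 * the pin root, e.g. "/sys/fs/bpf/my_map" for a map named "my_map"
 * with no custom pin_root_path.
 */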
2551 static int build_map_pin_path(struct bpf_map *map, const char *path)
2552 {
2553 	char buf[PATH_MAX];
2554 	int err;
2555 
2556 	if (!path)
2557 		path = BPF_FS_DEFAULT_PATH;
2558 
2559 	err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
2560 	if (err)
2561 		return err;
2562 
2563 	return bpf_map__set_pin_path(map, buf);
2564 }
2565 
2566 /* should match definition in bpf_helpers.h */
2567 enum libbpf_pin_type {
2568 	LIBBPF_PIN_NONE,
2569 	/* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
2570 	LIBBPF_PIN_BY_NAME,
2571 };
2572 
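/* Parse a BTF-described map definition into map_def. In BPF program
 * source such definitions are typically written with the __uint()/
 * __type() convenience macros from bpf_helpers.h, e.g. (names here are
 * illustrative):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, __u32);
 *		__type(value, struct my_value);
 *	} my_map SEC(".maps");
 *
 * Each recognized field sets the corresponding MAP_DEF_* bit in
 * map_def->parts. inner_def, if non-NULL, receives the parsed
 * definition of a map-in-map's inner map.
 */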
2573 int parse_btf_map_def(const char *map_name, struct btf *btf,
2574 		      const struct btf_type *def_t, bool strict,
2575 		      struct btf_map_def *map_def, struct btf_map_def *inner_def)
2576 {
2577 	const struct btf_type *t;
2578 	const struct btf_member *m;
2579 	bool is_inner = inner_def == NULL;
2580 	int vlen, i;
2581 
2582 	vlen = btf_vlen(def_t);
2583 	m = btf_members(def_t);
2584 	for (i = 0; i < vlen; i++, m++) {
2585 		const char *name = btf__name_by_offset(btf, m->name_off);
2586 
2587 		if (!name) {
2588 			pr_warn("map '%s': invalid field #%d.\n", map_name, i);
2589 			return -EINVAL;
2590 		}
2591 		if (strcmp(name, "type") == 0) {
2592 			if (!get_map_field_int(map_name, btf, m, &map_def->map_type))
2593 				return -EINVAL;
2594 			map_def->parts |= MAP_DEF_MAP_TYPE;
2595 		} else if (strcmp(name, "max_entries") == 0) {
2596 			if (!get_map_field_int(map_name, btf, m, &map_def->max_entries))
2597 				return -EINVAL;
2598 			map_def->parts |= MAP_DEF_MAX_ENTRIES;
2599 		} else if (strcmp(name, "map_flags") == 0) {
2600 			if (!get_map_field_int(map_name, btf, m, &map_def->map_flags))
2601 				return -EINVAL;
2602 			map_def->parts |= MAP_DEF_MAP_FLAGS;
2603 		} else if (strcmp(name, "numa_node") == 0) {
2604 			if (!get_map_field_int(map_name, btf, m, &map_def->numa_node))
2605 				return -EINVAL;
2606 			map_def->parts |= MAP_DEF_NUMA_NODE;
2607 		} else if (strcmp(name, "key_size") == 0) {
2608 			__u32 sz;
2609 
2610 			if (!get_map_field_int(map_name, btf, m, &sz))
2611 				return -EINVAL;
2612 			if (map_def->key_size && map_def->key_size != sz) {
2613 				pr_warn("map '%s': conflicting key size %u != %u.\n",
2614 					map_name, map_def->key_size, sz);
2615 				return -EINVAL;
2616 			}
2617 			map_def->key_size = sz;
2618 			map_def->parts |= MAP_DEF_KEY_SIZE;
2619 		} else if (strcmp(name, "key") == 0) {
2620 			__s64 sz;
2621 
2622 			t = btf__type_by_id(btf, m->type);
2623 			if (!t) {
2624 				pr_warn("map '%s': key type [%d] not found.\n",
2625 					map_name, m->type);
2626 				return -EINVAL;
2627 			}
2628 			if (!btf_is_ptr(t)) {
2629 				pr_warn("map '%s': key spec is not PTR: %s.\n",
2630 					map_name, btf_kind_str(t));
2631 				return -EINVAL;
2632 			}
2633 			sz = btf__resolve_size(btf, t->type);
2634 			if (sz < 0) {
2635 				pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
2636 					map_name, t->type, (ssize_t)sz);
2637 				return sz;
2638 			}
2639 			if (map_def->key_size && map_def->key_size != sz) {
2640 				pr_warn("map '%s': conflicting key size %u != %zd.\n",
2641 					map_name, map_def->key_size, (ssize_t)sz);
2642 				return -EINVAL;
2643 			}
2644 			map_def->key_size = sz;
2645 			map_def->key_type_id = t->type;
2646 			map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE;
2647 		} else if (strcmp(name, "value_size") == 0) {
2648 			__u32 sz;
2649 
2650 			if (!get_map_field_int(map_name, btf, m, &sz))
2651 				return -EINVAL;
2652 			if (map_def->value_size && map_def->value_size != sz) {
2653 				pr_warn("map '%s': conflicting value size %u != %u.\n",
2654 					map_name, map_def->value_size, sz);
2655 				return -EINVAL;
2656 			}
2657 			map_def->value_size = sz;
2658 			map_def->parts |= MAP_DEF_VALUE_SIZE;
2659 		} else if (strcmp(name, "value") == 0) {
2660 			__s64 sz;
2661 
2662 			t = btf__type_by_id(btf, m->type);
2663 			if (!t) {
2664 				pr_warn("map '%s': value type [%d] not found.\n",
2665 					map_name, m->type);
2666 				return -EINVAL;
2667 			}
2668 			if (!btf_is_ptr(t)) {
2669 				pr_warn("map '%s': value spec is not PTR: %s.\n",
2670 					map_name, btf_kind_str(t));
2671 				return -EINVAL;
2672 			}
2673 			sz = btf__resolve_size(btf, t->type);
2674 			if (sz < 0) {
2675 				pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
2676 					map_name, t->type, (ssize_t)sz);
2677 				return sz;
2678 			}
2679 			if (map_def->value_size && map_def->value_size != sz) {
2680 				pr_warn("map '%s': conflicting value size %u != %zd.\n",
2681 					map_name, map_def->value_size, (ssize_t)sz);
2682 				return -EINVAL;
2683 			}
2684 			map_def->value_size = sz;
2685 			map_def->value_type_id = t->type;
2686 			map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE;
2687 		} else if (strcmp(name, "values") == 0) {
2689 			bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type);
2690 			bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY;
2691 			const char *desc = is_map_in_map ? "map-in-map inner" : "prog-array value";
2692 			char inner_map_name[128];
2693 			int err;
2694 
2695 			if (is_inner) {
2696 				pr_warn("map '%s': multi-level inner maps not supported.\n",
2697 					map_name);
2698 				return -ENOTSUP;
2699 			}
2700 			if (i != vlen - 1) {
2701 				pr_warn("map '%s': '%s' member should be last.\n",
2702 					map_name, name);
2703 				return -EINVAL;
2704 			}
2705 			if (!is_map_in_map && !is_prog_array) {
2706 				pr_warn("map '%s': should be map-in-map or prog-array.\n",
2707 					map_name);
2708 				return -ENOTSUP;
2709 			}
2710 			if (map_def->value_size && map_def->value_size != 4) {
2711 				pr_warn("map '%s': conflicting value size %u != 4.\n",
2712 					map_name, map_def->value_size);
2713 				return -EINVAL;
2714 			}
2715 			map_def->value_size = 4;
2716 			t = btf__type_by_id(btf, m->type);
2717 			if (!t) {
2718 				pr_warn("map '%s': %s type [%d] not found.\n",
2719 					map_name, desc, m->type);
2720 				return -EINVAL;
2721 			}
2722 			if (!btf_is_array(t) || btf_array(t)->nelems) {
2723 				pr_warn("map '%s': %s spec is not a zero-sized array.\n",
2724 					map_name, desc);
2725 				return -EINVAL;
2726 			}
2727 			t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL);
2728 			if (!btf_is_ptr(t)) {
2729 				pr_warn("map '%s': %s def is of unexpected kind %s.\n",
2730 					map_name, desc, btf_kind_str(t));
2731 				return -EINVAL;
2732 			}
2733 			t = skip_mods_and_typedefs(btf, t->type, NULL);
2734 			if (is_prog_array) {
2735 				if (!btf_is_func_proto(t)) {
2736 					pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n",
2737 						map_name, btf_kind_str(t));
2738 					return -EINVAL;
2739 				}
2740 				continue;
2741 			}
2742 			if (!btf_is_struct(t)) {
2743 				pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2744 					map_name, btf_kind_str(t));
2745 				return -EINVAL;
2746 			}
2747 
2748 			snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name);
2749 			err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL);
2750 			if (err)
2751 				return err;
2752 
2753 			map_def->parts |= MAP_DEF_INNER_MAP;
2754 		} else if (strcmp(name, "pinning") == 0) {
2755 			__u32 val;
2756 
2757 			if (is_inner) {
2758 				pr_warn("map '%s': inner def can't be pinned.\n", map_name);
2759 				return -EINVAL;
2760 			}
2761 			if (!get_map_field_int(map_name, btf, m, &val))
2762 				return -EINVAL;
2763 			if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) {
2764 				pr_warn("map '%s': invalid pinning value %u.\n",
2765 					map_name, val);
2766 				return -EINVAL;
2767 			}
2768 			map_def->pinning = val;
2769 			map_def->parts |= MAP_DEF_PINNING;
2770 		} else if (strcmp(name, "map_extra") == 0) {
2771 			__u64 map_extra;
2772 
2773 			if (!get_map_field_long(map_name, btf, m, &map_extra))
2774 				return -EINVAL;
2775 			map_def->map_extra = map_extra;
2776 			map_def->parts |= MAP_DEF_MAP_EXTRA;
2777 		} else {
2778 			if (strict) {
2779 				pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
2780 				return -ENOTSUP;
2781 			}
2782 			pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name);
2783 		}
2784 	}
2785 
2786 	if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) {
2787 		pr_warn("map '%s': map type isn't specified.\n", map_name);
2788 		return -EINVAL;
2789 	}
2790 
2791 	return 0;
2792 }
2793 
2794 static size_t adjust_ringbuf_sz(size_t sz)
2795 {
2796 	__u32 page_sz = sysconf(_SC_PAGE_SIZE);
2797 	__u32 mul;
2798 
2799 	/* if user forgot to set any size, make sure they see error */
2800 	if (sz == 0)
2801 		return 0;
2802 	/* Kernel expects BPF_MAP_TYPE_RINGBUF's max_entries to be
2803 	 * a power-of-2 multiple of the kernel's page size. If the user
2804 	 * already satisfied these conditions, pass the size through.
2805 	 */
2806 	if ((sz % page_sz) == 0 && is_pow_of_2(sz / page_sz))
2807 		return sz;
2808 
2809 	/* Otherwise find closest (page_sz * power_of_2) product bigger than
2810 	 * user-set size to satisfy both user size request and kernel
2811 	 * requirements and substitute correct max_entries for map creation.
2812 	 */
2813 	for (mul = 1; mul <= UINT_MAX / page_sz; mul <<= 1) {
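	/* E.g., with 4KB pages, a requested size of 5000 bytes becomes
	 * 8192 (page_sz * 2), the smallest page_sz * 2^n that can hold it.
	 */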
2814 		if (mul * page_sz > sz)
2815 			return mul * page_sz;
2816 	}
2817 
2818 	/* if it's impossible to satisfy the conditions (i.e., user size is
2819 	 * very close to UINT_MAX but is not a power-of-2 multiple of
2820 	 * page_size) then just return original size and let kernel reject it
2821 	 */
2822 	return sz;
2823 }
2824 
2825 static bool map_is_ringbuf(const struct bpf_map *map)
2826 {
2827 	return map->def.type == BPF_MAP_TYPE_RINGBUF ||
2828 	       map->def.type == BPF_MAP_TYPE_USER_RINGBUF;
2829 }
2830 
2831 static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
2832 {
2833 	map->def.type = def->map_type;
2834 	map->def.key_size = def->key_size;
2835 	map->def.value_size = def->value_size;
2836 	map->def.max_entries = def->max_entries;
2837 	map->def.map_flags = def->map_flags;
2838 	map->map_extra = def->map_extra;
2839 
2840 	map->numa_node = def->numa_node;
2841 	map->btf_key_type_id = def->key_type_id;
2842 	map->btf_value_type_id = def->value_type_id;
2843 
2844 	/* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
2845 	if (map_is_ringbuf(map))
2846 		map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
2847 
2848 	if (def->parts & MAP_DEF_MAP_TYPE)
2849 		pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
2850 
2851 	if (def->parts & MAP_DEF_KEY_TYPE)
2852 		pr_debug("map '%s': found key [%u], sz = %u.\n",
2853 			 map->name, def->key_type_id, def->key_size);
2854 	else if (def->parts & MAP_DEF_KEY_SIZE)
2855 		pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size);
2856 
2857 	if (def->parts & MAP_DEF_VALUE_TYPE)
2858 		pr_debug("map '%s': found value [%u], sz = %u.\n",
2859 			 map->name, def->value_type_id, def->value_size);
2860 	else if (def->parts & MAP_DEF_VALUE_SIZE)
2861 		pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size);
2862 
2863 	if (def->parts & MAP_DEF_MAX_ENTRIES)
2864 		pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
2865 	if (def->parts & MAP_DEF_MAP_FLAGS)
2866 		pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags);
2867 	if (def->parts & MAP_DEF_MAP_EXTRA)
2868 		pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name,
2869 			 (unsigned long long)def->map_extra);
2870 	if (def->parts & MAP_DEF_PINNING)
2871 		pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
2872 	if (def->parts & MAP_DEF_NUMA_NODE)
2873 		pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node);
2874 
2875 	if (def->parts & MAP_DEF_INNER_MAP)
2876 		pr_debug("map '%s': found inner map definition.\n", map->name);
2877 }
2878 
2879 static const char *btf_var_linkage_str(__u32 linkage)
2880 {
2881 	switch (linkage) {
2882 	case BTF_VAR_STATIC: return "static";
2883 	case BTF_VAR_GLOBAL_ALLOCATED: return "global";
2884 	case BTF_VAR_GLOBAL_EXTERN: return "extern";
2885 	default: return "unknown";
2886 	}
2887 }
2888 
2889 static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2890 					 const struct btf_type *sec,
2891 					 int var_idx, int sec_idx,
2892 					 const Elf_Data *data, bool strict,
2893 					 const char *pin_root_path)
2894 {
2895 	struct btf_map_def map_def = {}, inner_def = {};
2896 	const struct btf_type *var, *def;
2897 	const struct btf_var_secinfo *vi;
2898 	const struct btf_var *var_extra;
2899 	const char *map_name;
2900 	struct bpf_map *map;
2901 	int err;
2902 
2903 	vi = btf_var_secinfos(sec) + var_idx;
2904 	var = btf__type_by_id(obj->btf, vi->type);
2905 	var_extra = btf_var(var);
2906 	map_name = btf__name_by_offset(obj->btf, var->name_off);
2907 
2908 	if (str_is_empty(map_name)) {
2909 		pr_warn("map #%d: empty name.\n", var_idx);
2910 		return -EINVAL;
2911 	}
2912 	if ((__u64)vi->offset + vi->size > data->d_size) {
2913 		pr_warn("map '%s' BTF data is corrupted.\n", map_name);
2914 		return -EINVAL;
2915 	}
2916 	if (!btf_is_var(var)) {
2917 		pr_warn("map '%s': unexpected var kind %s.\n",
2918 			map_name, btf_kind_str(var));
2919 		return -EINVAL;
2920 	}
2921 	if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
2922 		pr_warn("map '%s': unsupported map linkage %s.\n",
2923 			map_name, btf_var_linkage_str(var_extra->linkage));
2924 		return -EOPNOTSUPP;
2925 	}
2926 
2927 	def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2928 	if (!btf_is_struct(def)) {
2929 		pr_warn("map '%s': unexpected def kind %s.\n",
2930 			map_name, btf_kind_str(def));
2931 		return -EINVAL;
2932 	}
2933 	if (def->size > vi->size) {
2934 		pr_warn("map '%s': invalid def size.\n", map_name);
2935 		return -EINVAL;
2936 	}
2937 
2938 	map = bpf_object__add_map(obj);
2939 	if (IS_ERR(map))
2940 		return PTR_ERR(map);
2941 	map->name = strdup(map_name);
2942 	if (!map->name) {
2943 		pr_warn("map '%s': failed to alloc map name.\n", map_name);
2944 		return -ENOMEM;
2945 	}
2946 	map->libbpf_type = LIBBPF_MAP_UNSPEC;
2947 	map->def.type = BPF_MAP_TYPE_UNSPEC;
2948 	map->sec_idx = sec_idx;
2949 	map->sec_offset = vi->offset;
2950 	map->btf_var_idx = var_idx;
2951 	pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
2952 		 map_name, map->sec_idx, map->sec_offset);
2953 
2954 	err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def);
2955 	if (err)
2956 		return err;
2957 
2958 	fill_map_from_def(map, &map_def);
2959 
2960 	if (map_def.pinning == LIBBPF_PIN_BY_NAME) {
2961 		err = build_map_pin_path(map, pin_root_path);
2962 		if (err) {
2963 			pr_warn("map '%s': couldn't build pin path.\n", map->name);
2964 			return err;
2965 		}
2966 	}
2967 
2968 	if (map_def.parts & MAP_DEF_INNER_MAP) {
2969 		map->inner_map = calloc(1, sizeof(*map->inner_map));
2970 		if (!map->inner_map)
2971 			return -ENOMEM;
2972 		map->inner_map->fd = create_placeholder_fd();
2973 		if (map->inner_map->fd < 0)
2974 			return map->inner_map->fd;
2975 		map->inner_map->sec_idx = sec_idx;
2976 		map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
2977 		if (!map->inner_map->name)
2978 			return -ENOMEM;
2979 		sprintf(map->inner_map->name, "%s.inner", map_name);
2980 
2981 		fill_map_from_def(map->inner_map, &inner_def);
2982 	}
2983 
2984 	err = map_fill_btf_type_info(obj, map);
2985 	if (err)
2986 		return err;
2987 
2988 	return 0;
2989 }
2990 
2991 static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
2992 			       const char *sec_name, int sec_idx,
2993 			       void *data, size_t data_sz)
2994 {
2995 	const long page_sz = sysconf(_SC_PAGE_SIZE);
2996 	const size_t data_alloc_sz = roundup(data_sz, page_sz);
2997 	size_t mmap_sz;
2998 
2999 	mmap_sz = bpf_map_mmap_sz(map);
3000 	if (data_alloc_sz > mmap_sz) {
3001 		pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n",
3002 			sec_name, mmap_sz, data_sz);
3003 		return -E2BIG;
3004 	}
3005 
3006 	obj->arena_data = malloc(data_sz);
3007 	if (!obj->arena_data)
3008 		return -ENOMEM;
3009 	memcpy(obj->arena_data, data, data_sz);
3010 	obj->arena_data_sz = data_sz;
3011 
3012 	/* make bpf_map__init_value() work for ARENA maps */
3013 	map->mmaped = obj->arena_data;
3014 
3015 	return 0;
3016 }
3017 
3018 static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
3019 					  const char *pin_root_path)
3020 {
3021 	const struct btf_type *sec = NULL;
3022 	int nr_types, i, vlen, err;
3023 	const struct btf_type *t;
3024 	const char *name;
3025 	Elf_Data *data;
3026 	Elf_Scn *scn;
3027 
3028 	if (obj->efile.btf_maps_shndx < 0)
3029 		return 0;
3030 
3031 	scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
3032 	data = elf_sec_data(obj, scn);
3033 	if (!data) {
3034 		pr_warn("elf: failed to get %s map definitions for %s\n",
3035 			MAPS_ELF_SEC, obj->path);
3036 		return -EINVAL;
3037 	}
3038 
3039 	nr_types = btf__type_cnt(obj->btf);
3040 	for (i = 1; i < nr_types; i++) {
3041 		t = btf__type_by_id(obj->btf, i);
3042 		if (!btf_is_datasec(t))
3043 			continue;
3044 		name = btf__name_by_offset(obj->btf, t->name_off);
3045 		if (strcmp(name, MAPS_ELF_SEC) == 0) {
3046 			sec = t;
3047 			obj->efile.btf_maps_sec_btf_id = i;
3048 			break;
3049 		}
3050 	}
3051 
3052 	if (!sec) {
3053 		pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
3054 		return -ENOENT;
3055 	}
3056 
3057 	vlen = btf_vlen(sec);
3058 	for (i = 0; i < vlen; i++) {
3059 		err = bpf_object__init_user_btf_map(obj, sec, i,
3060 						    obj->efile.btf_maps_shndx,
3061 						    data, strict,
3062 						    pin_root_path);
3063 		if (err)
3064 			return err;
3065 	}
3066 
3067 	for (i = 0; i < obj->nr_maps; i++) {
3068 		struct bpf_map *map = &obj->maps[i];
3069 
3070 		if (map->def.type != BPF_MAP_TYPE_ARENA)
3071 			continue;
3072 
3073 		if (obj->arena_map_idx >= 0) {
3074 			pr_warn("map '%s': only single ARENA map is supported (map '%s' is also ARENA)\n",
3075 				map->name, obj->maps[obj->arena_map_idx].name);
3076 			return -EINVAL;
3077 		}
3078 		obj->arena_map_idx = i;
3079 
3080 		if (obj->efile.arena_data) {
3081 			err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx,
3082 						  obj->efile.arena_data->d_buf,
3083 						  obj->efile.arena_data->d_size);
3084 			if (err)
3085 				return err;
3086 		}
3087 	}
3088 	if (obj->efile.arena_data && obj->arena_map_idx < 0) {
3089 		pr_warn("elf: sec '%s': to use global __arena variables the ARENA map should be explicitly declared in SEC(\".maps\")\n",
3090 			ARENA_SEC);
3091 		return -ENOENT;
3092 	}
3093 
3094 	return 0;
3095 }
3096 
3097 static int bpf_object__init_maps(struct bpf_object *obj,
3098 				 const struct bpf_object_open_opts *opts)
3099 {
3100 	const char *pin_root_path;
3101 	bool strict;
3102 	int err = 0;
3103 
3104 	strict = !OPTS_GET(opts, relaxed_maps, false);
3105 	pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
3106 
3107 	err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
3108 	err = err ?: bpf_object__init_global_data_maps(obj);
3109 	err = err ?: bpf_object__init_kconfig_map(obj);
3110 	err = err ?: bpf_object_init_struct_ops(obj);
3111 
3112 	return err;
3113 }
3114 
3115 static bool section_have_execinstr(struct bpf_object *obj, int idx)
3116 {
3117 	Elf64_Shdr *sh;
3118 
3119 	sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx));
3120 	if (!sh)
3121 		return false;
3122 
3123 	return sh->sh_flags & SHF_EXECINSTR;
3124 }
3125 
3126 static bool starts_with_qmark(const char *s)
3127 {
3128 	return s && s[0] == '?';
3129 }
3130 
3131 static bool btf_needs_sanitization(struct bpf_object *obj)
3132 {
3133 	bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
3134 	bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
3135 	bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
3136 	bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
3137 	bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
3138 	bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
3139 	bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
3140 	bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
3141 
3142 	return !has_func || !has_datasec || !has_func_global || !has_float ||
3143 	       !has_decl_tag || !has_type_tag || !has_enum64 || !has_qmark_datasec;
3144 }
3145 
3146 static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
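/* Rewrite, in place, BTF constructs that the running kernel doesn't
 * understand into older, semantically close substitutes so the rest of
 * the BTF still loads. E.g., on a kernel without DATASEC support, each
 * DATASEC becomes a STRUCT with one member per variable, and VARs
 * become 1-byte INTs.
 */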
3147 {
3148 	bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
3149 	bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
3150 	bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
3151 	bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
3152 	bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
3153 	bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
3154 	bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
3155 	bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
3156 	int enum64_placeholder_id = 0;
3157 	struct btf_type *t;
3158 	int i, j, vlen;
3159 
3160 	for (i = 1; i < btf__type_cnt(btf); i++) {
3161 		t = (struct btf_type *)btf__type_by_id(btf, i);
3162 
3163 		if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) {
3164 			/* replace VAR/DECL_TAG with INT */
3165 			t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
3166 			/*
3167 			 * using size = 1 is the safest choice, 4 will be too
3168 			 * big and cause kernel BTF validation failure if
3169 			 * original variable took less than 4 bytes
3170 			 */
3171 			t->size = 1;
3172 			*(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
3173 		} else if (!has_datasec && btf_is_datasec(t)) {
3174 			/* replace DATASEC with STRUCT */
3175 			const struct btf_var_secinfo *v = btf_var_secinfos(t);
3176 			struct btf_member *m = btf_members(t);
3177 			struct btf_type *vt;
3178 			char *name;
3179 
3180 			name = (char *)btf__name_by_offset(btf, t->name_off);
3181 			while (*name) {
3182 				if (*name == '.' || *name == '?')
3183 					*name = '_';
3184 				name++;
3185 			}
3186 
3187 			vlen = btf_vlen(t);
3188 			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
3189 			for (j = 0; j < vlen; j++, v++, m++) {
3190 				/* order of field assignments is important */
3191 				m->offset = v->offset * 8;
3192 				m->type = v->type;
3193 				/* preserve variable name as member name */
3194 				vt = (void *)btf__type_by_id(btf, v->type);
3195 				m->name_off = vt->name_off;
3196 			}
3197 		} else if (!has_qmark_datasec && btf_is_datasec(t) &&
3198 			   starts_with_qmark(btf__name_by_offset(btf, t->name_off))) {
3199 			/* replace '?' prefix with '_' for DATASEC names */
3200 			char *name;
3201 
3202 			name = (char *)btf__name_by_offset(btf, t->name_off);
3203 			if (name[0] == '?')
3204 				name[0] = '_';
3205 		} else if (!has_func && btf_is_func_proto(t)) {
3206 			/* replace FUNC_PROTO with ENUM */
3207 			vlen = btf_vlen(t);
3208 			t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
3209 			t->size = sizeof(__u32); /* kernel enforced */
3210 		} else if (!has_func && btf_is_func(t)) {
3211 			/* replace FUNC with TYPEDEF */
3212 			t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
3213 		} else if (!has_func_global && btf_is_func(t)) {
3214 			/* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
3215 			t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
3216 		} else if (!has_float && btf_is_float(t)) {
3217 			/* replace FLOAT with an equally-sized empty STRUCT;
3218 			 * since C compilers do not accept e.g. "float" as a
3219 			 * valid struct name, make it anonymous
3220 			 */
3221 			t->name_off = 0;
3222 			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
3223 		} else if (!has_type_tag && btf_is_type_tag(t)) {
3224 			/* replace TYPE_TAG with a CONST */
3225 			t->name_off = 0;
3226 			t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0);
3227 		} else if (!has_enum64 && btf_is_enum(t)) {
3228 			/* clear the kflag */
3229 			t->info = btf_type_info(btf_kind(t), btf_vlen(t), false);
3230 		} else if (!has_enum64 && btf_is_enum64(t)) {
3231 			/* replace ENUM64 with a union */
3232 			struct btf_member *m;
3233 
3234 			if (enum64_placeholder_id == 0) {
3235 				enum64_placeholder_id = btf__add_int(btf, "enum64_placeholder", 1, 0);
3236 				if (enum64_placeholder_id < 0)
3237 					return enum64_placeholder_id;
3238 
3239 				t = (struct btf_type *)btf__type_by_id(btf, i);
3240 			}
3241 
3242 			m = btf_members(t);
3243 			vlen = btf_vlen(t);
3244 			t->info = BTF_INFO_ENC(BTF_KIND_UNION, 0, vlen);
3245 			for (j = 0; j < vlen; j++, m++) {
3246 				m->type = enum64_placeholder_id;
3247 				m->offset = 0;
3248 			}
3249 		}
3250 	}
3251 
3252 	return 0;
3253 }
3254 
3255 static bool libbpf_needs_btf(const struct bpf_object *obj)
3256 {
3257 	return obj->efile.btf_maps_shndx >= 0 ||
3258 	       obj->efile.has_st_ops ||
3259 	       obj->nr_extern > 0;
3260 }
3261 
3262 static bool kernel_needs_btf(const struct bpf_object *obj)
3263 {
3264 	return obj->efile.has_st_ops;
3265 }
3266 
3267 static int bpf_object__init_btf(struct bpf_object *obj,
3268 				Elf_Data *btf_data,
3269 				Elf_Data *btf_ext_data)
3270 {
3271 	int err = -ENOENT;
3272 
3273 	if (btf_data) {
3274 		obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
3275 		err = libbpf_get_error(obj->btf);
3276 		if (err) {
3277 			obj->btf = NULL;
3278 			pr_warn("Error loading ELF section %s: %s.\n", BTF_ELF_SEC, errstr(err));
3279 			goto out;
3280 		}
3281 		/* enforce 8-byte pointers for BPF-targeted BTFs */
3282 		btf__set_pointer_size(obj->btf, 8);
3283 	}
3284 	if (btf_ext_data) {
3285 		struct btf_ext_info *ext_segs[3];
3286 		int seg_num, sec_num;
3287 
3288 		if (!obj->btf) {
3289 			pr_debug("Ignoring ELF section %s because the ELF section %s it depends on was not found.\n",
3290 				 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
3291 			goto out;
3292 		}
3293 		obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
3294 		err = libbpf_get_error(obj->btf_ext);
3295 		if (err) {
3296 			pr_warn("Error loading ELF section %s: %s. Ignoring it and continuing.\n",
3297 				BTF_EXT_ELF_SEC, errstr(err));
3298 			obj->btf_ext = NULL;
3299 			goto out;
3300 		}
3301 
3302 		/* setup .BTF.ext to ELF section mapping */
3303 		ext_segs[0] = &obj->btf_ext->func_info;
3304 		ext_segs[1] = &obj->btf_ext->line_info;
3305 		ext_segs[2] = &obj->btf_ext->core_relo_info;
3306 		for (seg_num = 0; seg_num < ARRAY_SIZE(ext_segs); seg_num++) {
3307 			struct btf_ext_info *seg = ext_segs[seg_num];
3308 			const struct btf_ext_info_sec *sec;
3309 			const char *sec_name;
3310 			Elf_Scn *scn;
3311 
3312 			if (seg->sec_cnt == 0)
3313 				continue;
3314 
3315 			seg->sec_idxs = calloc(seg->sec_cnt, sizeof(*seg->sec_idxs));
3316 			if (!seg->sec_idxs) {
3317 				err = -ENOMEM;
3318 				goto out;
3319 			}
3320 
3321 			sec_num = 0;
3322 			for_each_btf_ext_sec(seg, sec) {
3323 				/* increment the index up front to avoid having
3324 				 * to do it before every 'continue' below
3325 				 */
3326 				sec_num++;
3327 
3328 				sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
3329 				if (str_is_empty(sec_name))
3330 					continue;
3331 				scn = elf_sec_by_name(obj, sec_name);
3332 				if (!scn)
3333 					continue;
3334 
3335 				seg->sec_idxs[sec_num - 1] = elf_ndxscn(scn);
3336 			}
3337 		}
3338 	}
3339 out:
3340 	if (err && libbpf_needs_btf(obj)) {
3341 		pr_warn("BTF is required, but is missing or corrupted.\n");
3342 		return err;
3343 	}
3344 	return 0;
3345 }
3346 
3347 static int compare_vsi_off(const void *_a, const void *_b)
3348 {
3349 	const struct btf_var_secinfo *a = _a;
3350 	const struct btf_var_secinfo *b = _b;
3351 
3352 	return a->offset - b->offset;
3353 }
3354 
3355 static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
3356 			     struct btf_type *t)
3357 {
3358 	__u32 size = 0, i, vars = btf_vlen(t);
3359 	const char *sec_name = btf__name_by_offset(btf, t->name_off);
3360 	struct btf_var_secinfo *vsi;
3361 	bool fixup_offsets = false;
3362 	int err;
3363 
3364 	if (!sec_name) {
3365 		pr_debug("No name found in string section for DATASEC kind.\n");
3366 		return -ENOENT;
3367 	}
3368 
3369 	/* Extern-backing datasecs (.ksyms, .kconfig) have their size and
3370 	 * variable offsets set at the previous step. Further, not every
3371 	 * extern BTF VAR has corresponding ELF symbol preserved, so we skip
3372 	 * all fixups altogether for such sections and go straight to sorting
3373 	 * VARs within their DATASEC.
3374 	 */
3375 	if (strcmp(sec_name, KCONFIG_SEC) == 0 || strcmp(sec_name, KSYMS_SEC) == 0)
3376 		goto sort_vars;
3377 
3378 	/* Clang leaves DATASEC size and VAR offsets as zeroes, so we need to
3379 	 * fix this up. But BPF static linker already fixes this up and fills
3380 	 * all the sizes and offsets during static linking. So this step has
3381 	 * to be optional. But the STV_HIDDEN handling is non-optional for any
3382 	 * non-extern DATASEC, so the variable fixup loop below handles both
3383 	 * functions at the same time, paying the cost of BTF VAR <-> ELF
3384 	 * symbol matching just once.
3385 	 */
3386 	if (t->size == 0) {
3387 		err = find_elf_sec_sz(obj, sec_name, &size);
3388 		if (err || !size) {
3389 			pr_debug("sec '%s': failed to determine size from ELF: size %u, err %s\n",
3390 				 sec_name, size, errstr(err));
3391 			return -ENOENT;
3392 		}
3393 
3394 		t->size = size;
3395 		fixup_offsets = true;
3396 	}
3397 
3398 	for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
3399 		const struct btf_type *t_var;
3400 		struct btf_var *var;
3401 		const char *var_name;
3402 		Elf64_Sym *sym;
3403 
3404 		t_var = btf__type_by_id(btf, vsi->type);
3405 		if (!t_var || !btf_is_var(t_var)) {
3406 			pr_debug("sec '%s': unexpected non-VAR type found\n", sec_name);
3407 			return -EINVAL;
3408 		}
3409 
3410 		var = btf_var(t_var);
3411 		if (var->linkage == BTF_VAR_STATIC || var->linkage == BTF_VAR_GLOBAL_EXTERN)
3412 			continue;
3413 
3414 		var_name = btf__name_by_offset(btf, t_var->name_off);
3415 		if (!var_name) {
3416 			pr_debug("sec '%s': failed to find name of DATASEC's member #%d\n",
3417 				 sec_name, i);
3418 			return -ENOENT;
3419 		}
3420 
3421 		sym = find_elf_var_sym(obj, var_name);
3422 		if (IS_ERR(sym)) {
3423 			pr_debug("sec '%s': failed to find ELF symbol for VAR '%s'\n",
3424 				 sec_name, var_name);
3425 			return -ENOENT;
3426 		}
3427 
3428 		if (fixup_offsets)
3429 			vsi->offset = sym->st_value;
3430 
3431 		/* if variable is a global/weak symbol, but has restricted
3432 		 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF VAR
3433 		 * as static. This follows similar logic for functions (BPF
3434 		 * subprogs) and influences libbpf's further decisions about
3435 		 * whether to make global data BPF array maps as
3436 		 * BPF_F_MMAPABLE.
3437 		 */
3438 		if (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN ||
3439 		    ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL)
3440 			var->linkage = BTF_VAR_STATIC;
3441 	}
3442 
3443 sort_vars:
3444 	qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
3445 	return 0;
3446 }
3447 
3448 static int bpf_object_fixup_btf(struct bpf_object *obj)
3449 {
3450 	int i, n, err = 0;
3451 
3452 	if (!obj->btf)
3453 		return 0;
3454 
3455 	n = btf__type_cnt(obj->btf);
3456 	for (i = 1; i < n; i++) {
3457 		struct btf_type *t = btf_type_by_id(obj->btf, i);
3458 
3459 		/* Loader needs to fix up some of the things compiler
3460 		 * couldn't get its hands on while emitting BTF. This
3461 		 * is section size and global variable offset. We use
3462 		 * the info from the ELF itself for this purpose.
3463 		 */
3464 		if (btf_is_datasec(t)) {
3465 			err = btf_fixup_datasec(obj, obj->btf, t);
3466 			if (err)
3467 				return err;
3468 		}
3469 	}
3470 
3471 	return 0;
3472 }
3473 
3474 static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
3475 {
3476 	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
3477 	    prog->type == BPF_PROG_TYPE_LSM)
3478 		return true;
3479 
3480 	/* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
3481 	 * also need vmlinux BTF
3482 	 */
3483 	if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
3484 		return true;
3485 
3486 	return false;
3487 }
3488 
3489 static bool map_needs_vmlinux_btf(struct bpf_map *map)
3490 {
3491 	return bpf_map__is_struct_ops(map);
3492 }
3493 
3494 static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
3495 {
3496 	struct bpf_program *prog;
3497 	struct bpf_map *map;
3498 	int i;
3499 
3500 	/* CO-RE relocations need kernel BTF, only when btf_custom_path
3501 	 * is not specified
3502 	 */
3503 	if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path)
3504 		return true;
3505 
3506 	/* Support for typed ksyms needs kernel BTF */
3507 	for (i = 0; i < obj->nr_extern; i++) {
3508 		const struct extern_desc *ext;
3509 
3510 		ext = &obj->externs[i];
3511 		if (ext->type == EXT_KSYM && ext->ksym.type_id)
3512 			return true;
3513 	}
3514 
3515 	bpf_object__for_each_program(prog, obj) {
3516 		if (!prog->autoload)
3517 			continue;
3518 		if (prog_needs_vmlinux_btf(prog))
3519 			return true;
3520 	}
3521 
3522 	bpf_object__for_each_map(map, obj) {
3523 		if (map_needs_vmlinux_btf(map))
3524 			return true;
3525 	}
3526 
3527 	return false;
3528 }
3529 
3530 static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
3531 {
3532 	int err;
3533 
3534 	/* btf_vmlinux could be loaded earlier */
3535 	if (obj->btf_vmlinux || obj->gen_loader)
3536 		return 0;
3537 
3538 	if (!force && !obj_needs_vmlinux_btf(obj))
3539 		return 0;
3540 
3541 	obj->btf_vmlinux = btf__load_vmlinux_btf();
3542 	err = libbpf_get_error(obj->btf_vmlinux);
3543 	if (err) {
3544 		pr_warn("Error loading vmlinux BTF: %s\n", errstr(err));
3545 		obj->btf_vmlinux = NULL;
3546 		return err;
3547 	}
3548 	return 0;
3549 }
3550 
3551 static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
3552 {
3553 	struct btf *kern_btf = obj->btf;
3554 	bool btf_mandatory, sanitize;
3555 	int i, err = 0;
3556 
3557 	if (!obj->btf)
3558 		return 0;
3559 
3560 	if (!kernel_supports(obj, FEAT_BTF)) {
3561 		if (kernel_needs_btf(obj)) {
3562 			err = -EOPNOTSUPP;
3563 			goto report;
3564 		}
3565 		pr_debug("Kernel doesn't support BTF, skipping BTF upload.\n");
3566 		return 0;
3567 	}
3568 
3569 	/* Even though some subprogs are global/weak, the user might prefer the
3570 	 * more permissive BPF verification process that the BPF verifier
3571 	 * performs for static functions, taking into account more context from
3572 	 * the caller functions. In such a case, they need to mark such subprogs
3573 	 * with __attribute__((visibility("hidden"))) and libbpf will adjust the
3574 	 * corresponding FUNC BTF type to be marked as static, triggering the
3575 	 * more involved BPF verification process.
3576 	 */
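	/* For example, a subprog written on the BPF side as (an illustrative
	 * snippet, not part of libbpf itself):
	 *
	 *	__attribute__((visibility("hidden")))
	 *	int process_event(struct event *e) { ... }
	 *
	 * keeps its GLOBAL ELF binding, but the loop below rewrites its FUNC
	 * BTF linkage to static.
	 */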
3577 	for (i = 0; i < obj->nr_programs; i++) {
3578 		struct bpf_program *prog = &obj->programs[i];
3579 		struct btf_type *t;
3580 		const char *name;
3581 		int j, n;
3582 
3583 		if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
3584 			continue;
3585 
3586 		n = btf__type_cnt(obj->btf);
3587 		for (j = 1; j < n; j++) {
3588 			t = btf_type_by_id(obj->btf, j);
3589 			if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL)
3590 				continue;
3591 
3592 			name = btf__str_by_offset(obj->btf, t->name_off);
3593 			if (strcmp(name, prog->name) != 0)
3594 				continue;
3595 
3596 			t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0);
3597 			break;
3598 		}
3599 	}
3600 
3601 	sanitize = btf_needs_sanitization(obj);
3602 	if (sanitize) {
3603 		const void *raw_data;
3604 		__u32 sz;
3605 
3606 		/* clone BTF to sanitize a copy and leave the original intact */
3607 		raw_data = btf__raw_data(obj->btf, &sz);
3608 		kern_btf = btf__new(raw_data, sz);
3609 		err = libbpf_get_error(kern_btf);
3610 		if (err)
3611 			return err;
3612 
3613 		/* enforce 8-byte pointers for BPF-targeted BTFs */
3614 		btf__set_pointer_size(obj->btf, 8);
3615 		err = bpf_object__sanitize_btf(obj, kern_btf);
3616 		if (err)
3617 			return err;
3618 	}
3619 
3620 	if (obj->gen_loader) {
3621 		__u32 raw_size = 0;
3622 		const void *raw_data = btf__raw_data(kern_btf, &raw_size);
3623 
3624 		if (!raw_data)
3625 			return -ENOMEM;
3626 		bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
3627 		/* Pretend to have valid FD to pass various fd >= 0 checks.
3628 		 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
3629 		 */
3630 		btf__set_fd(kern_btf, 0);
3631 	} else {
3632 		/* currently BPF_BTF_LOAD only supports log_level 1 */
3633 		err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size,
3634 					   obj->log_level ? 1 : 0, obj->token_fd);
3635 	}
3636 	if (sanitize) {
3637 		if (!err) {
3638 			/* move fd to libbpf's BTF */
3639 			btf__set_fd(obj->btf, btf__fd(kern_btf));
3640 			btf__set_fd(kern_btf, -1);
3641 		}
3642 		btf__free(kern_btf);
3643 	}
3644 report:
3645 	if (err) {
3646 		btf_mandatory = kernel_needs_btf(obj);
3647 		if (btf_mandatory) {
3648 			pr_warn("Error loading .BTF into kernel: %s. BTF is mandatory, can't proceed.\n",
3649 				errstr(err));
3650 		} else {
3651 			pr_info("Error loading .BTF into kernel: %s. BTF is optional, ignoring.\n",
3652 				errstr(err));
3653 			err = 0;
3654 		}
3655 	}
3656 	return err;
3657 }
3658 
3659 static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
3660 {
3661 	const char *name;
3662 
3663 	name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
3664 	if (!name) {
3665 		pr_warn("elf: failed to get symbol name string at offset %zu from %s: %s\n",
3666 			off, obj->path, elf_errmsg(-1));
3667 		return NULL;
3668 	}
3669 
3670 	return name;
3671 }
3672 
3673 static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
3674 {
3675 	const char *name;
3676 
3677 	name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
3678 	if (!name) {
3679 		pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3680 			off, obj->path, elf_errmsg(-1));
3681 		return NULL;
3682 	}
3683 
3684 	return name;
3685 }
3686 
3687 static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
3688 {
3689 	Elf_Scn *scn;
3690 
3691 	scn = elf_getscn(obj->efile.elf, idx);
3692 	if (!scn) {
3693 		pr_warn("elf: failed to get section(%zu) from %s: %s\n",
3694 			idx, obj->path, elf_errmsg(-1));
3695 		return NULL;
3696 	}
3697 	return scn;
3698 }
3699 
3700 static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
3701 {
3702 	Elf_Scn *scn = NULL;
3703 	Elf *elf = obj->efile.elf;
3704 	const char *sec_name;
3705 
3706 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
3707 		sec_name = elf_sec_name(obj, scn);
3708 		if (!sec_name)
3709 			return NULL;
3710 
3711 		if (strcmp(sec_name, name) != 0)
3712 			continue;
3713 
3714 		return scn;
3715 	}
3716 	return NULL;
3717 }
3718 
3719 static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn)
3720 {
3721 	Elf64_Shdr *shdr;
3722 
3723 	if (!scn)
3724 		return NULL;
3725 
3726 	shdr = elf64_getshdr(scn);
3727 	if (!shdr) {
3728 		pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
3729 			elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3730 		return NULL;
3731 	}
3732 
3733 	return shdr;
3734 }
3735 
3736 static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
3737 {
3738 	const char *name;
3739 	Elf64_Shdr *sh;
3740 
3741 	if (!scn)
3742 		return NULL;
3743 
3744 	sh = elf_sec_hdr(obj, scn);
3745 	if (!sh)
3746 		return NULL;
3747 
3748 	name = elf_sec_str(obj, sh->sh_name);
3749 	if (!name) {
3750 		pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
3751 			elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3752 		return NULL;
3753 	}
3754 
3755 	return name;
3756 }
3757 
3758 static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
3759 {
3760 	Elf_Data *data;
3761 
3762 	if (!scn)
3763 		return NULL;
3764 
3765 	data = elf_getdata(scn, 0);
3766 	if (!data) {
3767 		pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
3768 			elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
3769 			obj->path, elf_errmsg(-1));
3770 		return NULL;
3771 	}
3772 
3773 	return data;
3774 }
3775 
3776 static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx)
3777 {
3778 	if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym))
3779 		return NULL;
3780 
3781 	return (Elf64_Sym *)obj->efile.symbols->d_buf + idx;
3782 }
3783 
3784 static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx)
3785 {
3786 	if (idx >= data->d_size / sizeof(Elf64_Rel))
3787 		return NULL;
3788 
3789 	return (Elf64_Rel *)data->d_buf + idx;
3790 }
3791 
3792 static bool is_sec_name_dwarf(const char *name)
3793 {
3794 	/* approximation, but the actual list is too long */
3795 	return str_has_pfx(name, ".debug_");
3796 }
3797 
3798 static bool ignore_elf_section(Elf64_Shdr *hdr, const char *name)
3799 {
3800 	/* no special handling of .strtab */
3801 	if (hdr->sh_type == SHT_STRTAB)
3802 		return true;
3803 
3804 	/* ignore .llvm_addrsig section as well */
3805 	if (hdr->sh_type == SHT_LLVM_ADDRSIG)
3806 		return true;
3807 
3808 	/* no subprograms will lead to an empty .text section, ignore it */
3809 	if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
3810 	    strcmp(name, ".text") == 0)
3811 		return true;
3812 
3813 	/* DWARF sections */
3814 	if (is_sec_name_dwarf(name))
3815 		return true;
3816 
3817 	if (str_has_pfx(name, ".rel")) {
3818 		name += sizeof(".rel") - 1;
3819 		/* DWARF section relocations */
3820 		if (is_sec_name_dwarf(name))
3821 			return true;
3822 
3823 		/* .BTF and .BTF.ext don't need relocations */
3824 		if (strcmp(name, BTF_ELF_SEC) == 0 ||
3825 		    strcmp(name, BTF_EXT_ELF_SEC) == 0)
3826 			return true;
3827 	}
3828 
3829 	return false;
3830 }
3831 
3832 static int cmp_progs(const void *_a, const void *_b)
3833 {
3834 	const struct bpf_program *a = _a;
3835 	const struct bpf_program *b = _b;
3836 
3837 	if (a->sec_idx != b->sec_idx)
3838 		return a->sec_idx < b->sec_idx ? -1 : 1;
3839 
3840 	/* sec_insn_off can't be the same within the section */
3841 	return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
3842 }
3843 
3844 static int bpf_object__elf_collect(struct bpf_object *obj)
3845 {
3846 	struct elf_sec_desc *sec_desc;
3847 	Elf *elf = obj->efile.elf;
3848 	Elf_Data *btf_ext_data = NULL;
3849 	Elf_Data *btf_data = NULL;
3850 	int idx = 0, err = 0;
3851 	const char *name;
3852 	Elf_Data *data;
3853 	Elf_Scn *scn;
3854 	Elf64_Shdr *sh;
3855 
3856 	/* ELF section indices are 0-based, but sec #0 is special "invalid"
3857 	 * section. Since section count retrieved by elf_getshdrnum() does
3858 	 * include sec #0, it is already the necessary size of an array to keep
3859 	 * all the sections.
3860 	 */
3861 	if (elf_getshdrnum(obj->efile.elf, &obj->efile.sec_cnt)) {
3862 		pr_warn("elf: failed to get the number of sections for %s: %s\n",
3863 			obj->path, elf_errmsg(-1));
3864 		return -LIBBPF_ERRNO__FORMAT;
3865 	}
3866 	obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
3867 	if (!obj->efile.secs)
3868 		return -ENOMEM;
3869 
3870 	/* a bunch of ELF parsing functionality depends on processing symbols,
3871 	 * so do the first pass and find the symbol table
3872 	 */
3873 	scn = NULL;
3874 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
3875 		sh = elf_sec_hdr(obj, scn);
3876 		if (!sh)
3877 			return -LIBBPF_ERRNO__FORMAT;
3878 
3879 		if (sh->sh_type == SHT_SYMTAB) {
3880 			if (obj->efile.symbols) {
3881 				pr_warn("elf: multiple symbol tables in %s\n", obj->path);
3882 				return -LIBBPF_ERRNO__FORMAT;
3883 			}
3884 
3885 			data = elf_sec_data(obj, scn);
3886 			if (!data)
3887 				return -LIBBPF_ERRNO__FORMAT;
3888 
3889 			idx = elf_ndxscn(scn);
3890 
3891 			obj->efile.symbols = data;
3892 			obj->efile.symbols_shndx = idx;
3893 			obj->efile.strtabidx = sh->sh_link;
3894 		}
3895 	}
3896 
3897 	if (!obj->efile.symbols) {
3898 		pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n",
3899 			obj->path);
3900 		return -ENOENT;
3901 	}
3902 
3903 	scn = NULL;
3904 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
3905 		idx = elf_ndxscn(scn);
3906 		sec_desc = &obj->efile.secs[idx];
3907 
3908 		sh = elf_sec_hdr(obj, scn);
3909 		if (!sh)
3910 			return -LIBBPF_ERRNO__FORMAT;
3911 
3912 		name = elf_sec_str(obj, sh->sh_name);
3913 		if (!name)
3914 			return -LIBBPF_ERRNO__FORMAT;
3915 
3916 		if (ignore_elf_section(sh, name))
3917 			continue;
3918 
3919 		data = elf_sec_data(obj, scn);
3920 		if (!data)
3921 			return -LIBBPF_ERRNO__FORMAT;
3922 
3923 		pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
3924 			 idx, name, (unsigned long)data->d_size,
3925 			 (int)sh->sh_link, (unsigned long)sh->sh_flags,
3926 			 (int)sh->sh_type);
3927 
3928 		if (strcmp(name, "license") == 0) {
3929 			err = bpf_object__init_license(obj, data->d_buf, data->d_size);
3930 			if (err)
3931 				return err;
3932 		} else if (strcmp(name, "version") == 0) {
3933 			err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
3934 			if (err)
3935 				return err;
3936 		} else if (strcmp(name, "maps") == 0) {
3937 			pr_warn("elf: legacy map definitions in 'maps' section are not supported by libbpf v1.0+\n");
3938 			return -ENOTSUP;
3939 		} else if (strcmp(name, MAPS_ELF_SEC) == 0) {
3940 			obj->efile.btf_maps_shndx = idx;
3941 		} else if (strcmp(name, BTF_ELF_SEC) == 0) {
3942 			if (sh->sh_type != SHT_PROGBITS)
3943 				return -LIBBPF_ERRNO__FORMAT;
3944 			btf_data = data;
3945 		} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
3946 			if (sh->sh_type != SHT_PROGBITS)
3947 				return -LIBBPF_ERRNO__FORMAT;
3948 			btf_ext_data = data;
3949 		} else if (sh->sh_type == SHT_SYMTAB) {
3950 			/* already processed during the first pass above */
3951 		} else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) {
3952 			if (sh->sh_flags & SHF_EXECINSTR) {
3953 				if (strcmp(name, ".text") == 0)
3954 					obj->efile.text_shndx = idx;
3955 				err = bpf_object__add_programs(obj, data, name, idx);
3956 				if (err)
3957 					return err;
3958 			} else if (strcmp(name, DATA_SEC) == 0 ||
3959 				   str_has_pfx(name, DATA_SEC ".")) {
3960 				sec_desc->sec_type = SEC_DATA;
3961 				sec_desc->shdr = sh;
3962 				sec_desc->data = data;
3963 			} else if (strcmp(name, RODATA_SEC) == 0 ||
3964 				   str_has_pfx(name, RODATA_SEC ".")) {
3965 				sec_desc->sec_type = SEC_RODATA;
3966 				sec_desc->shdr = sh;
3967 				sec_desc->data = data;
3968 			} else if (strcmp(name, STRUCT_OPS_SEC) == 0 ||
3969 				   strcmp(name, STRUCT_OPS_LINK_SEC) == 0 ||
3970 				   strcmp(name, "?" STRUCT_OPS_SEC) == 0 ||
3971 				   strcmp(name, "?" STRUCT_OPS_LINK_SEC) == 0) {
3972 				sec_desc->sec_type = SEC_ST_OPS;
3973 				sec_desc->shdr = sh;
3974 				sec_desc->data = data;
3975 				obj->efile.has_st_ops = true;
3976 			} else if (strcmp(name, ARENA_SEC) == 0) {
3977 				obj->efile.arena_data = data;
3978 				obj->efile.arena_data_shndx = idx;
3979 			} else if (strcmp(name, JUMPTABLES_SEC) == 0) {
3980 				obj->jumptables_data = malloc(data->d_size);
3981 				if (!obj->jumptables_data)
3982 					return -ENOMEM;
3983 				memcpy(obj->jumptables_data, data->d_buf, data->d_size);
3984 				obj->jumptables_data_sz = data->d_size;
3985 				obj->efile.jumptables_data_shndx = idx;
3986 			} else {
3987 				pr_info("elf: skipping unrecognized data section(%d) %s\n",
3988 					idx, name);
3989 			}
3990 		} else if (sh->sh_type == SHT_REL) {
3991 			int targ_sec_idx = sh->sh_info; /* points to other section */
3992 
3993 			if (sh->sh_entsize != sizeof(Elf64_Rel) ||
3994 			    targ_sec_idx >= obj->efile.sec_cnt)
3995 				return -LIBBPF_ERRNO__FORMAT;
3996 
3997 			/* Only do relo for section with exec instructions */
3998 			if (!section_have_execinstr(obj, targ_sec_idx) &&
3999 			    strcmp(name, ".rel" STRUCT_OPS_SEC) &&
4000 			    strcmp(name, ".rel" STRUCT_OPS_LINK_SEC) &&
4001 			    strcmp(name, ".rel?" STRUCT_OPS_SEC) &&
4002 			    strcmp(name, ".rel?" STRUCT_OPS_LINK_SEC) &&
4003 			    strcmp(name, ".rel" MAPS_ELF_SEC)) {
4004 				pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
4005 					idx, name, targ_sec_idx,
4006 					elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: "<?>");
4007 				continue;
4008 			}
4009 
4010 			sec_desc->sec_type = SEC_RELO;
4011 			sec_desc->shdr = sh;
4012 			sec_desc->data = data;
4013 		} else if (sh->sh_type == SHT_NOBITS && (strcmp(name, BSS_SEC) == 0 ||
4014 							 str_has_pfx(name, BSS_SEC "."))) {
4015 			sec_desc->sec_type = SEC_BSS;
4016 			sec_desc->shdr = sh;
4017 			sec_desc->data = data;
4018 		} else {
4019 			pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
4020 				(size_t)sh->sh_size);
4021 		}
4022 	}
4023 
4024 	if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
4025 		pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
4026 		return -LIBBPF_ERRNO__FORMAT;
4027 	}
4028 
4029 	/* change BPF program insns to native endianness for introspection */
4030 	if (!is_native_endianness(obj))
4031 		bpf_object_bswap_progs(obj);
4032 
4033 	/* sort BPF programs by section name and in-section instruction offset
4034 	 * for faster search
4035 	 */
4036 	if (obj->nr_programs)
4037 		qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
4038 
4039 	return bpf_object__init_btf(obj, btf_data, btf_ext_data);
4040 }
4041 
4042 static bool sym_is_extern(const Elf64_Sym *sym)
4043 {
4044 	int bind = ELF64_ST_BIND(sym->st_info);
4045 	/* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
4046 	return sym->st_shndx == SHN_UNDEF &&
4047 	       (bind == STB_GLOBAL || bind == STB_WEAK) &&
4048 	       ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE;
4049 }
4050 
4051 static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx)
4052 {
4053 	int bind = ELF64_ST_BIND(sym->st_info);
4054 	int type = ELF64_ST_TYPE(sym->st_info);
4055 
4056 	/* in .text section */
4057 	if (sym->st_shndx != text_shndx)
4058 		return false;
4059 
4060 	/* local function */
4061 	if (bind == STB_LOCAL && type == STT_SECTION)
4062 		return true;
4063 
4064 	/* global function */
4065 	return (bind == STB_GLOBAL || bind == STB_WEAK) && type == STT_FUNC;
4066 }
4067 
4068 static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
4069 {
4070 	const struct btf_type *t;
4071 	const char *tname;
4072 	int i, n;
4073 
4074 	if (!btf)
4075 		return -ESRCH;
4076 
4077 	n = btf__type_cnt(btf);
4078 	for (i = 1; i < n; i++) {
4079 		t = btf__type_by_id(btf, i);
4080 
4081 		if (!btf_is_var(t) && !btf_is_func(t))
4082 			continue;
4083 
4084 		tname = btf__name_by_offset(btf, t->name_off);
4085 		if (strcmp(tname, ext_name))
4086 			continue;
4087 
4088 		if (btf_is_var(t) &&
4089 		    btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
4090 			return -EINVAL;
4091 
4092 		if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN)
4093 			return -EINVAL;
4094 
4095 		return i;
4096 	}
4097 
4098 	return -ENOENT;
4099 }
4100 
4101 static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id)
{
4102 	const struct btf_var_secinfo *vs;
4103 	const struct btf_type *t;
4104 	int i, j, n;
4105 
4106 	if (!btf)
4107 		return -ESRCH;
4108 
4109 	n = btf__type_cnt(btf);
4110 	for (i = 1; i < n; i++) {
4111 		t = btf__type_by_id(btf, i);
4112 
4113 		if (!btf_is_datasec(t))
4114 			continue;
4115 
4116 		vs = btf_var_secinfos(t);
4117 		for (j = 0; j < btf_vlen(t); j++, vs++) {
4118 			if (vs->type == ext_btf_id)
4119 				return i;
4120 		}
4121 	}
4122 
4123 	return -ENOENT;
4124 }
4125 
4126 static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
4127 				     bool *is_signed)
4128 {
4129 	const struct btf_type *t;
4130 	const char *name;
4131 
4132 	t = skip_mods_and_typedefs(btf, id, NULL);
4133 	name = btf__name_by_offset(btf, t->name_off);
4134 
4135 	if (is_signed)
4136 		*is_signed = false;
4137 	switch (btf_kind(t)) {
4138 	case BTF_KIND_INT: {
4139 		int enc = btf_int_encoding(t);
4140 
4141 		if (enc & BTF_INT_BOOL)
4142 			return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
4143 		if (is_signed)
4144 			*is_signed = enc & BTF_INT_SIGNED;
4145 		if (t->size == 1)
4146 			return KCFG_CHAR;
4147 		if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
4148 			return KCFG_UNKNOWN;
4149 		return KCFG_INT;
4150 	}
4151 	case BTF_KIND_ENUM:
4152 		if (t->size != 4)
4153 			return KCFG_UNKNOWN;
4154 		if (strcmp(name, "libbpf_tristate"))
4155 			return KCFG_UNKNOWN;
4156 		return KCFG_TRISTATE;
4157 	case BTF_KIND_ENUM64:
4158 		if (strcmp(name, "libbpf_tristate"))
4159 			return KCFG_UNKNOWN;
4160 		return KCFG_TRISTATE;
4161 	case BTF_KIND_ARRAY:
4162 		if (btf_array(t)->nelems == 0)
4163 			return KCFG_UNKNOWN;
4164 		if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
4165 			return KCFG_UNKNOWN;
4166 		return KCFG_CHAR_ARR;
4167 	default:
4168 		return KCFG_UNKNOWN;
4169 	}
4170 }
4171 
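/* On the BPF side, the kcfg types recognized by find_kcfg_type() above
 * correspond to extern declarations like these (an illustrative snippet;
 * CONFIG_TRISTATE_OPT is a made-up option name):
 *
 *	extern int CONFIG_HZ __kconfig;					// KCFG_INT
 *	extern enum libbpf_tristate CONFIG_TRISTATE_OPT __kconfig;	// KCFG_TRISTATE
 *	extern char CONFIG_LOCALVERSION[16] __kconfig;			// KCFG_CHAR_ARR
 */
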
4172 static int cmp_externs(const void *_a, const void *_b)
4173 {
4174 	const struct extern_desc *a = _a;
4175 	const struct extern_desc *b = _b;
4176 
4177 	if (a->type != b->type)
4178 		return a->type < b->type ? -1 : 1;
4179 
4180 	if (a->type == EXT_KCFG) {
4181 		/* descending order by alignment requirements */
4182 		if (a->kcfg.align != b->kcfg.align)
4183 			return a->kcfg.align > b->kcfg.align ? -1 : 1;
4184 		/* ascending order by size, within same alignment class */
4185 		if (a->kcfg.sz != b->kcfg.sz)
4186 			return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
4187 	}
4188 
4189 	/* resolve ties by name */
4190 	return strcmp(a->name, b->name);
4191 }
4192 
4193 static int find_int_btf_id(const struct btf *btf)
4194 {
4195 	const struct btf_type *t;
4196 	int i, n;
4197 
4198 	n = btf__type_cnt(btf);
4199 	for (i = 1; i < n; i++) {
4200 		t = btf__type_by_id(btf, i);
4201 
4202 		if (btf_is_int(t) && btf_int_bits(t) == 32)
4203 			return i;
4204 	}
4205 
4206 	return 0;
4207 }
4208 
4209 static int add_dummy_ksym_var(struct btf *btf)
4210 {
4211 	int i, int_btf_id, sec_btf_id, dummy_var_btf_id;
4212 	const struct btf_var_secinfo *vs;
4213 	const struct btf_type *sec;
4214 
4215 	if (!btf)
4216 		return 0;
4217 
4218 	sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC,
4219 					    BTF_KIND_DATASEC);
4220 	if (sec_btf_id < 0)
4221 		return 0;
4222 
4223 	sec = btf__type_by_id(btf, sec_btf_id);
4224 	vs = btf_var_secinfos(sec);
4225 	for (i = 0; i < btf_vlen(sec); i++, vs++) {
4226 		const struct btf_type *vt;
4227 
4228 		vt = btf__type_by_id(btf, vs->type);
4229 		if (btf_is_func(vt))
4230 			break;
4231 	}
4232 
4233 	/* No func in ksyms sec.  No need to add dummy var. */
4234 	if (i == btf_vlen(sec))
4235 		return 0;
4236 
4237 	int_btf_id = find_int_btf_id(btf);
4238 	dummy_var_btf_id = btf__add_var(btf,
4239 					"dummy_ksym",
4240 					BTF_VAR_GLOBAL_ALLOCATED,
4241 					int_btf_id);
4242 	if (dummy_var_btf_id < 0)
4243 		pr_warn("cannot create a dummy_ksym var\n");
4244 
4245 	return dummy_var_btf_id;
4246 }
4247 
4248 static int bpf_object__collect_externs(struct bpf_object *obj)
4249 {
4250 	struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
4251 	const struct btf_type *t;
4252 	struct extern_desc *ext;
4253 	int i, n, off, dummy_var_btf_id;
4254 	const char *ext_name, *sec_name;
4255 	size_t ext_essent_len;
4256 	Elf_Scn *scn;
4257 	Elf64_Shdr *sh;
4258 
4259 	if (!obj->efile.symbols)
4260 		return 0;
4261 
4262 	scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
4263 	sh = elf_sec_hdr(obj, scn);
4264 	if (!sh || sh->sh_entsize != sizeof(Elf64_Sym))
4265 		return -LIBBPF_ERRNO__FORMAT;
4266 
4267 	dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
4268 	if (dummy_var_btf_id < 0)
4269 		return dummy_var_btf_id;
4270 
4271 	n = sh->sh_size / sh->sh_entsize;
4272 	pr_debug("looking for externs among %d symbols...\n", n);
4273 
4274 	for (i = 0; i < n; i++) {
4275 		Elf64_Sym *sym = elf_sym_by_idx(obj, i);
4276 
4277 		if (!sym)
4278 			return -LIBBPF_ERRNO__FORMAT;
4279 		if (!sym_is_extern(sym))
4280 			continue;
4281 		ext_name = elf_sym_str(obj, sym->st_name);
4282 		if (str_is_empty(ext_name))
4283 			continue;
4284 
4285 		ext = obj->externs;
4286 		ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
4287 		if (!ext)
4288 			return -ENOMEM;
4289 		obj->externs = ext;
4290 		ext = &ext[obj->nr_extern];
4291 		memset(ext, 0, sizeof(*ext));
4292 		obj->nr_extern++;
4293 
4294 		ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
4295 		if (ext->btf_id <= 0) {
4296 			pr_warn("failed to find BTF for extern '%s': %d\n",
4297 				ext_name, ext->btf_id);
4298 			return ext->btf_id;
4299 		}
4300 		t = btf__type_by_id(obj->btf, ext->btf_id);
4301 		ext->name = strdup(btf__name_by_offset(obj->btf, t->name_off));
4302 		if (!ext->name)
4303 			return -ENOMEM;
4304 		ext->sym_idx = i;
4305 		ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
4306 
4307 		ext_essent_len = bpf_core_essential_name_len(ext->name);
4308 		ext->essent_name = NULL;
4309 		if (ext_essent_len != strlen(ext->name)) {
4310 			ext->essent_name = strndup(ext->name, ext_essent_len);
4311 			if (!ext->essent_name)
4312 				return -ENOMEM;
4313 		}
4314 
4315 		ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
4316 		if (ext->sec_btf_id <= 0) {
4317 			pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
4318 				ext_name, ext->btf_id, ext->sec_btf_id);
4319 			return ext->sec_btf_id;
4320 		}
4321 		sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
4322 		sec_name = btf__name_by_offset(obj->btf, sec->name_off);
4323 
4324 		if (strcmp(sec_name, KCONFIG_SEC) == 0) {
4325 			if (btf_is_func(t)) {
4326 				pr_warn("extern function %s is unsupported under %s section\n",
4327 					ext->name, KCONFIG_SEC);
4328 				return -ENOTSUP;
4329 			}
4330 			kcfg_sec = sec;
4331 			ext->type = EXT_KCFG;
4332 			ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
4333 			if (ext->kcfg.sz <= 0) {
4334 				pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
4335 					ext_name, ext->kcfg.sz);
4336 				return ext->kcfg.sz;
4337 			}
4338 			ext->kcfg.align = btf__align_of(obj->btf, t->type);
4339 			if (ext->kcfg.align <= 0) {
4340 				pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
4341 					ext_name, ext->kcfg.align);
4342 				return -EINVAL;
4343 			}
4344 			ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
4345 							&ext->kcfg.is_signed);
4346 			if (ext->kcfg.type == KCFG_UNKNOWN) {
4347 				pr_warn("extern (kcfg) '%s': type is unsupported\n", ext_name);
4348 				return -ENOTSUP;
4349 			}
4350 		} else if (strcmp(sec_name, KSYMS_SEC) == 0) {
4351 			ksym_sec = sec;
4352 			ext->type = EXT_KSYM;
4353 			skip_mods_and_typedefs(obj->btf, t->type,
4354 					       &ext->ksym.type_id);
4355 		} else {
4356 			pr_warn("unrecognized extern section '%s'\n", sec_name);
4357 			return -ENOTSUP;
4358 		}
4359 	}
4360 	pr_debug("collected %d externs total\n", obj->nr_extern);
4361 
4362 	if (!obj->nr_extern)
4363 		return 0;
4364 
4365 	/* sort externs by type, for kcfg ones also by (align, size, name) */
4366 	qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
4367 
4368 	/* for .ksyms section, we need to turn all externs into allocated
4369 	 * variables in BTF to pass kernel verification; we do this by
4370 	 * pretending that each extern is an int-sized variable
4371 	 */
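	/* E.g., externs declared on the BPF side look like this (an
	 * illustrative snippet, not part of libbpf itself):
	 *
	 *	extern const struct rq runqueues __ksym;	// typed variable ksym
	 *	extern void bpf_rcu_read_lock(void) __ksym;	// kfunc
	 */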
4372 	if (ksym_sec) {
4373 		/* find existing 4-byte integer type in BTF to use for fake
4374 		 * extern variables in DATASEC
4375 		 */
4376 		int int_btf_id = find_int_btf_id(obj->btf);
4377 		/* For an extern function, the dummy_var added earlier
4378 		 * will be used to replace the vs->type, and its name
4379 		 * string will be used to fill in any missing param
4380 		 * names.
4381 		 */
4382 		const struct btf_type *dummy_var;
4383 
4384 		dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id);
4385 		for (i = 0; i < obj->nr_extern; i++) {
4386 			ext = &obj->externs[i];
4387 			if (ext->type != EXT_KSYM)
4388 				continue;
4389 			pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
4390 				 i, ext->sym_idx, ext->name);
4391 		}
4392 
4393 		sec = ksym_sec;
4394 		n = btf_vlen(sec);
4395 		for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
4396 			struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
4397 			struct btf_type *vt;
4398 
4399 			vt = (void *)btf__type_by_id(obj->btf, vs->type);
4400 			ext_name = btf__name_by_offset(obj->btf, vt->name_off);
4401 			ext = find_extern_by_name(obj, ext_name);
4402 			if (!ext) {
4403 				pr_warn("failed to find extern definition for BTF %s '%s'\n",
4404 					btf_kind_str(vt), ext_name);
4405 				return -ESRCH;
4406 			}
4407 			if (btf_is_func(vt)) {
4408 				const struct btf_type *func_proto;
4409 				struct btf_param *param;
4410 				int j;
4411 
4412 				func_proto = btf__type_by_id(obj->btf,
4413 							     vt->type);
4414 				param = btf_params(func_proto);
4415 				/* Reuse the dummy_var name string if the
4416 				 * func proto's param has no name.
4417 				 */
4418 				for (j = 0; j < btf_vlen(func_proto); j++)
4419 					if (param[j].type && !param[j].name_off)
4420 						param[j].name_off =
4421 							dummy_var->name_off;
4422 				vs->type = dummy_var_btf_id;
4423 				vt->info &= ~0xffff;
4424 				vt->info |= BTF_FUNC_GLOBAL;
4425 			} else {
4426 				btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
4427 				vt->type = int_btf_id;
4428 			}
4429 			vs->offset = off;
4430 			vs->size = sizeof(int);
4431 		}
4432 		sec->size = off;
4433 	}
4434 
4435 	if (kcfg_sec) {
4436 		sec = kcfg_sec;
4437 		/* for kcfg externs calculate their offsets within a .kconfig map */
4438 		off = 0;
4439 		for (i = 0; i < obj->nr_extern; i++) {
4440 			ext = &obj->externs[i];
4441 			if (ext->type != EXT_KCFG)
4442 				continue;
4443 
4444 			ext->kcfg.data_off = roundup(off, ext->kcfg.align);
4445 			off = ext->kcfg.data_off + ext->kcfg.sz;
4446 			pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
4447 				 i, ext->sym_idx, ext->kcfg.data_off, ext->name);
4448 		}
4449 		sec->size = off;
4450 		n = btf_vlen(sec);
4451 		for (i = 0; i < n; i++) {
4452 			struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
4453 
4454 			t = btf__type_by_id(obj->btf, vs->type);
4455 			ext_name = btf__name_by_offset(obj->btf, t->name_off);
4456 			ext = find_extern_by_name(obj, ext_name);
4457 			if (!ext) {
4458 				pr_warn("failed to find extern definition for BTF var '%s'\n",
4459 					ext_name);
4460 				return -ESRCH;
4461 			}
4462 			btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
4463 			vs->offset = ext->kcfg.data_off;
4464 		}
4465 	}
4466 	return 0;
4467 }
4468 
4469 static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog)
4470 {
4471 	return prog->sec_idx == obj->efile.text_shndx;
4472 }
4473 
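/* A minimal lookup sketch (hypothetical caller code): programs are found by
 * their C function name, not their SEC() name, and subprogs in .text are
 * skipped:
 *
 *	struct bpf_program *p = bpf_object__find_program_by_name(obj, "handle_exec");
 *
 * On failure this returns NULL with errno set to ENOENT.
 */
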
4474 struct bpf_program *
4475 bpf_object__find_program_by_name(const struct bpf_object *obj,
4476 				 const char *name)
4477 {
4478 	struct bpf_program *prog;
4479 
4480 	bpf_object__for_each_program(prog, obj) {
4481 		if (prog_is_subprog(obj, prog))
4482 			continue;
4483 		if (!strcmp(prog->name, name))
4484 			return prog;
4485 	}
4486 	return errno = ENOENT, NULL;
4487 }
4488 
4489 static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
4490 				      int shndx)
4491 {
4492 	switch (obj->efile.secs[shndx].sec_type) {
4493 	case SEC_BSS:
4494 	case SEC_DATA:
4495 	case SEC_RODATA:
4496 		return true;
4497 	default:
4498 		return false;
4499 	}
4500 }
4501 
4502 static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
4503 				      int shndx)
4504 {
4505 	return shndx == obj->efile.btf_maps_shndx;
4506 }
4507 
4508 static enum libbpf_map_type
4509 bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
4510 {
4511 	if (shndx == obj->efile.symbols_shndx)
4512 		return LIBBPF_MAP_KCONFIG;
4513 
4514 	switch (obj->efile.secs[shndx].sec_type) {
4515 	case SEC_BSS:
4516 		return LIBBPF_MAP_BSS;
4517 	case SEC_DATA:
4518 		return LIBBPF_MAP_DATA;
4519 	case SEC_RODATA:
4520 		return LIBBPF_MAP_RODATA;
4521 	default:
4522 		return LIBBPF_MAP_UNSPEC;
4523 	}
4524 }
4525 
4526 static int bpf_prog_compute_hash(struct bpf_program *prog)
4527 {
4528 	struct bpf_insn *purged;
4529 	int i, err = 0;
4530 
4531 	purged = calloc(prog->insns_cnt, BPF_INSN_SZ);
4532 	if (!purged)
4533 		return -ENOMEM;
4534 
4535 	/* If relocations have been done, the map_fd needs to be
4536 	 * discarded for the digest calculation.
4537 	 */
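	/* A ld_imm64 (BPF_LD | BPF_IMM | BPF_DW) pseudo-instruction spans two
	 * struct bpf_insn slots: with BPF_PSEUDO_MAP_FD/BPF_PSEUDO_MAP_VALUE
	 * the first slot's imm holds the map FD and the second slot's imm
	 * holds an offset into the map value, so both imm fields are zeroed
	 * below to keep the digest independent of run-time FD values.
	 */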
4538 	for (i = 0; i < prog->insns_cnt; i++) {
4539 		purged[i] = prog->insns[i];
4540 		if (purged[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
4541 		    (purged[i].src_reg == BPF_PSEUDO_MAP_FD ||
4542 		     purged[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
4543 			purged[i].imm = 0;
4544 			i++;
4545 			if (i >= prog->insns_cnt ||
4546 			    prog->insns[i].code != 0 ||
4547 			    prog->insns[i].dst_reg != 0 ||
4548 			    prog->insns[i].src_reg != 0 ||
4549 			    prog->insns[i].off != 0) {
4550 				err = -EINVAL;
4551 				goto out;
4552 			}
4553 			purged[i] = prog->insns[i];
4554 			purged[i].imm = 0;
4555 		}
4556 	}
4557 	libbpf_sha256(purged, prog->insns_cnt * sizeof(struct bpf_insn),
4558 		      prog->hash);
4559 out:
4560 	free(purged);
4561 	return err;
4562 }
4563 
4564 static int bpf_program__record_reloc(struct bpf_program *prog,
4565 				     struct reloc_desc *reloc_desc,
4566 				     __u32 insn_idx, const char *sym_name,
4567 				     const Elf64_Sym *sym, const Elf64_Rel *rel)
4568 {
4569 	struct bpf_insn *insn = &prog->insns[insn_idx];
4570 	size_t map_idx, nr_maps = prog->obj->nr_maps;
4571 	struct bpf_object *obj = prog->obj;
4572 	__u32 shdr_idx = sym->st_shndx;
4573 	enum libbpf_map_type type;
4574 	const char *sym_sec_name;
4575 	struct bpf_map *map;
4576 
4577 	if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) {
4578 		pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
4579 			prog->name, sym_name, insn_idx, insn->code);
4580 		return -LIBBPF_ERRNO__RELOC;
4581 	}
4582 
4583 	if (sym_is_extern(sym)) {
4584 		int sym_idx = ELF64_R_SYM(rel->r_info);
4585 		int i, n = obj->nr_extern;
4586 		struct extern_desc *ext;
4587 
4588 		for (i = 0; i < n; i++) {
4589 			ext = &obj->externs[i];
4590 			if (ext->sym_idx == sym_idx)
4591 				break;
4592 		}
4593 		if (i >= n) {
4594 			pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
4595 				prog->name, sym_name, sym_idx);
4596 			return -LIBBPF_ERRNO__RELOC;
4597 		}
4598 		pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
4599 			 prog->name, i, ext->name, ext->sym_idx, insn_idx);
4600 		if (insn->code == (BPF_JMP | BPF_CALL))
4601 			reloc_desc->type = RELO_EXTERN_CALL;
4602 		else
4603 			reloc_desc->type = RELO_EXTERN_LD64;
4604 		reloc_desc->insn_idx = insn_idx;
4605 		reloc_desc->ext_idx = i;
4606 		return 0;
4607 	}
4608 
4609 	/* sub-program call relocation */
4610 	if (is_call_insn(insn)) {
4611 		if (insn->src_reg != BPF_PSEUDO_CALL) {
4612 			pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
4613 			return -LIBBPF_ERRNO__RELOC;
4614 		}
4615 		/* text_shndx can be 0, if no default "main" program exists */
4616 		if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
4617 			sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
4618 			pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
4619 				prog->name, sym_name, sym_sec_name);
4620 			return -LIBBPF_ERRNO__RELOC;
4621 		}
4622 		if (sym->st_value % BPF_INSN_SZ) {
4623 			pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
4624 				prog->name, sym_name, (size_t)sym->st_value);
4625 			return -LIBBPF_ERRNO__RELOC;
4626 		}
4627 		reloc_desc->type = RELO_CALL;
4628 		reloc_desc->insn_idx = insn_idx;
4629 		reloc_desc->sym_off = sym->st_value;
4630 		return 0;
4631 	}
4632 
4633 	if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
4634 		pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
4635 			prog->name, sym_name, shdr_idx);
4636 		return -LIBBPF_ERRNO__RELOC;
4637 	}
4638 
4639 	/* loading subprog addresses */
4640 	if (sym_is_subprog(sym, obj->efile.text_shndx)) {
4641 		/* global_func: sym->st_value = offset in the section, insn->imm = 0.
4642 		 * local_func: sym->st_value = 0, insn->imm = offset in the section.
4643 		 */
4644 		if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) {
4645 			pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n",
4646 				prog->name, sym_name, (size_t)sym->st_value, insn->imm);
4647 			return -LIBBPF_ERRNO__RELOC;
4648 		}
4649 
4650 		reloc_desc->type = RELO_SUBPROG_ADDR;
4651 		reloc_desc->insn_idx = insn_idx;
4652 		reloc_desc->sym_off = sym->st_value;
4653 		return 0;
4654 	}
4655 
4656 	type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
4657 	sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
4658 
4659 	/* arena data relocation */
4660 	if (shdr_idx == obj->efile.arena_data_shndx) {
4661 		if (obj->arena_map_idx < 0) {
4662 			pr_warn("prog '%s': bad arena data relocation at insn %u, no arena maps defined\n",
4663 				prog->name, insn_idx);
4664 			return -LIBBPF_ERRNO__RELOC;
4665 		}
4666 		reloc_desc->type = RELO_DATA;
4667 		reloc_desc->insn_idx = insn_idx;
4668 		reloc_desc->map_idx = obj->arena_map_idx;
4669 		reloc_desc->sym_off = sym->st_value;
4670 
4671 		map = &obj->maps[obj->arena_map_idx];
4672 		pr_debug("prog '%s': found arena map %d (%s, sec %d, off %zu) for insn %u\n",
4673 			 prog->name, obj->arena_map_idx, map->name, map->sec_idx,
4674 			 map->sec_offset, insn_idx);
4675 		return 0;
4676 	}
4677 
4678 	/* jump table data relocation */
4679 	if (shdr_idx == obj->efile.jumptables_data_shndx) {
4680 		reloc_desc->type = RELO_INSN_ARRAY;
4681 		reloc_desc->insn_idx = insn_idx;
4682 		reloc_desc->map_idx = -1;
4683 		reloc_desc->sym_off = sym->st_value;
4684 		reloc_desc->sym_size = sym->st_size;
4685 		return 0;
4686 	}
4687 
4688 	/* generic map reference relocation */
4689 	if (type == LIBBPF_MAP_UNSPEC) {
4690 		if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
4691 			pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
4692 				prog->name, sym_name, sym_sec_name);
4693 			return -LIBBPF_ERRNO__RELOC;
4694 		}
4695 		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4696 			map = &obj->maps[map_idx];
4697 			if (map->libbpf_type != type ||
4698 			    map->sec_idx != sym->st_shndx ||
4699 			    map->sec_offset != sym->st_value)
4700 				continue;
4701 			pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
4702 				 prog->name, map_idx, map->name, map->sec_idx,
4703 				 map->sec_offset, insn_idx);
4704 			break;
4705 		}
4706 		if (map_idx >= nr_maps) {
4707 			pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
4708 				prog->name, sym_sec_name, (size_t)sym->st_value);
4709 			return -LIBBPF_ERRNO__RELOC;
4710 		}
4711 		reloc_desc->type = RELO_LD64;
4712 		reloc_desc->insn_idx = insn_idx;
4713 		reloc_desc->map_idx = map_idx;
4714 		reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
4715 		return 0;
4716 	}
4717 
4718 	/* global data map relocation */
4719 	if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
4720 		pr_warn("prog '%s': bad data relo against section '%s'\n",
4721 			prog->name, sym_sec_name);
4722 		return -LIBBPF_ERRNO__RELOC;
4723 	}
4724 	for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4725 		map = &obj->maps[map_idx];
4726 		if (map->libbpf_type != type || map->sec_idx != sym->st_shndx)
4727 			continue;
4728 		pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
4729 			 prog->name, map_idx, map->name, map->sec_idx,
4730 			 map->sec_offset, insn_idx);
4731 		break;
4732 	}
4733 	if (map_idx >= nr_maps) {
4734 		pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
4735 			prog->name, sym_sec_name);
4736 		return -LIBBPF_ERRNO__RELOC;
4737 	}
4738 
4739 	reloc_desc->type = RELO_DATA;
4740 	reloc_desc->insn_idx = insn_idx;
4741 	reloc_desc->map_idx = map_idx;
4742 	reloc_desc->sym_off = sym->st_value;
4743 	return 0;
4744 }
4745 
4746 static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
4747 {
4748 	return insn_idx >= prog->sec_insn_off &&
4749 	       insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
4750 }
4751 
4752 static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
4753 						 size_t sec_idx, size_t insn_idx)
4754 {
4755 	int l = 0, r = obj->nr_programs - 1, m;
4756 	struct bpf_program *prog;
4757 
4758 	if (!obj->nr_programs)
4759 		return NULL;
4760 
4761 	while (l < r) {
4762 		m = l + (r - l + 1) / 2;
4763 		prog = &obj->programs[m];
4764 
4765 		if (prog->sec_idx < sec_idx ||
4766 		    (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
4767 			l = m;
4768 		else
4769 			r = m - 1;
4770 	}
4771 	/* matching program could be at index l, but it still might be the
4772 	 * wrong one, so we need to double check conditions for the last time
4773 	 */
4774 	prog = &obj->programs[l];
4775 	if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
4776 		return prog;
4777 	return NULL;
4778 }
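
/* E.g., if a section contains programs starting at instruction offsets
 * 0, 5, and 12, a query for insn_idx 7 converges on the program at offset
 * 5 (the last one with sec_insn_off <= 7), and prog_contains_insn() then
 * confirms that insn #7 actually falls within that program's range.
 */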
4779 
4780 static int
4781 bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data)
4782 {
4783 	const char *relo_sec_name, *sec_name;
4784 	size_t sec_idx = shdr->sh_info, sym_idx;
4785 	struct bpf_program *prog;
4786 	struct reloc_desc *relos;
4787 	int err, i, nrels;
4788 	const char *sym_name;
4789 	__u32 insn_idx;
4790 	Elf_Scn *scn;
4791 	Elf_Data *scn_data;
4792 	Elf64_Sym *sym;
4793 	Elf64_Rel *rel;
4794 
4795 	if (sec_idx >= obj->efile.sec_cnt)
4796 		return -EINVAL;
4797 
4798 	scn = elf_sec_by_idx(obj, sec_idx);
4799 	scn_data = elf_sec_data(obj, scn);
4800 	if (!scn_data)
4801 		return -LIBBPF_ERRNO__FORMAT;
4802 
4803 	relo_sec_name = elf_sec_str(obj, shdr->sh_name);
4804 	sec_name = elf_sec_name(obj, scn);
4805 	if (!relo_sec_name || !sec_name)
4806 		return -EINVAL;
4807 
4808 	pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
4809 		 relo_sec_name, sec_idx, sec_name);
4810 	nrels = shdr->sh_size / shdr->sh_entsize;
4811 
4812 	for (i = 0; i < nrels; i++) {
4813 		rel = elf_rel_by_idx(data, i);
4814 		if (!rel) {
4815 			pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
4816 			return -LIBBPF_ERRNO__FORMAT;
4817 		}
4818 
4819 		sym_idx = ELF64_R_SYM(rel->r_info);
4820 		sym = elf_sym_by_idx(obj, sym_idx);
4821 		if (!sym) {
4822 			pr_warn("sec '%s': symbol #%zu not found for relo #%d\n",
4823 				relo_sec_name, sym_idx, i);
4824 			return -LIBBPF_ERRNO__FORMAT;
4825 		}
4826 
4827 		if (sym->st_shndx >= obj->efile.sec_cnt) {
4828 			pr_warn("sec '%s': corrupted symbol #%zu pointing to invalid section #%zu for relo #%d\n",
4829 				relo_sec_name, sym_idx, (size_t)sym->st_shndx, i);
4830 			return -LIBBPF_ERRNO__FORMAT;
4831 		}
4832 
4833 		if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) {
4834 			pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
4835 				relo_sec_name, (size_t)rel->r_offset, i);
4836 			return -LIBBPF_ERRNO__FORMAT;
4837 		}
4838 
4839 		insn_idx = rel->r_offset / BPF_INSN_SZ;
4840 		/* relocations against static functions are recorded as
4841 		 * relocations against the section that contains the function;
4842 		 * in such a case, the symbol will be STT_SECTION and sym.st_name
4843 		 * will point to an empty string (0), so fetch the section name
4844 		 * instead
4845 		 */
4846 		if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0)
4847 			sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx));
4848 		else
4849 			sym_name = elf_sym_str(obj, sym->st_name);
4850 		sym_name = sym_name ?: "<?>";
4851 
4852 		pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
4853 			 relo_sec_name, i, insn_idx, sym_name);
4854 
4855 		prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
4856 		if (!prog) {
4857 			pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n",
4858 				relo_sec_name, i, sec_name, insn_idx);
4859 			continue;
4860 		}
4861 
4862 		relos = libbpf_reallocarray(prog->reloc_desc,
4863 					    prog->nr_reloc + 1, sizeof(*relos));
4864 		if (!relos)
4865 			return -ENOMEM;
4866 		prog->reloc_desc = relos;
4867 
4868 		/* adjust insn_idx to local BPF program frame of reference */
4869 		insn_idx -= prog->sec_insn_off;
4870 		err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
4871 						insn_idx, sym_name, sym, rel);
4872 		if (err)
4873 			return err;
4874 
4875 		prog->nr_reloc++;
4876 	}
4877 	return 0;
4878 }
4879 
4880 static int map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map)
4881 {
4882 	int id;
4883 
4884 	if (!obj->btf)
4885 		return -ENOENT;
4886 
4887 	/* if it's a BTF-defined map, we don't need to search for type IDs.
4888 	 * A struct_ops map needs neither btf_key_type_id nor
4889 	 * btf_value_type_id.
4890 	 */
4891 	if (map->sec_idx == obj->efile.btf_maps_shndx || bpf_map__is_struct_ops(map))
4892 		return 0;
4893 
4894 	/*
4895 	 * LLVM annotates global data differently in BTF, that is,
4896 	 * only as '.data', '.bss' or '.rodata'.
4897 	 */
4898 	if (!bpf_map__is_internal(map))
4899 		return -ENOENT;
4900 
4901 	id = btf__find_by_name(obj->btf, map->real_name);
4902 	if (id < 0)
4903 		return id;
4904 
4905 	map->btf_key_type_id = 0;
4906 	map->btf_value_type_id = id;
4907 	return 0;
4908 }
4909 
4910 static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
4911 {
4912 	char file[PATH_MAX], buff[4096];
4913 	FILE *fp;
4914 	__u32 val;
4915 	int err;
4916 
4917 	snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
4918 	memset(info, 0, sizeof(*info));
4919 
4920 	fp = fopen(file, "re");
4921 	if (!fp) {
4922 		err = -errno;
4923 		pr_warn("failed to open %s: %s. No procfs support?\n", file,
4924 			errstr(err));
4925 		return err;
4926 	}
4927 
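	/* The fdinfo lines matched below look like this (illustrative values):
	 *
	 *	map_type:	1
	 *	key_size:	4
	 *	value_size:	8
	 *	max_entries:	128
	 *	map_flags:	0x0
	 */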
4928 	while (fgets(buff, sizeof(buff), fp)) {
4929 		if (sscanf(buff, "map_type:\t%u", &val) == 1)
4930 			info->type = val;
4931 		else if (sscanf(buff, "key_size:\t%u", &val) == 1)
4932 			info->key_size = val;
4933 		else if (sscanf(buff, "value_size:\t%u", &val) == 1)
4934 			info->value_size = val;
4935 		else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
4936 			info->max_entries = val;
4937 		else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
4938 			info->map_flags = val;
4939 	}
4940 
4941 	fclose(fp);
4942 
4943 	return 0;
4944 }
4945 
4946 static bool map_is_created(const struct bpf_map *map)
4947 {
4948 	return map->obj->state >= OBJ_PREPARED || map->reused;
4949 }
4950 
4951 bool bpf_map__autocreate(const struct bpf_map *map)
4952 {
4953 	return map->autocreate;
4954 }
4955 
4956 int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
4957 {
4958 	if (map_is_created(map))
4959 		return libbpf_err(-EBUSY);
4960 
4961 	map->autocreate = autocreate;
4962 	return 0;
4963 }
4964 
4965 int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach)
4966 {
4967 	if (!bpf_map__is_struct_ops(map))
4968 		return libbpf_err(-EINVAL);
4969 
4970 	map->autoattach = autoattach;
4971 	return 0;
4972 }
4973 
4974 bool bpf_map__autoattach(const struct bpf_map *map)
4975 {
4976 	return map->autoattach;
4977 }
4978 
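/* A minimal reuse sketch (hypothetical caller code; the pin path is made up
 * and error handling is elided):
 *
 *	int fd = bpf_obj_get("/sys/fs/bpf/my_map");
 *
 *	bpf_map__reuse_fd(map, fd);
 *	close(fd);	// caller keeps ownership of fd; it is dup()'ed internally
 */
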
4979 int bpf_map__reuse_fd(struct bpf_map *map, int fd)
4980 {
4981 	struct bpf_map_info info;
4982 	__u32 len = sizeof(info), name_len;
4983 	int new_fd, err;
4984 	char *new_name;
4985 
4986 	memset(&info, 0, len);
4987 	err = bpf_map_get_info_by_fd(fd, &info, &len);
4988 	if (err && errno == EINVAL)
4989 		err = bpf_get_map_info_from_fdinfo(fd, &info);
4990 	if (err)
4991 		return libbpf_err(err);
4992 
4993 	name_len = strlen(info.name);
4994 	if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0)
4995 		new_name = strdup(map->name);
4996 	else
4997 		new_name = strdup(info.name);
4998 
4999 	if (!new_name)
5000 		return libbpf_err(-errno);
5001 
5002 	/*
5003 	 * Like dup(), but make sure new FD is >= 3 and has O_CLOEXEC set.
5004 	 * This is similar to what we do in ensure_good_fd(), but without
5005 	 * closing original FD.
5006 	 */
5007 	new_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
5008 	if (new_fd < 0) {
5009 		err = -errno;
5010 		goto err_free_new_name;
5011 	}
5012 
5013 	err = reuse_fd(map->fd, new_fd);
5014 	if (err)
5015 		goto err_free_new_name;
5016 
5017 	free(map->name);
5018 
5019 	map->name = new_name;
5020 	map->def.type = info.type;
5021 	map->def.key_size = info.key_size;
5022 	map->def.value_size = info.value_size;
5023 	map->def.max_entries = info.max_entries;
5024 	map->def.map_flags = info.map_flags;
5025 	map->btf_key_type_id = info.btf_key_type_id;
5026 	map->btf_value_type_id = info.btf_value_type_id;
5027 	map->reused = true;
5028 	map->map_extra = info.map_extra;
5029 
5030 	return 0;
5031 
5032 err_free_new_name:
5033 	free(new_name);
5034 	return libbpf_err(err);
5035 }
5036 
5037 __u32 bpf_map__max_entries(const struct bpf_map *map)
5038 {
5039 	return map->def.max_entries;
5040 }
5041 
5042 struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
5043 {
5044 	if (!bpf_map_type__is_map_in_map(map->def.type))
5045 		return errno = EINVAL, NULL;
5046 
5047 	return map->inner_map;
5048 }
5049 
5050 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
5051 {
5052 	if (map_is_created(map))
5053 		return libbpf_err(-EBUSY);
5054 
5055 	map->def.max_entries = max_entries;
5056 
5057 	/* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
5058 	if (map_is_ringbuf(map))
5059 		map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
5060 
5061 	return 0;
5062 }
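
/* E.g., for a BPF_MAP_TYPE_RINGBUF map the kernel requires max_entries to
 * be a power-of-2 multiple of the page size, so a call like (illustrative)
 *
 *	bpf_map__set_max_entries(map, 1000);
 *
 * gets transparently rounded up by adjust_ringbuf_sz() above.
 */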
5063 
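/* bpf_token_create() below only succeeds against a BPF FS instance mounted
 * with delegation options, e.g. (an illustrative mount invocation):
 *
 *	mount -t bpf -o delegate_cmds=any,delegate_maps=any,delegate_progs=any,delegate_attachs=any bpffs /sys/fs/bpf
 */
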
5064 static int bpf_object_prepare_token(struct bpf_object *obj)
5065 {
5066 	const char *bpffs_path;
5067 	int bpffs_fd = -1, token_fd, err;
5068 	bool mandatory;
5069 	enum libbpf_print_level level;
5070 
5071 	/* token is explicitly prevented */
5072 	if (obj->token_path && obj->token_path[0] == '\0') {
5073 		pr_debug("object '%s': token is prevented, skipping...\n", obj->name);
5074 		return 0;
5075 	}
5076 
5077 	mandatory = obj->token_path != NULL;
5078 	level = mandatory ? LIBBPF_WARN : LIBBPF_DEBUG;
5079 
5080 	bpffs_path = obj->token_path ?: BPF_FS_DEFAULT_PATH;
5081 	bpffs_fd = open(bpffs_path, O_DIRECTORY);
5082 	if (bpffs_fd < 0) {
5083 		err = -errno;
5084 		__pr(level, "object '%s': failed (%s) to open BPF FS mount at '%s'%s\n",
5085 		     obj->name, errstr(err), bpffs_path,
5086 		     mandatory ? "" : ", skipping optional step...");
5087 		return mandatory ? err : 0;
5088 	}
5089 
5090 	token_fd = bpf_token_create(bpffs_fd, 0);
5091 	close(bpffs_fd);
5092 	if (token_fd < 0) {
5093 		if (!mandatory && token_fd == -ENOENT) {
5094 			pr_debug("object '%s': BPF FS at '%s' doesn't have BPF token delegation set up, skipping...\n",
5095 				 obj->name, bpffs_path);
5096 			return 0;
5097 		}
5098 		__pr(level, "object '%s': failed (%d) to create BPF token from '%s'%s\n",
5099 		     obj->name, token_fd, bpffs_path,
5100 		     mandatory ? "" : ", skipping optional step...");
5101 		return mandatory ? token_fd : 0;
5102 	}
5103 
5104 	obj->feat_cache = calloc(1, sizeof(*obj->feat_cache));
5105 	if (!obj->feat_cache) {
5106 		close(token_fd);
5107 		return -ENOMEM;
5108 	}
5109 
5110 	obj->token_fd = token_fd;
5111 	obj->feat_cache->token_fd = token_fd;
5112 
5113 	return 0;
5114 }
5115 
5116 static int
5117 bpf_object__probe_loading(struct bpf_object *obj)
5118 {
5119 	struct bpf_insn insns[] = {
5120 		BPF_MOV64_IMM(BPF_REG_0, 0),
5121 		BPF_EXIT_INSN(),
5122 	};
5123 	int ret, insn_cnt = ARRAY_SIZE(insns);
5124 	LIBBPF_OPTS(bpf_prog_load_opts, opts,
5125 		.token_fd = obj->token_fd,
5126 		.prog_flags = obj->token_fd ? BPF_F_TOKEN_FD : 0,
5127 	);
5128 
5129 	if (obj->gen_loader)
5130 		return 0;
5131 
5132 	ret = bump_rlimit_memlock();
5133 	if (ret)
5134 		pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %s), you might need to do it explicitly!\n",
5135 			errstr(ret));
5136 
5137 	/* make sure basic loading works */
5138 	ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &opts);
5139 	if (ret < 0)
5140 		ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
5141 	if (ret < 0) {
5142 		ret = errno;
5143 		pr_warn("Error in %s(): %s. Couldn't load trivial BPF program. Make sure your kernel supports BPF (CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is set to a big enough value.\n",
5144 			__func__, errstr(ret));
5145 		return -ret;
5146 	}
5147 	close(ret);
5148 
5149 	return 0;
5150 }
5151 
5152 bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
5153 {
5154 	if (obj->gen_loader)
5155 		/* When generating a loader program, assume the latest kernel
5156 		 * to avoid doing extra prog_load and map_create syscalls.
5157 		 */
5158 		return true;
5159 
5160 	if (obj->token_fd)
5161 		return feat_supported(obj->feat_cache, feat_id);
5162 
5163 	return feat_supported(NULL, feat_id);
5164 }
5165 
5166 static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
5167 {
5168 	struct bpf_map_info map_info;
5169 	__u32 map_info_len = sizeof(map_info);
5170 	int err;
5171 
5172 	memset(&map_info, 0, map_info_len);
5173 	err = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len);
5174 	if (err && errno == EINVAL)
5175 		err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
5176 	if (err) {
5177 		pr_warn("failed to get map info for map FD %d: %s\n", map_fd,
5178 			errstr(err));
5179 		return false;
5180 	}
5181 
5182 	/*
5183 	 * bpf_get_map_info_by_fd() for DEVMAP will always return flags with
5184 	 * BPF_F_RDONLY_PROG set, but it generally is not set at map creation time.
5185 	 * Thus, ignore the BPF_F_RDONLY_PROG flag in the flags returned from
5186 	 * bpf_get_map_info_by_fd() when checking for compatibility with an
5187 	 * existing DEVMAP.
5188 	 */
5189 	if (map->def.type == BPF_MAP_TYPE_DEVMAP || map->def.type == BPF_MAP_TYPE_DEVMAP_HASH)
5190 		map_info.map_flags &= ~BPF_F_RDONLY_PROG;
5191 
5192 	return (map_info.type == map->def.type &&
5193 		map_info.key_size == map->def.key_size &&
5194 		map_info.value_size == map->def.value_size &&
5195 		map_info.max_entries == map->def.max_entries &&
5196 		map_info.map_flags == map->def.map_flags &&
5197 		map_info.map_extra == map->map_extra);
5198 }
5199 
5200 static int
5201 bpf_object__reuse_map(struct bpf_map *map)
5202 {
5203 	int err, pin_fd;
5204 
5205 	pin_fd = bpf_obj_get(map->pin_path);
5206 	if (pin_fd < 0) {
5207 		err = -errno;
5208 		if (err == -ENOENT) {
5209 			pr_debug("found no pinned map to reuse at '%s'\n",
5210 				 map->pin_path);
5211 			return 0;
5212 		}
5213 
5214 		pr_warn("couldn't retrieve pinned map '%s': %s\n",
5215 			map->pin_path, errstr(err));
5216 		return err;
5217 	}
5218 
5219 	if (!map_is_reuse_compat(map, pin_fd)) {
5220 		pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
5221 			map->pin_path);
5222 		close(pin_fd);
5223 		return -EINVAL;
5224 	}
5225 
5226 	err = bpf_map__reuse_fd(map, pin_fd);
5227 	close(pin_fd);
5228 	if (err)
5229 		return err;
5230 
5231 	map->pinned = true;
5232 	pr_debug("reused pinned map at '%s'\n", map->pin_path);
5233 
5234 	return 0;
5235 }
5236 
5237 static int
5238 bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
5239 {
5240 	enum libbpf_map_type map_type = map->libbpf_type;
5241 	int err, zero = 0;
5242 	size_t mmap_sz;
5243 
5244 	if (obj->gen_loader) {
5245 		bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
5246 					 map->mmaped, map->def.value_size);
5247 		if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG)
5248 			bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
5249 		return 0;
5250 	}
5251 
5252 	err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
5253 	if (err) {
5254 		err = -errno;
5255 		pr_warn("map '%s': failed to set initial contents: %s\n",
5256 			bpf_map__name(map), errstr(err));
5257 		return err;
5258 	}
5259 
5260 	/* Freeze .rodata and .kconfig map as read-only from syscall side. */
5261 	if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
5262 		err = bpf_map_freeze(map->fd);
5263 		if (err) {
5264 			err = -errno;
5265 			pr_warn("map '%s': failed to freeze as read-only: %s\n",
5266 				bpf_map__name(map), errstr(err));
5267 			return err;
5268 		}
5269 	}
5270 
5271 	/* Remap the anonymous mmap()-ed "map initialization image" as
5272 	 * BPF map-backed mmap()-ed memory, preserving the same memory
5273 	 * address. This will cause the kernel to change the process'
5274 	 * page table to point to a different piece of kernel memory,
5275 	 * but from the userspace point of view the memory address (and
5276 	 * its contents, being identical at this point) will stay the
5277 	 * same. This mapping will be released by bpf_object__close()
5278 	 * as part of the normal cleanup procedure.
5279 	 */
5280 	mmap_sz = bpf_map_mmap_sz(map);
5281 	if (map->def.map_flags & BPF_F_MMAPABLE) {
5282 		void *mmaped;
5283 		int prot;
5284 
5285 		if (map->def.map_flags & BPF_F_RDONLY_PROG)
5286 			prot = PROT_READ;
5287 		else
5288 			prot = PROT_READ | PROT_WRITE;
5289 		mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map->fd, 0);
5290 		if (mmaped == MAP_FAILED) {
5291 			err = -errno;
5292 			pr_warn("map '%s': failed to re-mmap() contents: %s\n",
5293 				bpf_map__name(map), errstr(err));
5294 			return err;
5295 		}
5296 		map->mmaped = mmaped;
5297 	} else if (map->mmaped) {
5298 		munmap(map->mmaped, mmap_sz);
5299 		map->mmaped = NULL;
5300 	}
5301 
5302 	return 0;
5303 }
5304 
5305 static void bpf_map__destroy(struct bpf_map *map);
5306 
5307 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
5308 {
5309 	LIBBPF_OPTS(bpf_map_create_opts, create_attr);
5310 	struct bpf_map_def *def = &map->def;
5311 	const char *map_name = NULL;
5312 	int err = 0, map_fd;
5313 
5314 	if (kernel_supports(obj, FEAT_PROG_NAME))
5315 		map_name = map->name;
5316 	create_attr.map_ifindex = map->map_ifindex;
5317 	create_attr.map_flags = def->map_flags;
5318 	create_attr.numa_node = map->numa_node;
5319 	create_attr.map_extra = map->map_extra;
5320 	create_attr.token_fd = obj->token_fd;
5321 	if (obj->token_fd)
5322 		create_attr.map_flags |= BPF_F_TOKEN_FD;
5323 	if (map->excl_prog) {
5324 		err = bpf_prog_compute_hash(map->excl_prog);
5325 		if (err)
5326 			return err;
5327 
5328 		create_attr.excl_prog_hash = map->excl_prog->hash;
5329 		create_attr.excl_prog_hash_size = SHA256_DIGEST_LENGTH;
5330 	}
5331 
5332 	if (bpf_map__is_struct_ops(map)) {
5333 		create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
5334 		if (map->mod_btf_fd >= 0) {
5335 			create_attr.value_type_btf_obj_fd = map->mod_btf_fd;
5336 			create_attr.map_flags |= BPF_F_VTYPE_BTF_OBJ_FD;
5337 		}
5338 	}
5339 
5340 	if (obj->btf && btf__fd(obj->btf) >= 0) {
5341 		create_attr.btf_fd = btf__fd(obj->btf);
5342 		create_attr.btf_key_type_id = map->btf_key_type_id;
5343 		create_attr.btf_value_type_id = map->btf_value_type_id;
5344 	}
5345 
5346 	if (bpf_map_type__is_map_in_map(def->type)) {
5347 		if (map->inner_map) {
5348 			err = map_set_def_max_entries(map->inner_map);
5349 			if (err)
5350 				return err;
5351 			err = bpf_object__create_map(obj, map->inner_map, true);
5352 			if (err) {
5353 				pr_warn("map '%s': failed to create inner map: %s\n",
5354 					map->name, errstr(err));
5355 				return err;
5356 			}
5357 			map->inner_map_fd = map->inner_map->fd;
5358 		}
5359 		if (map->inner_map_fd >= 0)
5360 			create_attr.inner_map_fd = map->inner_map_fd;
5361 	}
5362 
5363 	switch (def->type) {
5364 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
5365 	case BPF_MAP_TYPE_CGROUP_ARRAY:
5366 	case BPF_MAP_TYPE_STACK_TRACE:
5367 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
5368 	case BPF_MAP_TYPE_HASH_OF_MAPS:
5369 	case BPF_MAP_TYPE_DEVMAP:
5370 	case BPF_MAP_TYPE_DEVMAP_HASH:
5371 	case BPF_MAP_TYPE_CPUMAP:
5372 	case BPF_MAP_TYPE_XSKMAP:
5373 	case BPF_MAP_TYPE_SOCKMAP:
5374 	case BPF_MAP_TYPE_SOCKHASH:
5375 	case BPF_MAP_TYPE_QUEUE:
5376 	case BPF_MAP_TYPE_STACK:
5377 	case BPF_MAP_TYPE_ARENA:
5378 		create_attr.btf_fd = 0;
5379 		create_attr.btf_key_type_id = 0;
5380 		create_attr.btf_value_type_id = 0;
5381 		map->btf_key_type_id = 0;
5382 		map->btf_value_type_id = 0;
5383 		break;
5384 	case BPF_MAP_TYPE_STRUCT_OPS:
5385 		create_attr.btf_value_type_id = 0;
5386 		break;
5387 	default:
5388 		break;
5389 	}
5390 
5391 	if (obj->gen_loader) {
5392 		bpf_gen__map_create(obj->gen_loader, def->type, map_name,
5393 				    def->key_size, def->value_size, def->max_entries,
5394 				    &create_attr, is_inner ? -1 : map - obj->maps);
5395 		/* We keep pretending we have a valid FD to pass various fd >= 0
5396 		 * checks by just keeping original placeholder FDs in place.
5397 		 * See bpf_object__add_map() comment.
5398 		 * This placeholder fd will not be used with any syscall and
5399 		 * will be reset to -1 eventually.
5400 		 */
5401 		map_fd = map->fd;
5402 	} else {
5403 		map_fd = bpf_map_create(def->type, map_name,
5404 					def->key_size, def->value_size,
5405 					def->max_entries, &create_attr);
5406 	}
5407 	if (map_fd < 0 && (create_attr.btf_key_type_id || create_attr.btf_value_type_id)) {
5408 		err = -errno;
5409 		pr_warn("Error in bpf_create_map_xattr(%s): %s. Retrying without BTF.\n",
5410 			map->name, errstr(err));
5411 		create_attr.btf_fd = 0;
5412 		create_attr.btf_key_type_id = 0;
5413 		create_attr.btf_value_type_id = 0;
5414 		map->btf_key_type_id = 0;
5415 		map->btf_value_type_id = 0;
5416 		map_fd = bpf_map_create(def->type, map_name,
5417 					def->key_size, def->value_size,
5418 					def->max_entries, &create_attr);
5419 	}
5420 
5421 	if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
5422 		if (obj->gen_loader)
5423 			map->inner_map->fd = -1;
5424 		bpf_map__destroy(map->inner_map);
5425 		zfree(&map->inner_map);
5426 	}
5427 
5428 	if (map_fd < 0)
5429 		return map_fd;
5430 
5431 	/* obj->gen_loader case, prevent reuse_fd() from closing map_fd */
5432 	if (map->fd == map_fd)
5433 		return 0;
5434 
5435 	/* Keep placeholder FD value but now point it to the BPF map object.
5436 	 * This way everything that relied on this map's FD (e.g., relocated
5437 	 * ldimm64 instructions) will stay valid and won't need adjustments.
5438 	 * map->fd stays valid but now points to what map_fd points to.
5439 	 */
5440 	return reuse_fd(map->fd, map_fd);
5441 }
5442 
5443 static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map)
5444 {
5445 	const struct bpf_map *targ_map;
5446 	unsigned int i;
5447 	int fd, err = 0;
5448 
5449 	for (i = 0; i < map->init_slots_sz; i++) {
5450 		if (!map->init_slots[i])
5451 			continue;
5452 
5453 		targ_map = map->init_slots[i];
5454 		fd = targ_map->fd;
5455 
5456 		if (obj->gen_loader) {
5457 			bpf_gen__populate_outer_map(obj->gen_loader,
5458 						    map - obj->maps, i,
5459 						    targ_map - obj->maps);
5460 		} else {
5461 			err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5462 		}
5463 		if (err) {
5464 			err = -errno;
5465 			pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %s\n",
5466 				map->name, i, targ_map->name, fd, errstr(err));
5467 			return err;
5468 		}
5469 		pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
5470 			 map->name, i, targ_map->name, fd);
5471 	}
5472 
5473 	zfree(&map->init_slots);
5474 	map->init_slots_sz = 0;
5475 
5476 	return 0;
5477 }
5478 
5479 static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map)
5480 {
5481 	const struct bpf_program *targ_prog;
5482 	unsigned int i;
5483 	int fd, err;
5484 
5485 	if (obj->gen_loader)
5486 		return -ENOTSUP;
5487 
5488 	for (i = 0; i < map->init_slots_sz; i++) {
5489 		if (!map->init_slots[i])
5490 			continue;
5491 
5492 		targ_prog = map->init_slots[i];
5493 		fd = bpf_program__fd(targ_prog);
5494 
5495 		err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5496 		if (err) {
5497 			err = -errno;
5498 			pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %s\n",
5499 				map->name, i, targ_prog->name, fd, errstr(err));
5500 			return err;
5501 		}
5502 		pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n",
5503 			 map->name, i, targ_prog->name, fd);
5504 	}
5505 
5506 	zfree(&map->init_slots);
5507 	map->init_slots_sz = 0;
5508 
5509 	return 0;
5510 }
5511 
5512 static int bpf_object_init_prog_arrays(struct bpf_object *obj)
5513 {
5514 	struct bpf_map *map;
5515 	int i, err;
5516 
5517 	for (i = 0; i < obj->nr_maps; i++) {
5518 		map = &obj->maps[i];
5519 
5520 		if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY)
5521 			continue;
5522 
5523 		err = init_prog_array_slots(obj, map);
5524 		if (err < 0)
5525 			return err;
5526 	}
5527 	return 0;
5528 }
5529 
5530 static int map_set_def_max_entries(struct bpf_map *map)
5531 {
5532 	if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) {
5533 		int nr_cpus;
5534 
5535 		nr_cpus = libbpf_num_possible_cpus();
5536 		if (nr_cpus < 0) {
5537 			pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
5538 				map->name, nr_cpus);
5539 			return nr_cpus;
5540 		}
5541 		pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
5542 		map->def.max_entries = nr_cpus;
5543 	}
5544 
5545 	return 0;
5546 }
5547 
5548 static int
5549 bpf_object__create_maps(struct bpf_object *obj)
5550 {
5551 	struct bpf_map *map;
5552 	unsigned int i, j;
5553 	int err;
5554 	bool retried;
5555 
5556 	for (i = 0; i < obj->nr_maps; i++) {
5557 		map = &obj->maps[i];
5558 
5559 		/* To support old kernels, we skip creating global data maps
5560 		 * (.rodata, .data, .kconfig, etc); later on, during program
5561 		 * loading, if we detect that at least one of the to-be-loaded
5562 		 * programs is referencing any global data map, we'll error
5563 		 * out with program name and relocation index logged.
5564 		 * This approach allows us to accommodate Clang emitting
5565 		 * unnecessary .rodata.str1.1 sections for string literals,
5566 		 * but it also allows CO-RE applications to use global
5567 		 * variables in some BPF programs, but not in others.
5568 		 * If those global variable-using programs are not loaded at
5569 		 * runtime due to bpf_program__set_autoload(prog, false),
5570 		 * bpf_object loading will succeed just fine even on old
5571 		 * kernels.
5572 		 */
5573 		if (bpf_map__is_internal(map) && !kernel_supports(obj, FEAT_GLOBAL_DATA))
5574 			map->autocreate = false;
5575 
5576 		if (!map->autocreate) {
5577 			pr_debug("map '%s': skipped auto-creating...\n", map->name);
5578 			continue;
5579 		}
5580 
5581 		err = map_set_def_max_entries(map);
5582 		if (err)
5583 			goto err_out;
5584 
5585 		retried = false;
5586 retry:
5587 		if (map->pin_path) {
5588 			err = bpf_object__reuse_map(map);
5589 			if (err) {
5590 				pr_warn("map '%s': error reusing pinned map\n",
5591 					map->name);
5592 				goto err_out;
5593 			}
5594 			if (retried && map->fd < 0) {
5595 				pr_warn("map '%s': cannot find pinned map\n",
5596 					map->name);
5597 				err = -ENOENT;
5598 				goto err_out;
5599 			}
5600 		}
5601 
5602 		if (map->reused) {
5603 			pr_debug("map '%s': skipping creation (preset fd=%d)\n",
5604 				 map->name, map->fd);
5605 		} else {
5606 			err = bpf_object__create_map(obj, map, false);
5607 			if (err)
5608 				goto err_out;
5609 
5610 			pr_debug("map '%s': created successfully, fd=%d\n",
5611 				 map->name, map->fd);
5612 
5613 			if (bpf_map__is_internal(map)) {
5614 				err = bpf_object__populate_internal_map(obj, map);
5615 				if (err < 0)
5616 					goto err_out;
5617 			} else if (map->def.type == BPF_MAP_TYPE_ARENA) {
5618 				map->mmaped = mmap((void *)(long)map->map_extra,
5619 						   bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
5620 						   map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
5621 						   map->fd, 0);
5622 				if (map->mmaped == MAP_FAILED) {
5623 					err = -errno;
5624 					map->mmaped = NULL;
5625 					pr_warn("map '%s': failed to mmap arena: %s\n",
5626 						map->name, errstr(err));
5627 					return err;
5628 				}
5629 				if (obj->arena_data) {
5630 					memcpy(map->mmaped + obj->arena_data_off, obj->arena_data,
5631 						obj->arena_data_sz);
5632 					zfree(&obj->arena_data);
5633 				}
5634 			}
5635 			if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
5636 				err = init_map_in_map_slots(obj, map);
5637 				if (err < 0)
5638 					goto err_out;
5639 			}
5640 		}
5641 
5642 		if (map->pin_path && !map->pinned) {
5643 			err = bpf_map__pin(map, NULL);
5644 			if (err) {
5645 				if (!retried && err == -EEXIST) {
5646 					retried = true;
5647 					goto retry;
5648 				}
5649 				pr_warn("map '%s': failed to auto-pin at '%s': %s\n",
5650 					map->name, map->pin_path, errstr(err));
5651 				goto err_out;
5652 			}
5653 		}
5654 	}
5655 
5656 	return 0;
5657 
5658 err_out:
5659 	pr_warn("map '%s': failed to create: %s\n", map->name, errstr(err));
5660 	pr_perm_msg(err);
5661 	for (j = 0; j < i; j++)
5662 		zclose(obj->maps[j].fd);
5663 	return err;
5664 }
5665 
5666 static bool bpf_core_is_flavor_sep(const char *s)
5667 {
5668 	/* check X___Y name pattern, where X and Y are not underscores */
5669 	return s[0] != '_' &&				      /* X */
5670 	       s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
5671 	       s[4] != '_';				      /* Y */
5672 }
5673 
5674 /* Given 'some_struct_name___with_flavor' return the length of a name prefix
5675  * before last triple underscore. Struct name part after last triple
5676  * underscore is ignored by BPF CO-RE relocation during relocation matching.
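 * For example, bpf_core_essential_name_len("task_struct___flavored") would
 * return 11, the length of "task_struct"; the "___flavored" suffix is
 * ignored during matching.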
5677  */
5678 size_t bpf_core_essential_name_len(const char *name)
5679 {
5680 	size_t n = strlen(name);
5681 	int i;
5682 
5683 	for (i = n - 5; i >= 0; i--) {
5684 		if (bpf_core_is_flavor_sep(name + i))
5685 			return i + 1;
5686 	}
5687 	return n;
5688 }
5689 
5690 void bpf_core_free_cands(struct bpf_core_cand_list *cands)
5691 {
5692 	if (!cands)
5693 		return;
5694 
5695 	free(cands->cands);
5696 	free(cands);
5697 }
5698 
5699 int bpf_core_add_cands(struct bpf_core_cand *local_cand,
5700 		       size_t local_essent_len,
5701 		       const struct btf *targ_btf,
5702 		       const char *targ_btf_name,
5703 		       int targ_start_id,
5704 		       struct bpf_core_cand_list *cands)
5705 {
5706 	struct bpf_core_cand *new_cands, *cand;
5707 	const struct btf_type *t, *local_t;
5708 	const char *targ_name, *local_name;
5709 	size_t targ_essent_len;
5710 	int n, i;
5711 
5712 	local_t = btf__type_by_id(local_cand->btf, local_cand->id);
5713 	local_name = btf__str_by_offset(local_cand->btf, local_t->name_off);
5714 
5715 	n = btf__type_cnt(targ_btf);
5716 	for (i = targ_start_id; i < n; i++) {
5717 		t = btf__type_by_id(targ_btf, i);
5718 		if (!btf_kind_core_compat(t, local_t))
5719 			continue;
5720 
5721 		targ_name = btf__name_by_offset(targ_btf, t->name_off);
5722 		if (str_is_empty(targ_name))
5723 			continue;
5724 
5725 		targ_essent_len = bpf_core_essential_name_len(targ_name);
5726 		if (targ_essent_len != local_essent_len)
5727 			continue;
5728 
5729 		if (strncmp(local_name, targ_name, local_essent_len) != 0)
5730 			continue;
5731 
5732 		pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
5733 			 local_cand->id, btf_kind_str(local_t),
5734 			 local_name, i, btf_kind_str(t), targ_name,
5735 			 targ_btf_name);
5736 		new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
5737 					      sizeof(*cands->cands));
5738 		if (!new_cands)
5739 			return -ENOMEM;
5740 
5741 		cand = &new_cands[cands->len];
5742 		cand->btf = targ_btf;
5743 		cand->id = i;
5744 
5745 		cands->cands = new_cands;
5746 		cands->len++;
5747 	}
5748 	return 0;
5749 }
5750 
5751 static int load_module_btfs(struct bpf_object *obj)
5752 {
5753 	struct bpf_btf_info info;
5754 	struct module_btf *mod_btf;
5755 	struct btf *btf;
5756 	char name[64];
5757 	__u32 id = 0, len;
5758 	int err, fd;
5759 
5760 	if (obj->btf_modules_loaded)
5761 		return 0;
5762 
5763 	if (obj->gen_loader)
5764 		return 0;
5765 
5766 	/* don't do this again, even if we find no module BTFs */
5767 	obj->btf_modules_loaded = true;
5768 
5769 	/* kernel too old to support module BTFs */
5770 	if (!kernel_supports(obj, FEAT_MODULE_BTF))
5771 		return 0;
5772 
5773 	while (true) {
5774 		err = bpf_btf_get_next_id(id, &id);
5775 		if (err && errno == ENOENT)
5776 			return 0;
5777 		if (err && errno == EPERM) {
5778 			pr_debug("skipping module BTFs loading, missing privileges\n");
5779 			return 0;
5780 		}
5781 		if (err) {
5782 			err = -errno;
5783 			pr_warn("failed to iterate BTF objects: %s\n", errstr(err));
5784 			return err;
5785 		}
5786 
5787 		fd = bpf_btf_get_fd_by_id(id);
5788 		if (fd < 0) {
5789 			if (errno == ENOENT)
5790 				continue; /* expected race: BTF was unloaded */
5791 			err = -errno;
5792 			pr_warn("failed to get BTF object #%d FD: %s\n", id, errstr(err));
5793 			return err;
5794 		}
5795 
5796 		len = sizeof(info);
5797 		memset(&info, 0, sizeof(info));
5798 		info.name = ptr_to_u64(name);
5799 		info.name_len = sizeof(name);
5800 
5801 		err = bpf_btf_get_info_by_fd(fd, &info, &len);
5802 		if (err) {
5803 			err = -errno;
5804 			pr_warn("failed to get BTF object #%d info: %s\n", id, errstr(err));
5805 			goto err_out;
5806 		}
5807 
5808 		/* ignore non-module BTFs */
5809 		if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
5810 			close(fd);
5811 			continue;
5812 		}
5813 
5814 		btf = btf_get_from_fd(fd, obj->btf_vmlinux);
5815 		err = libbpf_get_error(btf);
5816 		if (err) {
5817 			pr_warn("failed to load module [%s]'s BTF object #%d: %s\n",
5818 				name, id, errstr(err));
5819 			goto err_out;
5820 		}
5821 
5822 		err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
5823 					sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
5824 		if (err)
5825 			goto err_out;
5826 
5827 		mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
5828 
5829 		mod_btf->btf = btf;
5830 		mod_btf->id = id;
5831 		mod_btf->fd = fd;
5832 		mod_btf->name = strdup(name);
5833 		if (!mod_btf->name) {
5834 			err = -ENOMEM;
5835 			goto err_out;
5836 		}
5837 		continue;
5838 
5839 err_out:
5840 		close(fd);
5841 		return err;
5842 	}
5843 
5844 	return 0;
5845 }
5846 
5847 static struct bpf_core_cand_list *
5848 bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
5849 {
5850 	struct bpf_core_cand local_cand = {};
5851 	struct bpf_core_cand_list *cands;
5852 	const struct btf *main_btf;
5853 	const struct btf_type *local_t;
5854 	const char *local_name;
5855 	size_t local_essent_len;
5856 	int err, i;
5857 
5858 	local_cand.btf = local_btf;
5859 	local_cand.id = local_type_id;
5860 	local_t = btf__type_by_id(local_btf, local_type_id);
5861 	if (!local_t)
5862 		return ERR_PTR(-EINVAL);
5863 
5864 	local_name = btf__name_by_offset(local_btf, local_t->name_off);
5865 	if (str_is_empty(local_name))
5866 		return ERR_PTR(-EINVAL);
5867 	local_essent_len = bpf_core_essential_name_len(local_name);
5868 
5869 	cands = calloc(1, sizeof(*cands));
5870 	if (!cands)
5871 		return ERR_PTR(-ENOMEM);
5872 
5873 	/* Attempt to find target candidates in vmlinux BTF first */
5874 	main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
5875 	err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
5876 	if (err)
5877 		goto err_out;
5878 
5879 	/* if vmlinux BTF has any candidate, don't go for module BTFs */
5880 	if (cands->len)
5881 		return cands;
5882 
5883 	/* if vmlinux BTF was overridden, don't attempt to load module BTFs */
5884 	if (obj->btf_vmlinux_override)
5885 		return cands;
5886 
5887 	/* now look through module BTFs, trying to still find candidates */
5888 	err = load_module_btfs(obj);
5889 	if (err)
5890 		goto err_out;
5891 
5892 	for (i = 0; i < obj->btf_module_cnt; i++) {
5893 		err = bpf_core_add_cands(&local_cand, local_essent_len,
5894 					 obj->btf_modules[i].btf,
5895 					 obj->btf_modules[i].name,
5896 					 btf__type_cnt(obj->btf_vmlinux),
5897 					 cands);
5898 		if (err)
5899 			goto err_out;
5900 	}
5901 
5902 	return cands;
5903 err_out:
5904 	bpf_core_free_cands(cands);
5905 	return ERR_PTR(err);
5906 }
5907 
5908 /* Check local and target types for compatibility. This check is used for
5909  * type-based CO-RE relocations and follows slightly different rules than
5910  * field-based relocations. This function assumes that root types were already
5911  * checked for name match. Beyond that initial root-level name check, names
5912  * are completely ignored. Compatibility rules are as follows:
5913  *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
5914  *     kind should match for local and target types (i.e., STRUCT is not
5915  *     compatible with UNION);
5916  *   - for ENUMs, the size is ignored;
5917  *   - for INT, size and signedness are ignored;
5918  *   - for ARRAY, dimensionality is ignored, element types are checked for
5919  *     compatibility recursively;
5920  *   - CONST/VOLATILE/RESTRICT modifiers are ignored;
5921  *   - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
5922  *   - FUNC_PROTOs are compatible if they have compatible signature: same
5923  *     number of input args and compatible return and argument types.
5924  * These rules are not set in stone and probably will be adjusted as we get
5925  * more experience with using BPF CO-RE relocations.
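 * As an illustration of the above: 'struct S { int x; }' and
 * 'struct S { long x; int y; }' are compatible (both are STRUCTs; member
 * layout doesn't matter for type-based relocations), whereas 'struct S'
 * and 'union S' are not, since their kinds differ.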
5926  */
5927 int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
5928 			      const struct btf *targ_btf, __u32 targ_id)
5929 {
5930 	return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, 32);
5931 }
5932 
5933 int bpf_core_types_match(const struct btf *local_btf, __u32 local_id,
5934 			 const struct btf *targ_btf, __u32 targ_id)
5935 {
5936 	return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false, 32);
5937 }
5938 
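/* cand_cache keys are local BTF type IDs, so an identity hash and plain
 * equality comparison are sufficient.
 */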
5939 static size_t bpf_core_hash_fn(const long key, void *ctx)
5940 {
5941 	return key;
5942 }
5943 
5944 static bool bpf_core_equal_fn(const long k1, const long k2, void *ctx)
5945 {
5946 	return k1 == k2;
5947 }
5948 
5949 static int record_relo_core(struct bpf_program *prog,
5950 			    const struct bpf_core_relo *core_relo, int insn_idx)
5951 {
5952 	struct reloc_desc *relos, *relo;
5953 
5954 	relos = libbpf_reallocarray(prog->reloc_desc,
5955 				    prog->nr_reloc + 1, sizeof(*relos));
5956 	if (!relos)
5957 		return -ENOMEM;
5958 	relo = &relos[prog->nr_reloc];
5959 	relo->type = RELO_CORE;
5960 	relo->insn_idx = insn_idx;
5961 	relo->core_relo = core_relo;
5962 	prog->reloc_desc = relos;
5963 	prog->nr_reloc++;
5964 	return 0;
5965 }
5966 
5967 static const struct bpf_core_relo *find_relo_core(struct bpf_program *prog, int insn_idx)
5968 {
5969 	struct reloc_desc *relo;
5970 	int i;
5971 
5972 	for (i = 0; i < prog->nr_reloc; i++) {
5973 		relo = &prog->reloc_desc[i];
5974 		if (relo->type != RELO_CORE || relo->insn_idx != insn_idx)
5975 			continue;
5976 
5977 		return relo->core_relo;
5978 	}
5979 
5980 	return NULL;
5981 }
5982 
5983 static int bpf_core_resolve_relo(struct bpf_program *prog,
5984 				 const struct bpf_core_relo *relo,
5985 				 int relo_idx,
5986 				 const struct btf *local_btf,
5987 				 struct hashmap *cand_cache,
5988 				 struct bpf_core_relo_res *targ_res)
5989 {
5990 	struct bpf_core_spec specs_scratch[3] = {};
5991 	struct bpf_core_cand_list *cands = NULL;
5992 	const char *prog_name = prog->name;
5993 	const struct btf_type *local_type;
5994 	const char *local_name;
5995 	__u32 local_id = relo->type_id;
5996 	int err;
5997 
5998 	local_type = btf__type_by_id(local_btf, local_id);
5999 	if (!local_type)
6000 		return -EINVAL;
6001 
6002 	local_name = btf__name_by_offset(local_btf, local_type->name_off);
6003 	if (!local_name)
6004 		return -EINVAL;
6005 
6006 	if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
6007 	    !hashmap__find(cand_cache, local_id, &cands)) {
6008 		cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
6009 		if (IS_ERR(cands)) {
6010 			pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
6011 				prog_name, relo_idx, local_id, btf_kind_str(local_type),
6012 				local_name, PTR_ERR(cands));
6013 			return PTR_ERR(cands);
6014 		}
6015 		err = hashmap__set(cand_cache, local_id, cands, NULL, NULL);
6016 		if (err) {
6017 			bpf_core_free_cands(cands);
6018 			return err;
6019 		}
6020 	}
6021 
6022 	return bpf_core_calc_relo_insn(prog_name, relo, relo_idx, local_btf, cands, specs_scratch,
6023 				       targ_res);
6024 }
6025 
6026 static int
6027 bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
6028 {
6029 	const struct btf_ext_info_sec *sec;
6030 	struct bpf_core_relo_res targ_res;
6031 	const struct bpf_core_relo *rec;
6032 	const struct btf_ext_info *seg;
6033 	struct hashmap_entry *entry;
6034 	struct hashmap *cand_cache = NULL;
6035 	struct bpf_program *prog;
6036 	struct bpf_insn *insn;
6037 	const char *sec_name;
6038 	int i, err = 0, insn_idx, sec_idx, sec_num;
6039 
6040 	if (obj->btf_ext->core_relo_info.len == 0)
6041 		return 0;
6042 
6043 	if (targ_btf_path) {
6044 		obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
6045 		err = libbpf_get_error(obj->btf_vmlinux_override);
6046 		if (err) {
6047 			pr_warn("failed to parse target BTF: %s\n", errstr(err));
6048 			return err;
6049 		}
6050 	}
6051 
6052 	cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
6053 	if (IS_ERR(cand_cache)) {
6054 		err = PTR_ERR(cand_cache);
6055 		goto out;
6056 	}
6057 
6058 	seg = &obj->btf_ext->core_relo_info;
6059 	sec_num = 0;
6060 	for_each_btf_ext_sec(seg, sec) {
6061 		sec_idx = seg->sec_idxs[sec_num];
6062 		sec_num++;
6063 
6064 		sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
6065 		if (str_is_empty(sec_name)) {
6066 			err = -EINVAL;
6067 			goto out;
6068 		}
6069 
6070 		pr_debug("sec '%s': found %d CO-RE relocations\n", sec_name, sec->num_info);
6071 
6072 		for_each_btf_ext_rec(seg, sec, i, rec) {
6073 			if (rec->insn_off % BPF_INSN_SZ)
6074 				return -EINVAL;
6075 			insn_idx = rec->insn_off / BPF_INSN_SZ;
6076 			prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
6077 			if (!prog) {
6078 				/* When __weak subprog is "overridden" by another instance
6079 				 * of the subprog from a different object file, linker still
6080 				 * appends all the .BTF.ext info that used to belong to that
6081 				 * eliminated subprogram.
6082 				 * This is similar to what x86-64 linker does for relocations.
6083 				 * So ignore such relocations, just like we ignore
6084 				 * subprog instructions when discovering subprograms.
6085 				 */
6086 				pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subprogram\n",
6087 					 sec_name, i, insn_idx);
6088 				continue;
6089 			}
6090 			/* no need to apply CO-RE relocation if the program is
6091 			 * not going to be loaded
6092 			 */
6093 			if (!prog->autoload)
6094 				continue;
6095 
6096 			/* adjust insn_idx from section frame of reference to the local
6097 			 * program's frame of reference; (sub-)program code is not yet
6098 			 * relocated, so it's enough to just subtract in-section offset
6099 			 */
6100 			insn_idx = insn_idx - prog->sec_insn_off;
6101 			if (insn_idx >= prog->insns_cnt)
6102 				return -EINVAL;
6103 			insn = &prog->insns[insn_idx];
6104 
6105 			err = record_relo_core(prog, rec, insn_idx);
6106 			if (err) {
6107 				pr_warn("prog '%s': relo #%d: failed to record relocation: %s\n",
6108 					prog->name, i, errstr(err));
6109 				goto out;
6110 			}
6111 
6112 			if (prog->obj->gen_loader)
6113 				continue;
6114 
6115 			err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res);
6116 			if (err) {
6117 				pr_warn("prog '%s': relo #%d: failed to relocate: %s\n",
6118 					prog->name, i, errstr(err));
6119 				goto out;
6120 			}
6121 
6122 			err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res);
6123 			if (err) {
6124 				pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %s\n",
6125 					prog->name, i, insn_idx, errstr(err));
6126 				goto out;
6127 			}
6128 		}
6129 	}
6130 
6131 out:
6132 	/* obj->btf_vmlinux and module BTFs are freed after object load */
6133 	btf__free(obj->btf_vmlinux_override);
6134 	obj->btf_vmlinux_override = NULL;
6135 
6136 	if (!IS_ERR_OR_NULL(cand_cache)) {
6137 		hashmap__for_each_entry(cand_cache, entry, i) {
6138 			bpf_core_free_cands(entry->pvalue);
6139 		}
6140 		hashmap__free(cand_cache);
6141 	}
6142 	return err;
6143 }
6144 
6145 /* base map load ldimm64 special constant, used also for log fixup logic */
6146 #define POISON_LDIMM64_MAP_BASE 2001000000
6147 #define POISON_LDIMM64_MAP_PFX "200100"
6148 
6149 static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx,
6150 			       int insn_idx, struct bpf_insn *insn,
6151 			       int map_idx, const struct bpf_map *map)
6152 {
6153 	int i;
6154 
6155 	pr_debug("prog '%s': relo #%d: poisoning insn #%d that loads map #%d '%s'\n",
6156 		 prog->name, relo_idx, insn_idx, map_idx, map->name);
6157 
6158 	/* we turn single ldimm64 into two identical invalid calls */
6159 	for (i = 0; i < 2; i++) {
6160 		insn->code = BPF_JMP | BPF_CALL;
6161 		insn->dst_reg = 0;
6162 		insn->src_reg = 0;
6163 		insn->off = 0;
6164 		/* if this instruction is reachable (not dead code),
6165 		 * verifier will complain with something like:
6166 		 * invalid func unknown#2001000123
6167 		 * where lower 123 is map index into obj->maps[] array
6168 		 */
6169 		insn->imm = POISON_LDIMM64_MAP_BASE + map_idx;
6170 
6171 		insn++;
6172 	}
6173 }
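
/* For example, if map #5 wasn't auto-created, an ldimm64 loading it becomes
 * two back-to-back invalid calls; when reachable, the verifier reports
 * "invalid func unknown#2001000005", and the log fixup logic mentioned above
 * can recover the map index (5) from the lower digits.
 */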
6174 
6175 /* unresolved kfunc call special constant, used also for log fixup logic */
6176 #define POISON_CALL_KFUNC_BASE 2002000000
6177 #define POISON_CALL_KFUNC_PFX "2002"
6178 
6179 static void poison_kfunc_call(struct bpf_program *prog, int relo_idx,
6180 			      int insn_idx, struct bpf_insn *insn,
6181 			      int ext_idx, const struct extern_desc *ext)
6182 {
6183 	pr_debug("prog '%s': relo #%d: poisoning insn #%d that calls kfunc '%s'\n",
6184 		 prog->name, relo_idx, insn_idx, ext->name);
6185 
6186 	/* we turn kfunc call into invalid helper call with identifiable constant */
6187 	insn->code = BPF_JMP | BPF_CALL;
6188 	insn->dst_reg = 0;
6189 	insn->src_reg = 0;
6190 	insn->off = 0;
6191 	/* if this instruction is reachable (not dead code),
6192 	 * verifier will complain with something like:
6193 	 * invalid func unknown#2002000123
6194 	 * where lower 123 is extern index into obj->externs[] array
6195 	 */
6196 	insn->imm = POISON_CALL_KFUNC_BASE + ext_idx;
6197 }
6198 
6199 static int find_jt_map(struct bpf_object *obj, struct bpf_program *prog, unsigned int sym_off)
6200 {
6201 	size_t i;
6202 
6203 	for (i = 0; i < obj->jumptable_map_cnt; i++) {
6204 		/*
6205 		 * It might happen that the same offset is used for two
6206 		 * different programs (as jump tables can be identical).
6207 		 * However, different maps should be created for different programs.
6208 		 */
6209 		if (obj->jumptable_maps[i].sym_off == sym_off &&
6210 		    obj->jumptable_maps[i].prog == prog)
6211 			return obj->jumptable_maps[i].fd;
6212 	}
6213 
6214 	return -ENOENT;
6215 }
6216 
6217 static int add_jt_map(struct bpf_object *obj, struct bpf_program *prog, unsigned int sym_off, int map_fd)
6218 {
6219 	size_t cnt = obj->jumptable_map_cnt;
6220 	size_t size = sizeof(obj->jumptable_maps[0]);
6221 	void *tmp;
6222 
6223 	tmp = libbpf_reallocarray(obj->jumptable_maps, cnt + 1, size);
6224 	if (!tmp)
6225 		return -ENOMEM;
6226 
6227 	obj->jumptable_maps = tmp;
6228 	obj->jumptable_maps[cnt].prog = prog;
6229 	obj->jumptable_maps[cnt].sym_off = sym_off;
6230 	obj->jumptable_maps[cnt].fd = map_fd;
6231 	obj->jumptable_map_cnt++;
6232 
6233 	return 0;
6234 }
6235 
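/* Return the index (into prog->subprogs) of the subprog that contains the
 * given main-prog-relative instruction, i.e. the last subprog with
 * sub_insn_off <= insn_idx, or -1 if the insn belongs to the main program
 * body. This relies on subprogs[] being ordered by increasing sub_insn_off,
 * which holds because entries are recorded in the order subprogs are
 * appended to the main program.
 */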
6236 static int find_subprog_idx(struct bpf_program *prog, int insn_idx)
6237 {
6238 	int i;
6239 
6240 	for (i = prog->subprog_cnt - 1; i >= 0; i--) {
6241 		if (insn_idx >= prog->subprogs[i].sub_insn_off)
6242 			return i;
6243 	}
6244 
6245 	return -1;
6246 }
6247 
6248 static int create_jt_map(struct bpf_object *obj, struct bpf_program *prog, struct reloc_desc *relo)
6249 {
6250 	const __u32 jt_entry_size = 8;
6251 	unsigned int sym_off = relo->sym_off;
6252 	int jt_size = relo->sym_size;
6253 	__u32 max_entries = jt_size / jt_entry_size;
6254 	__u32 value_size = sizeof(struct bpf_insn_array_value);
6255 	struct bpf_insn_array_value val = {};
6256 	int subprog_idx;
6257 	int map_fd, err;
6258 	__u64 insn_off;
6259 	__u64 *jt;
6260 	__u32 i;
6261 
6262 	map_fd = find_jt_map(obj, prog, sym_off);
6263 	if (map_fd >= 0)
6264 		return map_fd;
6265 
6266 	if (sym_off % jt_entry_size) {
6267 		pr_warn("map '.jumptables': jumptable start %u should be multiple of %u\n",
6268 			sym_off, jt_entry_size);
6269 		return -EINVAL;
6270 	}
6271 
6272 	if (jt_size % jt_entry_size) {
6273 		pr_warn("map '.jumptables': jumptable size %d should be multiple of %u\n",
6274 			jt_size, jt_entry_size);
6275 		return -EINVAL;
6276 	}
6277 
6278 	map_fd = bpf_map_create(BPF_MAP_TYPE_INSN_ARRAY, ".jumptables",
6279 				4, value_size, max_entries, NULL);
6280 	if (map_fd < 0)
6281 		return map_fd;
6282 
6283 	if (!obj->jumptables_data) {
6284 		pr_warn("map '.jumptables': ELF file is missing jump table data\n");
6285 		err = -EINVAL;
6286 		goto err_close;
6287 	}
6288 	if (sym_off + jt_size > obj->jumptables_data_sz) {
6289 		pr_warn("map '.jumptables': jumptables_data size is %zd, trying to access %d\n",
6290 			obj->jumptables_data_sz, sym_off + jt_size);
6291 		err = -EINVAL;
6292 		goto err_close;
6293 	}
6294 
6295 	subprog_idx = -1; /* main program */
6296 	if (relo->insn_idx < 0 || relo->insn_idx >= prog->insns_cnt) {
6297 		pr_warn("map '.jumptables': invalid instruction index %d\n", relo->insn_idx);
6298 		err = -EINVAL;
6299 		goto err_close;
6300 	}
6301 	if (prog->subprogs)
6302 		subprog_idx = find_subprog_idx(prog, relo->insn_idx);
6303 
6304 	jt = (__u64 *)(obj->jumptables_data + sym_off);
6305 	for (i = 0; i < max_entries; i++) {
6306 		/*
6307 		 * The offset should be made relative to the beginning of
6308 		 * the main function, not the subfunction.
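		 * E.g., a record holding byte offset 152 denotes insn #19 in
		 * the section; if the containing subprog starts at
		 * sec_insn_off 10 and was appended at sub_insn_off 100, the
		 * stored value becomes 19 - 10 + 100 = 109.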
6309 		 */
6310 		insn_off = jt[i] / sizeof(struct bpf_insn);
6311 		if (subprog_idx >= 0) {
6312 			insn_off -= prog->subprogs[subprog_idx].sec_insn_off;
6313 			insn_off += prog->subprogs[subprog_idx].sub_insn_off;
6314 		} else {
6315 			insn_off -= prog->sec_insn_off;
6316 		}
6317 
6318 		/*
6319 		 * LLVM-generated jump tables contain u64 records, but the
6320 		 * values should fit in u32.
6321 		 */
6322 		if (insn_off > UINT32_MAX) {
6323 			pr_warn("map '.jumptables': invalid jump table value 0x%llx at offset %u\n",
6324 				(long long)jt[i], sym_off + i * jt_entry_size);
6325 			err = -EINVAL;
6326 			goto err_close;
6327 		}
6328 
6329 		val.orig_off = insn_off;
6330 		err = bpf_map_update_elem(map_fd, &i, &val, 0);
6331 		if (err)
6332 			goto err_close;
6333 	}
6334 
6335 	err = bpf_map_freeze(map_fd);
6336 	if (err)
6337 		goto err_close;
6338 
6339 	err = add_jt_map(obj, prog, sym_off, map_fd);
6340 	if (err)
6341 		goto err_close;
6342 
6343 	return map_fd;
6344 
6345 err_close:
6346 	close(map_fd);
6347 	return err;
6348 }
6349 
6350 /* Relocate data references within program code:
6351  *  - map references;
6352  *  - global variable references;
6353  *  - extern references.
6354  */
6355 static int
6356 bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
6357 {
6358 	int i;
6359 
6360 	for (i = 0; i < prog->nr_reloc; i++) {
6361 		struct reloc_desc *relo = &prog->reloc_desc[i];
6362 		struct bpf_insn *insn = &prog->insns[relo->insn_idx];
6363 		const struct bpf_map *map;
6364 		struct extern_desc *ext;
6365 
6366 		switch (relo->type) {
6367 		case RELO_LD64:
6368 			map = &obj->maps[relo->map_idx];
6369 			if (obj->gen_loader) {
6370 				insn[0].src_reg = BPF_PSEUDO_MAP_IDX;
6371 				insn[0].imm = relo->map_idx;
6372 			} else if (map->autocreate) {
6373 				insn[0].src_reg = BPF_PSEUDO_MAP_FD;
6374 				insn[0].imm = map->fd;
6375 			} else {
6376 				poison_map_ldimm64(prog, i, relo->insn_idx, insn,
6377 						   relo->map_idx, map);
6378 			}
6379 			break;
6380 		case RELO_DATA:
6381 			map = &obj->maps[relo->map_idx];
6382 			insn[1].imm = insn[0].imm + relo->sym_off;
6383 
6384 			if (relo->map_idx == obj->arena_map_idx)
6385 				insn[1].imm += obj->arena_data_off;
6386 
6387 			if (obj->gen_loader) {
6388 				insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
6389 				insn[0].imm = relo->map_idx;
6390 			} else if (map->autocreate) {
6391 				insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
6392 				insn[0].imm = map->fd;
6393 			} else {
6394 				poison_map_ldimm64(prog, i, relo->insn_idx, insn,
6395 						   relo->map_idx, map);
6396 			}
6397 			break;
6398 		case RELO_EXTERN_LD64:
6399 			ext = &obj->externs[relo->ext_idx];
6400 			if (ext->type == EXT_KCFG) {
6401 				if (obj->gen_loader) {
6402 					insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
6403 					insn[0].imm = obj->kconfig_map_idx;
6404 				} else {
6405 					insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
6406 					insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
6407 				}
6408 				insn[1].imm = ext->kcfg.data_off;
6409 			} else /* EXT_KSYM */ {
6410 				if (ext->ksym.type_id && ext->is_set) { /* typed ksyms */
6411 					insn[0].src_reg = BPF_PSEUDO_BTF_ID;
6412 					insn[0].imm = ext->ksym.kernel_btf_id;
6413 					insn[1].imm = ext->ksym.kernel_btf_obj_fd;
6414 				} else { /* typeless ksyms or unresolved typed ksyms */
6415 					insn[0].imm = (__u32)ext->ksym.addr;
6416 					insn[1].imm = ext->ksym.addr >> 32;
6417 				}
6418 			}
6419 			break;
6420 		case RELO_EXTERN_CALL:
6421 			ext = &obj->externs[relo->ext_idx];
6422 			insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
6423 			if (ext->is_set) {
6424 				insn[0].imm = ext->ksym.kernel_btf_id;
6425 				insn[0].off = ext->ksym.btf_fd_idx;
6426 			} else { /* unresolved weak kfunc call */
6427 				poison_kfunc_call(prog, i, relo->insn_idx, insn,
6428 						  relo->ext_idx, ext);
6429 			}
6430 			break;
6431 		case RELO_SUBPROG_ADDR:
6432 			if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
6433 				pr_warn("prog '%s': relo #%d: bad insn\n",
6434 					prog->name, i);
6435 				return -EINVAL;
6436 			}
6437 			/* handled already */
6438 			break;
6439 		case RELO_CALL:
6440 			/* handled already */
6441 			break;
6442 		case RELO_CORE:
6443 			/* will be handled by bpf_program_record_relos() */
6444 			break;
6445 		case RELO_INSN_ARRAY: {
6446 			int map_fd;
6447 
6448 			map_fd = create_jt_map(obj, prog, relo);
6449 			if (map_fd < 0) {
6450 				pr_warn("prog '%s': relo #%d: can't create jump table: sym_off %u\n",
6451 					prog->name, i, relo->sym_off);
6452 				return map_fd;
6453 			}
6454 			insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
6455 			insn->imm = map_fd;
6456 			insn->off = 0;
6457 		}
6458 			break;
6459 		default:
6460 			pr_warn("prog '%s': relo #%d: bad relo type %d\n",
6461 				prog->name, i, relo->type);
6462 			return -EINVAL;
6463 		}
6464 	}
6465 
6466 	return 0;
6467 }
6468 
6469 static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
6470 				    const struct bpf_program *prog,
6471 				    const struct btf_ext_info *ext_info,
6472 				    void **prog_info, __u32 *prog_rec_cnt,
6473 				    __u32 *prog_rec_sz)
6474 {
6475 	void *copy_start = NULL, *copy_end = NULL;
6476 	void *rec, *rec_end, *new_prog_info;
6477 	const struct btf_ext_info_sec *sec;
6478 	size_t old_sz, new_sz;
6479 	int i, sec_num, sec_idx, off_adj;
6480 
6481 	sec_num = 0;
6482 	for_each_btf_ext_sec(ext_info, sec) {
6483 		sec_idx = ext_info->sec_idxs[sec_num];
6484 		sec_num++;
6485 		if (prog->sec_idx != sec_idx)
6486 			continue;
6487 
6488 		for_each_btf_ext_rec(ext_info, sec, i, rec) {
6489 			__u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
6490 
6491 			if (insn_off < prog->sec_insn_off)
6492 				continue;
6493 			if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
6494 				break;
6495 
6496 			if (!copy_start)
6497 				copy_start = rec;
6498 			copy_end = rec + ext_info->rec_size;
6499 		}
6500 
6501 		if (!copy_start)
6502 			return -ENOENT;
6503 
6504 		/* append func/line info of a given (sub-)program to the main
6505 		 * program func/line info
6506 		 */
6507 		old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
6508 		new_sz = old_sz + (copy_end - copy_start);
6509 		new_prog_info = realloc(*prog_info, new_sz);
6510 		if (!new_prog_info)
6511 			return -ENOMEM;
6512 		*prog_info = new_prog_info;
6513 		*prog_rec_cnt = new_sz / ext_info->rec_size;
6514 		memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
6515 
6516 		/* Kernel instruction offsets are in units of 8-byte
6517 		 * instructions, while .BTF.ext instruction offsets generated
6518 		 * by Clang are in units of bytes. So convert Clang offsets
6519 		 * into kernel offsets and adjust offset according to program
6520 		 * relocated position.
6521 		 */
6522 		off_adj = prog->sub_insn_off - prog->sec_insn_off;
6523 		rec = new_prog_info + old_sz;
6524 		rec_end = new_prog_info + new_sz;
6525 		for (; rec < rec_end; rec += ext_info->rec_size) {
6526 			__u32 *insn_off = rec;
6527 
6528 			*insn_off = *insn_off / BPF_INSN_SZ + off_adj;
6529 		}
6530 		*prog_rec_sz = ext_info->rec_size;
6531 		return 0;
6532 	}
6533 
6534 	return -ENOENT;
6535 }
6536 
6537 static int
6538 reloc_prog_func_and_line_info(const struct bpf_object *obj,
6539 			      struct bpf_program *main_prog,
6540 			      const struct bpf_program *prog)
6541 {
6542 	int err;
6543 
6544 	/* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
6545 	 * support func/line info
6546 	 */
6547 	if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
6548 		return 0;
6549 
6550 	/* only attempt func info relocation if main program's func_info
6551 	 * relocation was successful
6552 	 */
6553 	if (main_prog != prog && !main_prog->func_info)
6554 		goto line_info;
6555 
6556 	err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
6557 				       &main_prog->func_info,
6558 				       &main_prog->func_info_cnt,
6559 				       &main_prog->func_info_rec_size);
6560 	if (err) {
6561 		if (err != -ENOENT) {
6562 			pr_warn("prog '%s': error relocating .BTF.ext function info: %s\n",
6563 				prog->name, errstr(err));
6564 			return err;
6565 		}
6566 		if (main_prog->func_info) {
6567 			/*
6568 			 * Some info has already been found, but there was a
6569 			 * problem with the last btf_ext reloc. We must error out.
6570 			 */
6571 			pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
6572 			return err;
6573 		}
6574 		/* Failed to load the very first info. Ignore the rest. */
6575 		pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
6576 			prog->name);
6577 	}
6578 
6579 line_info:
6580 	/* don't relocate line info if main program's relocation failed */
6581 	if (main_prog != prog && !main_prog->line_info)
6582 		return 0;
6583 
6584 	err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
6585 				       &main_prog->line_info,
6586 				       &main_prog->line_info_cnt,
6587 				       &main_prog->line_info_rec_size);
6588 	if (err) {
6589 		if (err != -ENOENT) {
6590 			pr_warn("prog '%s': error relocating .BTF.ext line info: %s\n",
6591 				prog->name, errstr(err));
6592 			return err;
6593 		}
6594 		if (main_prog->line_info) {
6595 			/*
6596 			 * Some info has already been found, but there was a
6597 			 * problem with the last btf_ext reloc. We must error out.
6598 			 */
6599 			pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
6600 			return err;
6601 		}
6602 		/* Failed to load the very first info. Ignore the rest. */
6603 		pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
6604 			prog->name);
6605 	}
6606 	return 0;
6607 }
6608 
6609 static int cmp_relo_by_insn_idx(const void *key, const void *elem)
6610 {
6611 	size_t insn_idx = *(const size_t *)key;
6612 	const struct reloc_desc *relo = elem;
6613 
6614 	if (insn_idx == relo->insn_idx)
6615 		return 0;
6616 	return insn_idx < relo->insn_idx ? -1 : 1;
6617 }
6618 
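/* Binary-search prog's relocation descriptors, kept sorted by insn_idx via
 * bpf_object__sort_relos(), for a relocation against the given instruction.
 */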
6619 static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
6620 {
6621 	if (!prog->nr_reloc)
6622 		return NULL;
6623 	return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
6624 		       sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
6625 }
6626 
6627 static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog)
6628 {
6629 	int new_cnt = main_prog->nr_reloc + subprog->nr_reloc;
6630 	struct reloc_desc *relos;
6631 	int i;
6632 
6633 	if (main_prog == subprog)
6634 		return 0;
6635 	relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
6636 	/* if new count is zero, reallocarray can return a valid NULL result;
6637 	 * in this case the previous pointer will be freed, so we *have to*
6638 	 * reassign old pointer to the new value (even if it's NULL)
6639 	 */
6640 	if (!relos && new_cnt)
6641 		return -ENOMEM;
6642 	if (subprog->nr_reloc)
6643 		memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
6644 		       sizeof(*relos) * subprog->nr_reloc);
6645 
6646 	for (i = main_prog->nr_reloc; i < new_cnt; i++)
6647 		relos[i].insn_idx += subprog->sub_insn_off;
6648 	/* After insn_idx adjustment the 'relos' array is still sorted
6649 	 * by insn_idx and doesn't break bsearch.
6650 	 */
6651 	main_prog->reloc_desc = relos;
6652 	main_prog->nr_reloc = new_cnt;
6653 	return 0;
6654 }
6655 
6656 static int save_subprog_offsets(struct bpf_program *main_prog, struct bpf_program *subprog)
6657 {
6658 	size_t size = sizeof(main_prog->subprogs[0]);
6659 	int cnt = main_prog->subprog_cnt;
6660 	void *tmp;
6661 
6662 	tmp = libbpf_reallocarray(main_prog->subprogs, cnt + 1, size);
6663 	if (!tmp)
6664 		return -ENOMEM;
6665 
6666 	main_prog->subprogs = tmp;
6667 	main_prog->subprogs[cnt].sec_insn_off = subprog->sec_insn_off;
6668 	main_prog->subprogs[cnt].sub_insn_off = subprog->sub_insn_off;
6669 	main_prog->subprog_cnt++;
6670 
6671 	return 0;
6672 }
6673 
6674 static int
6675 bpf_object__append_subprog_code(struct bpf_object *obj, struct bpf_program *main_prog,
6676 				struct bpf_program *subprog)
6677 {
6678 	struct bpf_insn *insns;
6679 	size_t new_cnt;
6680 	int err;
6681 
6682 	subprog->sub_insn_off = main_prog->insns_cnt;
6683 
6684 	new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
6685 	insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
6686 	if (!insns) {
6687 		pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
6688 		return -ENOMEM;
6689 	}
6690 	main_prog->insns = insns;
6691 	main_prog->insns_cnt = new_cnt;
6692 
6693 	memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
6694 	       subprog->insns_cnt * sizeof(*insns));
6695 
6696 	pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
6697 		 main_prog->name, subprog->insns_cnt, subprog->name);
6698 
6699 	/* The subprog insns are now appended. Append its relos too. */
6700 	err = append_subprog_relos(main_prog, subprog);
6701 	if (err)
6702 		return err;
6703 
6704 	err = save_subprog_offsets(main_prog, subprog);
6705 	if (err) {
6706 		pr_warn("prog '%s': failed to add subprog offsets: %s\n",
6707 			main_prog->name, errstr(err));
6708 		return err;
6709 	}
6710 
6711 	return 0;
6712 }
6713 
6714 static int
6715 bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
6716 		       struct bpf_program *prog)
6717 {
6718 	size_t sub_insn_idx, insn_idx;
6719 	struct bpf_program *subprog;
6720 	struct reloc_desc *relo;
6721 	struct bpf_insn *insn;
6722 	int err;
6723 
6724 	err = reloc_prog_func_and_line_info(obj, main_prog, prog);
6725 	if (err)
6726 		return err;
6727 
6728 	for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
6729 		insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6730 		if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn))
6731 			continue;
6732 
6733 		relo = find_prog_insn_relo(prog, insn_idx);
6734 		if (relo && relo->type == RELO_EXTERN_CALL)
6735 			/* kfunc relocations will be handled later
6736 			 * in bpf_object__relocate_data()
6737 			 */
6738 			continue;
6739 		if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
6740 			pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
6741 				prog->name, insn_idx, relo->type);
6742 			return -LIBBPF_ERRNO__RELOC;
6743 		}
6744 		if (relo) {
6745 			/* sub-program instruction index is a combination of
6746 			 * an offset of a symbol pointed to by relocation and
6747 			 * call instruction's imm field; for global functions,
6748 			 * call always has imm = -1, but for static functions
6749 			 * relocation is against STT_SECTION and insn->imm
6750 			 * points to a start of a static function
6751 			 *
6752 			 * for subprog addr relocation, the relo->sym_off + insn->imm is
6753 			 * the byte offset in the corresponding section.
6754 			 */
6755 			if (relo->type == RELO_CALL)
6756 				sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
6757 			else
6758 				sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ;
6759 		} else if (insn_is_pseudo_func(insn)) {
6760 			/*
6761 			 * RELO_SUBPROG_ADDR relo is always emitted even if both
6762 			 * functions are in the same section, so this point should not be reached.
6763 			 */
6764 			pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n",
6765 				prog->name, insn_idx);
6766 			return -LIBBPF_ERRNO__RELOC;
6767 		} else {
6768 			/* if subprogram call is to a static function within
6769 			 * the same ELF section, there won't be any relocation
6770 			 * emitted, but it also means there is no additional
6771 			 * offset necessary, insns->imm is relative to
6772 			 * instruction's original position within the section
6773 			 */
6774 			sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
6775 		}
6776 
6777 		/* we enforce that sub-programs should be in .text section */
6778 		subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
6779 		if (!subprog) {
6780 			pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
6781 				prog->name);
6782 			return -LIBBPF_ERRNO__RELOC;
6783 		}
6784 
6785 		/* if it's the first call instruction calling into this
6786 		 * subprogram (meaning this subprog hasn't been processed
6787 		 * yet) within the context of current main program:
6788 		 *   - append it at the end of main program's instructions block;
6789 		 *   - process it recursively, while current program is put on hold;
6790 		 *   - if that subprogram calls some other not yet processed
6791 		 *   subprogram, same thing will happen recursively until
6792 		 *   there are no more unprocessed subprograms left to append
6793 		 *   and relocate.
6794 		 */
6795 		if (subprog->sub_insn_off == 0) {
6796 			err = bpf_object__append_subprog_code(obj, main_prog, subprog);
6797 			if (err)
6798 				return err;
6799 			err = bpf_object__reloc_code(obj, main_prog, subprog);
6800 			if (err)
6801 				return err;
6802 		}
6803 
6804 		/* main_prog->insns memory could have been re-allocated, so
6805 		 * calculate pointer again
6806 		 */
6807 		insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6808 		/* calculate correct instruction position within current main
6809 		 * prog; each main prog can have a different set of
6810 		 * subprograms appended (potentially in different order as
6811 		 * well), so position of any subprog can be different for
6812 		 * different main programs
6813 		 */
6814 		insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
6815 
6816 		pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
6817 			 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
6818 	}
6819 
6820 	return 0;
6821 }
6822 
6823 /*
6824  * Relocate sub-program calls.
6825  *
6826  * Algorithm operates as follows. Each entry-point BPF program (referred to as
6827  * main prog) is processed separately. For each subprog (non-entry functions,
6828  * that can be called from either entry progs or other subprogs) gets their
6829  * sub_insn_off reset to zero. This serves as indicator that this subprogram
6830  * hasn't been yet appended and relocated within current main prog. Once its
6831  * relocated, sub_insn_off will point at the position within current main prog
6832  * where given subprog was appended. This will further be used to relocate all
6833  * the call instructions jumping into this subprog.
6834  *
6835  * We start with main program and process all call instructions. If the call
6836  * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
6837  * is zero), subprog instructions are appended at the end of main program's
6838  * instruction array. Then main program is "put on hold" while we recursively
6839  * process newly appended subprogram. If that subprogram calls into another
6840  * subprogram that hasn't been appended, new subprogram is appended again to
6841  * the *main* prog's instructions (subprog's instructions are always left
6842  * untouched, as they need to be in unmodified state for subsequent main progs
6843  * and subprog instructions are always sent only as part of a main prog) and
6844  * the process continues recursively. Once all the subprogs called from a main
6845  * prog or any of its subprogs are appended (and relocated), all their
6846  * positions within finalized instructions array are known, so it's easy to
6847  * rewrite call instructions with correct relative offsets, corresponding to
6848  * desired target subprog.
6849  *
6850  * It's important to realize that some subprogs might not be called from some
6851  * main prog and any of its called/used subprogs. Those will keep their
6852  * subprog->sub_insn_off as zero at all times and won't be appended to current
6853  * main prog and won't be relocated within the context of current main prog.
6854  * They might still be used from other main progs later.
6855  *
6856  * Visually this process can be shown as below. Suppose we have two main
6857  * programs mainA and mainB and BPF object contains three subprogs: subA,
6858  * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and
6859  * subC both call subB:
6860  *
6861  *        +--------+ +-------+
6862  *        |        v v       |
6863  *     +--+---+ +--+-+-+ +---+--+
6864  *     | subA | | subB | | subC |
6865  *     +--+---+ +------+ +---+--+
6866  *        ^                  ^
6867  *        |                  |
6868  *    +---+-------+   +------+----+
6869  *    |   mainA   |   |   mainB   |
6870  *    +-----------+   +-----------+
6871  *
6872  * We'll start relocating mainA, will find subA, append it and start
6873  * processing sub A recursively:
6874  *
6875  *    +-----------+------+
6876  *    |   mainA   | subA |
6877  *    +-----------+------+
6878  *
6879  * At this point we notice that subB is used from subA, so we append it and
6880  * relocate (there are no further subcalls from subB):
6881  *
6882  *    +-----------+------+------+
6883  *    |   mainA   | subA | subB |
6884  *    +-----------+------+------+
6885  *
6886  * At this point, we relocate subA calls, then go one level up and finish with
6887  * relocating mainA's calls. mainA is done.
6888  *
6889  * For mainB process is similar but results in different order. We start with
6890  * mainB and skip subA and subB, as mainB never calls them (at least
6891  * directly), but we see subC is needed, so we append and start processing it:
6892  *
6893  *    +-----------+------+
6894  *    |   mainB   | subC |
6895  *    +-----------+------+
6896  * Now we see subC needs subB, so we go back to it, append and relocate it:
6897  *
6898  *    +-----------+------+------+
6899  *    |   mainB   | subC | subB |
6900  *    +-----------+------+------+
6901  *
6902  * At this point we unwind recursion, relocate calls in subC, then in mainB.
6903  */
6904 static int
6905 bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
6906 {
6907 	struct bpf_program *subprog;
6908 	int i, err;
6909 
6910 	/* mark all subprogs as not relocated (yet) within the context of
6911 	 * current main program
6912 	 */
6913 	for (i = 0; i < obj->nr_programs; i++) {
6914 		subprog = &obj->programs[i];
6915 		if (!prog_is_subprog(obj, subprog))
6916 			continue;
6917 
6918 		subprog->sub_insn_off = 0;
6919 	}
6920 
6921 	err = bpf_object__reloc_code(obj, prog, prog);
6922 	if (err)
6923 		return err;
6924 
6925 	return 0;
6926 }
6927 
6928 static void
6929 bpf_object__free_relocs(struct bpf_object *obj)
6930 {
6931 	struct bpf_program *prog;
6932 	int i;
6933 
6934 	/* free up relocation descriptors */
6935 	for (i = 0; i < obj->nr_programs; i++) {
6936 		prog = &obj->programs[i];
6937 		zfree(&prog->reloc_desc);
6938 		prog->nr_reloc = 0;
6939 	}
6940 }
6941 
6942 static int cmp_relocs(const void *_a, const void *_b)
6943 {
6944 	const struct reloc_desc *a = _a;
6945 	const struct reloc_desc *b = _b;
6946 
6947 	if (a->insn_idx != b->insn_idx)
6948 		return a->insn_idx < b->insn_idx ? -1 : 1;
6949 
6950 	/* no two relocations should have the same insn_idx, but ... */
6951 	if (a->type != b->type)
6952 		return a->type < b->type ? -1 : 1;
6953 
6954 	return 0;
6955 }
6956 
6957 static void bpf_object__sort_relos(struct bpf_object *obj)
6958 {
6959 	int i;
6960 
6961 	for (i = 0; i < obj->nr_programs; i++) {
6962 		struct bpf_program *p = &obj->programs[i];
6963 
6964 		if (!p->nr_reloc)
6965 			continue;
6966 
6967 		qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
6968 	}
6969 }
6970 
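/* Find and record the exception callback designated for main program
 * 'prog' via an "exception_callback:<name>" BTF decl tag on the main
 * program itself. On the BPF side such a tag is typically emitted via a
 * macro; a sketch mirroring __exception_cb() from selftests'
 * bpf_experimental.h:
 *
 *   #define __exception_cb(name) \
 *           __attribute__((btf_decl_tag("exception_callback:" #name)))
 *
 *   SEC("tc") __exception_cb(my_exc_cb)
 *   int my_prog(struct __sk_buff *ctx) { ... }
 *
 * where my_exc_cb must be defined as a global (non-hidden) subprog.
 */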
6971 static int bpf_prog_assign_exc_cb(struct bpf_object *obj, struct bpf_program *prog)
6972 {
6973 	const char *str = "exception_callback:";
6974 	size_t pfx_len = strlen(str);
6975 	int i, j, n;
6976 
6977 	if (!obj->btf || !kernel_supports(obj, FEAT_BTF_DECL_TAG))
6978 		return 0;
6979 
6980 	n = btf__type_cnt(obj->btf);
6981 	for (i = 1; i < n; i++) {
6982 		const char *name;
6983 		struct btf_type *t;
6984 
6985 		t = btf_type_by_id(obj->btf, i);
6986 		if (!btf_is_decl_tag(t) || btf_decl_tag(t)->component_idx != -1)
6987 			continue;
6988 
6989 		name = btf__str_by_offset(obj->btf, t->name_off);
6990 		if (strncmp(name, str, pfx_len) != 0)
6991 			continue;
6992 
6993 		t = btf_type_by_id(obj->btf, t->type);
6994 		if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL) {
6995 			pr_warn("prog '%s': exception_callback:<value> decl tag not applied to the main program\n",
6996 				prog->name);
6997 			return -EINVAL;
6998 		}
6999 		if (strcmp(prog->name, btf__str_by_offset(obj->btf, t->name_off)) != 0)
7000 			continue;
7001 		/* If multiple callbacks are specified for the same prog, the
7002 		 * verifier will eventually return an error for this case;
7003 		 * hence simply skip appending a subprog.
7004 		 */
7005 		if (prog->exception_cb_idx >= 0) {
7006 			prog->exception_cb_idx = -1;
7007 			break;
7008 		}
7009 
7010 		name += pfx_len;
7011 		if (str_is_empty(name)) {
7012 			pr_warn("prog '%s': exception_callback:<value> decl tag contains empty value\n",
7013 				prog->name);
7014 			return -EINVAL;
7015 		}
7016 
7017 		for (j = 0; j < obj->nr_programs; j++) {
7018 			struct bpf_program *subprog = &obj->programs[j];
7019 
7020 			if (!prog_is_subprog(obj, subprog))
7021 				continue;
7022 			if (strcmp(name, subprog->name) != 0)
7023 				continue;
7024 			/* Enforce non-hidden linkage: from the verifier's
7025 			 * point of view global functions are expected,
7026 			 * whereas mark_btf_static fixes up linkage as static.
7027 			 */
7028 			if (!subprog->sym_global || subprog->mark_btf_static) {
7029 				pr_warn("prog '%s': exception callback %s must be a global non-hidden function\n",
7030 					prog->name, subprog->name);
7031 				return -EINVAL;
7032 			}
7033 			/* Let's see if we already saw a static exception callback with the same name */
7034 			if (prog->exception_cb_idx >= 0) {
7035 				pr_warn("prog '%s': multiple subprogs with same name as exception callback '%s'\n",
7036 					prog->name, subprog->name);
7037 				return -EINVAL;
7038 			}
7039 			prog->exception_cb_idx = j;
7040 			break;
7041 		}
7042 
7043 		if (prog->exception_cb_idx >= 0)
7044 			continue;
7045 
7046 		pr_warn("prog '%s': cannot find exception callback '%s'\n", prog->name, name);
7047 		return -ENOENT;
7048 	}
7049 
7050 	return 0;
7051 }
7052 
7053 static struct {
7054 	enum bpf_prog_type prog_type;
7055 	const char *ctx_name;
7056 } global_ctx_map[] = {
7057 	{ BPF_PROG_TYPE_CGROUP_DEVICE,           "bpf_cgroup_dev_ctx" },
7058 	{ BPF_PROG_TYPE_CGROUP_SKB,              "__sk_buff" },
7059 	{ BPF_PROG_TYPE_CGROUP_SOCK,             "bpf_sock" },
7060 	{ BPF_PROG_TYPE_CGROUP_SOCK_ADDR,        "bpf_sock_addr" },
7061 	{ BPF_PROG_TYPE_CGROUP_SOCKOPT,          "bpf_sockopt" },
7062 	{ BPF_PROG_TYPE_CGROUP_SYSCTL,           "bpf_sysctl" },
7063 	{ BPF_PROG_TYPE_FLOW_DISSECTOR,          "__sk_buff" },
7064 	{ BPF_PROG_TYPE_KPROBE,                  "bpf_user_pt_regs_t" },
7065 	{ BPF_PROG_TYPE_LWT_IN,                  "__sk_buff" },
7066 	{ BPF_PROG_TYPE_LWT_OUT,                 "__sk_buff" },
7067 	{ BPF_PROG_TYPE_LWT_SEG6LOCAL,           "__sk_buff" },
7068 	{ BPF_PROG_TYPE_LWT_XMIT,                "__sk_buff" },
7069 	{ BPF_PROG_TYPE_NETFILTER,               "bpf_nf_ctx" },
7070 	{ BPF_PROG_TYPE_PERF_EVENT,              "bpf_perf_event_data" },
7071 	{ BPF_PROG_TYPE_RAW_TRACEPOINT,          "bpf_raw_tracepoint_args" },
7072 	{ BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, "bpf_raw_tracepoint_args" },
7073 	{ BPF_PROG_TYPE_SCHED_ACT,               "__sk_buff" },
7074 	{ BPF_PROG_TYPE_SCHED_CLS,               "__sk_buff" },
7075 	{ BPF_PROG_TYPE_SK_LOOKUP,               "bpf_sk_lookup" },
7076 	{ BPF_PROG_TYPE_SK_MSG,                  "sk_msg_md" },
7077 	{ BPF_PROG_TYPE_SK_REUSEPORT,            "sk_reuseport_md" },
7078 	{ BPF_PROG_TYPE_SK_SKB,                  "__sk_buff" },
7079 	{ BPF_PROG_TYPE_SOCK_OPS,                "bpf_sock_ops" },
7080 	{ BPF_PROG_TYPE_SOCKET_FILTER,           "__sk_buff" },
7081 	{ BPF_PROG_TYPE_XDP,                     "xdp_md" },
7082 	/* all other program types don't have "named" context structs */
7083 };
7084 
7085 /* forward declarations for arch-specific underlying types of bpf_user_pt_regs_t typedef,
7086  * for below __builtin_types_compatible_p() checks;
7087  * with this approach we don't need any extra arch-specific #ifdef guards
7088  */
7089 struct pt_regs;
7090 struct user_pt_regs;
7091 struct user_regs_struct;
7092 
7093 static bool need_func_arg_type_fixup(const struct btf *btf, const struct bpf_program *prog,
7094 				     const char *subprog_name, int arg_idx,
7095 				     int arg_type_id, const char *ctx_name)
7096 {
7097 	const struct btf_type *t;
7098 	const char *tname;
7099 
7100 	/* check if existing parameter already matches verifier expectations */
7101 	t = skip_mods_and_typedefs(btf, arg_type_id, NULL);
7102 	if (!btf_is_ptr(t))
7103 		goto out_warn;
7104 
7105 	/* typedef bpf_user_pt_regs_t is a special PITA case, valid for kprobe
7106 	 * and perf_event programs, so check this case early on and forget
7107 	 * about it for subsequent checks
7108 	 */
7109 	while (btf_is_mod(t))
7110 		t = btf__type_by_id(btf, t->type);
7111 	if (btf_is_typedef(t) &&
7112 	    (prog->type == BPF_PROG_TYPE_KPROBE || prog->type == BPF_PROG_TYPE_PERF_EVENT)) {
7113 		tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>";
7114 		if (strcmp(tname, "bpf_user_pt_regs_t") == 0)
7115 			return false; /* canonical type for kprobe/perf_event */
7116 	}
7117 
7118 	/* now we can ignore typedefs moving forward */
7119 	t = skip_mods_and_typedefs(btf, t->type, NULL);
7120 
7121 	/* if it's `void *`, definitely fix up BTF info */
7122 	if (btf_is_void(t))
7123 		return true;
7124 
7125 	/* if it's already proper canonical type, no need to fix up */
7126 	tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>";
7127 	if (btf_is_struct(t) && strcmp(tname, ctx_name) == 0)
7128 		return false;
7129 
7130 	/* special cases */
7131 	switch (prog->type) {
7132 	case BPF_PROG_TYPE_KPROBE:
7133 		/* `struct pt_regs *` is expected, but we need to fix up */
7134 		if (btf_is_struct(t) && strcmp(tname, "pt_regs") == 0)
7135 			return true;
7136 		break;
7137 	case BPF_PROG_TYPE_PERF_EVENT:
7138 		if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct pt_regs) &&
7139 		    btf_is_struct(t) && strcmp(tname, "pt_regs") == 0)
7140 			return true;
7141 		if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_pt_regs) &&
7142 		    btf_is_struct(t) && strcmp(tname, "user_pt_regs") == 0)
7143 			return true;
7144 		if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_regs_struct) &&
7145 		    btf_is_struct(t) && strcmp(tname, "user_regs_struct") == 0)
7146 			return true;
7147 		break;
7148 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
7149 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
7150 		/* allow u64* as ctx */
7151 		if (btf_is_int(t) && t->size == 8)
7152 			return true;
7153 		break;
7154 	default:
7155 		break;
7156 	}
7157 
7158 out_warn:
7159 	pr_warn("prog '%s': subprog '%s' arg#%d is expected to be of `struct %s *` type\n",
7160 		prog->name, subprog_name, arg_idx, ctx_name);
7161 	return false;
7162 }
7163 
7164 static int clone_func_btf_info(struct btf *btf, int orig_fn_id, struct bpf_program *prog)
7165 {
7166 	int fn_id, fn_proto_id, ret_type_id, orig_proto_id;
7167 	int i, err, arg_cnt, fn_name_off, linkage;
7168 	struct btf_type *fn_t, *fn_proto_t, *t;
7169 	struct btf_param *p;
7170 
7171 	/* caller already validated FUNC -> FUNC_PROTO validity */
7172 	fn_t = btf_type_by_id(btf, orig_fn_id);
7173 	fn_proto_t = btf_type_by_id(btf, fn_t->type);
7174 
7175 	/* Note that each btf__add_xxx() operation invalidates
7176 	 * all btf_type and string pointers, so we need to be
7177 	 * very careful when cloning BTF types. BTF type
7178 	 * pointers always have to be refetched. And to avoid
7179 	 * problems with invalidated string pointers, we
7180 	 * add empty strings initially, then just fix up
7181 	 * name_off offsets in place. Offsets are stable for
7182 	 * existing strings, so that works out.
7183 	 */
7184 	fn_name_off = fn_t->name_off; /* we are about to invalidate fn_t */
7185 	linkage = btf_func_linkage(fn_t);
7186 	orig_proto_id = fn_t->type; /* original FUNC_PROTO ID */
7187 	ret_type_id = fn_proto_t->type; /* fn_proto_t will be invalidated */
7188 	arg_cnt = btf_vlen(fn_proto_t);
7189 
7190 	/* clone FUNC_PROTO and its params */
7191 	fn_proto_id = btf__add_func_proto(btf, ret_type_id);
7192 	if (fn_proto_id < 0)
7193 		return -EINVAL;
7194 
7195 	for (i = 0; i < arg_cnt; i++) {
7196 		int name_off;
7197 
7198 		/* copy original parameter data */
7199 		t = btf_type_by_id(btf, orig_proto_id);
7200 		p = &btf_params(t)[i];
7201 		name_off = p->name_off;
7202 
7203 		err = btf__add_func_param(btf, "", p->type);
7204 		if (err)
7205 			return err;
7206 
7207 		fn_proto_t = btf_type_by_id(btf, fn_proto_id);
7208 		p = &btf_params(fn_proto_t)[i];
7209 		p->name_off = name_off; /* use remembered str offset */
7210 	}
7211 
7212 	/* clone FUNC now; btf__add_func() enforces a non-empty name, so use the
7213 	 * entry program's name as a placeholder, which we replace immediately
7214 	 * with the original name_off
7215 	 */
7216 	fn_id = btf__add_func(btf, prog->name, linkage, fn_proto_id);
7217 	if (fn_id < 0)
7218 		return -EINVAL;
7219 
7220 	fn_t = btf_type_by_id(btf, fn_id);
7221 	fn_t->name_off = fn_name_off; /* reuse original string */
7222 
7223 	return fn_id;
7224 }
7225 
7226 /* Check if main program or global subprog's function prototype has `arg:ctx`
7227  * argument tags, and, if necessary, substitute correct type to match what BPF
7228  * verifier would expect, taking into account the specific program type. This
7229  * allows supporting the __arg_ctx tag transparently on old kernels that don't
7230  * yet have native support for it in the verifier, making the user's life much
7231  * easier.
7232  */
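/* For example (a sketch; __arg_ctx is provided by bpf_helpers.h and
 * expands to __attribute__((btf_decl_tag("arg:ctx")))), a global subprog
 * taking a generic context pointer:
 *
 *   __noinline int handle_pkt(void *ctx __arg_ctx)
 *   {
 *           ...
 *   }
 *
 * called from a SEC("xdp") program would, on such old kernels, get its
 * argument's BTF rewritten to the canonical PTR -> STRUCT "xdp_md" chain
 * that the verifier expects for a PTR_TO_CTX argument.
 */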
7233 static int bpf_program_fixup_func_info(struct bpf_object *obj, struct bpf_program *prog)
7234 {
7235 	const char *ctx_name = NULL, *ctx_tag = "arg:ctx", *fn_name;
7236 	struct bpf_func_info_min *func_rec;
7237 	struct btf_type *fn_t, *fn_proto_t;
7238 	struct btf *btf = obj->btf;
7239 	const struct btf_type *t;
7240 	struct btf_param *p;
7241 	int ptr_id = 0, struct_id, tag_id, orig_fn_id;
7242 	int i, n, arg_idx, arg_cnt, err, rec_idx;
7243 	int *orig_ids;
7244 
7245 	/* no .BTF.ext, no problem */
7246 	if (!obj->btf_ext || !prog->func_info)
7247 		return 0;
7248 
7249 	/* don't do any fix ups if kernel natively supports __arg_ctx */
7250 	if (kernel_supports(obj, FEAT_ARG_CTX_TAG))
7251 		return 0;
7252 
7253 	/* some BPF program types just don't have named context structs, so
7254 	 * this fallback mechanism doesn't work for them
7255 	 */
7256 	for (i = 0; i < ARRAY_SIZE(global_ctx_map); i++) {
7257 		if (global_ctx_map[i].prog_type != prog->type)
7258 			continue;
7259 		ctx_name = global_ctx_map[i].ctx_name;
7260 		break;
7261 	}
7262 	if (!ctx_name)
7263 		return 0;
7264 
7265 	/* remember original func BTF IDs to detect if we already cloned them */
7266 	orig_ids = calloc(prog->func_info_cnt, sizeof(*orig_ids));
7267 	if (!orig_ids)
7268 		return -ENOMEM;
7269 	for (i = 0; i < prog->func_info_cnt; i++) {
7270 		func_rec = prog->func_info + prog->func_info_rec_size * i;
7271 		orig_ids[i] = func_rec->type_id;
7272 	}
7273 
7274 	/* go through each DECL_TAG with "arg:ctx" and see if it points to one
7275 	 * of our subprogs; if yes and subprog is global and needs adjustment,
7276 	 * clone and adjust FUNC -> FUNC_PROTO combo
7277 	 */
7278 	for (i = 1, n = btf__type_cnt(btf); i < n; i++) {
7279 		/* only DECL_TAGs with an "arg:ctx" value are interesting */
7280 		t = btf__type_by_id(btf, i);
7281 		if (!btf_is_decl_tag(t))
7282 			continue;
7283 		if (strcmp(btf__str_by_offset(btf, t->name_off), ctx_tag) != 0)
7284 			continue;
7285 
7286 		/* only global funcs need adjustment, if at all */
7287 		orig_fn_id = t->type;
7288 		fn_t = btf_type_by_id(btf, orig_fn_id);
7289 		if (!btf_is_func(fn_t) || btf_func_linkage(fn_t) != BTF_FUNC_GLOBAL)
7290 			continue;
7291 
7292 		/* sanity check FUNC -> FUNC_PROTO chain, just in case */
7293 		fn_proto_t = btf_type_by_id(btf, fn_t->type);
7294 		if (!fn_proto_t || !btf_is_func_proto(fn_proto_t))
7295 			continue;
7296 
7297 		/* find corresponding func_info record */
7298 		func_rec = NULL;
7299 		for (rec_idx = 0; rec_idx < prog->func_info_cnt; rec_idx++) {
7300 			if (orig_ids[rec_idx] == t->type) {
7301 				func_rec = prog->func_info + prog->func_info_rec_size * rec_idx;
7302 				break;
7303 			}
7304 		}
7305 		/* current main program doesn't call into this subprog */
7306 		if (!func_rec)
7307 			continue;
7308 
7309 		/* some more sanity checking of DECL_TAG */
7310 		arg_cnt = btf_vlen(fn_proto_t);
7311 		arg_idx = btf_decl_tag(t)->component_idx;
7312 		if (arg_idx < 0 || arg_idx >= arg_cnt)
7313 			continue;
7314 
7315 		/* check if we should fix up argument type */
7316 		p = &btf_params(fn_proto_t)[arg_idx];
7317 		fn_name = btf__str_by_offset(btf, fn_t->name_off) ?: "<anon>";
7318 		if (!need_func_arg_type_fixup(btf, prog, fn_name, arg_idx, p->type, ctx_name))
7319 			continue;
7320 
7321 		/* clone fn/fn_proto, unless we already did it for another arg */
7322 		if (func_rec->type_id == orig_fn_id) {
7323 			int fn_id;
7324 
7325 			fn_id = clone_func_btf_info(btf, orig_fn_id, prog);
7326 			if (fn_id < 0) {
7327 				err = fn_id;
7328 				goto err_out;
7329 			}
7330 
7331 			/* point func_info record to a cloned FUNC type */
7332 			func_rec->type_id = fn_id;
7333 		}
7334 
7335 		/* create a PTR -> STRUCT type chain to mark the PTR_TO_CTX argument;
7336 		 * we do it just once per main BPF program, as all global
7337 		 * funcs share the same program type, so a single PTR ->
7338 		 * STRUCT type chain suffices
7339 		 */
7340 		if (ptr_id == 0) {
7341 			struct_id = btf__add_struct(btf, ctx_name, 0);
7342 			ptr_id = btf__add_ptr(btf, struct_id);
7343 			if (ptr_id < 0 || struct_id < 0) {
7344 				err = -EINVAL;
7345 				goto err_out;
7346 			}
7347 		}
7348 
7349 		/* for completeness, clone DECL_TAG and point it to cloned param */
7350 		tag_id = btf__add_decl_tag(btf, ctx_tag, func_rec->type_id, arg_idx);
7351 		if (tag_id < 0) {
7352 			err = -EINVAL;
7353 			goto err_out;
7354 		}
7355 
7356 		/* all the BTF manipulations invalidated pointers, refetch them */
7357 		fn_t = btf_type_by_id(btf, func_rec->type_id);
7358 		fn_proto_t = btf_type_by_id(btf, fn_t->type);
7359 
7360 		/* fix up type ID pointed to by param */
7361 		p = &btf_params(fn_proto_t)[arg_idx];
7362 		p->type = ptr_id;
7363 	}
7364 
7365 	free(orig_ids);
7366 	return 0;
7367 err_out:
7368 	free(orig_ids);
7369 	return err;
7370 }
7371 
7372 static int bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
7373 {
7374 	struct bpf_program *prog;
7375 	size_t i, j;
7376 	int err;
7377 
7378 	if (obj->btf_ext) {
7379 		err = bpf_object__relocate_core(obj, targ_btf_path);
7380 		if (err) {
7381 			pr_warn("failed to perform CO-RE relocations: %s\n",
7382 				errstr(err));
7383 			return err;
7384 		}
7385 		bpf_object__sort_relos(obj);
7386 	}
7387 
7388 	/* place globals at the end of the arena (if supported) */
7389 	if (obj->arena_map_idx >= 0 && kernel_supports(obj, FEAT_LDIMM64_FULL_RANGE_OFF)) {
7390 		struct bpf_map *arena_map = &obj->maps[obj->arena_map_idx];
7391 
7392 		obj->arena_data_off = bpf_map_mmap_sz(arena_map) -
7393 				      roundup(obj->arena_data_sz, sysconf(_SC_PAGE_SIZE));
7394 	}
7395 
7396 	/* Before relocating calls, pre-process relocations and mark
7397 	 * the few ld_imm64 instructions that point to subprogs.
7398 	 * Otherwise bpf_object__reloc_code() would later have to consider
7399 	 * all ld_imm64 insns as relocation candidates. That would
7400 	 * slow down relocation, since the number of find_prog_insn_relo()
7401 	 * calls would increase and most of them would fail to find a relo.
7402 	 */
7403 	for (i = 0; i < obj->nr_programs; i++) {
7404 		prog = &obj->programs[i];
7405 		for (j = 0; j < prog->nr_reloc; j++) {
7406 			struct reloc_desc *relo = &prog->reloc_desc[j];
7407 			struct bpf_insn *insn = &prog->insns[relo->insn_idx];
7408 
7409 			/* mark the insn, so it's recognized by insn_is_pseudo_func() */
7410 			if (relo->type == RELO_SUBPROG_ADDR)
7411 				insn[0].src_reg = BPF_PSEUDO_FUNC;
7412 		}
7413 	}
7414 
7415 	/* relocate subprogram calls and append used subprograms to main
7416 	 * programs; each copy of subprogram code needs to be relocated
7417 	 * differently for each main program, because its code location might
7418 	 * have changed.
7419 	 * Append subprog relos to main programs to allow data relos to be
7420 	 * processed after text is completely relocated.
7421 	 */
7422 	for (i = 0; i < obj->nr_programs; i++) {
7423 		prog = &obj->programs[i];
7424 		/* sub-program's sub-calls are relocated within the context of
7425 		 * its main program only
7426 		 */
7427 		if (prog_is_subprog(obj, prog))
7428 			continue;
7429 		if (!prog->autoload)
7430 			continue;
7431 
7432 		err = bpf_object__relocate_calls(obj, prog);
7433 		if (err) {
7434 			pr_warn("prog '%s': failed to relocate calls: %s\n",
7435 				prog->name, errstr(err));
7436 			return err;
7437 		}
7438 
7439 		err = bpf_prog_assign_exc_cb(obj, prog);
7440 		if (err)
7441 			return err;
7442 		/* Now, also append exception callback if it has not been done already. */
7443 		if (prog->exception_cb_idx >= 0) {
7444 			struct bpf_program *subprog = &obj->programs[prog->exception_cb_idx];
7445 
7446 			/* Calling the exception callback directly is disallowed, and the
7447 			 * verifier will reject it later. If it was already processed,
7448 			 * we can skip this step; otherwise, for all other valid cases, we
7449 			 * have to append the exception callback now.
7450 			 */
7451 			if (subprog->sub_insn_off == 0) {
7452 				err = bpf_object__append_subprog_code(obj, prog, subprog);
7453 				if (err)
7454 					return err;
7455 				err = bpf_object__reloc_code(obj, prog, subprog);
7456 				if (err)
7457 					return err;
7458 			}
7459 		}
7460 	}
7461 	for (i = 0; i < obj->nr_programs; i++) {
7462 		prog = &obj->programs[i];
7463 		if (prog_is_subprog(obj, prog))
7464 			continue;
7465 		if (!prog->autoload)
7466 			continue;
7467 
7468 		/* Process data relos for main programs */
7469 		err = bpf_object__relocate_data(obj, prog);
7470 		if (err) {
7471 			pr_warn("prog '%s': failed to relocate data references: %s\n",
7472 				prog->name, errstr(err));
7473 			return err;
7474 		}
7475 
7476 		/* Fix up .BTF.ext information, if necessary */
7477 		err = bpf_program_fixup_func_info(obj, prog);
7478 		if (err) {
7479 			pr_warn("prog '%s': failed to perform .BTF.ext fix ups: %s\n",
7480 				prog->name, errstr(err));
7481 			return err;
7482 		}
7483 	}
7484 
7485 	return 0;
7486 }
7487 
7488 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
7489 					    Elf64_Shdr *shdr, Elf_Data *data);
7490 
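/* Collect relocations against the .maps section. These come from
 * BTF-defined map-in-map (or prog-array) initializers; a sketch of the
 * BPF-side declaration that produces them:
 *
 *   struct inner_map {
 *           __uint(type, BPF_MAP_TYPE_ARRAY);
 *           __uint(max_entries, 1);
 *           __type(key, int);
 *           __type(value, int);
 *   } inner_a SEC(".maps"), inner_b SEC(".maps");
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *           __uint(max_entries, 2);
 *           __type(key, int);
 *           __array(values, struct inner_map);
 *   } outer SEC(".maps") = {
 *           .values = { [0] = &inner_a, [1] = &inner_b },
 *   };
 *
 * Each &inner_x reference becomes an ELF relocation whose offset falls
 * within outer's "values" member; the resolved target map (or program)
 * is remembered in map->init_slots for fd setup at load time.
 */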
7491 static int bpf_object__collect_map_relos(struct bpf_object *obj,
7492 					 Elf64_Shdr *shdr, Elf_Data *data)
7493 {
7494 	const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
7495 	int i, j, nrels, new_sz;
7496 	const struct btf_var_secinfo *vi = NULL;
7497 	const struct btf_type *sec, *var, *def;
7498 	struct bpf_map *map = NULL, *targ_map = NULL;
7499 	struct bpf_program *targ_prog = NULL;
7500 	bool is_prog_array, is_map_in_map;
7501 	const struct btf_member *member;
7502 	const char *name, *mname, *type;
7503 	unsigned int moff;
7504 	Elf64_Sym *sym;
7505 	Elf64_Rel *rel;
7506 	void *tmp;
7507 
7508 	if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
7509 		return -EINVAL;
7510 	sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
7511 	if (!sec)
7512 		return -EINVAL;
7513 
7514 	nrels = shdr->sh_size / shdr->sh_entsize;
7515 	for (i = 0; i < nrels; i++) {
7516 		rel = elf_rel_by_idx(data, i);
7517 		if (!rel) {
7518 			pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
7519 			return -LIBBPF_ERRNO__FORMAT;
7520 		}
7521 
7522 		sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
7523 		if (!sym) {
7524 			pr_warn(".maps relo #%d: symbol %zx not found\n",
7525 				i, (size_t)ELF64_R_SYM(rel->r_info));
7526 			return -LIBBPF_ERRNO__FORMAT;
7527 		}
7528 		name = elf_sym_str(obj, sym->st_name) ?: "<?>";
7529 
7530 		pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n",
7531 			 i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value,
7532 			 (size_t)rel->r_offset, sym->st_name, name);
7533 
7534 		for (j = 0; j < obj->nr_maps; j++) {
7535 			map = &obj->maps[j];
7536 			if (map->sec_idx != obj->efile.btf_maps_shndx)
7537 				continue;
7538 
7539 			vi = btf_var_secinfos(sec) + map->btf_var_idx;
7540 			if (vi->offset <= rel->r_offset &&
7541 			    rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size)
7542 				break;
7543 		}
7544 		if (j == obj->nr_maps) {
7545 			pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n",
7546 				i, name, (size_t)rel->r_offset);
7547 			return -EINVAL;
7548 		}
7549 
7550 		is_map_in_map = bpf_map_type__is_map_in_map(map->def.type);
7551 		is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY;
7552 		type = is_map_in_map ? "map" : "prog";
7553 		if (is_map_in_map) {
7554 			if (sym->st_shndx != obj->efile.btf_maps_shndx) {
7555 				pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
7556 					i, name);
7557 				return -LIBBPF_ERRNO__RELOC;
7558 			}
7559 			if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
7560 			    map->def.key_size != sizeof(int)) {
7561 				pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
7562 					i, map->name, sizeof(int));
7563 				return -EINVAL;
7564 			}
7565 			targ_map = bpf_object__find_map_by_name(obj, name);
7566 			if (!targ_map) {
7567 				pr_warn(".maps relo #%d: '%s' isn't a valid map reference\n",
7568 					i, name);
7569 				return -ESRCH;
7570 			}
7571 		} else if (is_prog_array) {
7572 			targ_prog = bpf_object__find_program_by_name(obj, name);
7573 			if (!targ_prog) {
7574 				pr_warn(".maps relo #%d: '%s' isn't a valid program reference\n",
7575 					i, name);
7576 				return -ESRCH;
7577 			}
7578 			if (targ_prog->sec_idx != sym->st_shndx ||
7579 			    targ_prog->sec_insn_off * 8 != sym->st_value ||
7580 			    prog_is_subprog(obj, targ_prog)) {
7581 				pr_warn(".maps relo #%d: '%s' isn't an entry-point program\n",
7582 					i, name);
7583 				return -LIBBPF_ERRNO__RELOC;
7584 			}
7585 		} else {
7586 			return -EINVAL;
7587 		}
7588 
7589 		var = btf__type_by_id(obj->btf, vi->type);
7590 		def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
7591 		if (btf_vlen(def) == 0)
7592 			return -EINVAL;
7593 		member = btf_members(def) + btf_vlen(def) - 1;
7594 		mname = btf__name_by_offset(obj->btf, member->name_off);
7595 		if (strcmp(mname, "values"))
7596 			return -EINVAL;
7597 
7598 		moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
7599 		if (rel->r_offset - vi->offset < moff)
7600 			return -EINVAL;
7601 
7602 		moff = rel->r_offset - vi->offset - moff;
7603 		/* here we use BPF pointer size, which is always 64 bit, as we
7604 		 * are parsing ELF that was built for BPF target
7605 		 */
7606 		if (moff % bpf_ptr_sz)
7607 			return -EINVAL;
7608 		moff /= bpf_ptr_sz;
7609 		if (moff >= map->init_slots_sz) {
7610 			new_sz = moff + 1;
7611 			tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
7612 			if (!tmp)
7613 				return -ENOMEM;
7614 			map->init_slots = tmp;
7615 			memset(map->init_slots + map->init_slots_sz, 0,
7616 			       (new_sz - map->init_slots_sz) * host_ptr_sz);
7617 			map->init_slots_sz = new_sz;
7618 		}
7619 		map->init_slots[moff] = is_map_in_map ? (void *)targ_map : (void *)targ_prog;
7620 
7621 		pr_debug(".maps relo #%d: map '%s' slot [%d] points to %s '%s'\n",
7622 			 i, map->name, moff, type, name);
7623 	}
7624 
7625 	return 0;
7626 }
7627 
7628 static int bpf_object__collect_relos(struct bpf_object *obj)
7629 {
7630 	int i, err;
7631 
7632 	for (i = 0; i < obj->efile.sec_cnt; i++) {
7633 		struct elf_sec_desc *sec_desc = &obj->efile.secs[i];
7634 		Elf64_Shdr *shdr;
7635 		Elf_Data *data;
7636 		int idx;
7637 
7638 		if (sec_desc->sec_type != SEC_RELO)
7639 			continue;
7640 
7641 		shdr = sec_desc->shdr;
7642 		data = sec_desc->data;
7643 		idx = shdr->sh_info;
7644 
7645 		if (shdr->sh_type != SHT_REL || idx < 0 || idx >= obj->efile.sec_cnt) {
7646 			pr_warn("internal error at %d\n", __LINE__);
7647 			return -LIBBPF_ERRNO__INTERNAL;
7648 		}
7649 
7650 		if (obj->efile.secs[idx].sec_type == SEC_ST_OPS)
7651 			err = bpf_object__collect_st_ops_relos(obj, shdr, data);
7652 		else if (idx == obj->efile.btf_maps_shndx)
7653 			err = bpf_object__collect_map_relos(obj, shdr, data);
7654 		else
7655 			err = bpf_object__collect_prog_relos(obj, shdr, data);
7656 		if (err)
7657 			return err;
7658 	}
7659 
7660 	bpf_object__sort_relos(obj);
7661 	return 0;
7662 }
7663 
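/* A helper call instruction is BPF_JMP | BPF_CALL | BPF_K with zeroed
 * src_reg/dst_reg (as opposed to BPF_PSEUDO_CALL/BPF_PSEUDO_KFUNC_CALL in
 * src_reg) and the helper ID in imm, i.e. what BPF_EMIT_CALL(BPF_FUNC_xxx)
 * from linux/filter.h produces.
 */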
7664 static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
7665 {
7666 	if (BPF_CLASS(insn->code) == BPF_JMP &&
7667 	    BPF_OP(insn->code) == BPF_CALL &&
7668 	    BPF_SRC(insn->code) == BPF_K &&
7669 	    insn->src_reg == 0 &&
7670 	    insn->dst_reg == 0) {
7671 		    *func_id = insn->imm;
7672 		    return true;
7673 	}
7674 	return false;
7675 }
7676 
7677 static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog)
7678 {
7679 	struct bpf_insn *insn = prog->insns;
7680 	enum bpf_func_id func_id;
7681 	int i;
7682 
7683 	if (obj->gen_loader)
7684 		return 0;
7685 
7686 	for (i = 0; i < prog->insns_cnt; i++, insn++) {
7687 		if (!insn_is_helper_call(insn, &func_id))
7688 			continue;
7689 
7690 		/* on kernels that don't yet support
7691 		 * bpf_probe_read_{kernel,user}[_str] helpers, fall back
7692 		 * to bpf_probe_read{,_str}(), which old kernels do provide
7693 		 */
7694 		switch (func_id) {
7695 		case BPF_FUNC_probe_read_kernel:
7696 		case BPF_FUNC_probe_read_user:
7697 			if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
7698 				insn->imm = BPF_FUNC_probe_read;
7699 			break;
7700 		case BPF_FUNC_probe_read_kernel_str:
7701 		case BPF_FUNC_probe_read_user_str:
7702 			if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
7703 				insn->imm = BPF_FUNC_probe_read_str;
7704 			break;
7705 		default:
7706 			break;
7707 		}
7708 	}
7709 	return 0;
7710 }
7711 
7712 static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
7713 				     int *btf_obj_fd, int *btf_type_id);
7714 
7715 /* this is called as prog->sec_def->prog_prepare_load_fn for libbpf-supported sec_defs */
7716 static int libbpf_prepare_prog_load(struct bpf_program *prog,
7717 				    struct bpf_prog_load_opts *opts, long cookie)
7718 {
7719 	enum sec_def_flags def = cookie;
7720 
7721 	/* old kernels might not support specifying expected_attach_type */
7722 	if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE))
7723 		opts->expected_attach_type = 0;
7724 
7725 	if (def & SEC_SLEEPABLE)
7726 		opts->prog_flags |= BPF_F_SLEEPABLE;
7727 
7728 	if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS))
7729 		opts->prog_flags |= BPF_F_XDP_HAS_FRAGS;
7730 
7731 	/* special check for usdt to use uprobe_multi link */
7732 	if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK)) {
7733 		/* for BPF_TRACE_UPROBE_MULTI, a user might want to query expected_attach_type
7734 		 * from prog, while the expected_attach_type passed to the kernel comes from
7735 		 * opts, so we update both.
7736 		 */
7737 		prog->expected_attach_type = BPF_TRACE_UPROBE_MULTI;
7738 		opts->expected_attach_type = BPF_TRACE_UPROBE_MULTI;
7739 	}
7740 
7741 	if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) {
7742 		int btf_obj_fd = 0, btf_type_id = 0, err;
7743 		const char *attach_name;
7744 
7745 		attach_name = strchr(prog->sec_name, '/');
7746 		if (!attach_name) {
7747 			/* if BPF program is annotated with just SEC("fentry")
7748 			 * (or similar) without declaratively specifying
7749 			 * target, then it is expected that target will be
7750 			 * specified with bpf_program__set_attach_target() at
7751 			 * runtime before BPF object load step. If not, then
7752 			 * there is nothing to load into the kernel as BPF
7753 			 * verifier won't be able to validate BPF program
7754 			 * correctness anyway.
7755 			 */
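			/* e.g. (sketch; the target function name is
			 * illustrative):
			 *
			 *   bpf_program__set_attach_target(prog, 0, "vfs_read");
			 */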
7756 			pr_warn("prog '%s': no BTF-based attach target is specified, use bpf_program__set_attach_target()\n",
7757 				prog->name);
7758 			return -EINVAL;
7759 		}
7760 		attach_name++; /* skip over / */
7761 
7762 		err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id);
7763 		if (err)
7764 			return err;
7765 
7766 		/* cache resolved BTF FD and BTF type ID in the prog */
7767 		prog->attach_btf_obj_fd = btf_obj_fd;
7768 		prog->attach_btf_id = btf_type_id;
7769 
7770 		/* by now, though, libbpf's common logic no longer uses
7771 		 * prog->attach_btf_obj_fd/prog->attach_btf_id, because
7772 		 * this callback is called after opts were populated by
7773 		 * libbpf, so this callback has to update opts explicitly here
7774 		 */
7775 		opts->attach_btf_obj_fd = btf_obj_fd;
7776 		opts->attach_btf_id = btf_type_id;
7777 	}
7778 	return 0;
7779 }
7780 
7781 static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz);
7782 
7783 static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog,
7784 				struct bpf_insn *insns, int insns_cnt,
7785 				const char *license, __u32 kern_version, int *prog_fd)
7786 {
7787 	LIBBPF_OPTS(bpf_prog_load_opts, load_attr);
7788 	const char *prog_name = NULL;
7789 	size_t log_buf_size = 0;
7790 	char *log_buf = NULL, *tmp;
7791 	bool own_log_buf = true;
7792 	__u32 log_level = prog->log_level;
7793 	int ret, err;
7794 
7795 	/* Be more helpful by rejecting, early on and with a more meaningful
7796 	 * and actionable error message, programs that can't be validated.
7797 	 */
7798 	switch (prog->type) {
7799 	case BPF_PROG_TYPE_UNSPEC:
7800 		/*
7801 		 * The program type must be set.  Most likely we couldn't find a proper
7802 		 * section definition at load time, and thus we didn't infer the type.
7803 		 */
7804 		pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
7805 			prog->name, prog->sec_name);
7806 		return -EINVAL;
7807 	case BPF_PROG_TYPE_STRUCT_OPS:
7808 		if (prog->attach_btf_id == 0) {
7809 			pr_warn("prog '%s': SEC(\"struct_ops\") program isn't referenced anywhere, did you forget to use it?\n",
7810 				prog->name);
7811 			return -EINVAL;
7812 		}
7813 		break;
7814 	default:
7815 		break;
7816 	}
7817 
7818 	if (!insns || !insns_cnt)
7819 		return -EINVAL;
7820 
7821 	if (kernel_supports(obj, FEAT_PROG_NAME))
7822 		prog_name = prog->name;
7823 	load_attr.attach_prog_fd = prog->attach_prog_fd;
7824 	load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
7825 	load_attr.attach_btf_id = prog->attach_btf_id;
7826 	load_attr.kern_version = kern_version;
7827 	load_attr.prog_ifindex = prog->prog_ifindex;
7828 	load_attr.expected_attach_type = prog->expected_attach_type;
7829 
7830 	/* specify func_info/line_info only if kernel supports them */
7831 	if (obj->btf && btf__fd(obj->btf) >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
7832 		load_attr.prog_btf_fd = btf__fd(obj->btf);
7833 		load_attr.func_info = prog->func_info;
7834 		load_attr.func_info_rec_size = prog->func_info_rec_size;
7835 		load_attr.func_info_cnt = prog->func_info_cnt;
7836 		load_attr.line_info = prog->line_info;
7837 		load_attr.line_info_rec_size = prog->line_info_rec_size;
7838 		load_attr.line_info_cnt = prog->line_info_cnt;
7839 	}
7840 	load_attr.log_level = log_level;
7841 	load_attr.prog_flags = prog->prog_flags;
7842 	load_attr.fd_array = obj->fd_array;
7843 
7844 	load_attr.token_fd = obj->token_fd;
7845 	if (obj->token_fd)
7846 		load_attr.prog_flags |= BPF_F_TOKEN_FD;
7847 
7848 	/* adjust load_attr if sec_def provides custom preload callback */
7849 	if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) {
7850 		err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie);
7851 		if (err < 0) {
7852 			pr_warn("prog '%s': failed to prepare load attributes: %s\n",
7853 				prog->name, errstr(err));
7854 			return err;
7855 		}
7856 		insns = prog->insns;
7857 		insns_cnt = prog->insns_cnt;
7858 	}
7859 
7860 	if (obj->gen_loader) {
7861 		bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name,
7862 				   license, insns, insns_cnt, &load_attr,
7863 				   prog - obj->programs);
7864 		*prog_fd = -1;
7865 		return 0;
7866 	}
7867 
7868 retry_load:
7869 	/* if log_level is zero, we don't request logs initially even if a
7870 	 * custom log_buf is specified; if the program load fails, we'll
7871 	 * bump log_level to 1 and either use the custom log_buf or allocate
7872 	 * our own, then retry the load to get details on what failed
7873 	 */
7874 	if (log_level) {
7875 		if (prog->log_buf) {
7876 			log_buf = prog->log_buf;
7877 			log_buf_size = prog->log_size;
7878 			own_log_buf = false;
7879 		} else if (obj->log_buf) {
7880 			log_buf = obj->log_buf;
7881 			log_buf_size = obj->log_size;
7882 			own_log_buf = false;
7883 		} else {
7884 			log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, log_buf_size * 2);
7885 			tmp = realloc(log_buf, log_buf_size);
7886 			if (!tmp) {
7887 				ret = -ENOMEM;
7888 				goto out;
7889 			}
7890 			log_buf = tmp;
7891 			log_buf[0] = '\0';
7892 			own_log_buf = true;
7893 		}
7894 	}
7895 
7896 	load_attr.log_buf = log_buf;
7897 	load_attr.log_size = log_buf_size;
7898 	load_attr.log_level = log_level;
7899 
7900 	ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr);
7901 	if (ret >= 0) {
7902 		if (log_level && own_log_buf) {
7903 			pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
7904 				 prog->name, log_buf);
7905 		}
7906 
7907 		if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) {
7908 			struct bpf_map *map;
7909 			int i;
7910 
7911 			for (i = 0; i < obj->nr_maps; i++) {
7912 				map = &prog->obj->maps[i];
7913 				if (map->libbpf_type != LIBBPF_MAP_RODATA)
7914 					continue;
7915 
7916 				if (bpf_prog_bind_map(ret, map->fd, NULL)) {
7917 					pr_warn("prog '%s': failed to bind map '%s': %s\n",
7918 						prog->name, map->real_name, errstr(errno));
7919 					/* Don't fail hard if can't bind rodata. */
7920 				}
7921 			}
7922 		}
7923 
7924 		*prog_fd = ret;
7925 		ret = 0;
7926 		goto out;
7927 	}
7928 
7929 	if (log_level == 0) {
7930 		log_level = 1;
7931 		goto retry_load;
7932 	}
7933 	/* On ENOSPC, increase log buffer size and retry, unless custom
7934 	 * log_buf is specified.
7935 	 * Be careful to not overflow u32, though. Kernel's log buf size limit
7936 	 * isn't part of UAPI so it can always be bumped to full 4GB. So don't
7937 	 * multiply by 2 unless we are sure we'll fit within 32 bits.
7938 	 * Currently, we'll get -EINVAL when we reach (UINT_MAX >> 2).
7939 	 */
7940 	if (own_log_buf && errno == ENOSPC && log_buf_size <= UINT_MAX / 2)
7941 		goto retry_load;
7942 
7943 	ret = -errno;
7944 
7945 	/* post-process verifier log to improve error descriptions */
7946 	fixup_verifier_log(prog, log_buf, log_buf_size);
7947 
7948 	pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, errstr(errno));
7949 	pr_perm_msg(ret);
7950 
7951 	if (own_log_buf && log_buf && log_buf[0] != '\0') {
7952 		pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
7953 			prog->name, log_buf);
7954 	}
7955 
7956 out:
7957 	if (own_log_buf)
7958 		free(log_buf);
7959 	return ret;
7960 }
7961 
7962 static char *find_prev_line(char *buf, char *cur)
7963 {
7964 	char *p;
7965 
7966 	if (cur == buf) /* reached the start of the log buf */
7967 		return NULL;
7968 
7969 	p = cur - 1;
7970 	while (p - 1 >= buf && *(p - 1) != '\n')
7971 		p--;
7972 
7973 	return p;
7974 }
7975 
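/* Replace the [orig, orig + orig_sz) span within the log buffer with the
 * NUL-terminated patch string, shifting the tail of the log left or right
 * as needed and truncating it if buf_sz would be exceeded. A small worked
 * example (sketch): replacing the 3-byte "old" span in "0: old\nrest\n"
 * with "longer" shifts "\nrest\n" right by 3 bytes before copying
 * "longer" into place.
 */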
7976 static void patch_log(char *buf, size_t buf_sz, size_t log_sz,
7977 		      char *orig, size_t orig_sz, const char *patch)
7978 {
7979 	/* size of the remaining log content to the right from the to-be-replaced part */
7980 	size_t rem_sz = (buf + log_sz) - (orig + orig_sz);
7981 	size_t patch_sz = strlen(patch);
7982 
7983 	if (patch_sz != orig_sz) {
7984 		/* If patch line(s) are longer than original piece of verifier log,
7985 		 * shift log contents by (patch_sz - orig_sz) bytes to the right
7986 		 * starting from after to-be-replaced part of the log.
7987 		 *
7988 		 * If patch line(s) are shorter than original piece of verifier log,
7989 		 * shift log contents by (orig_sz - patch_sz) bytes to the left
7990 		 * starting from after to-be-replaced part of the log
7991 		 *
7992 		 * We need to be careful about not overflowing available
7993 		 * buf_sz capacity. If that's the case, we'll truncate the end
7994 		 * of the original log, as necessary.
7995 		 */
7996 		if (patch_sz > orig_sz) {
7997 			if (orig + patch_sz >= buf + buf_sz) {
7998 				/* patch is big enough to cover remaining space completely */
7999 				patch_sz -= (orig + patch_sz) - (buf + buf_sz) + 1;
8000 				rem_sz = 0;
8001 			} else if (patch_sz - orig_sz > buf_sz - log_sz) {
8002 				/* patch causes part of remaining log to be truncated */
8003 				rem_sz -= (patch_sz - orig_sz) - (buf_sz - log_sz);
8004 			}
8005 		}
8006 		/* shift remaining log to the right by calculated amount */
8007 		memmove(orig + patch_sz, orig + orig_sz, rem_sz);
8008 	}
8009 
8010 	memcpy(orig, patch, patch_sz);
8011 }
8012 
8013 static void fixup_log_failed_core_relo(struct bpf_program *prog,
8014 				       char *buf, size_t buf_sz, size_t log_sz,
8015 				       char *line1, char *line2, char *line3)
8016 {
8017 	/* Expected log for failed and not properly guarded CO-RE relocation:
8018 	 * line1 -> 123: (85) call unknown#195896080
8019 	 * line2 -> invalid func unknown#195896080
8020 	 * line3 -> <anything else or end of buffer>
8021 	 *
8022 	 * "123" is the index of the instruction that was poisoned. We extract
8023 	 * instruction index to find corresponding CO-RE relocation and
8024 	 * replace this part of the log with more relevant information about
8025 	 * failed CO-RE relocation.
8026 	 */
8027 	const struct bpf_core_relo *relo;
8028 	struct bpf_core_spec spec;
8029 	char patch[512], spec_buf[256];
8030 	int insn_idx, err, spec_len;
8031 
8032 	if (sscanf(line1, "%d: (%*d) call unknown#195896080\n", &insn_idx) != 1)
8033 		return;
8034 
8035 	relo = find_relo_core(prog, insn_idx);
8036 	if (!relo)
8037 		return;
8038 
8039 	err = bpf_core_parse_spec(prog->name, prog->obj->btf, relo, &spec);
8040 	if (err)
8041 		return;
8042 
8043 	spec_len = bpf_core_format_spec(spec_buf, sizeof(spec_buf), &spec);
8044 	snprintf(patch, sizeof(patch),
8045 		 "%d: <invalid CO-RE relocation>\n"
8046 		 "failed to resolve CO-RE relocation %s%s\n",
8047 		 insn_idx, spec_buf, spec_len >= sizeof(spec_buf) ? "..." : "");
8048 
8049 	patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
8050 }
8051 
8052 static void fixup_log_missing_map_load(struct bpf_program *prog,
8053 				       char *buf, size_t buf_sz, size_t log_sz,
8054 				       char *line1, char *line2, char *line3)
8055 {
8056 	/* Expected log for failed and not properly guarded map reference:
8057 	 * line1 -> 123: (85) call unknown#2001000345
8058 	 * line2 -> invalid func unknown#2001000345
8059 	 * line3 -> <anything else or end of buffer>
8060 	 *
8061 	 * "123" is the index of the instruction that was poisoned.
8062 	 * "345" in "2001000345" is a map index in obj->maps to fetch map name.
8063 	 */
8064 	struct bpf_object *obj = prog->obj;
8065 	const struct bpf_map *map;
8066 	int insn_idx, map_idx;
8067 	char patch[128];
8068 
8069 	if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &map_idx) != 2)
8070 		return;
8071 
8072 	map_idx -= POISON_LDIMM64_MAP_BASE;
8073 	if (map_idx < 0 || map_idx >= obj->nr_maps)
8074 		return;
8075 	map = &obj->maps[map_idx];
8076 
8077 	snprintf(patch, sizeof(patch),
8078 		 "%d: <invalid BPF map reference>\n"
8079 		 "BPF map '%s' is referenced but wasn't created\n",
8080 		 insn_idx, map->name);
8081 
8082 	patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
8083 }
8084 
8085 static void fixup_log_missing_kfunc_call(struct bpf_program *prog,
8086 					 char *buf, size_t buf_sz, size_t log_sz,
8087 					 char *line1, char *line2, char *line3)
8088 {
8089 	/* Expected log for failed and not properly guarded kfunc call:
8090 	 * line1 -> 123: (85) call unknown#2002000345
8091 	 * line2 -> invalid func unknown#2002000345
8092 	 * line3 -> <anything else or end of buffer>
8093 	 *
8094 	 * "123" is the index of the instruction that was poisoned.
8095 	 * "345" in "2002000345" is an extern index in obj->externs to fetch kfunc name.
8096 	 */
8097 	struct bpf_object *obj = prog->obj;
8098 	const struct extern_desc *ext;
8099 	int insn_idx, ext_idx;
8100 	char patch[128];
8101 
8102 	if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &ext_idx) != 2)
8103 		return;
8104 
8105 	ext_idx -= POISON_CALL_KFUNC_BASE;
8106 	if (ext_idx < 0 || ext_idx >= obj->nr_extern)
8107 		return;
8108 	ext = &obj->externs[ext_idx];
8109 
8110 	snprintf(patch, sizeof(patch),
8111 		 "%d: <invalid kfunc call>\n"
8112 		 "kfunc '%s' is referenced but wasn't resolved\n",
8113 		 insn_idx, ext->name);
8114 
8115 	patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
8116 }
8117 
8118 static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz)
8119 {
8120 	/* look for familiar error patterns in last N lines of the log */
8121 	const size_t max_last_line_cnt = 10;
8122 	char *prev_line, *cur_line, *next_line;
8123 	size_t log_sz;
8124 	int i;
8125 
8126 	if (!buf)
8127 		return;
8128 
8129 	log_sz = strlen(buf) + 1;
8130 	next_line = buf + log_sz - 1;
8131 
8132 	for (i = 0; i < max_last_line_cnt; i++, next_line = cur_line) {
8133 		cur_line = find_prev_line(buf, next_line);
8134 		if (!cur_line)
8135 			return;
8136 
8137 		if (str_has_pfx(cur_line, "invalid func unknown#195896080\n")) {
8138 			prev_line = find_prev_line(buf, cur_line);
8139 			if (!prev_line)
8140 				continue;
8141 
8142 			/* failed CO-RE relocation case */
8143 			fixup_log_failed_core_relo(prog, buf, buf_sz, log_sz,
8144 						   prev_line, cur_line, next_line);
8145 			return;
8146 		} else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_LDIMM64_MAP_PFX)) {
8147 			prev_line = find_prev_line(buf, cur_line);
8148 			if (!prev_line)
8149 				continue;
8150 
8151 			/* reference to uncreated BPF map */
8152 			fixup_log_missing_map_load(prog, buf, buf_sz, log_sz,
8153 						   prev_line, cur_line, next_line);
8154 			return;
8155 		} else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_CALL_KFUNC_PFX)) {
8156 			prev_line = find_prev_line(buf, cur_line);
8157 			if (!prev_line)
8158 				continue;
8159 
8160 			/* reference to unresolved kfunc */
8161 			fixup_log_missing_kfunc_call(prog, buf, buf_sz, log_sz,
8162 						     prev_line, cur_line, next_line);
8163 			return;
8164 		}
8165 	}
8166 }
8167 
8168 static int bpf_program_record_relos(struct bpf_program *prog)
8169 {
8170 	struct bpf_object *obj = prog->obj;
8171 	int i;
8172 
8173 	for (i = 0; i < prog->nr_reloc; i++) {
8174 		struct reloc_desc *relo = &prog->reloc_desc[i];
8175 		struct extern_desc *ext = &obj->externs[relo->ext_idx];
8176 		int kind;
8177 
8178 		switch (relo->type) {
8179 		case RELO_EXTERN_LD64:
8180 			if (ext->type != EXT_KSYM)
8181 				continue;
8182 			kind = btf_is_var(btf__type_by_id(obj->btf, ext->btf_id)) ?
8183 				BTF_KIND_VAR : BTF_KIND_FUNC;
8184 			bpf_gen__record_extern(obj->gen_loader, ext->name,
8185 					       ext->is_weak, !ext->ksym.type_id,
8186 					       true, kind, relo->insn_idx);
8187 			break;
8188 		case RELO_EXTERN_CALL:
8189 			bpf_gen__record_extern(obj->gen_loader, ext->name,
8190 					       ext->is_weak, false, false, BTF_KIND_FUNC,
8191 					       relo->insn_idx);
8192 			break;
8193 		case RELO_CORE: {
8194 			struct bpf_core_relo cr = {
8195 				.insn_off = relo->insn_idx * 8,
8196 				.type_id = relo->core_relo->type_id,
8197 				.access_str_off = relo->core_relo->access_str_off,
8198 				.kind = relo->core_relo->kind,
8199 			};
8200 
8201 			bpf_gen__record_relo_core(obj->gen_loader, &cr);
8202 			break;
8203 		}
8204 		default:
8205 			continue;
8206 		}
8207 	}
8208 	return 0;
8209 }
8210 
8211 static int
8212 bpf_object__load_progs(struct bpf_object *obj, int log_level)
8213 {
8214 	struct bpf_program *prog;
8215 	size_t i;
8216 	int err;
8217 
8218 	for (i = 0; i < obj->nr_programs; i++) {
8219 		prog = &obj->programs[i];
8220 		if (prog_is_subprog(obj, prog))
8221 			continue;
8222 		if (!prog->autoload) {
8223 			pr_debug("prog '%s': skipped loading\n", prog->name);
8224 			continue;
8225 		}
8226 		prog->log_level |= log_level;
8227 
8228 		if (obj->gen_loader)
8229 			bpf_program_record_relos(prog);
8230 
8231 		err = bpf_object_load_prog(obj, prog, prog->insns, prog->insns_cnt,
8232 					   obj->license, obj->kern_version, &prog->fd);
8233 		if (err) {
8234 			pr_warn("prog '%s': failed to load: %s\n", prog->name, errstr(err));
8235 			return err;
8236 		}
8237 	}
8238 
8239 	bpf_object__free_relocs(obj);
8240 	return 0;
8241 }
8242 
8243 static int bpf_object_prepare_progs(struct bpf_object *obj)
8244 {
8245 	struct bpf_program *prog;
8246 	size_t i;
8247 	int err;
8248 
8249 	for (i = 0; i < obj->nr_programs; i++) {
8250 		prog = &obj->programs[i];
8251 		err = bpf_object__sanitize_prog(obj, prog);
8252 		if (err)
8253 			return err;
8254 	}
8255 	return 0;
8256 }
8257 
8258 static const struct bpf_sec_def *find_sec_def(const char *sec_name);
8259 
8260 static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
8261 {
8262 	struct bpf_program *prog;
8263 	int err;
8264 
8265 	bpf_object__for_each_program(prog, obj) {
8266 		prog->sec_def = find_sec_def(prog->sec_name);
8267 		if (!prog->sec_def) {
8268 			/* couldn't guess, but user might manually specify */
8269 			pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
8270 				prog->name, prog->sec_name);
8271 			continue;
8272 		}
8273 
8274 		prog->type = prog->sec_def->prog_type;
8275 		prog->expected_attach_type = prog->sec_def->expected_attach_type;
8276 
8277 		/* sec_def can have custom callback which should be called
8278 		 * after bpf_program is initialized to adjust its properties
8279 		 */
8280 		if (prog->sec_def->prog_setup_fn) {
8281 			err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie);
8282 			if (err < 0) {
8283 				pr_warn("prog '%s': failed to initialize: %s\n",
8284 					prog->name, errstr(err));
8285 				return err;
8286 			}
8287 		}
8288 	}
8289 
8290 	return 0;
8291 }
8292 
8293 static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
8294 					  const char *obj_name,
8295 					  const struct bpf_object_open_opts *opts)
8296 {
8297 	const char *kconfig, *btf_tmp_path, *token_path;
8298 	struct bpf_object *obj;
8299 	int err;
8300 	char *log_buf;
8301 	size_t log_size;
8302 	__u32 log_level;
8303 
8304 	if (obj_buf && !obj_name)
8305 		return ERR_PTR(-EINVAL);
8306 
8307 	if (elf_version(EV_CURRENT) == EV_NONE) {
8308 		pr_warn("failed to init libelf for %s\n",
8309 			path ? : "(mem buf)");
8310 		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
8311 	}
8312 
8313 	if (!OPTS_VALID(opts, bpf_object_open_opts))
8314 		return ERR_PTR(-EINVAL);
8315 
8316 	obj_name = OPTS_GET(opts, object_name, NULL) ?: obj_name;
8317 	if (obj_buf) {
8318 		path = obj_name;
8319 		pr_debug("loading object '%s' from buffer\n", obj_name);
8320 	} else {
8321 		pr_debug("loading object from %s\n", path);
8322 	}
8323 
8324 	log_buf = OPTS_GET(opts, kernel_log_buf, NULL);
8325 	log_size = OPTS_GET(opts, kernel_log_size, 0);
8326 	log_level = OPTS_GET(opts, kernel_log_level, 0);
8327 	if (log_size > UINT_MAX)
8328 		return ERR_PTR(-EINVAL);
8329 	if (log_size && !log_buf)
8330 		return ERR_PTR(-EINVAL);
8331 
8332 	token_path = OPTS_GET(opts, bpf_token_path, NULL);
8333 	/* if user didn't specify bpf_token_path explicitly, check if
8334 	 * LIBBPF_BPF_TOKEN_PATH envvar was set and treat it as bpf_token_path
8335 	 * option
8336 	 */
8337 	if (!token_path)
8338 		token_path = getenv("LIBBPF_BPF_TOKEN_PATH");
8339 	if (token_path && strlen(token_path) >= PATH_MAX)
8340 		return ERR_PTR(-ENAMETOOLONG);
8341 
8342 	obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
8343 	if (IS_ERR(obj))
8344 		return obj;
8345 
8346 	obj->log_buf = log_buf;
8347 	obj->log_size = log_size;
8348 	obj->log_level = log_level;
8349 
8350 	if (token_path) {
8351 		obj->token_path = strdup(token_path);
8352 		if (!obj->token_path) {
8353 			err = -ENOMEM;
8354 			goto out;
8355 		}
8356 	}
8357 
8358 	btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL);
8359 	if (btf_tmp_path) {
8360 		if (strlen(btf_tmp_path) >= PATH_MAX) {
8361 			err = -ENAMETOOLONG;
8362 			goto out;
8363 		}
8364 		obj->btf_custom_path = strdup(btf_tmp_path);
8365 		if (!obj->btf_custom_path) {
8366 			err = -ENOMEM;
8367 			goto out;
8368 		}
8369 	}
8370 
8371 	kconfig = OPTS_GET(opts, kconfig, NULL);
8372 	if (kconfig) {
8373 		obj->kconfig = strdup(kconfig);
8374 		if (!obj->kconfig) {
8375 			err = -ENOMEM;
8376 			goto out;
8377 		}
8378 	}
8379 
8380 	err = bpf_object__elf_init(obj);
8381 	err = err ? : bpf_object__elf_collect(obj);
8382 	err = err ? : bpf_object__collect_externs(obj);
8383 	err = err ? : bpf_object_fixup_btf(obj);
8384 	err = err ? : bpf_object__init_maps(obj, opts);
8385 	err = err ? : bpf_object_init_progs(obj, opts);
8386 	err = err ? : bpf_object__collect_relos(obj);
8387 	if (err)
8388 		goto out;
8389 
8390 	bpf_object__elf_finish(obj);
8391 
8392 	return obj;
8393 out:
8394 	bpf_object__close(obj);
8395 	return ERR_PTR(err);
8396 }
8397 
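/* Open, but don't load, a BPF object from an ELF file. Typical usage
 * (sketch):
 *
 *   struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
 *
 *   if (!obj) // on error, NULL is returned and errno is set
 *           return -errno;
 */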
8398 struct bpf_object *
8399 bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
8400 {
8401 	if (!path)
8402 		return libbpf_err_ptr(-EINVAL);
8403 
8404 	return libbpf_ptr(bpf_object_open(path, NULL, 0, NULL, opts));
8405 }
8406 
8407 struct bpf_object *bpf_object__open(const char *path)
8408 {
8409 	return bpf_object__open_file(path, NULL);
8410 }
8411 
8412 struct bpf_object *
8413 bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
8414 		     const struct bpf_object_open_opts *opts)
8415 {
8416 	char tmp_name[64];
8417 
8418 	if (!obj_buf || obj_buf_sz == 0)
8419 		return libbpf_err_ptr(-EINVAL);
8420 
8421 	/* create a (quite useless) default "name" for this memory buffer object */
8422 	snprintf(tmp_name, sizeof(tmp_name), "%lx-%zx", (unsigned long)obj_buf, obj_buf_sz);
8423 
8424 	return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, tmp_name, opts));
8425 }
8426 
8427 static int bpf_object_unload(struct bpf_object *obj)
8428 {
8429 	size_t i;
8430 
8431 	if (!obj)
8432 		return libbpf_err(-EINVAL);
8433 
8434 	for (i = 0; i < obj->nr_maps; i++) {
8435 		zclose(obj->maps[i].fd);
8436 		if (obj->maps[i].st_ops)
8437 			zfree(&obj->maps[i].st_ops->kern_vdata);
8438 	}
8439 
8440 	for (i = 0; i < obj->nr_programs; i++)
8441 		bpf_program__unload(&obj->programs[i]);
8442 
8443 	return 0;
8444 }
8445 
8446 static int bpf_object__sanitize_maps(struct bpf_object *obj)
8447 {
8448 	struct bpf_map *m;
8449 
8450 	bpf_object__for_each_map(m, obj) {
8451 		if (!bpf_map__is_internal(m))
8452 			continue;
8453 		if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
8454 			m->def.map_flags &= ~BPF_F_MMAPABLE;
8455 	}
8456 
8457 	return 0;
8458 }
8459 
8460 typedef int (*kallsyms_cb_t)(unsigned long long sym_addr, char sym_type,
8461 			     const char *sym_name, void *ctx);
8462 
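/* Iterate /proc/kallsyms and invoke cb for each symbol. Each line has the
 * form "<addr> <type> <name>[\t[<module>]]"; e.g. (entries illustrative):
 *
 *   ffffffff81000000 T _stext
 *   ffffffffc0a02000 t nft_trans_alloc	[nf_tables]
 *
 * The "%*[^\n]" conversion below discards everything past the name.
 */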
8463 static int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx)
8464 {
8465 	char sym_type, sym_name[500];
8466 	unsigned long long sym_addr;
8467 	int ret, err = 0;
8468 	FILE *f;
8469 
8470 	f = fopen("/proc/kallsyms", "re");
8471 	if (!f) {
8472 		err = -errno;
8473 		pr_warn("failed to open /proc/kallsyms: %s\n", errstr(err));
8474 		return err;
8475 	}
8476 
8477 	while (true) {
8478 		ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
8479 			     &sym_addr, &sym_type, sym_name);
8480 		if (ret == EOF && feof(f))
8481 			break;
8482 		if (ret != 3) {
8483 			pr_warn("failed to read kallsyms entry: %d\n", ret);
8484 			err = -EINVAL;
8485 			break;
8486 		}
8487 
8488 		err = cb(sym_addr, sym_type, sym_name, ctx);
8489 		if (err)
8490 			break;
8491 	}
8492 
8493 	fclose(f);
8494 	return err;
8495 }
8496 
8497 static int kallsyms_cb(unsigned long long sym_addr, char sym_type,
8498 		       const char *sym_name, void *ctx)
8499 {
8500 	struct bpf_object *obj = ctx;
8501 	const struct btf_type *t;
8502 	struct extern_desc *ext;
8503 	const char *res;
8504 
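	/* LTO-built kernels may rename local data symbols (type 'd') with a
	 * ".llvm.<hash>" suffix; match externs against the base name then.
	 */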
8505 	res = strstr(sym_name, ".llvm.");
8506 	if (sym_type == 'd' && res)
8507 		ext = find_extern_by_name_with_len(obj, sym_name, res - sym_name);
8508 	else
8509 		ext = find_extern_by_name(obj, sym_name);
8510 	if (!ext || ext->type != EXT_KSYM)
8511 		return 0;
8512 
8513 	t = btf__type_by_id(obj->btf, ext->btf_id);
8514 	if (!btf_is_var(t))
8515 		return 0;
8516 
8517 	if (ext->is_set && ext->ksym.addr != sym_addr) {
8518 		pr_warn("extern (ksym) '%s': resolution is ambiguous: 0x%llx or 0x%llx\n",
8519 			sym_name, ext->ksym.addr, sym_addr);
8520 		return -EINVAL;
8521 	}
8522 	if (!ext->is_set) {
8523 		ext->is_set = true;
8524 		ext->ksym.addr = sym_addr;
8525 		pr_debug("extern (ksym) '%s': set to 0x%llx\n", sym_name, sym_addr);
8526 	}
8527 	return 0;
8528 }
8529 
8530 static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
8531 {
8532 	return libbpf_kallsyms_parse(kallsyms_cb, obj);
8533 }
8534 
8535 static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
8536 			    __u16 kind, struct btf **res_btf,
8537 			    struct module_btf **res_mod_btf)
8538 {
8539 	struct module_btf *mod_btf;
8540 	struct btf *btf;
8541 	int i, id, err;
8542 
8543 	btf = obj->btf_vmlinux;
8544 	mod_btf = NULL;
8545 	id = btf__find_by_name_kind(btf, ksym_name, kind);
8546 
8547 	if (id == -ENOENT) {
8548 		err = load_module_btfs(obj);
8549 		if (err)
8550 			return err;
8551 
8552 		for (i = 0; i < obj->btf_module_cnt; i++) {
8553 			/* we assume module_btf's BTF FD is always >0 */
8554 			mod_btf = &obj->btf_modules[i];
8555 			btf = mod_btf->btf;
8556 			id = btf__find_by_name_kind_own(btf, ksym_name, kind);
8557 			if (id != -ENOENT)
8558 				break;
8559 		}
8560 	}
8561 	if (id <= 0)
8562 		return -ESRCH;
8563 
8564 	*res_btf = btf;
8565 	*res_mod_btf = mod_btf;
8566 	return id;
8567 }
8568 
8569 static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
8570 					       struct extern_desc *ext)
8571 {
8572 	const struct btf_type *targ_var, *targ_type;
8573 	__u32 targ_type_id, local_type_id;
8574 	struct module_btf *mod_btf = NULL;
8575 	const char *targ_var_name;
8576 	struct btf *btf = NULL;
8577 	int id, err;
8578 
8579 	id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf);
8580 	if (id < 0) {
8581 		if (id == -ESRCH && ext->is_weak)
8582 			return 0;
8583 		pr_warn("extern (var ksym) '%s': not found in kernel BTF\n",
8584 			ext->name);
8585 		return id;
8586 	}
8587 
8588 	/* find local type_id */
8589 	local_type_id = ext->ksym.type_id;
8590 
8591 	/* find target type_id */
8592 	targ_var = btf__type_by_id(btf, id);
8593 	targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
8594 	targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
8595 
8596 	err = bpf_core_types_are_compat(obj->btf, local_type_id,
8597 					btf, targ_type_id);
8598 	if (err <= 0) {
8599 		const struct btf_type *local_type;
8600 		const char *targ_name, *local_name;
8601 
8602 		local_type = btf__type_by_id(obj->btf, local_type_id);
8603 		local_name = btf__name_by_offset(obj->btf, local_type->name_off);
8604 		targ_name = btf__name_by_offset(btf, targ_type->name_off);
8605 
8606 		pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
8607 			ext->name, local_type_id,
8608 			btf_kind_str(local_type), local_name, targ_type_id,
8609 			btf_kind_str(targ_type), targ_name);
8610 		return -EINVAL;
8611 	}
8612 
8613 	ext->is_set = true;
8614 	ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
8615 	ext->ksym.kernel_btf_id = id;
8616 	pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n",
8617 		 ext->name, id, btf_kind_str(targ_var), targ_var_name);
8618 
8619 	return 0;
8620 }
8621 
8622 static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
8623 						struct extern_desc *ext)
8624 {
8625 	int local_func_proto_id, kfunc_proto_id, kfunc_id;
8626 	struct module_btf *mod_btf = NULL;
8627 	const struct btf_type *kern_func;
8628 	struct btf *kern_btf = NULL;
8629 	int ret;
8630 
8631 	local_func_proto_id = ext->ksym.type_id;
8632 
8633 	kfunc_id = find_ksym_btf_id(obj, ext->essent_name ?: ext->name, BTF_KIND_FUNC, &kern_btf,
8634 				    &mod_btf);
8635 	if (kfunc_id < 0) {
8636 		if (kfunc_id == -ESRCH && ext->is_weak)
8637 			return 0;
8638 		pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n",
8639 			ext->name);
8640 		return kfunc_id;
8641 	}
8642 
8643 	kern_func = btf__type_by_id(kern_btf, kfunc_id);
8644 	kfunc_proto_id = kern_func->type;
8645 
8646 	ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id,
8647 					kern_btf, kfunc_proto_id);
8648 	if (ret <= 0) {
8649 		if (ext->is_weak)
8650 			return 0;
8651 
8652 		pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with %s [%d]\n",
8653 			ext->name, local_func_proto_id,
8654 			mod_btf ? mod_btf->name : "vmlinux", kfunc_proto_id);
8655 		return -EINVAL;
8656 	}
8657 
8658 	/* set index for module BTF fd in fd_array, if unset */
8659 	if (mod_btf && !mod_btf->fd_array_idx) {
8660 		/* insn->off is s16 */
8661 		if (obj->fd_array_cnt == INT16_MAX) {
8662 			pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n",
8663 				ext->name, obj->fd_array_cnt);
8664 			return -E2BIG;
8665 		}
8666 		/* Cannot use index 0 for module BTF fd */
8667 		if (!obj->fd_array_cnt)
8668 			obj->fd_array_cnt = 1;
8669 
8670 		ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int),
8671 					obj->fd_array_cnt + 1);
8672 		if (ret)
8673 			return ret;
8674 		mod_btf->fd_array_idx = obj->fd_array_cnt;
8675 		/* we assume module BTF FD is always >0 */
8676 		obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd;
8677 	}
8678 
8679 	ext->is_set = true;
8680 	ext->ksym.kernel_btf_id = kfunc_id;
8681 	ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0;
8682 	/* Also set kernel_btf_obj_fd to make sure that bpf_object__relocate_data()
8683 	 * populates FD into ld_imm64 insn when it's used to point to kfunc.
8684 	 * {kernel_btf_id, btf_fd_idx} -> fixup bpf_call.
8685 	 * {kernel_btf_id, kernel_btf_obj_fd} -> fixup ld_imm64.
8686 	 */
8687 	ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
8688 	pr_debug("extern (func ksym) '%s': resolved to %s [%d]\n",
8689 		 ext->name, mod_btf ? mod_btf->name : "vmlinux", kfunc_id);
8690 
8691 	return 0;
8692 }
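
/* For illustration (not part of this file): the BPF-side declarations that
 * the kfunc resolution above services. A sketch, assuming these kfuncs are
 * present in the running kernel's BTF:
 *
 *	extern struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
 *	extern void bpf_task_release(struct task_struct *p) __ksym;
 *
 * Adding __weak makes an unresolvable kfunc non-fatal at load time; the
 * program must then guard its calls, e.g. with bpf_ksym_exists().
 */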
8693 
8694 static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
8695 {
8696 	const struct btf_type *t;
8697 	struct extern_desc *ext;
8698 	int i, err;
8699 
8700 	for (i = 0; i < obj->nr_extern; i++) {
8701 		ext = &obj->externs[i];
8702 		if (ext->type != EXT_KSYM || !ext->ksym.type_id)
8703 			continue;
8704 
8705 		if (obj->gen_loader) {
8706 			ext->is_set = true;
8707 			ext->ksym.kernel_btf_obj_fd = 0;
8708 			ext->ksym.kernel_btf_id = 0;
8709 			continue;
8710 		}
8711 		t = btf__type_by_id(obj->btf, ext->btf_id);
8712 		if (btf_is_var(t))
8713 			err = bpf_object__resolve_ksym_var_btf_id(obj, ext);
8714 		else
8715 			err = bpf_object__resolve_ksym_func_btf_id(obj, ext);
8716 		if (err)
8717 			return err;
8718 	}
8719 	return 0;
8720 }
8721 
8722 static int bpf_object__resolve_externs(struct bpf_object *obj,
8723 				       const char *extra_kconfig)
8724 {
8725 	bool need_config = false, need_kallsyms = false;
8726 	bool need_vmlinux_btf = false;
8727 	struct extern_desc *ext;
8728 	void *kcfg_data = NULL;
8729 	int err, i;
8730 
8731 	if (obj->nr_extern == 0)
8732 		return 0;
8733 
8734 	if (obj->kconfig_map_idx >= 0)
8735 		kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
8736 
8737 	for (i = 0; i < obj->nr_extern; i++) {
8738 		ext = &obj->externs[i];
8739 
8740 		if (ext->type == EXT_KSYM) {
8741 			if (ext->ksym.type_id)
8742 				need_vmlinux_btf = true;
8743 			else
8744 				need_kallsyms = true;
8745 			continue;
8746 		} else if (ext->type == EXT_KCFG) {
8747 			void *ext_ptr = kcfg_data + ext->kcfg.data_off;
8748 			__u64 value = 0;
8749 
8750 			/* Kconfig externs need actual /proc/config.gz */
8751 			if (str_has_pfx(ext->name, "CONFIG_")) {
8752 				need_config = true;
8753 				continue;
8754 			}
8755 
8756 			/* Virtual kcfg externs are handled specially by libbpf */
8757 			if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
8758 				value = get_kernel_version();
8759 				if (!value) {
8760 					pr_warn("extern (kcfg) '%s': failed to get kernel version\n", ext->name);
8761 					return -EINVAL;
8762 				}
8763 			} else if (strcmp(ext->name, "LINUX_HAS_BPF_COOKIE") == 0) {
8764 				value = kernel_supports(obj, FEAT_BPF_COOKIE);
8765 			} else if (strcmp(ext->name, "LINUX_HAS_SYSCALL_WRAPPER") == 0) {
8766 				value = kernel_supports(obj, FEAT_SYSCALL_WRAPPER);
8767 			} else if (!str_has_pfx(ext->name, "LINUX_") || !ext->is_weak) {
8768 				/* Currently libbpf supports only CONFIG_ and LINUX_ prefixed
8769 				 * __kconfig externs, where LINUX_ ones are virtual and filled out
8770 				 * by libbpf itself (their values don't come from Kconfig).
8771 				 * If a LINUX_xxx variable is not recognized by libbpf but is marked
8772 				 * __weak, it defaults to zero, just like CONFIG_xxx
8773 				 * externs.
8774 				 */
8775 				pr_warn("extern (kcfg) '%s': unrecognized virtual extern\n", ext->name);
8776 				return -EINVAL;
8777 			}
8778 
8779 			err = set_kcfg_value_num(ext, ext_ptr, value);
8780 			if (err)
8781 				return err;
8782 			pr_debug("extern (kcfg) '%s': set to 0x%llx\n",
8783 				 ext->name, (long long)value);
8784 		} else {
8785 			pr_warn("extern '%s': unrecognized extern kind\n", ext->name);
8786 			return -EINVAL;
8787 		}
8788 	}
8789 	if (need_config && extra_kconfig) {
8790 		err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
8791 		if (err)
8792 			return -EINVAL;
8793 		need_config = false;
8794 		for (i = 0; i < obj->nr_extern; i++) {
8795 			ext = &obj->externs[i];
8796 			if (ext->type == EXT_KCFG && !ext->is_set) {
8797 				need_config = true;
8798 				break;
8799 			}
8800 		}
8801 	}
8802 	if (need_config) {
8803 		err = bpf_object__read_kconfig_file(obj, kcfg_data);
8804 		if (err)
8805 			return -EINVAL;
8806 	}
8807 	if (need_kallsyms) {
8808 		err = bpf_object__read_kallsyms_file(obj);
8809 		if (err)
8810 			return -EINVAL;
8811 	}
8812 	if (need_vmlinux_btf) {
8813 		err = bpf_object__resolve_ksyms_btf_id(obj);
8814 		if (err)
8815 			return -EINVAL;
8816 	}
8817 	for (i = 0; i < obj->nr_extern; i++) {
8818 		ext = &obj->externs[i];
8819 
8820 		if (!ext->is_set && !ext->is_weak) {
8821 			pr_warn("extern '%s' (strong): not resolved\n", ext->name);
8822 			return -ESRCH;
8823 		} else if (!ext->is_set) {
8824 			pr_debug("extern '%s' (weak): not resolved, defaulting to zero\n",
8825 				 ext->name);
8826 		}
8827 	}
8828 
8829 	return 0;
8830 }
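
/* For illustration (not part of this file): the __kconfig externs the loop
 * above fills in, as declared on the BPF side. A sketch; KERNEL_VERSION()
 * is assumed to come from libbpf's bpf_helpers.h:
 *
 *	extern int LINUX_KERNEL_VERSION __kconfig;
 *	extern int CONFIG_HZ __kconfig;
 *	extern bool CONFIG_BPF_JIT __kconfig __weak;	// zero if unset
 *
 *	if (LINUX_KERNEL_VERSION >= KERNEL_VERSION(5, 15, 0))
 *		// use a 5.15+ feature
 */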
8831 
8832 static void bpf_map_prepare_vdata(const struct bpf_map *map)
8833 {
8834 	const struct btf_type *type;
8835 	struct bpf_struct_ops *st_ops;
8836 	__u32 i;
8837 
8838 	st_ops = map->st_ops;
8839 	type = btf__type_by_id(map->obj->btf, st_ops->type_id);
8840 	for (i = 0; i < btf_vlen(type); i++) {
8841 		struct bpf_program *prog = st_ops->progs[i];
8842 		void *kern_data;
8843 		int prog_fd;
8844 
8845 		if (!prog)
8846 			continue;
8847 
8848 		prog_fd = bpf_program__fd(prog);
8849 		kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
8850 		*(unsigned long *)kern_data = prog_fd;
8851 	}
8852 }
8853 
8854 static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
8855 {
8856 	struct bpf_map *map;
8857 	int i;
8858 
8859 	for (i = 0; i < obj->nr_maps; i++) {
8860 		map = &obj->maps[i];
8861 
8862 		if (!bpf_map__is_struct_ops(map))
8863 			continue;
8864 
8865 		if (!map->autocreate)
8866 			continue;
8867 
8868 		bpf_map_prepare_vdata(map);
8869 	}
8870 
8871 	return 0;
8872 }
8873 
8874 static void bpf_object_unpin(struct bpf_object *obj)
8875 {
8876 	int i;
8877 
8878 	/* unpin any maps that were auto-pinned during load */
8879 	for (i = 0; i < obj->nr_maps; i++)
8880 		if (obj->maps[i].pinned && !obj->maps[i].reused)
8881 			bpf_map__unpin(&obj->maps[i], NULL);
8882 }
8883 
8884 static void bpf_object_post_load_cleanup(struct bpf_object *obj)
8885 {
8886 	int i;
8887 
8888 	/* clean up fd_array */
8889 	zfree(&obj->fd_array);
8890 
8891 	/* clean up module BTFs */
8892 	for (i = 0; i < obj->btf_module_cnt; i++) {
8893 		close(obj->btf_modules[i].fd);
8894 		btf__free(obj->btf_modules[i].btf);
8895 		free(obj->btf_modules[i].name);
8896 	}
8897 	obj->btf_module_cnt = 0;
8898 	zfree(&obj->btf_modules);
8899 
8900 	/* clean up vmlinux BTF */
8901 	btf__free(obj->btf_vmlinux);
8902 	obj->btf_vmlinux = NULL;
8903 }
8904 
8905 static int bpf_object_prepare(struct bpf_object *obj, const char *target_btf_path)
8906 {
8907 	int err;
8908 
8909 	if (obj->state >= OBJ_PREPARED) {
8910 		pr_warn("object '%s': prepare can't be attempted twice\n", obj->name);
8911 		return -EINVAL;
8912 	}
8913 
8914 	err = bpf_object_prepare_token(obj);
8915 	err = err ? : bpf_object__probe_loading(obj);
8916 	err = err ? : bpf_object__load_vmlinux_btf(obj, false);
8917 	err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
8918 	err = err ? : bpf_object__sanitize_maps(obj);
8919 	err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
8920 	err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
8921 	err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
8922 	err = err ? : bpf_object__sanitize_and_load_btf(obj);
8923 	err = err ? : bpf_object__create_maps(obj);
8924 	err = err ? : bpf_object_prepare_progs(obj);
8925 
8926 	if (err) {
8927 		bpf_object_unpin(obj);
8928 		bpf_object_unload(obj);
8929 		obj->state = OBJ_LOADED;
8930 		return err;
8931 	}
8932 
8933 	obj->state = OBJ_PREPARED;
8934 	return 0;
8935 }
8936 
8937 static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
8938 {
8939 	int err;
8940 
8941 	if (!obj)
8942 		return libbpf_err(-EINVAL);
8943 
8944 	if (obj->state >= OBJ_LOADED) {
8945 		pr_warn("object '%s': load can't be attempted twice\n", obj->name);
8946 		return libbpf_err(-EINVAL);
8947 	}
8948 
8949 	/* Disallow loading programs of non-native endianness into the kernel,
8950 	 * but permit cross-endian creation of a "light skeleton".
8951 	 */
8952 	if (obj->gen_loader) {
8953 		bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);
8954 	} else if (!is_native_endianness(obj)) {
8955 		pr_warn("object '%s': loading non-native endianness is unsupported\n", obj->name);
8956 		return libbpf_err(-LIBBPF_ERRNO__ENDIAN);
8957 	}
8958 
8959 	if (obj->state < OBJ_PREPARED) {
8960 		err = bpf_object_prepare(obj, target_btf_path);
8961 		if (err)
8962 			return libbpf_err(err);
8963 	}
8964 	err = bpf_object__load_progs(obj, extra_log_level);
8965 	err = err ? : bpf_object_init_prog_arrays(obj);
8966 	err = err ? : bpf_object_prepare_struct_ops(obj);
8967 
8968 	if (obj->gen_loader) {
8969 		/* reset FDs */
8970 		if (obj->btf)
8971 			btf__set_fd(obj->btf, -1);
8972 		if (!err)
8973 			err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
8974 	}
8975 
8976 	bpf_object_post_load_cleanup(obj);
8977 	obj->state = OBJ_LOADED; /* doesn't matter if successfully or not */
8978 
8979 	if (err) {
8980 		bpf_object_unpin(obj);
8981 		bpf_object_unload(obj);
8982 		pr_warn("failed to load object '%s'\n", obj->path);
8983 		return libbpf_err(err);
8984 	}
8985 
8986 	return 0;
8987 }
8988 
8989 int bpf_object__prepare(struct bpf_object *obj)
8990 {
8991 	return libbpf_err(bpf_object_prepare(obj, NULL));
8992 }
8993 
8994 int bpf_object__load(struct bpf_object *obj)
8995 {
8996 	return bpf_object_load(obj, 0, NULL);
8997 }
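
/* Typical caller flow for the prepare/load split above (object path is
 * illustrative):
 *
 *	struct bpf_object *obj;
 *	int err;
 *
 *	obj = bpf_object__open("prog.bpf.o");
 *	if (!obj)
 *		return -errno;
 *
 *	err = bpf_object__prepare(obj);	// optional: creates maps, stops short of loading progs
 *	err = err ?: bpf_object__load(obj);
 *	if (err)
 *		goto out;
 *	// ... attach programs, use maps ...
 * out:
 *	bpf_object__close(obj);
 */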
8998 
8999 static int make_parent_dir(const char *path)
9000 {
9001 	char *dname, *dir;
9002 	int err = 0;
9003 
9004 	dname = strdup(path);
9005 	if (dname == NULL)
9006 		return -ENOMEM;
9007 
9008 	dir = dirname(dname);
9009 	if (mkdir(dir, 0700) && errno != EEXIST)
9010 		err = -errno;
9011 
9012 	free(dname);
9013 	if (err) {
9014 		pr_warn("failed to mkdir %s: %s\n", path, errstr(err));
9015 	}
9016 	return err;
9017 }
9018 
9019 static int check_path(const char *path)
9020 {
9021 	struct statfs st_fs;
9022 	char *dname, *dir;
9023 	int err = 0;
9024 
9025 	if (path == NULL)
9026 		return -EINVAL;
9027 
9028 	dname = strdup(path);
9029 	if (dname == NULL)
9030 		return -ENOMEM;
9031 
9032 	dir = dirname(dname);
9033 	if (statfs(dir, &st_fs)) {
9034 		pr_warn("failed to statfs %s: %s\n", dir, errstr(errno));
9035 		err = -errno;
9036 	}
9037 	free(dname);
9038 
9039 	if (!err && st_fs.f_type != BPF_FS_MAGIC) {
9040 		pr_warn("specified path %s is not on BPF FS\n", path);
9041 		err = -EINVAL;
9042 	}
9043 
9044 	return err;
9045 }
9046 
9047 int bpf_program__pin(struct bpf_program *prog, const char *path)
9048 {
9049 	int err;
9050 
9051 	if (prog->fd < 0) {
9052 		pr_warn("prog '%s': can't pin program that wasn't loaded\n", prog->name);
9053 		return libbpf_err(-EINVAL);
9054 	}
9055 
9056 	err = make_parent_dir(path);
9057 	if (err)
9058 		return libbpf_err(err);
9059 
9060 	err = check_path(path);
9061 	if (err)
9062 		return libbpf_err(err);
9063 
9064 	if (bpf_obj_pin(prog->fd, path)) {
9065 		err = -errno;
9066 		pr_warn("prog '%s': failed to pin at '%s': %s\n", prog->name, path, errstr(err));
9067 		return libbpf_err(err);
9068 	}
9069 
9070 	pr_debug("prog '%s': pinned at '%s'\n", prog->name, path);
9071 	return 0;
9072 }
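
/* Usage sketch (path is illustrative): pin a loaded program so it survives
 * process exit, and drop the pin later. make_parent_dir() above creates the
 * parent directory (mode 0700) if missing, and check_path() requires it to
 * be on a bpffs mount:
 *
 *	err = bpf_program__pin(prog, "/sys/fs/bpf/my_prog");
 *	...
 *	err = bpf_program__unpin(prog, "/sys/fs/bpf/my_prog");
 */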
9073 
9074 int bpf_program__unpin(struct bpf_program *prog, const char *path)
9075 {
9076 	int err;
9077 
9078 	if (prog->fd < 0) {
9079 		pr_warn("prog '%s': can't unpin program that wasn't loaded\n", prog->name);
9080 		return libbpf_err(-EINVAL);
9081 	}
9082 
9083 	err = check_path(path);
9084 	if (err)
9085 		return libbpf_err(err);
9086 
9087 	err = unlink(path);
9088 	if (err)
9089 		return libbpf_err(-errno);
9090 
9091 	pr_debug("prog '%s': unpinned from '%s'\n", prog->name, path);
9092 	return 0;
9093 }
9094 
9095 int bpf_map__pin(struct bpf_map *map, const char *path)
9096 {
9097 	int err;
9098 
9099 	if (map == NULL) {
9100 		pr_warn("invalid map pointer\n");
9101 		return libbpf_err(-EINVAL);
9102 	}
9103 
9104 	if (map->fd < 0) {
9105 		pr_warn("map '%s': can't pin BPF map without FD (was it created?)\n", map->name);
9106 		return libbpf_err(-EINVAL);
9107 	}
9108 
9109 	if (map->pin_path) {
9110 		if (path && strcmp(path, map->pin_path)) {
9111 			pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
9112 				bpf_map__name(map), map->pin_path, path);
9113 			return libbpf_err(-EINVAL);
9114 		} else if (map->pinned) {
9115 			pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
9116 				 bpf_map__name(map), map->pin_path);
9117 			return 0;
9118 		}
9119 	} else {
9120 		if (!path) {
9121 			pr_warn("missing a path to pin map '%s' at\n",
9122 				bpf_map__name(map));
9123 			return libbpf_err(-EINVAL);
9124 		} else if (map->pinned) {
9125 			pr_warn("map '%s' already pinned\n", bpf_map__name(map));
9126 			return libbpf_err(-EEXIST);
9127 		}
9128 
9129 		map->pin_path = strdup(path);
9130 		if (!map->pin_path) {
9131 			err = -errno;
9132 			goto out_err;
9133 		}
9134 	}
9135 
9136 	err = make_parent_dir(map->pin_path);
9137 	if (err)
9138 		return libbpf_err(err);
9139 
9140 	err = check_path(map->pin_path);
9141 	if (err)
9142 		return libbpf_err(err);
9143 
9144 	if (bpf_obj_pin(map->fd, map->pin_path)) {
9145 		err = -errno;
9146 		goto out_err;
9147 	}
9148 
9149 	map->pinned = true;
9150 	pr_debug("pinned map '%s'\n", map->pin_path);
9151 
9152 	return 0;
9153 
9154 out_err:
9155 	pr_warn("failed to pin map: %s\n", errstr(err));
9156 	return libbpf_err(err);
9157 }
9158 
9159 int bpf_map__unpin(struct bpf_map *map, const char *path)
9160 {
9161 	int err;
9162 
9163 	if (map == NULL) {
9164 		pr_warn("invalid map pointer\n");
9165 		return libbpf_err(-EINVAL);
9166 	}
9167 
9168 	if (map->pin_path) {
9169 		if (path && strcmp(path, map->pin_path)) {
9170 			pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
9171 				bpf_map__name(map), map->pin_path, path);
9172 			return libbpf_err(-EINVAL);
9173 		}
9174 		path = map->pin_path;
9175 	} else if (!path) {
9176 		pr_warn("no path to unpin map '%s' from\n",
9177 			bpf_map__name(map));
9178 		return libbpf_err(-EINVAL);
9179 	}
9180 
9181 	err = check_path(path);
9182 	if (err)
9183 		return libbpf_err(err);
9184 
9185 	err = unlink(path);
9186 	if (err != 0)
9187 		return libbpf_err(-errno);
9188 
9189 	map->pinned = false;
9190 	pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
9191 
9192 	return 0;
9193 }
9194 
9195 int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
9196 {
9197 	char *new = NULL;
9198 
9199 	if (path) {
9200 		new = strdup(path);
9201 		if (!new)
9202 			return libbpf_err(-errno);
9203 	}
9204 
9205 	free(map->pin_path);
9206 	map->pin_path = new;
9207 	return 0;
9208 }
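
/* Usage sketch (map name and path are illustrative): setting a pin path
 * before load makes bpf_object__load() pin the map on creation, or reuse a
 * compatible map already pinned at that path:
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
 *
 *	err = bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map");
 *	err = err ?: bpf_object__load(obj);
 */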
9209 
9210 __alias(bpf_map__pin_path)
9211 const char *bpf_map__get_pin_path(const struct bpf_map *map);
9212 
9213 const char *bpf_map__pin_path(const struct bpf_map *map)
9214 {
9215 	return map->pin_path;
9216 }
9217 
9218 bool bpf_map__is_pinned(const struct bpf_map *map)
9219 {
9220 	return map->pinned;
9221 }
9222 
9223 static void sanitize_pin_path(char *s)
9224 {
9225 	/* bpffs disallows periods in path names */
9226 	while (*s) {
9227 		if (*s == '.')
9228 			*s = '_';
9229 		s++;
9230 	}
9231 }
9232 
9233 int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
9234 {
9235 	struct bpf_map *map;
9236 	int err;
9237 
9238 	if (!obj)
9239 		return libbpf_err(-ENOENT);
9240 
9241 	if (obj->state < OBJ_PREPARED) {
9242 		pr_warn("object not yet prepared; prepare or load it first\n");
9243 		return libbpf_err(-ENOENT);
9244 	}
9245 
9246 	bpf_object__for_each_map(map, obj) {
9247 		char *pin_path = NULL;
9248 		char buf[PATH_MAX];
9249 
9250 		if (!map->autocreate)
9251 			continue;
9252 
9253 		if (path) {
9254 			err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
9255 			if (err)
9256 				goto err_unpin_maps;
9257 			sanitize_pin_path(buf);
9258 			pin_path = buf;
9259 		} else if (!map->pin_path) {
9260 			continue;
9261 		}
9262 
9263 		err = bpf_map__pin(map, pin_path);
9264 		if (err)
9265 			goto err_unpin_maps;
9266 	}
9267 
9268 	return 0;
9269 
9270 err_unpin_maps:
9271 	while ((map = bpf_object__prev_map(obj, map))) {
9272 		if (!map->pin_path)
9273 			continue;
9274 
9275 		bpf_map__unpin(map, NULL);
9276 	}
9277 
9278 	return libbpf_err(err);
9279 }
9280 
9281 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
9282 {
9283 	struct bpf_map *map;
9284 	int err;
9285 
9286 	if (!obj)
9287 		return libbpf_err(-ENOENT);
9288 
9289 	bpf_object__for_each_map(map, obj) {
9290 		char *pin_path = NULL;
9291 		char buf[PATH_MAX];
9292 
9293 		if (path) {
9294 			err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
9295 			if (err)
9296 				return libbpf_err(err);
9297 			sanitize_pin_path(buf);
9298 			pin_path = buf;
9299 		} else if (!map->pin_path) {
9300 			continue;
9301 		}
9302 
9303 		err = bpf_map__unpin(map, pin_path);
9304 		if (err)
9305 			return libbpf_err(err);
9306 	}
9307 
9308 	return 0;
9309 }
9310 
9311 int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
9312 {
9313 	struct bpf_program *prog;
9314 	char buf[PATH_MAX];
9315 	int err;
9316 
9317 	if (!obj)
9318 		return libbpf_err(-ENOENT);
9319 
9320 	if (obj->state < OBJ_LOADED) {
9321 		pr_warn("object not yet loaded; load it first\n");
9322 		return libbpf_err(-ENOENT);
9323 	}
9324 
9325 	bpf_object__for_each_program(prog, obj) {
9326 		err = pathname_concat(buf, sizeof(buf), path, prog->name);
9327 		if (err)
9328 			goto err_unpin_programs;
9329 
9330 		err = bpf_program__pin(prog, buf);
9331 		if (err)
9332 			goto err_unpin_programs;
9333 	}
9334 
9335 	return 0;
9336 
9337 err_unpin_programs:
9338 	while ((prog = bpf_object__prev_program(obj, prog))) {
9339 		if (pathname_concat(buf, sizeof(buf), path, prog->name))
9340 			continue;
9341 
9342 		bpf_program__unpin(prog, buf);
9343 	}
9344 
9345 	return libbpf_err(err);
9346 }
9347 
9348 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
9349 {
9350 	struct bpf_program *prog;
9351 	int err;
9352 
9353 	if (!obj)
9354 		return libbpf_err(-ENOENT);
9355 
9356 	bpf_object__for_each_program(prog, obj) {
9357 		char buf[PATH_MAX];
9358 
9359 		err = pathname_concat(buf, sizeof(buf), path, prog->name);
9360 		if (err)
9361 			return libbpf_err(err);
9362 
9363 		err = bpf_program__unpin(prog, buf);
9364 		if (err)
9365 			return libbpf_err(err);
9366 	}
9367 
9368 	return 0;
9369 }
9370 
9371 int bpf_object__pin(struct bpf_object *obj, const char *path)
9372 {
9373 	int err;
9374 
9375 	err = bpf_object__pin_maps(obj, path);
9376 	if (err)
9377 		return libbpf_err(err);
9378 
9379 	err = bpf_object__pin_programs(obj, path);
9380 	if (err) {
9381 		bpf_object__unpin_maps(obj, path);
9382 		return libbpf_err(err);
9383 	}
9384 
9385 	return 0;
9386 }
9387 
9388 int bpf_object__unpin(struct bpf_object *obj, const char *path)
9389 {
9390 	int err;
9391 
9392 	err = bpf_object__unpin_programs(obj, path);
9393 	if (err)
9394 		return libbpf_err(err);
9395 
9396 	err = bpf_object__unpin_maps(obj, path);
9397 	if (err)
9398 		return libbpf_err(err);
9399 
9400 	return 0;
9401 }
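
/* Usage sketch (path is illustrative): pin all auto-created maps and all
 * loaded programs of an object under one bpffs directory, then undo it:
 *
 *	err = bpf_object__pin(obj, "/sys/fs/bpf/myapp");
 *	...
 *	err = bpf_object__unpin(obj, "/sys/fs/bpf/myapp");
 */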
9402 
9403 static void bpf_map__destroy(struct bpf_map *map)
9404 {
9405 	if (map->inner_map) {
9406 		bpf_map__destroy(map->inner_map);
9407 		zfree(&map->inner_map);
9408 	}
9409 
9410 	zfree(&map->init_slots);
9411 	map->init_slots_sz = 0;
9412 
9413 	if (map->mmaped && map->mmaped != map->obj->arena_data)
9414 		munmap(map->mmaped, bpf_map_mmap_sz(map));
9415 	map->mmaped = NULL;
9416 
9417 	if (map->st_ops) {
9418 		zfree(&map->st_ops->data);
9419 		zfree(&map->st_ops->progs);
9420 		zfree(&map->st_ops->kern_func_off);
9421 		zfree(&map->st_ops);
9422 	}
9423 
9424 	zfree(&map->name);
9425 	zfree(&map->real_name);
9426 	zfree(&map->pin_path);
9427 
9428 	if (map->fd >= 0)
9429 		zclose(map->fd);
9430 }
9431 
9432 void bpf_object__close(struct bpf_object *obj)
9433 {
9434 	size_t i;
9435 
9436 	if (IS_ERR_OR_NULL(obj))
9437 		return;
9438 
9439 	/*
9440 	 * if user called bpf_object__prepare() without ever getting to
9441 	 * bpf_object__load(), we need to clean up stuff that is normally
9442 	 * cleaned up at the end of loading step
9443 	 */
9444 	bpf_object_post_load_cleanup(obj);
9445 
9446 	usdt_manager_free(obj->usdt_man);
9447 	obj->usdt_man = NULL;
9448 
9449 	bpf_gen__free(obj->gen_loader);
9450 	bpf_object__elf_finish(obj);
9451 	bpf_object_unload(obj);
9452 	btf__free(obj->btf);
9453 	btf__free(obj->btf_vmlinux);
9454 	btf_ext__free(obj->btf_ext);
9455 
9456 	for (i = 0; i < obj->nr_maps; i++)
9457 		bpf_map__destroy(&obj->maps[i]);
9458 
9459 	zfree(&obj->btf_custom_path);
9460 	zfree(&obj->kconfig);
9461 
9462 	for (i = 0; i < obj->nr_extern; i++) {
9463 		zfree(&obj->externs[i].name);
9464 		zfree(&obj->externs[i].essent_name);
9465 	}
9466 
9467 	zfree(&obj->externs);
9468 	obj->nr_extern = 0;
9469 
9470 	zfree(&obj->maps);
9471 	obj->nr_maps = 0;
9472 
9473 	if (obj->programs && obj->nr_programs) {
9474 		for (i = 0; i < obj->nr_programs; i++)
9475 			bpf_program__exit(&obj->programs[i]);
9476 	}
9477 	zfree(&obj->programs);
9478 
9479 	zfree(&obj->feat_cache);
9480 	zfree(&obj->token_path);
9481 	if (obj->token_fd > 0)
9482 		close(obj->token_fd);
9483 
9484 	zfree(&obj->arena_data);
9485 
9486 	zfree(&obj->jumptables_data);
9487 	obj->jumptables_data_sz = 0;
9488 
9489 	for (i = 0; i < obj->jumptable_map_cnt; i++)
9490 		close(obj->jumptable_maps[i].fd);
9491 	zfree(&obj->jumptable_maps);
9492 
9493 	free(obj);
9494 }
9495 
9496 const char *bpf_object__name(const struct bpf_object *obj)
9497 {
9498 	return obj ? obj->name : libbpf_err_ptr(-EINVAL);
9499 }
9500 
9501 unsigned int bpf_object__kversion(const struct bpf_object *obj)
9502 {
9503 	return obj ? obj->kern_version : 0;
9504 }
9505 
9506 int bpf_object__token_fd(const struct bpf_object *obj)
9507 {
9508 	return obj->token_fd ?: -1;
9509 }
9510 
9511 struct btf *bpf_object__btf(const struct bpf_object *obj)
9512 {
9513 	return obj ? obj->btf : NULL;
9514 }
9515 
9516 int bpf_object__btf_fd(const struct bpf_object *obj)
9517 {
9518 	return obj->btf ? btf__fd(obj->btf) : -1;
9519 }
9520 
9521 int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
9522 {
9523 	if (obj->state >= OBJ_LOADED)
9524 		return libbpf_err(-EINVAL);
9525 
9526 	obj->kern_version = kern_version;
9527 
9528 	return 0;
9529 }
9530 
9531 int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
9532 {
9533 	struct bpf_gen *gen;
9534 
9535 	if (!opts)
9536 		return libbpf_err(-EFAULT);
9537 	if (!OPTS_VALID(opts, gen_loader_opts))
9538 		return libbpf_err(-EINVAL);
9539 	gen = calloc(1, sizeof(*gen));
9540 	if (!gen)
9541 		return libbpf_err(-ENOMEM);
9542 	gen->opts = opts;
9543 	gen->swapped_endian = !is_native_endianness(obj);
9544 	obj->gen_loader = gen;
9545 	return 0;
9546 }
9547 
9548 static struct bpf_program *
9549 __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
9550 		    bool forward)
9551 {
9552 	size_t nr_programs = obj->nr_programs;
9553 	ssize_t idx;
9554 
9555 	if (!nr_programs)
9556 		return NULL;
9557 
9558 	if (!p)
9559 		/* Iterate from the beginning */
9560 		return forward ? &obj->programs[0] :
9561 			&obj->programs[nr_programs - 1];
9562 
9563 	if (p->obj != obj) {
9564 		pr_warn("error: program handler doesn't match object\n");
9565 		return errno = EINVAL, NULL;
9566 	}
9567 
9568 	idx = (p - obj->programs) + (forward ? 1 : -1);
9569 	if (idx >= obj->nr_programs || idx < 0)
9570 		return NULL;
9571 	return &obj->programs[idx];
9572 }
9573 
9574 struct bpf_program *
9575 bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
9576 {
9577 	struct bpf_program *prog = prev;
9578 
9579 	do {
9580 		prog = __bpf_program__iter(prog, obj, true);
9581 	} while (prog && prog_is_subprog(obj, prog));
9582 
9583 	return prog;
9584 }
9585 
9586 struct bpf_program *
9587 bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next)
9588 {
9589 	struct bpf_program *prog = next;
9590 
9591 	do {
9592 		prog = __bpf_program__iter(prog, obj, false);
9593 	} while (prog && prog_is_subprog(obj, prog));
9594 
9595 	return prog;
9596 }
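
/* Usage sketch: the iterators above skip subprograms, so this visits only
 * the object's entry-point programs:
 *
 *	struct bpf_program *prog;
 *
 *	bpf_object__for_each_program(prog, obj)
 *		printf("%s (section %s)\n", bpf_program__name(prog),
 *		       bpf_program__section_name(prog));
 */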
9597 
9598 void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
9599 {
9600 	prog->prog_ifindex = ifindex;
9601 }
9602 
9603 const char *bpf_program__name(const struct bpf_program *prog)
9604 {
9605 	return prog->name;
9606 }
9607 
9608 const char *bpf_program__section_name(const struct bpf_program *prog)
9609 {
9610 	return prog->sec_name;
9611 }
9612 
9613 bool bpf_program__autoload(const struct bpf_program *prog)
9614 {
9615 	return prog->autoload;
9616 }
9617 
9618 int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
9619 {
9620 	if (prog->obj->state >= OBJ_LOADED)
9621 		return libbpf_err(-EINVAL);
9622 
9623 	prog->autoload = autoload;
9624 	return 0;
9625 }
9626 
9627 bool bpf_program__autoattach(const struct bpf_program *prog)
9628 {
9629 	return prog->autoattach;
9630 }
9631 
9632 void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach)
9633 {
9634 	prog->autoattach = autoattach;
9635 }
9636 
9637 const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog)
9638 {
9639 	return prog->insns;
9640 }
9641 
9642 size_t bpf_program__insn_cnt(const struct bpf_program *prog)
9643 {
9644 	return prog->insns_cnt;
9645 }
9646 
9647 int bpf_program__set_insns(struct bpf_program *prog,
9648 			   struct bpf_insn *new_insns, size_t new_insn_cnt)
9649 {
9650 	struct bpf_insn *insns;
9651 
9652 	if (prog->obj->state >= OBJ_LOADED)
9653 		return libbpf_err(-EBUSY);
9654 
9655 	insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns));
9656 	/* NULL is a valid return from reallocarray if the new count is zero */
9657 	if (!insns && new_insn_cnt) {
9658 		pr_warn("prog '%s': failed to realloc prog code\n", prog->name);
9659 		return libbpf_err(-ENOMEM);
9660 	}
9661 	memcpy(insns, new_insns, new_insn_cnt * sizeof(*insns));
9662 
9663 	prog->insns = insns;
9664 	prog->insns_cnt = new_insn_cnt;
9665 	return 0;
9666 }
9667 
9668 int bpf_program__fd(const struct bpf_program *prog)
9669 {
9670 	if (!prog)
9671 		return libbpf_err(-EINVAL);
9672 
9673 	if (prog->fd < 0)
9674 		return libbpf_err(-ENOENT);
9675 
9676 	return prog->fd;
9677 }
9678 
9679 __alias(bpf_program__type)
9680 enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog);
9681 
9682 enum bpf_prog_type bpf_program__type(const struct bpf_program *prog)
9683 {
9684 	return prog->type;
9685 }
9686 
9687 static size_t custom_sec_def_cnt;
9688 static struct bpf_sec_def *custom_sec_defs;
9689 static struct bpf_sec_def custom_fallback_def;
9690 static bool has_custom_fallback_def;
9691 static int last_custom_sec_def_handler_id;
9692 
9693 int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
9694 {
9695 	if (prog->obj->state >= OBJ_LOADED)
9696 		return libbpf_err(-EBUSY);
9697 
9698 	/* if type is not changed, do nothing */
9699 	if (prog->type == type)
9700 		return 0;
9701 
9702 	prog->type = type;
9703 
9704 	/* If a program type was changed, we need to reset associated SEC()
9705 	 * handler, as it will be invalid now. The only exception is a generic
9706 	 * fallback handler, which by definition is program type-agnostic and
9707 	 * is a catch-all custom handler, optionally set by the application,
9708 	 * so should be able to handle any type of BPF program.
9709 	 */
9710 	if (prog->sec_def != &custom_fallback_def)
9711 		prog->sec_def = NULL;
9712 	return 0;
9713 }
9714 
9715 __alias(bpf_program__expected_attach_type)
9716 enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog);
9717 
9718 enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program *prog)
9719 {
9720 	return prog->expected_attach_type;
9721 }
9722 
9723 int bpf_program__set_expected_attach_type(struct bpf_program *prog,
9724 					   enum bpf_attach_type type)
9725 {
9726 	if (prog->obj->state >= OBJ_LOADED)
9727 		return libbpf_err(-EBUSY);
9728 
9729 	prog->expected_attach_type = type;
9730 	return 0;
9731 }
9732 
9733 __u32 bpf_program__flags(const struct bpf_program *prog)
9734 {
9735 	return prog->prog_flags;
9736 }
9737 
9738 int bpf_program__set_flags(struct bpf_program *prog, __u32 flags)
9739 {
9740 	if (prog->obj->state >= OBJ_LOADED)
9741 		return libbpf_err(-EBUSY);
9742 
9743 	prog->prog_flags = flags;
9744 	return 0;
9745 }
9746 
9747 __u32 bpf_program__log_level(const struct bpf_program *prog)
9748 {
9749 	return prog->log_level;
9750 }
9751 
9752 int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level)
9753 {
9754 	if (prog->obj->state >= OBJ_LOADED)
9755 		return libbpf_err(-EBUSY);
9756 
9757 	prog->log_level = log_level;
9758 	return 0;
9759 }
9760 
9761 const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size)
9762 {
9763 	*log_size = prog->log_size;
9764 	return prog->log_buf;
9765 }
9766 
9767 int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size)
9768 {
9769 	if (log_size && !log_buf)
9770 		return libbpf_err(-EINVAL);
9771 	if (log_size > UINT_MAX)
9772 		return libbpf_err(-EINVAL);
9773 	if (prog->obj->state >= OBJ_LOADED)
9774 		return libbpf_err(-EBUSY);
9775 
9776 	prog->log_buf = log_buf;
9777 	prog->log_size = log_size;
9778 	return 0;
9779 }
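
/* Usage sketch: route verifier output for one program into a caller-owned
 * buffer; this has to happen before bpf_object__load():
 *
 *	static char verifier_log[1024 * 1024];
 *
 *	bpf_program__set_log_buf(prog, verifier_log, sizeof(verifier_log));
 *	bpf_program__set_log_level(prog, 1);
 */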
9780 
9781 struct bpf_func_info *bpf_program__func_info(const struct bpf_program *prog)
9782 {
9783 	if (prog->func_info_rec_size != sizeof(struct bpf_func_info))
9784 		return libbpf_err_ptr(-EOPNOTSUPP);
9785 	return prog->func_info;
9786 }
9787 
9788 __u32 bpf_program__func_info_cnt(const struct bpf_program *prog)
9789 {
9790 	return prog->func_info_cnt;
9791 }
9792 
9793 struct bpf_line_info *bpf_program__line_info(const struct bpf_program *prog)
9794 {
9795 	if (prog->line_info_rec_size != sizeof(struct bpf_line_info))
9796 		return libbpf_err_ptr(-EOPNOTSUPP);
9797 	return prog->line_info;
9798 }
9799 
9800 __u32 bpf_program__line_info_cnt(const struct bpf_program *prog)
9801 {
9802 	return prog->line_info_cnt;
9803 }
9804 
9805 #define SEC_DEF(sec_pfx, ptype, atype, flags, ...) {			    \
9806 	.sec = (char *)sec_pfx,						    \
9807 	.prog_type = BPF_PROG_TYPE_##ptype,				    \
9808 	.expected_attach_type = atype,					    \
9809 	.cookie = (long)(flags),					    \
9810 	.prog_prepare_load_fn = libbpf_prepare_prog_load,		    \
9811 	__VA_ARGS__							    \
9812 }
9813 
9814 static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9815 static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9816 static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9817 static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9818 static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9819 static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9820 static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9821 static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9822 static int attach_kprobe_session(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9823 static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9824 static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9825 static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9826 
9827 static const struct bpf_sec_def section_defs[] = {
9828 	SEC_DEF("socket",		SOCKET_FILTER, 0, SEC_NONE),
9829 	SEC_DEF("sk_reuseport/migrate",	SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE),
9830 	SEC_DEF("sk_reuseport",		SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE),
9831 	SEC_DEF("kprobe+",		KPROBE,	0, SEC_NONE, attach_kprobe),
9832 	SEC_DEF("uprobe+",		KPROBE,	0, SEC_NONE, attach_uprobe),
9833 	SEC_DEF("uprobe.s+",		KPROBE,	0, SEC_SLEEPABLE, attach_uprobe),
9834 	SEC_DEF("kretprobe+",		KPROBE, 0, SEC_NONE, attach_kprobe),
9835 	SEC_DEF("uretprobe+",		KPROBE, 0, SEC_NONE, attach_uprobe),
9836 	SEC_DEF("uretprobe.s+",		KPROBE, 0, SEC_SLEEPABLE, attach_uprobe),
9837 	SEC_DEF("kprobe.multi+",	KPROBE,	BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
9838 	SEC_DEF("kretprobe.multi+",	KPROBE,	BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
9839 	SEC_DEF("kprobe.session+",	KPROBE,	BPF_TRACE_KPROBE_SESSION, SEC_NONE, attach_kprobe_session),
9840 	SEC_DEF("uprobe.multi+",	KPROBE,	BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
9841 	SEC_DEF("uretprobe.multi+",	KPROBE,	BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
9842 	SEC_DEF("uprobe.session+",	KPROBE,	BPF_TRACE_UPROBE_SESSION, SEC_NONE, attach_uprobe_multi),
9843 	SEC_DEF("uprobe.multi.s+",	KPROBE,	BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
9844 	SEC_DEF("uretprobe.multi.s+",	KPROBE,	BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
9845 	SEC_DEF("uprobe.session.s+",	KPROBE,	BPF_TRACE_UPROBE_SESSION, SEC_SLEEPABLE, attach_uprobe_multi),
9846 	SEC_DEF("ksyscall+",		KPROBE,	0, SEC_NONE, attach_ksyscall),
9847 	SEC_DEF("kretsyscall+",		KPROBE, 0, SEC_NONE, attach_ksyscall),
9848 	SEC_DEF("usdt+",		KPROBE,	0, SEC_USDT, attach_usdt),
9849 	SEC_DEF("usdt.s+",		KPROBE,	0, SEC_USDT | SEC_SLEEPABLE, attach_usdt),
9850 	SEC_DEF("tc/ingress",		SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE), /* alias for tcx */
9851 	SEC_DEF("tc/egress",		SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE),  /* alias for tcx */
9852 	SEC_DEF("tcx/ingress",		SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE),
9853 	SEC_DEF("tcx/egress",		SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE),
9854 	SEC_DEF("tc",			SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */
9855 	SEC_DEF("classifier",		SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */
9856 	SEC_DEF("action",		SCHED_ACT, 0, SEC_NONE), /* deprecated / legacy, use tcx */
9857 	SEC_DEF("netkit/primary",	SCHED_CLS, BPF_NETKIT_PRIMARY, SEC_NONE),
9858 	SEC_DEF("netkit/peer",		SCHED_CLS, BPF_NETKIT_PEER, SEC_NONE),
9859 	SEC_DEF("tracepoint+",		TRACEPOINT, 0, SEC_NONE, attach_tp),
9860 	SEC_DEF("tp+",			TRACEPOINT, 0, SEC_NONE, attach_tp),
9861 	SEC_DEF("raw_tracepoint+",	RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
9862 	SEC_DEF("raw_tp+",		RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
9863 	SEC_DEF("raw_tracepoint.w+",	RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
9864 	SEC_DEF("raw_tp.w+",		RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
9865 	SEC_DEF("tp_btf+",		TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace),
9866 	SEC_DEF("fentry+",		TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace),
9867 	SEC_DEF("fmod_ret+",		TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace),
9868 	SEC_DEF("fexit+",		TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace),
9869 	SEC_DEF("fentry.s+",		TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9870 	SEC_DEF("fmod_ret.s+",		TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9871 	SEC_DEF("fexit.s+",		TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9872 	SEC_DEF("fsession+",		TRACING, BPF_TRACE_FSESSION, SEC_ATTACH_BTF, attach_trace),
9873 	SEC_DEF("fsession.s+",		TRACING, BPF_TRACE_FSESSION, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9874 	SEC_DEF("freplace+",		EXT, 0, SEC_ATTACH_BTF, attach_trace),
9875 	SEC_DEF("lsm+",			LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm),
9876 	SEC_DEF("lsm.s+",		LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm),
9877 	SEC_DEF("lsm_cgroup+",		LSM, BPF_LSM_CGROUP, SEC_ATTACH_BTF),
9878 	SEC_DEF("iter+",		TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter),
9879 	SEC_DEF("iter.s+",		TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_iter),
9880 	SEC_DEF("syscall",		SYSCALL, 0, SEC_SLEEPABLE),
9881 	SEC_DEF("xdp.frags/devmap",	XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS),
9882 	SEC_DEF("xdp/devmap",		XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE),
9883 	SEC_DEF("xdp.frags/cpumap",	XDP, BPF_XDP_CPUMAP, SEC_XDP_FRAGS),
9884 	SEC_DEF("xdp/cpumap",		XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE),
9885 	SEC_DEF("xdp.frags",		XDP, BPF_XDP, SEC_XDP_FRAGS),
9886 	SEC_DEF("xdp",			XDP, BPF_XDP, SEC_ATTACHABLE_OPT),
9887 	SEC_DEF("perf_event",		PERF_EVENT, 0, SEC_NONE),
9888 	SEC_DEF("lwt_in",		LWT_IN, 0, SEC_NONE),
9889 	SEC_DEF("lwt_out",		LWT_OUT, 0, SEC_NONE),
9890 	SEC_DEF("lwt_xmit",		LWT_XMIT, 0, SEC_NONE),
9891 	SEC_DEF("lwt_seg6local",	LWT_SEG6LOCAL, 0, SEC_NONE),
9892 	SEC_DEF("sockops",		SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT),
9893 	SEC_DEF("sk_skb/stream_parser",	SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT),
9894 	SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT),
9895 	SEC_DEF("sk_skb/verdict",	SK_SKB, BPF_SK_SKB_VERDICT, SEC_ATTACHABLE_OPT),
9896 	SEC_DEF("sk_skb",		SK_SKB, 0, SEC_NONE),
9897 	SEC_DEF("sk_msg",		SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT),
9898 	SEC_DEF("lirc_mode2",		LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT),
9899 	SEC_DEF("flow_dissector",	FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT),
9900 	SEC_DEF("cgroup_skb/ingress",	CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT),
9901 	SEC_DEF("cgroup_skb/egress",	CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT),
9902 	SEC_DEF("cgroup/skb",		CGROUP_SKB, 0, SEC_NONE),
9903 	SEC_DEF("cgroup/sock_create",	CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE),
9904 	SEC_DEF("cgroup/sock_release",	CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE),
9905 	SEC_DEF("cgroup/sock",		CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT),
9906 	SEC_DEF("cgroup/post_bind4",	CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE),
9907 	SEC_DEF("cgroup/post_bind6",	CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE),
9908 	SEC_DEF("cgroup/bind4",		CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE),
9909 	SEC_DEF("cgroup/bind6",		CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE),
9910 	SEC_DEF("cgroup/connect4",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE),
9911 	SEC_DEF("cgroup/connect6",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE),
9912 	SEC_DEF("cgroup/connect_unix",	CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_CONNECT, SEC_ATTACHABLE),
9913 	SEC_DEF("cgroup/sendmsg4",	CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE),
9914 	SEC_DEF("cgroup/sendmsg6",	CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE),
9915 	SEC_DEF("cgroup/sendmsg_unix",	CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_SENDMSG, SEC_ATTACHABLE),
9916 	SEC_DEF("cgroup/recvmsg4",	CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE),
9917 	SEC_DEF("cgroup/recvmsg6",	CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE),
9918 	SEC_DEF("cgroup/recvmsg_unix",	CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_RECVMSG, SEC_ATTACHABLE),
9919 	SEC_DEF("cgroup/getpeername4",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE),
9920 	SEC_DEF("cgroup/getpeername6",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE),
9921 	SEC_DEF("cgroup/getpeername_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETPEERNAME, SEC_ATTACHABLE),
9922 	SEC_DEF("cgroup/getsockname4",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE),
9923 	SEC_DEF("cgroup/getsockname6",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE),
9924 	SEC_DEF("cgroup/getsockname_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETSOCKNAME, SEC_ATTACHABLE),
9925 	SEC_DEF("cgroup/sysctl",	CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE),
9926 	SEC_DEF("cgroup/getsockopt",	CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE),
9927 	SEC_DEF("cgroup/setsockopt",	CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE),
9928 	SEC_DEF("cgroup/dev",		CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT),
9929 	SEC_DEF("struct_ops+",		STRUCT_OPS, 0, SEC_NONE),
9930 	SEC_DEF("struct_ops.s+",	STRUCT_OPS, 0, SEC_SLEEPABLE),
9931 	SEC_DEF("sk_lookup",		SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE),
9932 	SEC_DEF("netfilter",		NETFILTER, BPF_NETFILTER, SEC_NONE),
9933 };
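
/* For illustration: how the table above maps SEC() names in BPF source
 * (target function names are examples):
 *
 *	SEC("kprobe/do_sys_openat2")	// "kprobe+" entry, auto-attach via attach_kprobe
 *	SEC("tp_btf/sched_switch")	// TRACING prog, BPF_TRACE_RAW_TP
 *	SEC("xdp")			// attach type optional (SEC_ATTACHABLE_OPT)
 *	SEC("cgroup/connect4")		// CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT
 */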
9934 
9935 int libbpf_register_prog_handler(const char *sec,
9936 				 enum bpf_prog_type prog_type,
9937 				 enum bpf_attach_type exp_attach_type,
9938 				 const struct libbpf_prog_handler_opts *opts)
9939 {
9940 	struct bpf_sec_def *sec_def;
9941 
9942 	if (!OPTS_VALID(opts, libbpf_prog_handler_opts))
9943 		return libbpf_err(-EINVAL);
9944 
9945 	if (last_custom_sec_def_handler_id == INT_MAX) /* prevent overflow */
9946 		return libbpf_err(-E2BIG);
9947 
9948 	if (sec) {
9949 		sec_def = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt + 1,
9950 					      sizeof(*sec_def));
9951 		if (!sec_def)
9952 			return libbpf_err(-ENOMEM);
9953 
9954 		custom_sec_defs = sec_def;
9955 		sec_def = &custom_sec_defs[custom_sec_def_cnt];
9956 	} else {
9957 		if (has_custom_fallback_def)
9958 			return libbpf_err(-EBUSY);
9959 
9960 		sec_def = &custom_fallback_def;
9961 	}
9962 
9963 	sec_def->sec = sec ? strdup(sec) : NULL;
9964 	if (sec && !sec_def->sec)
9965 		return libbpf_err(-ENOMEM);
9966 
9967 	sec_def->prog_type = prog_type;
9968 	sec_def->expected_attach_type = exp_attach_type;
9969 	sec_def->cookie = OPTS_GET(opts, cookie, 0);
9970 
9971 	sec_def->prog_setup_fn = OPTS_GET(opts, prog_setup_fn, NULL);
9972 	sec_def->prog_prepare_load_fn = OPTS_GET(opts, prog_prepare_load_fn, NULL);
9973 	sec_def->prog_attach_fn = OPTS_GET(opts, prog_attach_fn, NULL);
9974 
9975 	sec_def->handler_id = ++last_custom_sec_def_handler_id;
9976 
9977 	if (sec)
9978 		custom_sec_def_cnt++;
9979 	else
9980 		has_custom_fallback_def = true;
9981 
9982 	return sec_def->handler_id;
9983 }
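
/* Usage sketch (section prefix and callback are hypothetical): register a
 * custom SEC("myprog+") handler, then remove it by the returned id:
 *
 *	static int my_setup(struct bpf_program *prog, long cookie)
 *	{
 *		return 0;	// tweak prog here before load
 *	}
 *
 *	LIBBPF_OPTS(libbpf_prog_handler_opts, opts, .prog_setup_fn = my_setup);
 *	int id = libbpf_register_prog_handler("myprog+", BPF_PROG_TYPE_KPROBE,
 *					      0, &opts);
 *	...
 *	libbpf_unregister_prog_handler(id);
 */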
9984 
9985 int libbpf_unregister_prog_handler(int handler_id)
9986 {
9987 	struct bpf_sec_def *sec_defs;
9988 	int i;
9989 
9990 	if (handler_id <= 0)
9991 		return libbpf_err(-EINVAL);
9992 
9993 	if (has_custom_fallback_def && custom_fallback_def.handler_id == handler_id) {
9994 		memset(&custom_fallback_def, 0, sizeof(custom_fallback_def));
9995 		has_custom_fallback_def = false;
9996 		return 0;
9997 	}
9998 
9999 	for (i = 0; i < custom_sec_def_cnt; i++) {
10000 		if (custom_sec_defs[i].handler_id == handler_id)
10001 			break;
10002 	}
10003 
10004 	if (i == custom_sec_def_cnt)
10005 		return libbpf_err(-ENOENT);
10006 
10007 	free(custom_sec_defs[i].sec);
10008 	for (i = i + 1; i < custom_sec_def_cnt; i++)
10009 		custom_sec_defs[i - 1] = custom_sec_defs[i];
10010 	custom_sec_def_cnt--;
10011 
10012 	/* try to shrink the array, but it's ok if we couldn't */
10013 	sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs));
10014 	/* if new count is zero, reallocarray can return a valid NULL result;
10015 	 * in this case the previous pointer will be freed, so we *have to*
10016 	 * reassign old pointer to the new value (even if it's NULL)
10017 	 */
10018 	if (sec_defs || custom_sec_def_cnt == 0)
10019 		custom_sec_defs = sec_defs;
10020 
10021 	return 0;
10022 }
10023 
10024 static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_name)
10025 {
10026 	size_t len = strlen(sec_def->sec);
10027 
10028 	/* "type/" always has to have proper SEC("type/extras") form */
10029 	if (sec_def->sec[len - 1] == '/') {
10030 		if (str_has_pfx(sec_name, sec_def->sec))
10031 			return true;
10032 		return false;
10033 	}
10034 
10035 	/* "type+" means it can be either exact SEC("type") or
10036 	 * well-formed SEC("type/extras") with proper '/' separator
10037 	 */
10038 	if (sec_def->sec[len - 1] == '+') {
10039 		len--;
10040 		/* not even a prefix */
10041 		if (strncmp(sec_name, sec_def->sec, len) != 0)
10042 			return false;
10043 		/* exact match or has '/' separator */
10044 		if (sec_name[len] == '\0' || sec_name[len] == '/')
10045 			return true;
10046 		return false;
10047 	}
10048 
10049 	return strcmp(sec_name, sec_def->sec) == 0;
10050 }
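
/* Matching examples for the rules above (the custom "myprog/" definition is
 * hypothetical):
 *
 *	"kprobe+"  matches "kprobe" and "kprobe/do_unlinkat", but not "kprobes"
 *	"myprog/"  matches anything starting with "myprog/"
 *	"xdp"      (no suffix) matches only the exact string "xdp"
 */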
10051 
10052 static const struct bpf_sec_def *find_sec_def(const char *sec_name)
10053 {
10054 	const struct bpf_sec_def *sec_def;
10055 	int i, n;
10056 
10057 	n = custom_sec_def_cnt;
10058 	for (i = 0; i < n; i++) {
10059 		sec_def = &custom_sec_defs[i];
10060 		if (sec_def_matches(sec_def, sec_name))
10061 			return sec_def;
10062 	}
10063 
10064 	n = ARRAY_SIZE(section_defs);
10065 	for (i = 0; i < n; i++) {
10066 		sec_def = &section_defs[i];
10067 		if (sec_def_matches(sec_def, sec_name))
10068 			return sec_def;
10069 	}
10070 
10071 	if (has_custom_fallback_def)
10072 		return &custom_fallback_def;
10073 
10074 	return NULL;
10075 }
10076 
10077 #define MAX_TYPE_NAME_SIZE 32
10078 
10079 static char *libbpf_get_type_names(bool attach_type)
10080 {
10081 	int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
10082 	char *buf;
10083 
10084 	buf = malloc(len);
10085 	if (!buf)
10086 		return NULL;
10087 
10088 	buf[0] = '\0';
10089 	/* Build a string buffer with all available names */
10090 	for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
10091 		const struct bpf_sec_def *sec_def = &section_defs[i];
10092 
10093 		if (attach_type) {
10094 			if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
10095 				continue;
10096 
10097 			if (!(sec_def->cookie & SEC_ATTACHABLE))
10098 				continue;
10099 		}
10100 
10101 		if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
10102 			free(buf);
10103 			return NULL;
10104 		}
10105 		strcat(buf, " ");
10106 		strcat(buf, section_defs[i].sec);
10107 	}
10108 
10109 	return buf;
10110 }
10111 
10112 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
10113 			     enum bpf_attach_type *expected_attach_type)
10114 {
10115 	const struct bpf_sec_def *sec_def;
10116 	char *type_names;
10117 
10118 	if (!name)
10119 		return libbpf_err(-EINVAL);
10120 
10121 	sec_def = find_sec_def(name);
10122 	if (sec_def) {
10123 		*prog_type = sec_def->prog_type;
10124 		*expected_attach_type = sec_def->expected_attach_type;
10125 		return 0;
10126 	}
10127 
10128 	pr_debug("failed to guess program type from ELF section '%s'\n", name);
10129 	type_names = libbpf_get_type_names(false);
10130 	if (type_names != NULL) {
10131 		pr_debug("supported section(type) names are:%s\n", type_names);
10132 		free(type_names);
10133 	}
10134 
10135 	return libbpf_err(-ESRCH);
10136 }
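
/* Usage sketch: resolving a section name through the tables above:
 *
 *	enum bpf_prog_type type;
 *	enum bpf_attach_type attach;
 *
 *	if (!libbpf_prog_type_by_name("cgroup/connect4", &type, &attach)) {
 *		// type == BPF_PROG_TYPE_CGROUP_SOCK_ADDR
 *		// attach == BPF_CGROUP_INET4_CONNECT
 *	}
 */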
10137 
10138 const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t)
10139 {
10140 	if (t < 0 || t >= ARRAY_SIZE(attach_type_name))
10141 		return NULL;
10142 
10143 	return attach_type_name[t];
10144 }
10145 
10146 const char *libbpf_bpf_link_type_str(enum bpf_link_type t)
10147 {
10148 	if (t < 0 || t >= ARRAY_SIZE(link_type_name))
10149 		return NULL;
10150 
10151 	return link_type_name[t];
10152 }
10153 
10154 const char *libbpf_bpf_map_type_str(enum bpf_map_type t)
10155 {
10156 	if (t < 0 || t >= ARRAY_SIZE(map_type_name))
10157 		return NULL;
10158 
10159 	return map_type_name[t];
10160 }
10161 
10162 const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t)
10163 {
10164 	if (t < 0 || t >= ARRAY_SIZE(prog_type_name))
10165 		return NULL;
10166 
10167 	return prog_type_name[t];
10168 }
10169 
10170 static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
10171 						     int sec_idx,
10172 						     size_t offset)
10173 {
10174 	struct bpf_map *map;
10175 	size_t i;
10176 
10177 	for (i = 0; i < obj->nr_maps; i++) {
10178 		map = &obj->maps[i];
10179 		if (!bpf_map__is_struct_ops(map))
10180 			continue;
10181 		if (map->sec_idx == sec_idx &&
10182 		    map->sec_offset <= offset &&
10183 		    offset - map->sec_offset < map->def.value_size)
10184 			return map;
10185 	}
10186 
10187 	return NULL;
10188 }
10189 
10190 /* Collect the relocs from ELF, populate st_ops->progs[], and update
10191  * st_ops->data for the shadow type.
10192  */
10193 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
10194 					    Elf64_Shdr *shdr, Elf_Data *data)
10195 {
10196 	const struct btf_type *type;
10197 	const struct btf_member *member;
10198 	struct bpf_struct_ops *st_ops;
10199 	struct bpf_program *prog;
10200 	unsigned int shdr_idx;
10201 	const struct btf *btf;
10202 	struct bpf_map *map;
10203 	unsigned int moff, insn_idx;
10204 	const char *name;
10205 	__u32 member_idx;
10206 	Elf64_Sym *sym;
10207 	Elf64_Rel *rel;
10208 	int i, nrels;
10209 
10210 	btf = obj->btf;
10211 	nrels = shdr->sh_size / shdr->sh_entsize;
10212 	for (i = 0; i < nrels; i++) {
10213 		rel = elf_rel_by_idx(data, i);
10214 		if (!rel) {
10215 			pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
10216 			return -LIBBPF_ERRNO__FORMAT;
10217 		}
10218 
10219 		sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
10220 		if (!sym) {
10221 			pr_warn("struct_ops reloc: symbol %zx not found\n",
10222 				(size_t)ELF64_R_SYM(rel->r_info));
10223 			return -LIBBPF_ERRNO__FORMAT;
10224 		}
10225 
10226 		name = elf_sym_str(obj, sym->st_name) ?: "<?>";
10227 		map = find_struct_ops_map_by_offset(obj, shdr->sh_info, rel->r_offset);
10228 		if (!map) {
10229 			pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n",
10230 				(size_t)rel->r_offset);
10231 			return -EINVAL;
10232 		}
10233 
10234 		moff = rel->r_offset - map->sec_offset;
10235 		shdr_idx = sym->st_shndx;
10236 		st_ops = map->st_ops;
10237 		pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
10238 			 map->name,
10239 			 (long long)(rel->r_info >> 32),
10240 			 (long long)sym->st_value,
10241 			 shdr_idx, (size_t)rel->r_offset,
10242 			 map->sec_offset, sym->st_name, name);
10243 
10244 		if (shdr_idx >= SHN_LORESERVE) {
10245 			pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n",
10246 				map->name, (size_t)rel->r_offset, shdr_idx);
10247 			return -LIBBPF_ERRNO__RELOC;
10248 		}
10249 		if (sym->st_value % BPF_INSN_SZ) {
10250 			pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
10251 				map->name, (unsigned long long)sym->st_value);
10252 			return -LIBBPF_ERRNO__FORMAT;
10253 		}
10254 		insn_idx = sym->st_value / BPF_INSN_SZ;
10255 
10256 		type = btf__type_by_id(btf, st_ops->type_id);
10257 		member = find_member_by_offset(type, moff * 8);
10258 		if (!member) {
10259 			pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
10260 				map->name, moff);
10261 			return -EINVAL;
10262 		}
10263 		member_idx = member - btf_members(type);
10264 		name = btf__name_by_offset(btf, member->name_off);
10265 
10266 		if (!resolve_func_ptr(btf, member->type, NULL)) {
10267 			pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
10268 				map->name, name);
10269 			return -EINVAL;
10270 		}
10271 
10272 		prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
10273 		if (!prog) {
10274 			pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
10275 				map->name, shdr_idx, name);
10276 			return -EINVAL;
10277 		}
10278 
10279 		/* prevent the use of BPF prog with invalid type */
10280 		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) {
10281 			pr_warn("struct_ops reloc %s: prog %s is not struct_ops BPF program\n",
10282 				map->name, prog->name);
10283 			return -EINVAL;
10284 		}
10285 
10286 		st_ops->progs[member_idx] = prog;
10287 
10288 		/* st_ops->data will be exposed to users, being returned by
10289 		 * bpf_map__initial_value() as a pointer to the shadow
10290 		 * type. All function pointers in the original struct type
10291 		 * should be converted to a pointer to struct bpf_program
10292 		 * in the shadow type.
10293 		 */
10294 		*((struct bpf_program **)(st_ops->data + moff)) = prog;
10295 	}
10296 
10297 	return 0;
10298 }
10299 
10300 #define BTF_TRACE_PREFIX "btf_trace_"
10301 #define BTF_LSM_PREFIX "bpf_lsm_"
10302 #define BTF_ITER_PREFIX "bpf_iter_"
10303 #define BTF_MAX_NAME_SIZE 128
10304 
10305 void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
10306 				const char **prefix, int *kind)
10307 {
10308 	switch (attach_type) {
10309 	case BPF_TRACE_RAW_TP:
10310 		*prefix = BTF_TRACE_PREFIX;
10311 		*kind = BTF_KIND_TYPEDEF;
10312 		break;
10313 	case BPF_LSM_MAC:
10314 	case BPF_LSM_CGROUP:
10315 		*prefix = BTF_LSM_PREFIX;
10316 		*kind = BTF_KIND_FUNC;
10317 		break;
10318 	case BPF_TRACE_ITER:
10319 		*prefix = BTF_ITER_PREFIX;
10320 		*kind = BTF_KIND_FUNC;
10321 		break;
10322 	default:
10323 		*prefix = "";
10324 		*kind = BTF_KIND_FUNC;
10325 	}
10326 }
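
/* For illustration, the lookups this produces (target names are examples):
 *
 *	SEC("tp_btf/sched_switch")	-> TYPEDEF "btf_trace_sched_switch"
 *	SEC("lsm/file_open")		-> FUNC    "bpf_lsm_file_open"
 *	SEC("iter/task")		-> FUNC    "bpf_iter_task"
 *	SEC("fentry/do_unlinkat")	-> FUNC    "do_unlinkat" (no prefix)
 */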
10327 
10328 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
10329 				   const char *name, __u32 kind)
10330 {
10331 	char btf_type_name[BTF_MAX_NAME_SIZE];
10332 	int ret;
10333 
10334 	ret = snprintf(btf_type_name, sizeof(btf_type_name),
10335 		       "%s%s", prefix, name);
10336 	/* snprintf returns the number of characters that would have been
10337 	 * written, excluding the terminating null. So a result >=
10338 	 * BTF_MAX_NAME_SIZE indicates truncation.
10339 	 */
10340 	if (ret < 0 || ret >= sizeof(btf_type_name))
10341 		return -ENAMETOOLONG;
10342 	return btf__find_by_name_kind(btf, btf_type_name, kind);
10343 }
10344 
10345 static inline int find_attach_btf_id(struct btf *btf, const char *name,
10346 				     enum bpf_attach_type attach_type)
10347 {
10348 	const char *prefix;
10349 	int kind;
10350 
10351 	btf_get_kernel_prefix_kind(attach_type, &prefix, &kind);
10352 	return find_btf_by_prefix_kind(btf, prefix, name, kind);
10353 }
10354 
10355 int libbpf_find_vmlinux_btf_id(const char *name,
10356 			       enum bpf_attach_type attach_type)
10357 {
10358 	struct btf *btf;
10359 	int err;
10360 
10361 	btf = btf__load_vmlinux_btf();
10362 	err = libbpf_get_error(btf);
10363 	if (err) {
10364 		pr_warn("vmlinux BTF is not found\n");
10365 		return libbpf_err(err);
10366 	}
10367 
10368 	err = find_attach_btf_id(btf, name, attach_type);
10369 	if (err <= 0)
10370 		pr_warn("%s is not found in vmlinux BTF\n", name);
10371 
10372 	btf__free(btf);
10373 	return libbpf_err(err);
10374 }
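
/* Usage sketch (illustrative; error handling elided): resolve the vmlinux
 * BTF ID of an fentry attach target:
 *
 *	int btf_id;
 *
 *	btf_id = libbpf_find_vmlinux_btf_id("tcp_v4_connect", BPF_TRACE_FENTRY);
 */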
10375 
10376 static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd, int token_fd)
10377 {
10378 	struct bpf_prog_info info;
10379 	__u32 info_len = sizeof(info);
10380 	struct btf *btf;
10381 	int err;
10382 
10383 	memset(&info, 0, info_len);
10384 	err = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len);
10385 	if (err) {
10386 		pr_warn("failed bpf_prog_get_info_by_fd for FD %d: %s\n",
10387 			attach_prog_fd, errstr(err));
10388 		return err;
10389 	}
10390 
10391 	err = -EINVAL;
10392 	if (!info.btf_id) {
10393 		pr_warn("The target program doesn't have BTF\n");
10394 		goto out;
10395 	}
10396 	btf = btf_load_from_kernel(info.btf_id, NULL, token_fd);
10397 	err = libbpf_get_error(btf);
10398 	if (err) {
10399 		pr_warn("Failed to get BTF %d of the program: %s\n", info.btf_id, errstr(err));
10400 		goto out;
10401 	}
10402 	err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
10403 	btf__free(btf);
10404 	if (err <= 0) {
10405 		pr_warn("%s is not found in prog's BTF\n", name);
10406 		goto out;
10407 	}
10408 out:
10409 	return err;
10410 }
10411 
10412 static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
10413 			      enum bpf_attach_type attach_type,
10414 			      int *btf_obj_fd, int *btf_type_id)
10415 {
10416 	int ret, i, mod_len = 0;
10417 	const char *fn_name, *mod_name = NULL;
10418 
10419 	fn_name = strchr(attach_name, ':');
10420 	if (fn_name) {
10421 		mod_name = attach_name;
10422 		mod_len = fn_name - mod_name;
10423 		fn_name++;
10424 	}
10425 
10426 	if (!mod_name || strncmp(mod_name, "vmlinux", mod_len) == 0) {
10427 		ret = find_attach_btf_id(obj->btf_vmlinux,
10428 					 mod_name ? fn_name : attach_name,
10429 					 attach_type);
10430 		if (ret > 0) {
10431 			*btf_obj_fd = 0; /* vmlinux BTF */
10432 			*btf_type_id = ret;
10433 			return 0;
10434 		}
10435 		if (ret != -ENOENT)
10436 			return ret;
10437 	}
10438 
10439 	ret = load_module_btfs(obj);
10440 	if (ret)
10441 		return ret;
10442 
10443 	for (i = 0; i < obj->btf_module_cnt; i++) {
10444 		const struct module_btf *mod = &obj->btf_modules[i];
10445 
10446 		if (mod_name && strncmp(mod->name, mod_name, mod_len) != 0)
10447 			continue;
10448 
10449 		ret = find_attach_btf_id(mod->btf,
10450 					 mod_name ? fn_name : attach_name,
10451 					 attach_type);
10452 		if (ret > 0) {
10453 			*btf_obj_fd = mod->fd;
10454 			*btf_type_id = ret;
10455 			return 0;
10456 		}
10457 		if (ret == -ENOENT)
10458 			continue;
10459 
10460 		return ret;
10461 	}
10462 
10463 	return -ESRCH;
10464 }
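
/* Accepted attach_name forms (illustrative; the module and function names
 * are hypothetical):
 *
 *	"tcp_v4_connect"             - search vmlinux BTF, then module BTFs
 *	"vmlinux:tcp_v4_connect"     - search vmlinux BTF only
 *	"nf_conntrack:nf_ct_iterate" - search only that module's BTF
 */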
10465 
10466 static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
10467 				     int *btf_obj_fd, int *btf_type_id)
10468 {
10469 	enum bpf_attach_type attach_type = prog->expected_attach_type;
10470 	__u32 attach_prog_fd = prog->attach_prog_fd;
10471 	int err = 0;
10472 
10473 	/* BPF program's BTF ID */
10474 	if (prog->type == BPF_PROG_TYPE_EXT || attach_prog_fd) {
10475 		if (!attach_prog_fd) {
10476 			pr_warn("prog '%s': attach program FD is not set\n", prog->name);
10477 			return -EINVAL;
10478 		}
10479 		err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd, prog->obj->token_fd);
10480 		if (err < 0) {
10481 			pr_warn("prog '%s': failed to find BPF program (FD %d) BTF ID for '%s': %s\n",
10482 				prog->name, attach_prog_fd, attach_name, errstr(err));
10483 			return err;
10484 		}
10485 		*btf_obj_fd = 0;
10486 		*btf_type_id = err;
10487 		return 0;
10488 	}
10489 
10490 	/* kernel/module BTF ID */
10491 	if (prog->obj->gen_loader) {
10492 		bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type);
10493 		*btf_obj_fd = 0;
10494 		*btf_type_id = 1;
10495 	} else {
10496 		err = find_kernel_btf_id(prog->obj, attach_name,
10497 					 attach_type, btf_obj_fd,
10498 					 btf_type_id);
10499 	}
10500 	if (err) {
10501 		pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %s\n",
10502 			prog->name, attach_name, errstr(err));
10503 		return err;
10504 	}
10505 	return 0;
10506 }
10507 
10508 int libbpf_attach_type_by_name(const char *name,
10509 			       enum bpf_attach_type *attach_type)
10510 {
10511 	char *type_names;
10512 	const struct bpf_sec_def *sec_def;
10513 
10514 	if (!name)
10515 		return libbpf_err(-EINVAL);
10516 
10517 	sec_def = find_sec_def(name);
10518 	if (!sec_def) {
10519 		pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
10520 		type_names = libbpf_get_type_names(true);
10521 		if (type_names != NULL) {
10522 			pr_debug("attachable section(type) names are:%s\n", type_names);
10523 			free(type_names);
10524 		}
10525 
10526 		return libbpf_err(-EINVAL);
10527 	}
10528 
10529 	if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
10530 		return libbpf_err(-EINVAL);
10531 	if (!(sec_def->cookie & SEC_ATTACHABLE))
10532 		return libbpf_err(-EINVAL);
10533 
10534 	*attach_type = sec_def->expected_attach_type;
10535 	return 0;
10536 }
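
/* Usage sketch (illustrative):
 *
 *	enum bpf_attach_type type;
 *
 *	if (libbpf_attach_type_by_name("cgroup_skb/ingress", &type) == 0)
 *		type is now BPF_CGROUP_INET_INGRESS
 */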
10537 
10538 int bpf_map__fd(const struct bpf_map *map)
10539 {
10540 	if (!map)
10541 		return libbpf_err(-EINVAL);
10542 	if (!map_is_created(map))
10543 		return -1;
10544 	return map->fd;
10545 }
10546 
10547 static bool map_uses_real_name(const struct bpf_map *map)
10548 {
10549 	/* Since libbpf started to support custom .data.* and .rodata.* maps,
10550 	 * their user-visible name differs from the kernel-visible one. Users
10551 	 * see such a map's corresponding ELF section name as its map name.
10552 	 * This check distinguishes .data/.rodata from .data.* and .rodata.*
10553 	 * maps to know which name has to be returned to the user.
10554 	 */
10555 	if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0)
10556 		return true;
10557 	if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0)
10558 		return true;
10559 	return false;
10560 }
10561 
10562 const char *bpf_map__name(const struct bpf_map *map)
10563 {
10564 	if (!map)
10565 		return NULL;
10566 
10567 	if (map_uses_real_name(map))
10568 		return map->real_name;
10569 
10570 	return map->name;
10571 }
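
/* Example (illustrative): for a map defined in a custom SEC(".data.cfg"),
 * the kernel sees a sanitized object-prefixed name, while bpf_map__name()
 * returns the ELF section name ".data.cfg"; for plain .data, the
 * kernel-visible name (e.g. "myobj.data") is returned instead.
 */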
10572 
10573 enum bpf_map_type bpf_map__type(const struct bpf_map *map)
10574 {
10575 	return map->def.type;
10576 }
10577 
10578 int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
10579 {
10580 	if (map_is_created(map))
10581 		return libbpf_err(-EBUSY);
10582 	map->def.type = type;
10583 	return 0;
10584 }
10585 
10586 __u32 bpf_map__map_flags(const struct bpf_map *map)
10587 {
10588 	return map->def.map_flags;
10589 }
10590 
10591 int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
10592 {
10593 	if (map_is_created(map))
10594 		return libbpf_err(-EBUSY);
10595 	map->def.map_flags = flags;
10596 	return 0;
10597 }
10598 
10599 __u64 bpf_map__map_extra(const struct bpf_map *map)
10600 {
10601 	return map->map_extra;
10602 }
10603 
10604 int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra)
10605 {
10606 	if (map_is_created(map))
10607 		return libbpf_err(-EBUSY);
10608 	map->map_extra = map_extra;
10609 	return 0;
10610 }
10611 
10612 __u32 bpf_map__numa_node(const struct bpf_map *map)
10613 {
10614 	return map->numa_node;
10615 }
10616 
10617 int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
10618 {
10619 	if (map_is_created(map))
10620 		return libbpf_err(-EBUSY);
10621 	map->numa_node = numa_node;
10622 	return 0;
10623 }
10624 
10625 __u32 bpf_map__key_size(const struct bpf_map *map)
10626 {
10627 	return map->def.key_size;
10628 }
10629 
10630 int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
10631 {
10632 	if (map_is_created(map))
10633 		return libbpf_err(-EBUSY);
10634 	map->def.key_size = size;
10635 	return 0;
10636 }
10637 
10638 __u32 bpf_map__value_size(const struct bpf_map *map)
10639 {
10640 	return map->def.value_size;
10641 }
10642 
10643 static int map_btf_datasec_resize(struct bpf_map *map, __u32 size)
10644 {
10645 	struct btf *btf;
10646 	struct btf_type *datasec_type, *var_type;
10647 	struct btf_var_secinfo *var;
10648 	const struct btf_type *array_type;
10649 	const struct btf_array *array;
10650 	int vlen, element_sz, new_array_id;
10651 	__u32 nr_elements;
10652 
10653 	/* check btf existence */
10654 	btf = bpf_object__btf(map->obj);
10655 	if (!btf)
10656 		return -ENOENT;
10657 
10658 	/* verify map is datasec */
10659 	datasec_type = btf_type_by_id(btf, bpf_map__btf_value_type_id(map));
10660 	if (!btf_is_datasec(datasec_type)) {
10661 		pr_warn("map '%s': cannot be resized, map value type is not a datasec\n",
10662 			bpf_map__name(map));
10663 		return -EINVAL;
10664 	}
10665 
10666 	/* verify datasec has at least one var */
10667 	vlen = btf_vlen(datasec_type);
10668 	if (vlen == 0) {
10669 		pr_warn("map '%s': cannot be resized, map value datasec is empty\n",
10670 			bpf_map__name(map));
10671 		return -EINVAL;
10672 	}
10673 
10674 	/* verify last var in the datasec is an array */
10675 	var = &btf_var_secinfos(datasec_type)[vlen - 1];
10676 	var_type = btf_type_by_id(btf, var->type);
10677 	array_type = skip_mods_and_typedefs(btf, var_type->type, NULL);
10678 	if (!btf_is_array(array_type)) {
10679 		pr_warn("map '%s': cannot be resized, last var must be an array\n",
10680 			bpf_map__name(map));
10681 		return -EINVAL;
10682 	}
10683 
10684 	/* verify request size aligns with array */
10685 	array = btf_array(array_type);
10686 	element_sz = btf__resolve_size(btf, array->type);
10687 	if (element_sz <= 0 || (size - var->offset) % element_sz != 0) {
10688 		pr_warn("map '%s': cannot be resized, element size (%d) doesn't align with new total size (%u)\n",
10689 			bpf_map__name(map), element_sz, size);
10690 		return -EINVAL;
10691 	}
10692 
10693 	/* create a new array based on the existing array, but with new length */
10694 	nr_elements = (size - var->offset) / element_sz;
10695 	new_array_id = btf__add_array(btf, array->index_type, array->type, nr_elements);
10696 	if (new_array_id < 0)
10697 		return new_array_id;
10698 
10699 	/* adding a new btf type invalidates existing pointers to btf objects,
10700 	 * so refresh pointers before proceeding
10701 	 */
10702 	datasec_type = btf_type_by_id(btf, map->btf_value_type_id);
10703 	var = &btf_var_secinfos(datasec_type)[vlen - 1];
10704 	var_type = btf_type_by_id(btf, var->type);
10705 
10706 	/* finally update btf info */
10707 	datasec_type->size = size;
10708 	var->size = size - var->offset;
10709 	var_type->type = new_array_id;
10710 
10711 	return 0;
10712 }
10713 
10714 int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
10715 {
10716 	if (map_is_created(map))
10717 		return libbpf_err(-EBUSY);
10718 
10719 	if (map->mmaped) {
10720 		size_t mmap_old_sz, mmap_new_sz;
10721 		int err;
10722 
10723 		if (map->def.type != BPF_MAP_TYPE_ARRAY)
10724 			return libbpf_err(-EOPNOTSUPP);
10725 
10726 		mmap_old_sz = bpf_map_mmap_sz(map);
10727 		mmap_new_sz = array_map_mmap_sz(size, map->def.max_entries);
10728 		err = bpf_map_mmap_resize(map, mmap_old_sz, mmap_new_sz);
10729 		if (err) {
10730 			pr_warn("map '%s': failed to resize memory-mapped region: %s\n",
10731 				bpf_map__name(map), errstr(err));
10732 			return libbpf_err(err);
10733 		}
10734 		err = map_btf_datasec_resize(map, size);
10735 		if (err && err != -ENOENT) {
10736 			pr_warn("map '%s': failed to adjust resized BTF, clearing BTF key/value info: %s\n",
10737 				bpf_map__name(map), errstr(err));
10738 			map->btf_value_type_id = 0;
10739 			map->btf_key_type_id = 0;
10740 		}
10741 	}
10742 
10743 	map->def.value_size = size;
10744 	return 0;
10745 }
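
/* Usage sketch (illustrative; names are hypothetical): grow a global-data
 * array before load. This must run before bpf_object__load(), and the
 * datasec's last variable must be an array on the BPF side:
 *
 *	struct bpf_map *m = bpf_object__find_map_by_name(obj, ".data.ring");
 *	int err;
 *
 *	err = bpf_map__set_value_size(m, 64 * sizeof(int));
 */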
10746 
10747 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
10748 {
10749 	return map ? map->btf_key_type_id : 0;
10750 }
10751 
10752 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
10753 {
10754 	return map ? map->btf_value_type_id : 0;
10755 }
10756 
10757 int bpf_map__set_initial_value(struct bpf_map *map,
10758 			       const void *data, size_t size)
10759 {
10760 	size_t actual_sz;
10761 
10762 	if (map_is_created(map))
10763 		return libbpf_err(-EBUSY);
10764 
10765 	if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG)
10766 		return libbpf_err(-EINVAL);
10767 
10768 	if (map->def.type == BPF_MAP_TYPE_ARENA)
10769 		actual_sz = map->obj->arena_data_sz;
10770 	else
10771 		actual_sz = map->def.value_size;
10772 	if (size != actual_sz)
10773 		return libbpf_err(-EINVAL);
10774 
10775 	memcpy(map->mmaped, data, size);
10776 	return 0;
10777 }
10778 
10779 void *bpf_map__initial_value(const struct bpf_map *map, size_t *psize)
10780 {
10781 	if (bpf_map__is_struct_ops(map)) {
10782 		if (psize)
10783 			*psize = map->def.value_size;
10784 		return map->st_ops->data;
10785 	}
10786 
10787 	if (!map->mmaped)
10788 		return NULL;
10789 
10790 	if (map->def.type == BPF_MAP_TYPE_ARENA)
10791 		*psize = map->obj->arena_data_sz;
10792 	else
10793 		*psize = map->def.value_size;
10794 
10795 	return map->mmaped;
10796 }
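
/* Usage sketch (illustrative; 'new_data' is hypothetical): read back or
 * wholesale-replace global data before load; for set, size must match
 * exactly:
 *
 *	size_t sz;
 *	void *buf = bpf_map__initial_value(map, &sz);
 *
 *	bpf_map__set_initial_value(map, new_data, sz);
 */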
10797 
10798 bool bpf_map__is_internal(const struct bpf_map *map)
10799 {
10800 	return map->libbpf_type != LIBBPF_MAP_UNSPEC;
10801 }
10802 
10803 __u32 bpf_map__ifindex(const struct bpf_map *map)
10804 {
10805 	return map->map_ifindex;
10806 }
10807 
10808 int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
10809 {
10810 	if (map_is_created(map))
10811 		return libbpf_err(-EBUSY);
10812 	map->map_ifindex = ifindex;
10813 	return 0;
10814 }
10815 
10816 int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
10817 {
10818 	if (!bpf_map_type__is_map_in_map(map->def.type)) {
10819 		pr_warn("error: unsupported map type\n");
10820 		return libbpf_err(-EINVAL);
10821 	}
10822 	if (map->inner_map_fd != -1) {
10823 		pr_warn("error: inner_map_fd already specified\n");
10824 		return libbpf_err(-EINVAL);
10825 	}
10826 	if (map->inner_map) {
10827 		bpf_map__destroy(map->inner_map);
10828 		zfree(&map->inner_map);
10829 	}
10830 	map->inner_map_fd = fd;
10831 	return 0;
10832 }
10833 
10834 int bpf_map__set_exclusive_program(struct bpf_map *map, struct bpf_program *prog)
10835 {
10836 	if (map_is_created(map)) {
10837 		pr_warn("exclusive programs must be set before map creation\n");
10838 		return libbpf_err(-EINVAL);
10839 	}
10840 
10841 	if (map->obj != prog->obj) {
10842 		pr_warn("excl_prog and map must be from the same bpf object\n");
10843 		return libbpf_err(-EINVAL);
10844 	}
10845 
10846 	map->excl_prog = prog;
10847 	return 0;
10848 }
10849 
10850 struct bpf_program *bpf_map__exclusive_program(struct bpf_map *map)
10851 {
10852 	return map->excl_prog;
10853 }
10854 
10855 static struct bpf_map *
10856 __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
10857 {
10858 	ssize_t idx;
10859 	struct bpf_map *s, *e;
10860 
10861 	if (!obj || !obj->maps)
10862 		return errno = EINVAL, NULL;
10863 
10864 	s = obj->maps;
10865 	e = obj->maps + obj->nr_maps;
10866 
10867 	if ((m < s) || (m >= e)) {
10868 		pr_warn("error in %s: map handler doesn't belong to object\n",
10869 			 __func__);
10870 		return errno = EINVAL, NULL;
10871 	}
10872 
10873 	idx = (m - obj->maps) + i;
10874 	if (idx >= obj->nr_maps || idx < 0)
10875 		return NULL;
10876 	return &obj->maps[idx];
10877 }
10878 
10879 struct bpf_map *
10880 bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
10881 {
10882 	if (prev == NULL && obj != NULL)
10883 		return obj->maps;
10884 
10885 	return __bpf_map__iter(prev, obj, 1);
10886 }
10887 
10888 struct bpf_map *
10889 bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
10890 {
10891 	if (next == NULL && obj != NULL) {
10892 		if (!obj->nr_maps)
10893 			return NULL;
10894 		return obj->maps + obj->nr_maps - 1;
10895 	}
10896 
10897 	return __bpf_map__iter(next, obj, -1);
10898 }
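
/* Usage sketch (illustrative): the next/prev pair above backs the
 * bpf_object__for_each_map() macro from libbpf.h:
 *
 *	struct bpf_map *m;
 *
 *	bpf_object__for_each_map(m, obj)
 *		printf("%s\n", bpf_map__name(m));
 */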
10899 
10900 struct bpf_map *
10901 bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
10902 {
10903 	struct bpf_map *pos;
10904 
10905 	bpf_object__for_each_map(pos, obj) {
10906 		/* if it's a special internal map name (which always starts
10907 		 * with a dot) then check if that special name matches the
10908 		 * real map name (ELF section name)
10909 		 */
10910 		if (name[0] == '.') {
10911 			if (pos->real_name && strcmp(pos->real_name, name) == 0)
10912 				return pos;
10913 			continue;
10914 		}
10915 		/* otherwise map name has to be an exact match */
10916 		if (map_uses_real_name(pos)) {
10917 			if (strcmp(pos->real_name, name) == 0)
10918 				return pos;
10919 			continue;
10920 		}
10921 		if (strcmp(pos->name, name) == 0)
10922 			return pos;
10923 	}
10924 	return errno = ENOENT, NULL;
10925 }
10926 
10927 int
10928 bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
10929 {
10930 	return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
10931 }
10932 
10933 static int validate_map_op(const struct bpf_map *map, size_t key_sz,
10934 			   size_t value_sz, bool check_value_sz, __u64 flags)
10935 {
10936 	if (!map_is_created(map)) /* map is not yet created */
10937 		return -ENOENT;
10938 
10939 	if (map->def.key_size != key_sz) {
10940 		pr_warn("map '%s': unexpected key size %zu provided, expected %u\n",
10941 			map->name, key_sz, map->def.key_size);
10942 		return -EINVAL;
10943 	}
10944 
10945 	if (map->fd < 0) {
10946 		pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name);
10947 		return -EINVAL;
10948 	}
10949 
10950 	if (!check_value_sz)
10951 		return 0;
10952 
10953 	switch (map->def.type) {
10954 	case BPF_MAP_TYPE_PERCPU_ARRAY:
10955 	case BPF_MAP_TYPE_PERCPU_HASH:
10956 	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
10957 	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: {
10958 		int num_cpu = libbpf_num_possible_cpus();
10959 		size_t elem_sz = roundup(map->def.value_size, 8);
10960 
10961 		if (flags & (BPF_F_CPU | BPF_F_ALL_CPUS)) {
10962 			if ((flags & BPF_F_CPU) && (flags & BPF_F_ALL_CPUS)) {
10963 				pr_warn("map '%s': BPF_F_CPU and BPF_F_ALL_CPUS are mutually exclusive\n",
10964 					map->name);
10965 				return -EINVAL;
10966 			}
10967 			if (map->def.value_size != value_sz) {
10968 				pr_warn("map '%s': unexpected value size %zu provided for either BPF_F_CPU or BPF_F_ALL_CPUS, expected %u\n",
10969 					map->name, value_sz, map->def.value_size);
10970 				return -EINVAL;
10971 			}
10972 			break;
10973 		}
10974 
10975 		if (value_sz != num_cpu * elem_sz) {
10976 			pr_warn("map '%s': unexpected value size %zu provided for per-CPU map, expected %d * %zu = %zd\n",
10977 				map->name, value_sz, num_cpu, elem_sz, num_cpu * elem_sz);
10978 			return -EINVAL;
10979 		}
10980 		break;
10981 	}
10982 	default:
10983 		if (map->def.value_size != value_sz) {
10984 			pr_warn("map '%s': unexpected value size %zu provided, expected %u\n",
10985 				map->name, value_sz, map->def.value_size);
10986 			return -EINVAL;
10987 		}
10988 		break;
10989 	}
10990 	return 0;
10991 }
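
/* Worked example of the per-CPU sizing rule above (illustrative): for a
 * BPF_MAP_TYPE_PERCPU_ARRAY with value_size == 4 on a system with 8
 * possible CPUs, elem_sz = roundup(4, 8) = 8 and value_sz must be
 * 8 * 8 = 64 bytes; with BPF_F_CPU or BPF_F_ALL_CPUS, exactly 4 bytes
 * (a single value) are expected instead.
 */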
10992 
10993 int bpf_map__lookup_elem(const struct bpf_map *map,
10994 			 const void *key, size_t key_sz,
10995 			 void *value, size_t value_sz, __u64 flags)
10996 {
10997 	int err;
10998 
10999 	err = validate_map_op(map, key_sz, value_sz, true, flags);
11000 	if (err)
11001 		return libbpf_err(err);
11002 
11003 	return bpf_map_lookup_elem_flags(map->fd, key, value, flags);
11004 }
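
/* Usage sketch (illustrative; error handling elided): per-CPU lookup with
 * a buffer sized per the validate_map_op() rule:
 *
 *	int ncpu = libbpf_num_possible_cpus();
 *	__u64 *vals = calloc(ncpu, sizeof(*vals));
 *	__u32 key = 0;
 *	int err;
 *
 *	err = bpf_map__lookup_elem(map, &key, sizeof(key),
 *				   vals, ncpu * sizeof(*vals), 0);
 */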
11005 
11006 int bpf_map__update_elem(const struct bpf_map *map,
11007 			 const void *key, size_t key_sz,
11008 			 const void *value, size_t value_sz, __u64 flags)
11009 {
11010 	int err;
11011 
11012 	err = validate_map_op(map, key_sz, value_sz, true, flags);
11013 	if (err)
11014 		return libbpf_err(err);
11015 
11016 	return bpf_map_update_elem(map->fd, key, value, flags);
11017 }
11018 
11019 int bpf_map__delete_elem(const struct bpf_map *map,
11020 			 const void *key, size_t key_sz, __u64 flags)
11021 {
11022 	int err;
11023 
11024 	err = validate_map_op(map, key_sz, 0, false /* check_value_sz */, flags);
11025 	if (err)
11026 		return libbpf_err(err);
11027 
11028 	return bpf_map_delete_elem_flags(map->fd, key, flags);
11029 }
11030 
11031 int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
11032 				    const void *key, size_t key_sz,
11033 				    void *value, size_t value_sz, __u64 flags)
11034 {
11035 	int err;
11036 
11037 	err = validate_map_op(map, key_sz, value_sz, true, flags);
11038 	if (err)
11039 		return libbpf_err(err);
11040 
11041 	return bpf_map_lookup_and_delete_elem_flags(map->fd, key, value, flags);
11042 }
11043 
11044 int bpf_map__get_next_key(const struct bpf_map *map,
11045 			  const void *cur_key, void *next_key, size_t key_sz)
11046 {
11047 	int err;
11048 
11049 	err = validate_map_op(map, key_sz, 0, false /* check_value_sz */, 0);
11050 	if (err)
11051 		return libbpf_err(err);
11052 
11053 	return bpf_map_get_next_key(map->fd, cur_key, next_key);
11054 }
11055 
11056 long libbpf_get_error(const void *ptr)
11057 {
11058 	if (!IS_ERR_OR_NULL(ptr))
11059 		return 0;
11060 
11061 	if (IS_ERR(ptr))
11062 		errno = -PTR_ERR(ptr);
11063 
11064 	/* If ptr == NULL, then errno should already be set by the failing
11065 	 * API, because libbpf never returns NULL on success and now always
11066 	 * sets errno on error. So no extra errno handling is needed for the
11067 	 * ptr == NULL case.
11068 	 */
11069 	return -errno;
11070 }
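
/* Usage sketch (illustrative): with libbpf 1.0 error semantics, callers of
 * pointer-returning APIs can rely on errno directly:
 *
 *	struct bpf_link *link = bpf_program__attach(prog);
 *	int err;
 *
 *	if (!link)
 *		err = -errno;
 *
 * libbpf_get_error(link) remains as a compatibility helper for the same check.
 */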
11071 
11072 /* Replace link's underlying BPF program with the new one */
11073 int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
11074 {
11075 	int ret;
11076 	int prog_fd = bpf_program__fd(prog);
11077 
11078 	if (prog_fd < 0) {
11079 		pr_warn("prog '%s': can't use BPF program without FD (was it loaded?)\n",
11080 			prog->name);
11081 		return libbpf_err(-EINVAL);
11082 	}
11083 
11084 	ret = bpf_link_update(bpf_link__fd(link), prog_fd, NULL);
11085 	return libbpf_err_errno(ret);
11086 }
11087 
11088 /* Release "ownership" of the underlying BPF resource (typically, a BPF
11089  * program attached to some BPF hook, e.g., tracepoint, kprobe, etc). A
11090  * disconnected link, when destroyed through a bpf_link__destroy() call,
11091  * won't attempt to detach/unregister that BPF resource. This is useful in
11092  * situations where, say, the attached BPF program has to outlive the
11093  * userspace program that attached it. Depending on the type of BPF
11094  * program, though, additional steps (like pinning the BPF program in BPF
11095  * FS) might be necessary to ensure that userspace exit doesn't trigger
11096  * automatic detachment and cleanup inside the kernel.
11097  */
11098 void bpf_link__disconnect(struct bpf_link *link)
11099 {
11100 	link->disconnected = true;
11101 }
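
/* Usage sketch (illustrative): keep an attachment alive past process exit
 * by pinning the link in BPF FS, then disconnecting before destroying:
 *
 *	bpf_link__pin(link, "/sys/fs/bpf/mylink");
 *	bpf_link__disconnect(link);
 *	bpf_link__destroy(link);
 *
 * After disconnect, destroy only frees memory and leaves the hook attached.
 */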
11102 
11103 int bpf_link__destroy(struct bpf_link *link)
11104 {
11105 	int err = 0;
11106 
11107 	if (IS_ERR_OR_NULL(link))
11108 		return 0;
11109 
11110 	if (!link->disconnected && link->detach)
11111 		err = link->detach(link);
11112 	if (link->pin_path)
11113 		free(link->pin_path);
11114 	if (link->dealloc)
11115 		link->dealloc(link);
11116 	else
11117 		free(link);
11118 
11119 	return libbpf_err(err);
11120 }
11121 
11122 int bpf_link__fd(const struct bpf_link *link)
11123 {
11124 	return link->fd;
11125 }
11126 
11127 const char *bpf_link__pin_path(const struct bpf_link *link)
11128 {
11129 	return link->pin_path;
11130 }
11131 
11132 static int bpf_link__detach_fd(struct bpf_link *link)
11133 {
11134 	return libbpf_err_errno(close(link->fd));
11135 }
11136 
11137 struct bpf_link *bpf_link__open(const char *path)
11138 {
11139 	struct bpf_link *link;
11140 	int fd;
11141 
11142 	fd = bpf_obj_get(path);
11143 	if (fd < 0) {
11144 		fd = -errno;
11145 		pr_warn("failed to open link at %s: %d\n", path, fd);
11146 		return libbpf_err_ptr(fd);
11147 	}
11148 
11149 	link = calloc(1, sizeof(*link));
11150 	if (!link) {
11151 		close(fd);
11152 		return libbpf_err_ptr(-ENOMEM);
11153 	}
11154 	link->detach = &bpf_link__detach_fd;
11155 	link->fd = fd;
11156 
11157 	link->pin_path = strdup(path);
11158 	if (!link->pin_path) {
11159 		bpf_link__destroy(link);
11160 		return libbpf_err_ptr(-ENOMEM);
11161 	}
11162 
11163 	return link;
11164 }
11165 
11166 int bpf_link__detach(struct bpf_link *link)
11167 {
11168 	return bpf_link_detach(link->fd) ? -errno : 0;
11169 }
11170 
11171 int bpf_link__pin(struct bpf_link *link, const char *path)
11172 {
11173 	int err;
11174 
11175 	if (link->pin_path)
11176 		return libbpf_err(-EBUSY);
11177 	err = make_parent_dir(path);
11178 	if (err)
11179 		return libbpf_err(err);
11180 	err = check_path(path);
11181 	if (err)
11182 		return libbpf_err(err);
11183 
11184 	link->pin_path = strdup(path);
11185 	if (!link->pin_path)
11186 		return libbpf_err(-ENOMEM);
11187 
11188 	if (bpf_obj_pin(link->fd, link->pin_path)) {
11189 		err = -errno;
11190 		zfree(&link->pin_path);
11191 		return libbpf_err(err);
11192 	}
11193 
11194 	pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
11195 	return 0;
11196 }
11197 
11198 int bpf_link__unpin(struct bpf_link *link)
11199 {
11200 	int err;
11201 
11202 	if (!link->pin_path)
11203 		return libbpf_err(-EINVAL);
11204 
11205 	err = unlink(link->pin_path);
11206 	if (err != 0)
11207 		return -errno;
11208 
11209 	pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
11210 	zfree(&link->pin_path);
11211 	return 0;
11212 }
11213 
11214 struct bpf_link_perf {
11215 	struct bpf_link link;
11216 	int perf_event_fd;
11217 	/* legacy kprobe support: keep track of probe identifier and type */
11218 	char *legacy_probe_name;
11219 	bool legacy_is_kprobe;
11220 	bool legacy_is_retprobe;
11221 };
11222 
11223 static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe);
11224 static int remove_uprobe_event_legacy(const char *probe_name, bool retprobe);
11225 
11226 static int bpf_link_perf_detach(struct bpf_link *link)
11227 {
11228 	struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
11229 	int err = 0;
11230 
11231 	if (ioctl(perf_link->perf_event_fd, PERF_EVENT_IOC_DISABLE, 0) < 0)
11232 		err = -errno;
11233 
11234 	if (perf_link->perf_event_fd != link->fd)
11235 		close(perf_link->perf_event_fd);
11236 	close(link->fd);
11237 
11238 	/* legacy uprobe/kprobe needs to be removed after perf event fd closure */
11239 	if (perf_link->legacy_probe_name) {
11240 		if (perf_link->legacy_is_kprobe) {
11241 			err = remove_kprobe_event_legacy(perf_link->legacy_probe_name,
11242 							 perf_link->legacy_is_retprobe);
11243 		} else {
11244 			err = remove_uprobe_event_legacy(perf_link->legacy_probe_name,
11245 							 perf_link->legacy_is_retprobe);
11246 		}
11247 	}
11248 
11249 	return err;
11250 }
11251 
11252 static void bpf_link_perf_dealloc(struct bpf_link *link)
11253 {
11254 	struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
11255 
11256 	free(perf_link->legacy_probe_name);
11257 	free(perf_link);
11258 }
11259 
11260 struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
11261 						     const struct bpf_perf_event_opts *opts)
11262 {
11263 	struct bpf_link_perf *link;
11264 	int prog_fd, link_fd = -1, err;
11265 	bool force_ioctl_attach;
11266 
11267 	if (!OPTS_VALID(opts, bpf_perf_event_opts))
11268 		return libbpf_err_ptr(-EINVAL);
11269 
11270 	if (pfd < 0) {
11271 		pr_warn("prog '%s': invalid perf event FD %d\n",
11272 			prog->name, pfd);
11273 		return libbpf_err_ptr(-EINVAL);
11274 	}
11275 	prog_fd = bpf_program__fd(prog);
11276 	if (prog_fd < 0) {
11277 		pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
11278 			prog->name);
11279 		return libbpf_err_ptr(-EINVAL);
11280 	}
11281 
11282 	link = calloc(1, sizeof(*link));
11283 	if (!link)
11284 		return libbpf_err_ptr(-ENOMEM);
11285 	link->link.detach = &bpf_link_perf_detach;
11286 	link->link.dealloc = &bpf_link_perf_dealloc;
11287 	link->perf_event_fd = pfd;
11288 
11289 	force_ioctl_attach = OPTS_GET(opts, force_ioctl_attach, false);
11290 	if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !force_ioctl_attach) {
11291 		DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts,
11292 			.perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0));
11293 
11294 		link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, &link_opts);
11295 		if (link_fd < 0) {
11296 			err = -errno;
11297 			pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %s\n",
11298 				prog->name, pfd, errstr(err));
11299 			goto err_out;
11300 		}
11301 		link->link.fd = link_fd;
11302 	} else {
11303 		if (OPTS_GET(opts, bpf_cookie, 0)) {
11304 			pr_warn("prog '%s': user context value is not supported\n", prog->name);
11305 			err = -EOPNOTSUPP;
11306 			goto err_out;
11307 		}
11308 
11309 		if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
11310 			err = -errno;
11311 			pr_warn("prog '%s': failed to attach to perf_event FD %d: %s\n",
11312 				prog->name, pfd, errstr(err));
11313 			if (err == -EPROTO)
11314 				pr_warn("prog '%s': try adding PERF_SAMPLE_CALLCHAIN to, or removing exclude_callchain_[kernel|user] from, pfd %d\n",
11315 					prog->name, pfd);
11316 			goto err_out;
11317 		}
11318 		link->link.fd = pfd;
11319 	}
11320 
11321 	if (!OPTS_GET(opts, dont_enable, false)) {
11322 		if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
11323 			err = -errno;
11324 			pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n",
11325 				prog->name, pfd, errstr(err));
11326 			goto err_out;
11327 		}
11328 	}
11329 
11330 	return &link->link;
11331 err_out:
11332 	if (link_fd >= 0)
11333 		close(link_fd);
11334 	free(link);
11335 	return libbpf_err_ptr(err);
11336 }
11337 
11338 struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd)
11339 {
11340 	return bpf_program__attach_perf_event_opts(prog, pfd, NULL);
11341 }
11342 
11343 /*
11344  * This function is expected to parse an integer in the range of [0, 2^31-1]
11345  * from the given file using scanf format string fmt. If the actual parsed
11346  * value is negative, the result might be indistinguishable from an error.
11347  */
11348 static int parse_uint_from_file(const char *file, const char *fmt)
11349 {
11350 	int err, ret;
11351 	FILE *f;
11352 
11353 	f = fopen(file, "re");
11354 	if (!f) {
11355 		err = -errno;
11356 		pr_debug("failed to open '%s': %s\n", file, errstr(err));
11357 		return err;
11358 	}
11359 	err = fscanf(f, fmt, &ret);
11360 	if (err != 1) {
11361 		err = err == EOF ? -EIO : -errno;
11362 		pr_debug("failed to parse '%s': %s\n", file, errstr(err));
11363 		fclose(f);
11364 		return err;
11365 	}
11366 	fclose(f);
11367 	return ret;
11368 }
11369 
11370 static int determine_kprobe_perf_type(void)
11371 {
11372 	const char *file = "/sys/bus/event_source/devices/kprobe/type";
11373 
11374 	return parse_uint_from_file(file, "%d\n");
11375 }
11376 
11377 static int determine_uprobe_perf_type(void)
11378 {
11379 	const char *file = "/sys/bus/event_source/devices/uprobe/type";
11380 
11381 	return parse_uint_from_file(file, "%d\n");
11382 }
11383 
11384 static int determine_kprobe_retprobe_bit(void)
11385 {
11386 	const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
11387 
11388 	return parse_uint_from_file(file, "config:%d\n");
11389 }
11390 
11391 static int determine_uprobe_retprobe_bit(void)
11392 {
11393 	const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
11394 
11395 	return parse_uint_from_file(file, "config:%d\n");
11396 }
11397 
11398 #define PERF_UPROBE_REF_CTR_OFFSET_BITS 32
11399 #define PERF_UPROBE_REF_CTR_OFFSET_SHIFT 32
11400 
11401 static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
11402 				 uint64_t offset, int pid, size_t ref_ctr_off)
11403 {
11404 	const size_t attr_sz = sizeof(struct perf_event_attr);
11405 	struct perf_event_attr attr;
11406 	int type, pfd;
11407 
11408 	if ((__u64)ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS))
11409 		return -EINVAL;
11410 
11411 	memset(&attr, 0, attr_sz);
11412 
11413 	type = uprobe ? determine_uprobe_perf_type()
11414 		      : determine_kprobe_perf_type();
11415 	if (type < 0) {
11416 		pr_warn("failed to determine %s perf type: %s\n",
11417 			uprobe ? "uprobe" : "kprobe",
11418 			errstr(type));
11419 		return type;
11420 	}
11421 	if (retprobe) {
11422 		int bit = uprobe ? determine_uprobe_retprobe_bit()
11423 				 : determine_kprobe_retprobe_bit();
11424 
11425 		if (bit < 0) {
11426 			pr_warn("failed to determine %s retprobe bit: %s\n",
11427 				uprobe ? "uprobe" : "kprobe",
11428 				errstr(bit));
11429 			return bit;
11430 		}
11431 		attr.config |= 1 << bit;
11432 	}
11433 	attr.size = attr_sz;
11434 	attr.type = type;
11435 	attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
11436 	attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
11437 	attr.config2 = offset;		 /* kprobe_addr or probe_offset */
11438 
11439 	/* pid filter is meaningful only for uprobes */
11440 	pfd = syscall(__NR_perf_event_open, &attr,
11441 		      pid < 0 ? -1 : pid /* pid */,
11442 		      pid == -1 ? 0 : -1 /* cpu */,
11443 		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
11444 	return pfd >= 0 ? pfd : -errno;
11445 }
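
/* Worked example of the attr.config packing above (illustrative): for a
 * uretprobe with retprobe bit 0 and ref_ctr_off 0x10, attr.config becomes
 * (0x10ULL << PERF_UPROBE_REF_CTR_OFFSET_SHIFT) | (1 << 0), while
 * attr.config1 carries the probed name/path and attr.config2 the offset.
 */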
11446 
11447 static int append_to_file(const char *file, const char *fmt, ...)
11448 {
11449 	int fd, n, err = 0;
11450 	va_list ap;
11451 	char buf[1024];
11452 
11453 	va_start(ap, fmt);
11454 	n = vsnprintf(buf, sizeof(buf), fmt, ap);
11455 	va_end(ap);
11456 
11457 	if (n < 0 || n >= sizeof(buf))
11458 		return -EINVAL;
11459 
11460 	fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0);
11461 	if (fd < 0)
11462 		return -errno;
11463 
11464 	if (write(fd, buf, n) < 0)
11465 		err = -errno;
11466 
11467 	close(fd);
11468 	return err;
11469 }
11470 
11471 #define DEBUGFS "/sys/kernel/debug/tracing"
11472 #define TRACEFS "/sys/kernel/tracing"
11473 
11474 static bool use_debugfs(void)
11475 {
11476 	static int has_debugfs = -1;
11477 
11478 	if (has_debugfs < 0)
11479 		has_debugfs = faccessat(AT_FDCWD, DEBUGFS, F_OK, AT_EACCESS) == 0;
11480 
11481 	return has_debugfs == 1;
11482 }
11483 
11484 static const char *tracefs_path(void)
11485 {
11486 	return use_debugfs() ? DEBUGFS : TRACEFS;
11487 }
11488 
11489 static const char *tracefs_kprobe_events(void)
11490 {
11491 	return use_debugfs() ? DEBUGFS"/kprobe_events" : TRACEFS"/kprobe_events";
11492 }
11493 
11494 static const char *tracefs_uprobe_events(void)
11495 {
11496 	return use_debugfs() ? DEBUGFS"/uprobe_events" : TRACEFS"/uprobe_events";
11497 }
11498 
11499 static const char *tracefs_available_filter_functions(void)
11500 {
11501 	return use_debugfs() ? DEBUGFS"/available_filter_functions"
11502 			     : TRACEFS"/available_filter_functions";
11503 }
11504 
11505 static const char *tracefs_available_filter_functions_addrs(void)
11506 {
11507 	return use_debugfs() ? DEBUGFS"/available_filter_functions_addrs"
11508 			     : TRACEFS"/available_filter_functions_addrs";
11509 }
11510 
11511 static void gen_probe_legacy_event_name(char *buf, size_t buf_sz,
11512 					const char *name, size_t offset)
11513 {
11514 	static int index = 0;
11515 	int i;
11516 
11517 	snprintf(buf, buf_sz, "libbpf_%u_%d_%s_0x%zx", getpid(),
11518 		 __sync_fetch_and_add(&index, 1), name, offset);
11519 
11520 	/* sanitize the generated name: replace non-alphanumeric chars with '_' */
11521 	for (i = 0; buf[i]; i++) {
11522 		if (!isalnum(buf[i]))
11523 			buf[i] = '_';
11524 	}
11525 }
11526 
11527 static int add_kprobe_event_legacy(const char *probe_name, bool retprobe,
11528 				   const char *kfunc_name, size_t offset)
11529 {
11530 	return append_to_file(tracefs_kprobe_events(), "%c:%s/%s %s+0x%zx",
11531 			      retprobe ? 'r' : 'p',
11532 			      retprobe ? "kretprobes" : "kprobes",
11533 			      probe_name, kfunc_name, offset);
11534 }
11535 
11536 static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe)
11537 {
11538 	return append_to_file(tracefs_kprobe_events(), "-:%s/%s",
11539 			      retprobe ? "kretprobes" : "kprobes", probe_name);
11540 }
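
/* Example of the text appended to kprobe_events by the two helpers above
 * (illustrative, with a name produced by gen_probe_legacy_event_name() for
 * pid 1234, index 0, probing tcp_v4_connect+0x0):
 *
 *	p:kprobes/libbpf_1234_0_tcp_v4_connect_0x0 tcp_v4_connect+0x0
 *	-:kprobes/libbpf_1234_0_tcp_v4_connect_0x0
 */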
11541 
11542 static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe)
11543 {
11544 	char file[256];
11545 
11546 	snprintf(file, sizeof(file), "%s/events/%s/%s/id",
11547 		 tracefs_path(), retprobe ? "kretprobes" : "kprobes", probe_name);
11548 
11549 	return parse_uint_from_file(file, "%d\n");
11550 }
11551 
11552 static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
11553 					 const char *kfunc_name, size_t offset, int pid)
11554 {
11555 	const size_t attr_sz = sizeof(struct perf_event_attr);
11556 	struct perf_event_attr attr;
11557 	int type, pfd, err;
11558 
11559 	err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset);
11560 	if (err < 0) {
11561 		pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n",
11562 			kfunc_name, offset,
11563 			errstr(err));
11564 		return err;
11565 	}
11566 	type = determine_kprobe_perf_type_legacy(probe_name, retprobe);
11567 	if (type < 0) {
11568 		err = type;
11569 		pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n",
11570 			kfunc_name, offset,
11571 			errstr(err));
11572 		goto err_clean_legacy;
11573 	}
11574 
11575 	memset(&attr, 0, attr_sz);
11576 	attr.size = attr_sz;
11577 	attr.config = type;
11578 	attr.type = PERF_TYPE_TRACEPOINT;
11579 
11580 	pfd = syscall(__NR_perf_event_open, &attr,
11581 		      pid < 0 ? -1 : pid, /* pid */
11582 		      pid == -1 ? 0 : -1, /* cpu */
11583 		      -1 /* group_fd */,  PERF_FLAG_FD_CLOEXEC);
11584 	if (pfd < 0) {
11585 		err = -errno;
11586 		pr_warn("legacy kprobe perf_event_open() failed: %s\n",
11587 			errstr(err));
11588 		goto err_clean_legacy;
11589 	}
11590 	return pfd;
11591 
11592 err_clean_legacy:
11593 	/* Clear the newly added legacy kprobe_event */
11594 	remove_kprobe_event_legacy(probe_name, retprobe);
11595 	return err;
11596 }
11597 
11598 static const char *arch_specific_syscall_pfx(void)
11599 {
11600 #if defined(__x86_64__)
11601 	return "x64";
11602 #elif defined(__i386__)
11603 	return "ia32";
11604 #elif defined(__s390x__)
11605 	return "s390x";
11606 #elif defined(__arm__)
11607 	return "arm";
11608 #elif defined(__aarch64__)
11609 	return "arm64";
11610 #elif defined(__mips__)
11611 	return "mips";
11612 #elif defined(__riscv)
11613 	return "riscv";
11614 #elif defined(__powerpc__)
11615 	return "powerpc";
11616 #elif defined(__powerpc64__)
11617 	return "powerpc64";
11618 #else
11619 	return NULL;
11620 #endif
11621 }
11622 
11623 int probe_kern_syscall_wrapper(int token_fd)
11624 {
11625 	char syscall_name[64];
11626 	const char *ksys_pfx;
11627 
11628 	ksys_pfx = arch_specific_syscall_pfx();
11629 	if (!ksys_pfx)
11630 		return 0;
11631 
11632 	snprintf(syscall_name, sizeof(syscall_name), "__%s_sys_bpf", ksys_pfx);
11633 
11634 	if (determine_kprobe_perf_type() >= 0) {
11635 		int pfd;
11636 
11637 		pfd = perf_event_open_probe(false, false, syscall_name, 0, getpid(), 0);
11638 		if (pfd >= 0)
11639 			close(pfd);
11640 
11641 		return pfd >= 0 ? 1 : 0;
11642 	} else { /* legacy mode */
11643 		char probe_name[MAX_EVENT_NAME_LEN];
11644 
11645 		gen_probe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0);
11646 		if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0)
11647 			return 0;
11648 
11649 		(void)remove_kprobe_event_legacy(probe_name, false);
11650 		return 1;
11651 	}
11652 }
11653 
11654 struct bpf_link *
11655 bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
11656 				const char *func_name,
11657 				const struct bpf_kprobe_opts *opts)
11658 {
11659 	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
11660 	enum probe_attach_mode attach_mode;
11661 	char *legacy_probe = NULL;
11662 	struct bpf_link *link;
11663 	size_t offset;
11664 	bool retprobe, legacy;
11665 	int pfd, err;
11666 
11667 	if (!OPTS_VALID(opts, bpf_kprobe_opts))
11668 		return libbpf_err_ptr(-EINVAL);
11669 
11670 	attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
11671 	retprobe = OPTS_GET(opts, retprobe, false);
11672 	offset = OPTS_GET(opts, offset, 0);
11673 	pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
11674 
11675 	legacy = determine_kprobe_perf_type() < 0;
11676 	switch (attach_mode) {
11677 	case PROBE_ATTACH_MODE_LEGACY:
11678 		legacy = true;
11679 		pe_opts.force_ioctl_attach = true;
11680 		break;
11681 	case PROBE_ATTACH_MODE_PERF:
11682 		if (legacy)
11683 			return libbpf_err_ptr(-ENOTSUP);
11684 		pe_opts.force_ioctl_attach = true;
11685 		break;
11686 	case PROBE_ATTACH_MODE_LINK:
11687 		if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
11688 			return libbpf_err_ptr(-ENOTSUP);
11689 		break;
11690 	case PROBE_ATTACH_MODE_DEFAULT:
11691 		break;
11692 	default:
11693 		return libbpf_err_ptr(-EINVAL);
11694 	}
11695 
11696 	if (!legacy) {
11697 		pfd = perf_event_open_probe(false /* uprobe */, retprobe,
11698 					    func_name, offset,
11699 					    -1 /* pid */, 0 /* ref_ctr_off */);
11700 	} else {
11701 		char probe_name[MAX_EVENT_NAME_LEN];
11702 
11703 		gen_probe_legacy_event_name(probe_name, sizeof(probe_name),
11704 					    func_name, offset);
11705 
11706 		legacy_probe = strdup(probe_name);
11707 		if (!legacy_probe)
11708 			return libbpf_err_ptr(-ENOMEM);
11709 
11710 		pfd = perf_event_kprobe_open_legacy(legacy_probe, retprobe, func_name,
11711 						    offset, -1 /* pid */);
11712 	}
11713 	if (pfd < 0) {
11714 		err = -errno;
11715 		pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n",
11716 			prog->name, retprobe ? "kretprobe" : "kprobe",
11717 			func_name, offset,
11718 			errstr(err));
11719 		goto err_out;
11720 	}
11721 	link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
11722 	err = libbpf_get_error(link);
11723 	if (err) {
11724 		close(pfd);
11725 		pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n",
11726 			prog->name, retprobe ? "kretprobe" : "kprobe",
11727 			func_name, offset,
11728 			errstr(err));
11729 		goto err_clean_legacy;
11730 	}
11731 	if (legacy) {
11732 		struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
11733 
11734 		perf_link->legacy_probe_name = legacy_probe;
11735 		perf_link->legacy_is_kprobe = true;
11736 		perf_link->legacy_is_retprobe = retprobe;
11737 	}
11738 
11739 	return link;
11740 
11741 err_clean_legacy:
11742 	if (legacy)
11743 		remove_kprobe_event_legacy(legacy_probe, retprobe);
11744 err_out:
11745 	free(legacy_probe);
11746 	return libbpf_err_ptr(err);
11747 }
11748 
11749 struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
11750 					    bool retprobe,
11751 					    const char *func_name)
11752 {
11753 	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
11754 		.retprobe = retprobe,
11755 	);
11756 
11757 	return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
11758 }
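
/* Usage sketch (illustrative; error handling elided):
 *
 *	LIBBPF_OPTS(bpf_kprobe_opts, opts,
 *		.retprobe = false,
 *		.offset = 0x10,
 *	);
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe_opts(prog, "tcp_v4_connect", &opts);
 */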
11759 
11760 struct bpf_link *bpf_program__attach_ksyscall(const struct bpf_program *prog,
11761 					      const char *syscall_name,
11762 					      const struct bpf_ksyscall_opts *opts)
11763 {
11764 	LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
11765 	char func_name[128];
11766 
11767 	if (!OPTS_VALID(opts, bpf_ksyscall_opts))
11768 		return libbpf_err_ptr(-EINVAL);
11769 
11770 	if (kernel_supports(prog->obj, FEAT_SYSCALL_WRAPPER)) {
11771 		/* arch_specific_syscall_pfx() should never return NULL here
11772 		 * because it is guarded by kernel_supports(). However, since
11773 		 * the compiler does not know that, we keep an explicit NULL
11774 		 * fallback as well.
11775 		 */
11776 		snprintf(func_name, sizeof(func_name), "__%s_sys_%s",
11777 			 arch_specific_syscall_pfx() ? : "", syscall_name);
11778 	} else {
11779 		snprintf(func_name, sizeof(func_name), "__se_sys_%s", syscall_name);
11780 	}
11781 
11782 	kprobe_opts.retprobe = OPTS_GET(opts, retprobe, false);
11783 	kprobe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
11784 
11785 	return bpf_program__attach_kprobe_opts(prog, func_name, &kprobe_opts);
11786 }
11787 
11788 /* Adapted from perf/util/string.c */
11789 bool glob_match(const char *str, const char *pat)
11790 {
11791 	while (*str && *pat && *pat != '*') {
11792 		if (*pat == '?') {      /* Matches any single character */
11793 			str++;
11794 			pat++;
11795 			continue;
11796 		}
11797 		if (*str != *pat)
11798 			return false;
11799 		str++;
11800 		pat++;
11801 	}
11802 	/* Check wild card */
11803 	if (*pat == '*') {
11804 		while (*pat == '*')
11805 			pat++;
11806 		if (!*pat) /* Tail wild card matches all */
11807 			return true;
11808 		while (*str)
11809 			if (glob_match(str++, pat))
11810 				return true;
11811 	}
11812 	return !*str && !*pat;
11813 }
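
/* Examples (illustrative):
 *
 *	glob_match("tcp_v4_connect", "tcp_*")     -> true
 *	glob_match("tcp_v4_connect", "tcp_v?_*")  -> true
 *	glob_match("udp_sendmsg", "tcp_*")        -> false
 */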
11814 
11815 struct kprobe_multi_resolve {
11816 	const char *pattern;
11817 	unsigned long *addrs;
11818 	size_t cap;
11819 	size_t cnt;
11820 };
11821 
11822 struct avail_kallsyms_data {
11823 	char **syms;
11824 	size_t cnt;
11825 	struct kprobe_multi_resolve *res;
11826 };
11827 
11828 static int avail_func_cmp(const void *a, const void *b)
11829 {
11830 	return strcmp(*(const char **)a, *(const char **)b);
11831 }
11832 
11833 static int avail_kallsyms_cb(unsigned long long sym_addr, char sym_type,
11834 			     const char *sym_name, void *ctx)
11835 {
11836 	struct avail_kallsyms_data *data = ctx;
11837 	struct kprobe_multi_resolve *res = data->res;
11838 	int err;
11839 
11840 	if (!glob_match(sym_name, res->pattern))
11841 		return 0;
11842 
11843 	if (!bsearch(&sym_name, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp)) {
11844 		/* Some kernel versions strip the .llvm.<hash> suffix from
11845 		 * function names reported in available_filter_functions, but
11846 		 * don't do so for kallsyms. While this is clearly a kernel
11847 		 * bug (fixed by [0]), we try to accommodate it in libbpf to
11848 		 * make multi-kprobe usability a bit better: if no match is
11849 		 * found, we strip the .llvm. suffix and try one more time.
11850 		 *
11851 		 *   [0] fb6a421fb615 ("kallsyms: Match symbols exactly with CONFIG_LTO_CLANG")
11852 		 */
11853 		char sym_trim[256], *psym_trim = sym_trim;
11854 		const char *sym_sfx;
11855 
11856 		if (!(sym_sfx = strstr(sym_name, ".llvm.")))
11857 			return 0;
11858 
11859 		/* the psym_trim vs sym_trim dance avoids pointer-vs-array
11860 		 * coercion differences and yields the proper `const char **`
11861 		 * pointer that avail_func_cmp() expects
11862 		 */
11863 		snprintf(sym_trim, sizeof(sym_trim), "%.*s", (int)(sym_sfx - sym_name), sym_name);
11864 		if (!bsearch(&psym_trim, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp))
11865 			return 0;
11866 	}
11867 
11868 	err = libbpf_ensure_mem((void **)&res->addrs, &res->cap, sizeof(*res->addrs), res->cnt + 1);
11869 	if (err)
11870 		return err;
11871 
11872 	res->addrs[res->cnt++] = (unsigned long)sym_addr;
11873 	return 0;
11874 }
11875 
11876 static int libbpf_available_kallsyms_parse(struct kprobe_multi_resolve *res)
11877 {
11878 	const char *available_functions_file = tracefs_available_filter_functions();
11879 	struct avail_kallsyms_data data;
11880 	char sym_name[500];
11881 	FILE *f;
11882 	int err = 0, ret, i;
11883 	char **syms = NULL;
11884 	size_t cap = 0, cnt = 0;
11885 
11886 	f = fopen(available_functions_file, "re");
11887 	if (!f) {
11888 		err = -errno;
11889 		pr_warn("failed to open %s: %s\n", available_functions_file, errstr(err));
11890 		return err;
11891 	}
11892 
11893 	while (true) {
11894 		char *name;
11895 
11896 		ret = fscanf(f, "%499s%*[^\n]\n", sym_name);
11897 		if (ret == EOF && feof(f))
11898 			break;
11899 
11900 		if (ret != 1) {
11901 			pr_warn("failed to parse available_filter_functions entry: %d\n", ret);
11902 			err = -EINVAL;
11903 			goto cleanup;
11904 		}
11905 
11906 		if (!glob_match(sym_name, res->pattern))
11907 			continue;
11908 
11909 		err = libbpf_ensure_mem((void **)&syms, &cap, sizeof(*syms), cnt + 1);
11910 		if (err)
11911 			goto cleanup;
11912 
11913 		name = strdup(sym_name);
11914 		if (!name) {
11915 			err = -errno;
11916 			goto cleanup;
11917 		}
11918 
11919 		syms[cnt++] = name;
11920 	}
11921 
11922 	/* no entries found, bail out */
11923 	if (cnt == 0) {
11924 		err = -ENOENT;
11925 		goto cleanup;
11926 	}
11927 
11928 	/* sort available functions */
11929 	qsort(syms, cnt, sizeof(*syms), avail_func_cmp);
11930 
11931 	data.syms = syms;
11932 	data.res = res;
11933 	data.cnt = cnt;
11934 	libbpf_kallsyms_parse(avail_kallsyms_cb, &data);
11935 
11936 	if (res->cnt == 0)
11937 		err = -ENOENT;
11938 
11939 cleanup:
11940 	for (i = 0; i < cnt; i++)
11941 		free((char *)syms[i]);
11942 	free(syms);
11943 
11944 	fclose(f);
11945 	return err;
11946 }
11947 
11948 static bool has_available_filter_functions_addrs(void)
11949 {
11950 	return access(tracefs_available_filter_functions_addrs(), R_OK) != -1;
11951 }
11952 
11953 static int libbpf_available_kprobes_parse(struct kprobe_multi_resolve *res)
11954 {
11955 	const char *available_path = tracefs_available_filter_functions_addrs();
11956 	char sym_name[500];
11957 	FILE *f;
11958 	int ret, err = 0;
11959 	unsigned long long sym_addr;
11960 
11961 	f = fopen(available_path, "re");
11962 	if (!f) {
11963 		err = -errno;
11964 		pr_warn("failed to open %s: %s\n", available_path, errstr(err));
11965 		return err;
11966 	}
11967 
11968 	while (true) {
11969 		ret = fscanf(f, "%llx %499s%*[^\n]\n", &sym_addr, sym_name);
11970 		if (ret == EOF && feof(f))
11971 			break;
11972 
11973 		if (ret != 2) {
11974 			pr_warn("failed to parse available_filter_functions_addrs entry: %d\n",
11975 				ret);
11976 			err = -EINVAL;
11977 			goto cleanup;
11978 		}
11979 
11980 		if (!glob_match(sym_name, res->pattern))
11981 			continue;
11982 
11983 		err = libbpf_ensure_mem((void **)&res->addrs, &res->cap,
11984 					sizeof(*res->addrs), res->cnt + 1);
11985 		if (err)
11986 			goto cleanup;
11987 
11988 		res->addrs[res->cnt++] = (unsigned long)sym_addr;
11989 	}
11990 
11991 	if (res->cnt == 0)
11992 		err = -ENOENT;
11993 
11994 cleanup:
11995 	fclose(f);
11996 	return err;
11997 }
11998 
11999 struct bpf_link *
12000 bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
12001 				      const char *pattern,
12002 				      const struct bpf_kprobe_multi_opts *opts)
12003 {
12004 	LIBBPF_OPTS(bpf_link_create_opts, lopts);
12005 	struct kprobe_multi_resolve res = {
12006 		.pattern = pattern,
12007 	};
12008 	enum bpf_attach_type attach_type;
12009 	struct bpf_link *link = NULL;
12010 	const unsigned long *addrs;
12011 	int err, link_fd, prog_fd;
12012 	bool retprobe, session, unique_match;
12013 	const __u64 *cookies;
12014 	const char **syms;
12015 	size_t cnt;
12016 
12017 	if (!OPTS_VALID(opts, bpf_kprobe_multi_opts))
12018 		return libbpf_err_ptr(-EINVAL);
12019 
12020 	prog_fd = bpf_program__fd(prog);
12021 	if (prog_fd < 0) {
12022 		pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
12023 			prog->name);
12024 		return libbpf_err_ptr(-EINVAL);
12025 	}
12026 
12027 	syms    = OPTS_GET(opts, syms, false);
12028 	addrs   = OPTS_GET(opts, addrs, false);
12029 	cnt     = OPTS_GET(opts, cnt, false);
12030 	cookies = OPTS_GET(opts, cookies, false);
12031 	unique_match = OPTS_GET(opts, unique_match, false);
12032 
12033 	if (!pattern && !addrs && !syms)
12034 		return libbpf_err_ptr(-EINVAL);
12035 	if (pattern && (addrs || syms || cookies || cnt))
12036 		return libbpf_err_ptr(-EINVAL);
12037 	if (!pattern && !cnt)
12038 		return libbpf_err_ptr(-EINVAL);
12039 	if (!pattern && unique_match)
12040 		return libbpf_err_ptr(-EINVAL);
12041 	if (addrs && syms)
12042 		return libbpf_err_ptr(-EINVAL);
12043 
12044 	if (pattern) {
12045 		if (has_available_filter_functions_addrs())
12046 			err = libbpf_available_kprobes_parse(&res);
12047 		else
12048 			err = libbpf_available_kallsyms_parse(&res);
12049 		if (err)
12050 			goto error;
12051 
12052 		if (unique_match && res.cnt != 1) {
12053 			pr_warn("prog '%s': failed to find a unique match for '%s' (%zu matches)\n",
12054 				prog->name, pattern, res.cnt);
12055 			err = -EINVAL;
12056 			goto error;
12057 		}
12058 
12059 		addrs = res.addrs;
12060 		cnt = res.cnt;
12061 	}
12062 
12063 	retprobe = OPTS_GET(opts, retprobe, false);
12064 	session  = OPTS_GET(opts, session, false);
12065 
12066 	if (retprobe && session)
12067 		return libbpf_err_ptr(-EINVAL);
12068 
12069 	attach_type = session ? BPF_TRACE_KPROBE_SESSION : BPF_TRACE_KPROBE_MULTI;
12070 
12071 	lopts.kprobe_multi.syms = syms;
12072 	lopts.kprobe_multi.addrs = addrs;
12073 	lopts.kprobe_multi.cookies = cookies;
12074 	lopts.kprobe_multi.cnt = cnt;
12075 	lopts.kprobe_multi.flags = retprobe ? BPF_F_KPROBE_MULTI_RETURN : 0;
12076 
12077 	link = calloc(1, sizeof(*link));
12078 	if (!link) {
12079 		err = -ENOMEM;
12080 		goto error;
12081 	}
12082 	link->detach = &bpf_link__detach_fd;
12083 
12084 	link_fd = bpf_link_create(prog_fd, 0, attach_type, &lopts);
12085 	if (link_fd < 0) {
12086 		err = -errno;
12087 		pr_warn("prog '%s': failed to attach: %s\n",
12088 			prog->name, errstr(err));
12089 		goto error;
12090 	}
12091 	link->fd = link_fd;
12092 	free(res.addrs);
12093 	return link;
12094 
12095 error:
12096 	free(link);
12097 	free(res.addrs);
12098 	return libbpf_err_ptr(err);
12099 }
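
/* Usage sketch (illustrative; error handling elided): attach one program
 * to every kernel function matching a glob pattern:
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe_multi_opts(prog, "tcp_*", NULL);
 */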
12100 
12101 static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12102 {
12103 	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
12104 	unsigned long offset = 0;
12105 	const char *func_name;
12106 	char *func;
12107 	int n;
12108 
12109 	*link = NULL;
12110 
12111 	/* no auto-attach for SEC("kprobe") and SEC("kretprobe") */
12112 	if (strcmp(prog->sec_name, "kprobe") == 0 || strcmp(prog->sec_name, "kretprobe") == 0)
12113 		return 0;
12114 
12115 	opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/");
12116 	if (opts.retprobe)
12117 		func_name = prog->sec_name + sizeof("kretprobe/") - 1;
12118 	else
12119 		func_name = prog->sec_name + sizeof("kprobe/") - 1;
12120 
12121 	n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset);
12122 	if (n < 1) {
12123 		pr_warn("kprobe name is invalid: %s\n", func_name);
12124 		return -EINVAL;
12125 	}
12126 	if (opts.retprobe && offset != 0) {
12127 		free(func);
12128 		pr_warn("kretprobes do not support offset specification\n");
12129 		return -EINVAL;
12130 	}
12131 
12132 	opts.offset = offset;
12133 	*link = bpf_program__attach_kprobe_opts(prog, func, &opts);
12134 	free(func);
12135 	return libbpf_get_error(*link);
12136 }
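
/* SEC() forms handled above (illustrative):
 *
 *	SEC("kprobe/tcp_v4_connect")       - entry probe
 *	SEC("kprobe/tcp_v4_connect+0x10")  - entry probe at offset 0x10
 *	SEC("kretprobe/tcp_v4_connect")    - return probe (offset not allowed)
 */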
12137 
12138 static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12139 {
12140 	LIBBPF_OPTS(bpf_ksyscall_opts, opts);
12141 	const char *syscall_name;
12142 
12143 	*link = NULL;
12144 
12145 	/* no auto-attach for SEC("ksyscall") and SEC("kretsyscall") */
12146 	if (strcmp(prog->sec_name, "ksyscall") == 0 || strcmp(prog->sec_name, "kretsyscall") == 0)
12147 		return 0;
12148 
12149 	opts.retprobe = str_has_pfx(prog->sec_name, "kretsyscall/");
12150 	if (opts.retprobe)
12151 		syscall_name = prog->sec_name + sizeof("kretsyscall/") - 1;
12152 	else
12153 		syscall_name = prog->sec_name + sizeof("ksyscall/") - 1;
12154 
12155 	*link = bpf_program__attach_ksyscall(prog, syscall_name, &opts);
12156 	return *link ? 0 : -errno;
12157 }
12158 
12159 static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12160 {
12161 	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
12162 	const char *spec;
12163 	char *pattern;
12164 	int n;
12165 
12166 	*link = NULL;
12167 
12168 	/* no auto-attach for SEC("kprobe.multi") and SEC("kretprobe.multi") */
12169 	if (strcmp(prog->sec_name, "kprobe.multi") == 0 ||
12170 	    strcmp(prog->sec_name, "kretprobe.multi") == 0)
12171 		return 0;
12172 
12173 	opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/");
12174 	if (opts.retprobe)
12175 		spec = prog->sec_name + sizeof("kretprobe.multi/") - 1;
12176 	else
12177 		spec = prog->sec_name + sizeof("kprobe.multi/") - 1;
12178 
12179 	n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
12180 	if (n < 1) {
12181 		pr_warn("kprobe multi pattern is invalid: %s\n", spec);
12182 		return -EINVAL;
12183 	}
12184 
12185 	*link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts);
12186 	free(pattern);
12187 	return libbpf_get_error(*link);
12188 }
12189 
12190 static int attach_kprobe_session(const struct bpf_program *prog, long cookie,
12191 				 struct bpf_link **link)
12192 {
12193 	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts, .session = true);
12194 	const char *spec;
12195 	char *pattern;
12196 	int n;
12197 
12198 	*link = NULL;
12199 
12200 	/* no auto-attach for SEC("kprobe.session") */
12201 	if (strcmp(prog->sec_name, "kprobe.session") == 0)
12202 		return 0;
12203 
12204 	spec = prog->sec_name + sizeof("kprobe.session/") - 1;
12205 	n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
12206 	if (n < 1) {
12207 		pr_warn("kprobe session pattern is invalid: %s\n", spec);
12208 		return -EINVAL;
12209 	}
12210 
12211 	*link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts);
12212 	free(pattern);
12213 	return *link ? 0 : -errno;
12214 }
12215 
12216 static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12217 {
12218 	char *probe_type = NULL, *binary_path = NULL, *func_name = NULL;
12219 	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
12220 	int n, ret = -EINVAL;
12221 
12222 	*link = NULL;
12223 
12224 	n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]",
12225 		   &probe_type, &binary_path, &func_name);
12226 	switch (n) {
12227 	case 1:
12228 		/* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */
12229 		ret = 0;
12230 		break;
12231 	case 3:
12232 		opts.session = str_has_pfx(probe_type, "uprobe.session");
12233 		opts.retprobe = str_has_pfx(probe_type, "uretprobe.multi");
12234 
12235 		*link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts);
12236 		ret = libbpf_get_error(*link);
12237 		break;
12238 	default:
12239 		pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
12240 			prog->sec_name);
12241 		break;
12242 	}
12243 	free(probe_type);
12244 	free(binary_path);
12245 	free(func_name);
12246 	return ret;
12247 }
12248 
12249 static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe,
12250 					  const char *binary_path, size_t offset)
12251 {
12252 	return append_to_file(tracefs_uprobe_events(), "%c:%s/%s %s:0x%zx",
12253 			      retprobe ? 'r' : 'p',
12254 			      retprobe ? "uretprobes" : "uprobes",
12255 			      probe_name, binary_path, offset);
12256 }
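
/* For example, for a hypothetical probe named "libbpf_1234_malloc" on
 * /usr/lib/libc.so.6 at offset 0x98810, the format string above would
 * append this line to <tracefs>/uprobe_events:
 *
 *   p:uprobes/libbpf_1234_malloc /usr/lib/libc.so.6:0x98810
 *
 * with an 'r' type and the "uretprobes" group used for retprobes instead.
 */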
12257 
12258 static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe)
12259 {
12260 	return append_to_file(tracefs_uprobe_events(), "-:%s/%s",
12261 			      retprobe ? "uretprobes" : "uprobes", probe_name);
12262 }
12263 
12264 static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe)
12265 {
12266 	char file[512];
12267 
12268 	snprintf(file, sizeof(file), "%s/events/%s/%s/id",
12269 		 tracefs_path(), retprobe ? "uretprobes" : "uprobes", probe_name);
12270 
12271 	return parse_uint_from_file(file, "%d\n");
12272 }
12273 
12274 static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe,
12275 					 const char *binary_path, size_t offset, int pid)
12276 {
12277 	const size_t attr_sz = sizeof(struct perf_event_attr);
12278 	struct perf_event_attr attr;
12279 	int type, pfd, err;
12280 
12281 	err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset);
12282 	if (err < 0) {
12283 		pr_warn("failed to add legacy uprobe event for %s:0x%zx: %s\n",
12284 			binary_path, (size_t)offset, errstr(err));
12285 		return err;
12286 	}
12287 	type = determine_uprobe_perf_type_legacy(probe_name, retprobe);
12288 	if (type < 0) {
12289 		err = type;
12290 		pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %s\n",
12291 			binary_path, offset, errstr(err));
12292 		goto err_clean_legacy;
12293 	}
12294 
12295 	memset(&attr, 0, attr_sz);
12296 	attr.size = attr_sz;
12297 	attr.config = type;
12298 	attr.type = PERF_TYPE_TRACEPOINT;
12299 
12300 	pfd = syscall(__NR_perf_event_open, &attr,
12301 		      pid < 0 ? -1 : pid, /* pid */
12302 		      pid == -1 ? 0 : -1, /* cpu */
12303 		      -1 /* group_fd */,  PERF_FLAG_FD_CLOEXEC);
12304 	if (pfd < 0) {
12305 		err = -errno;
12306 		pr_warn("legacy uprobe perf_event_open() failed: %s\n", errstr(err));
12307 		goto err_clean_legacy;
12308 	}
12309 	return pfd;
12310 
12311 err_clean_legacy:
12312 	/* Clear the newly added legacy uprobe_event */
12313 	remove_uprobe_event_legacy(probe_name, retprobe);
12314 	return err;
12315 }
12316 
12317 /* Find offset of function name in archive specified by path. Currently
12318  * supported are .zip files that do not compress their contents, as used on
12319  * Android in the form of APKs, for example. "file_name" is the name of the ELF
12320  * file inside the archive. "func_name" matches symbol name or name@@LIB for
12321  * library functions.
12322  *
12323  * An overview of the APK format is provided here:
12324  * https://en.wikipedia.org/w/index.php?title=Apk_(file_format)&oldid=1139099120#Package_contents
12325  */
12326 static long elf_find_func_offset_from_archive(const char *archive_path, const char *file_name,
12327 					      const char *func_name)
12328 {
12329 	struct zip_archive *archive;
12330 	struct zip_entry entry;
12331 	long ret;
12332 	Elf *elf;
12333 
12334 	archive = zip_archive_open(archive_path);
12335 	if (IS_ERR(archive)) {
12336 		ret = PTR_ERR(archive);
12337 		pr_warn("zip: failed to open %s: %ld\n", archive_path, ret);
12338 		return ret;
12339 	}
12340 
12341 	ret = zip_archive_find_entry(archive, file_name, &entry);
12342 	if (ret) {
12343 		pr_warn("zip: could not find archive member %s in %s: %ld\n", file_name,
12344 			archive_path, ret);
12345 		goto out;
12346 	}
12347 	pr_debug("zip: found entry for %s in %s at 0x%lx\n", file_name, archive_path,
12348 		 (unsigned long)entry.data_offset);
12349 
12350 	if (entry.compression) {
12351 		pr_warn("zip: entry %s of %s is compressed and cannot be handled\n", file_name,
12352 			archive_path);
12353 		ret = -LIBBPF_ERRNO__FORMAT;
12354 		goto out;
12355 	}
12356 
12357 	elf = elf_memory((void *)entry.data, entry.data_length);
12358 	if (!elf) {
12359 		pr_warn("elf: could not read elf file %s from %s: %s\n", file_name, archive_path,
12360 			elf_errmsg(-1));
12361 		ret = -LIBBPF_ERRNO__LIBELF;
12362 		goto out;
12363 	}
12364 
12365 	ret = elf_find_func_offset(elf, file_name, func_name);
12366 	if (ret > 0) {
12367 		pr_debug("elf: symbol address match for %s of %s in %s: 0x%x + 0x%lx = 0x%lx\n",
12368 			 func_name, file_name, archive_path, entry.data_offset, ret,
12369 			 ret + entry.data_offset);
12370 		ret += entry.data_offset;
12371 	}
12372 	elf_end(elf);
12373 
12374 out:
12375 	zip_archive_close(archive);
12376 	return ret;
12377 }
12378 
12379 static const char *arch_specific_lib_paths(void)
12380 {
12381 	/*
12382 	 * Based on https://packages.debian.org/sid/libc6.
12383 	 *
12384 	 * Assume that the traced program is built for the same architecture
12385 	 * as libbpf, which should cover the vast majority of cases.
12386 	 */
12387 #if defined(__x86_64__)
12388 	return "/lib/x86_64-linux-gnu";
12389 #elif defined(__i386__)
12390 	return "/lib/i386-linux-gnu";
12391 #elif defined(__s390x__)
12392 	return "/lib/s390x-linux-gnu";
12393 #elif defined(__arm__) && defined(__SOFTFP__)
12394 	return "/lib/arm-linux-gnueabi";
12395 #elif defined(__arm__) && !defined(__SOFTFP__)
12396 	return "/lib/arm-linux-gnueabihf";
12397 #elif defined(__aarch64__)
12398 	return "/lib/aarch64-linux-gnu";
12399 #elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 64
12400 	return "/lib/mips64el-linux-gnuabi64";
12401 #elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 32
12402 	return "/lib/mipsel-linux-gnu";
12403 #elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
12404 	return "/lib/powerpc64le-linux-gnu";
12405 #elif defined(__sparc__) && defined(__arch64__)
12406 	return "/lib/sparc64-linux-gnu";
12407 #elif defined(__riscv) && __riscv_xlen == 64
12408 	return "/lib/riscv64-linux-gnu";
12409 #else
12410 	return NULL;
12411 #endif
12412 }
12413 
12414 /* Get full path to program/shared library. */
12415 static int resolve_full_path(const char *file, char *result, size_t result_sz)
12416 {
12417 	const char *search_paths[3] = {};
12418 	int i, perm;
12419 
12420 	if (str_has_sfx(file, ".so") || strstr(file, ".so.")) {
12421 		search_paths[0] = getenv("LD_LIBRARY_PATH");
12422 		search_paths[1] = "/usr/lib64:/usr/lib";
12423 		search_paths[2] = arch_specific_lib_paths();
12424 		perm = R_OK;
12425 	} else {
12426 		search_paths[0] = getenv("PATH");
12427 		search_paths[1] = "/usr/bin:/usr/sbin";
12428 		perm = R_OK | X_OK;
12429 	}
12430 
12431 	for (i = 0; i < ARRAY_SIZE(search_paths); i++) {
12432 		const char *s;
12433 
12434 		if (!search_paths[i])
12435 			continue;
12436 		for (s = search_paths[i]; s != NULL; s = strchr(s, ':')) {
12437 			const char *next_path;
12438 			int seg_len;
12439 
12440 			if (s[0] == ':')
12441 				s++;
12442 			next_path = strchr(s, ':');
12443 			seg_len = next_path ? next_path - s : strlen(s);
12444 			if (!seg_len)
12445 				continue;
12446 			snprintf(result, result_sz, "%.*s/%s", seg_len, s, file);
12447 			/* ensure it has required permissions */
12448 			if (faccessat(AT_FDCWD, result, perm, AT_EACCESS) < 0)
12449 				continue;
12450 			pr_debug("resolved '%s' to '%s'\n", file, result);
12451 			return 0;
12452 		}
12453 	}
12454 	return -ENOENT;
12455 }
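
/* For example (a sketch, assuming a typical Debian-like x86-64 system),
 * resolving "libc.so.6" would probe each candidate below in order and
 * stop at the first readable match:
 *
 *   <each LD_LIBRARY_PATH segment>/libc.so.6
 *   /usr/lib64/libc.so.6
 *   /usr/lib/libc.so.6
 *   /lib/x86_64-linux-gnu/libc.so.6
 */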
12456 
12457 struct bpf_link *
12458 bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
12459 				 pid_t pid,
12460 				 const char *path,
12461 				 const char *func_pattern,
12462 				 const struct bpf_uprobe_multi_opts *opts)
12463 {
12464 	const unsigned long *ref_ctr_offsets = NULL, *offsets = NULL;
12465 	LIBBPF_OPTS(bpf_link_create_opts, lopts);
12466 	unsigned long *resolved_offsets = NULL;
12467 	enum bpf_attach_type attach_type;
12468 	int err = 0, link_fd, prog_fd;
12469 	struct bpf_link *link = NULL;
12470 	char full_path[PATH_MAX];
12471 	bool retprobe, session;
12472 	const __u64 *cookies;
12473 	const char **syms;
12474 	size_t cnt;
12475 
12476 	if (!OPTS_VALID(opts, bpf_uprobe_multi_opts))
12477 		return libbpf_err_ptr(-EINVAL);
12478 
12479 	prog_fd = bpf_program__fd(prog);
12480 	if (prog_fd < 0) {
12481 		pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
12482 			prog->name);
12483 		return libbpf_err_ptr(-EINVAL);
12484 	}
12485 
12486 	syms = OPTS_GET(opts, syms, NULL);
12487 	offsets = OPTS_GET(opts, offsets, NULL);
12488 	ref_ctr_offsets = OPTS_GET(opts, ref_ctr_offsets, NULL);
12489 	cookies = OPTS_GET(opts, cookies, NULL);
12490 	cnt = OPTS_GET(opts, cnt, 0);
12491 	retprobe = OPTS_GET(opts, retprobe, false);
12492 	session  = OPTS_GET(opts, session, false);
12493 
12494 	/*
12495 	 * The user can specify two mutually exclusive sets of inputs:
12496 	 *
12497 	 * 1) use only path/func_pattern/pid arguments
12498 	 *
12499 	 * 2) use path/pid with allowed combinations of:
12500 	 *    syms/offsets/ref_ctr_offsets/cookies/cnt
12501 	 *
12502 	 *    - syms and offsets are mutually exclusive
12503 	 *    - ref_ctr_offsets and cookies are optional
12504 	 *
12505 	 * Any other usage results in error.
12506 	 */
12507 
12508 	if (!path)
12509 		return libbpf_err_ptr(-EINVAL);
12510 	if (!func_pattern && cnt == 0)
12511 		return libbpf_err_ptr(-EINVAL);
12512 
12513 	if (func_pattern) {
12514 		if (syms || offsets || ref_ctr_offsets || cookies || cnt)
12515 			return libbpf_err_ptr(-EINVAL);
12516 	} else {
12517 		if (!!syms == !!offsets)
12518 			return libbpf_err_ptr(-EINVAL);
12519 	}
12520 
12521 	if (retprobe && session)
12522 		return libbpf_err_ptr(-EINVAL);
12523 
12524 	if (func_pattern) {
12525 		if (!strchr(path, '/')) {
12526 			err = resolve_full_path(path, full_path, sizeof(full_path));
12527 			if (err) {
12528 				pr_warn("prog '%s': failed to resolve full path for '%s': %s\n",
12529 					prog->name, path, errstr(err));
12530 				return libbpf_err_ptr(err);
12531 			}
12532 			path = full_path;
12533 		}
12534 
12535 		err = elf_resolve_pattern_offsets(path, func_pattern,
12536 						  &resolved_offsets, &cnt);
12537 		if (err < 0)
12538 			return libbpf_err_ptr(err);
12539 		offsets = resolved_offsets;
12540 	} else if (syms) {
12541 		err = elf_resolve_syms_offsets(path, cnt, syms, &resolved_offsets, STT_FUNC);
12542 		if (err < 0)
12543 			return libbpf_err_ptr(err);
12544 		offsets = resolved_offsets;
12545 	}
12546 
12547 	attach_type = session ? BPF_TRACE_UPROBE_SESSION : BPF_TRACE_UPROBE_MULTI;
12548 
12549 	lopts.uprobe_multi.path = path;
12550 	lopts.uprobe_multi.offsets = offsets;
12551 	lopts.uprobe_multi.ref_ctr_offsets = ref_ctr_offsets;
12552 	lopts.uprobe_multi.cookies = cookies;
12553 	lopts.uprobe_multi.cnt = cnt;
12554 	lopts.uprobe_multi.flags = retprobe ? BPF_F_UPROBE_MULTI_RETURN : 0;
12555 
12556 	if (pid == 0)
12557 		pid = getpid();
12558 	if (pid > 0)
12559 		lopts.uprobe_multi.pid = pid;
12560 
12561 	link = calloc(1, sizeof(*link));
12562 	if (!link) {
12563 		err = -ENOMEM;
12564 		goto error;
12565 	}
12566 	link->detach = &bpf_link__detach_fd;
12567 
12568 	link_fd = bpf_link_create(prog_fd, 0, attach_type, &lopts);
12569 	if (link_fd < 0) {
12570 		err = -errno;
12571 		pr_warn("prog '%s': failed to attach multi-uprobe: %s\n",
12572 			prog->name, errstr(err));
12573 		goto error;
12574 	}
12575 	link->fd = link_fd;
12576 	free(resolved_offsets);
12577 	return link;
12578 
12579 error:
12580 	free(resolved_offsets);
12581 	free(link);
12582 	return libbpf_err_ptr(err);
12583 }
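
/* Illustrative usage sketch (not part of libbpf); the symbol names,
 * cookie values, and target binary are hypothetical:
 *
 *   const char *syms[] = { "readline", "add_history" };
 *   __u64 cookies[] = { 0xdead, 0xbeef };
 *   LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
 *           .syms = syms,
 *           .cookies = cookies,
 *           .cnt = 2,
 *   );
 *   struct bpf_link *link;
 *
 *   // pid == -1 attaches across all processes
 *   link = bpf_program__attach_uprobe_multi(prog, -1, "/usr/bin/bash",
 *                                           NULL, &opts);
 *   if (!link)
 *           return -errno;  // errno carries the error in libbpf 1.0 mode
 */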
12584 
12585 LIBBPF_API struct bpf_link *
12586 bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
12587 				const char *binary_path, size_t func_offset,
12588 				const struct bpf_uprobe_opts *opts)
12589 {
12590 	const char *archive_path = NULL, *archive_sep = NULL;
12591 	char *legacy_probe = NULL;
12592 	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
12593 	enum probe_attach_mode attach_mode;
12594 	char full_path[PATH_MAX];
12595 	struct bpf_link *link;
12596 	size_t ref_ctr_off;
12597 	int pfd, err;
12598 	bool retprobe, legacy;
12599 	const char *func_name;
12600 
12601 	if (!OPTS_VALID(opts, bpf_uprobe_opts))
12602 		return libbpf_err_ptr(-EINVAL);
12603 
12604 	attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
12605 	retprobe = OPTS_GET(opts, retprobe, false);
12606 	ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
12607 	pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
12608 
12609 	if (!binary_path)
12610 		return libbpf_err_ptr(-EINVAL);
12611 
12612 	/* Check if "binary_path" refers to an archive. */
12613 	archive_sep = strstr(binary_path, "!/");
12614 	if (archive_sep) {
12615 		full_path[0] = '\0';
12616 		libbpf_strlcpy(full_path, binary_path,
12617 			       min(sizeof(full_path), (size_t)(archive_sep - binary_path + 1)));
12618 		archive_path = full_path;
12619 		binary_path = archive_sep + 2;
12620 	} else if (!strchr(binary_path, '/')) {
12621 		err = resolve_full_path(binary_path, full_path, sizeof(full_path));
12622 		if (err) {
12623 			pr_warn("prog '%s': failed to resolve full path for '%s': %s\n",
12624 				prog->name, binary_path, errstr(err));
12625 			return libbpf_err_ptr(err);
12626 		}
12627 		binary_path = full_path;
12628 	}
12629 	func_name = OPTS_GET(opts, func_name, NULL);
12630 	if (func_name) {
12631 		long sym_off;
12632 
12633 		if (archive_path) {
12634 			sym_off = elf_find_func_offset_from_archive(archive_path, binary_path,
12635 								    func_name);
12636 			binary_path = archive_path;
12637 		} else {
12638 			sym_off = elf_find_func_offset_from_file(binary_path, func_name);
12639 		}
12640 		if (sym_off < 0)
12641 			return libbpf_err_ptr(sym_off);
12642 		func_offset += sym_off;
12643 	}
12644 
12645 	legacy = determine_uprobe_perf_type() < 0;
12646 	switch (attach_mode) {
12647 	case PROBE_ATTACH_MODE_LEGACY:
12648 		legacy = true;
12649 		pe_opts.force_ioctl_attach = true;
12650 		break;
12651 	case PROBE_ATTACH_MODE_PERF:
12652 		if (legacy)
12653 			return libbpf_err_ptr(-ENOTSUP);
12654 		pe_opts.force_ioctl_attach = true;
12655 		break;
12656 	case PROBE_ATTACH_MODE_LINK:
12657 		if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
12658 			return libbpf_err_ptr(-ENOTSUP);
12659 		break;
12660 	case PROBE_ATTACH_MODE_DEFAULT:
12661 		break;
12662 	default:
12663 		return libbpf_err_ptr(-EINVAL);
12664 	}
12665 
12666 	if (!legacy) {
12667 		pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
12668 					    func_offset, pid, ref_ctr_off);
12669 	} else {
12670 		char probe_name[MAX_EVENT_NAME_LEN];
12671 
12672 		if (ref_ctr_off)
12673 			return libbpf_err_ptr(-EINVAL);
12674 
12675 		gen_probe_legacy_event_name(probe_name, sizeof(probe_name),
12676 					    strrchr(binary_path, '/') ? : binary_path,
12677 					    func_offset);
12678 
12679 		legacy_probe = strdup(probe_name);
12680 		if (!legacy_probe)
12681 			return libbpf_err_ptr(-ENOMEM);
12682 
12683 		pfd = perf_event_uprobe_open_legacy(legacy_probe, retprobe,
12684 						    binary_path, func_offset, pid);
12685 	}
12686 	if (pfd < 0) {
12687 		err = -errno;
12688 		pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
12689 			prog->name, retprobe ? "uretprobe" : "uprobe",
12690 			binary_path, func_offset,
12691 			errstr(err));
12692 		goto err_out;
12693 	}
12694 
12695 	link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
12696 	err = libbpf_get_error(link);
12697 	if (err) {
12698 		close(pfd);
12699 		pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
12700 			prog->name, retprobe ? "uretprobe" : "uprobe",
12701 			binary_path, func_offset,
12702 			errstr(err));
12703 		goto err_clean_legacy;
12704 	}
12705 	if (legacy) {
12706 		struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
12707 
12708 		perf_link->legacy_probe_name = legacy_probe;
12709 		perf_link->legacy_is_kprobe = false;
12710 		perf_link->legacy_is_retprobe = retprobe;
12711 	}
12712 	return link;
12713 
12714 err_clean_legacy:
12715 	if (legacy)
12716 		remove_uprobe_event_legacy(legacy_probe, retprobe);
12717 err_out:
12718 	free(legacy_probe);
12719 	return libbpf_err_ptr(err);
12720 }
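
/* Illustrative usage sketch (not part of libbpf); the APK path, library
 * member, and function name are hypothetical. The "!/" separator selects
 * an ELF member inside an uncompressed archive, as handled above:
 *
 *   LIBBPF_OPTS(bpf_uprobe_opts, opts, .func_name = "native_fn");
 *   struct bpf_link *link;
 *
 *   link = bpf_program__attach_uprobe_opts(prog, target_pid,
 *           "/system/app/test.apk!/lib/arm64/libtest.so", 0, &opts);
 */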
12721 
12722 /* Format of u[ret]probe section definition supporting auto-attach:
12723  * u[ret]probe/binary:function[+offset]
12724  *
12725  * binary can be an absolute/relative path or a filename; the latter is resolved to a
12726  * full binary path via bpf_program__attach_uprobe_opts.
12727  *
12728  * Specifying uprobe+ ensures we carry out strict matching; either "uprobe" must be
12729  * specified (and auto-attach is not possible) or the above format is specified for
12730  * auto-attach.
12731  */
12732 static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12733 {
12734 	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
12735 	char *probe_type = NULL, *binary_path = NULL, *func_name = NULL, *func_off;
12736 	int n, c, ret = -EINVAL;
12737 	long offset = 0;
12738 
12739 	*link = NULL;
12740 
12741 	n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]",
12742 		   &probe_type, &binary_path, &func_name);
12743 	switch (n) {
12744 	case 1:
12745 		/* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */
12746 		ret = 0;
12747 		break;
12748 	case 2:
12749 		pr_warn("prog '%s': section '%s' missing ':function[+offset]' specification\n",
12750 			prog->name, prog->sec_name);
12751 		break;
12752 	case 3:
12753 		/* check if the user specified `+offset`; if so, it must be the
12754 		 * last part of the string, so make sure sscanf read to EOL
12755 		 */
12756 		func_off = strrchr(func_name, '+');
12757 		if (func_off) {
12758 			n = sscanf(func_off, "+%li%n", &offset, &c);
12759 			if (n == 1 && *(func_off + c) == '\0')
12760 				func_off[0] = '\0';
12761 			else
12762 				offset = 0;
12763 		}
12764 		opts.retprobe = strcmp(probe_type, "uretprobe") == 0 ||
12765 				strcmp(probe_type, "uretprobe.s") == 0;
12766 		if (opts.retprobe && offset != 0) {
12767 			pr_warn("prog '%s': uretprobes do not support offset specification\n",
12768 				prog->name);
12769 			break;
12770 		}
12771 		opts.func_name = func_name;
12772 		*link = bpf_program__attach_uprobe_opts(prog, -1, binary_path, offset, &opts);
12773 		ret = libbpf_get_error(*link);
12774 		break;
12775 	default:
12776 		pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
12777 			prog->sec_name);
12778 		break;
12779 	}
12780 	free(probe_type);
12781 	free(binary_path);
12782 	free(func_name);
12783 
12784 	return ret;
12785 }
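
/* Illustrative examples (not part of libbpf) of section names the
 * parser above accepts; paths and function names are hypothetical:
 *
 *   SEC("uprobe//usr/lib/libc.so.6:malloc")        // absolute path
 *   SEC("uprobe//usr/lib/libc.so.6:malloc+0x10")   // offset past symbol
 *   SEC("uretprobe/./local_bin:compute")           // relative path; no offset
 *   SEC("uprobe")                                  // valid, but not auto-attachable
 */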
12786 
12787 struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog,
12788 					    bool retprobe, pid_t pid,
12789 					    const char *binary_path,
12790 					    size_t func_offset)
12791 {
12792 	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, .retprobe = retprobe);
12793 
12794 	return bpf_program__attach_uprobe_opts(prog, pid, binary_path, func_offset, &opts);
12795 }
12796 
12797 struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog,
12798 					  pid_t pid, const char *binary_path,
12799 					  const char *usdt_provider, const char *usdt_name,
12800 					  const struct bpf_usdt_opts *opts)
12801 {
12802 	char resolved_path[512];
12803 	struct bpf_object *obj = prog->obj;
12804 	struct bpf_link *link;
12805 	__u64 usdt_cookie;
12806 	int err;
12807 
12808 	if (!OPTS_VALID(opts, bpf_uprobe_opts))
12809 		return libbpf_err_ptr(-EINVAL);
12810 
12811 	if (bpf_program__fd(prog) < 0) {
12812 		pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
12813 			prog->name);
12814 		return libbpf_err_ptr(-EINVAL);
12815 	}
12816 
12817 	if (!binary_path)
12818 		return libbpf_err_ptr(-EINVAL);
12819 
12820 	if (!strchr(binary_path, '/')) {
12821 		err = resolve_full_path(binary_path, resolved_path, sizeof(resolved_path));
12822 		if (err) {
12823 			pr_warn("prog '%s': failed to resolve full path for '%s': %s\n",
12824 				prog->name, binary_path, errstr(err));
12825 			return libbpf_err_ptr(err);
12826 		}
12827 		binary_path = resolved_path;
12828 	}
12829 
12830 	/* USDT manager is instantiated lazily on first USDT attach. It will
12831 	 * be destroyed together with the BPF object in bpf_object__close().
12832 	 */
12833 	if (IS_ERR(obj->usdt_man))
12834 		return libbpf_ptr(obj->usdt_man);
12835 	if (!obj->usdt_man) {
12836 		obj->usdt_man = usdt_manager_new(obj);
12837 		if (IS_ERR(obj->usdt_man))
12838 			return libbpf_ptr(obj->usdt_man);
12839 	}
12840 
12841 	usdt_cookie = OPTS_GET(opts, usdt_cookie, 0);
12842 	link = usdt_manager_attach_usdt(obj->usdt_man, prog, pid, binary_path,
12843 					usdt_provider, usdt_name, usdt_cookie);
12844 	err = libbpf_get_error(link);
12845 	if (err)
12846 		return libbpf_err_ptr(err);
12847 	return link;
12848 }
12849 
12850 static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12851 {
12852 	char *path = NULL, *provider = NULL, *name = NULL;
12853 	const char *sec_name;
12854 	int n, err;
12855 
12856 	sec_name = bpf_program__section_name(prog);
12857 	if (strcmp(sec_name, "usdt") == 0) {
12858 		/* no auto-attach for just SEC("usdt") */
12859 		*link = NULL;
12860 		return 0;
12861 	}
12862 
12863 	n = sscanf(sec_name, "usdt/%m[^:]:%m[^:]:%m[^:]", &path, &provider, &name);
12864 	if (n != 3) {
12865 		pr_warn("invalid section '%s', expected SEC(\"usdt/<path>:<provider>:<name>\")\n",
12866 			sec_name);
12867 		err = -EINVAL;
12868 	} else {
12869 		*link = bpf_program__attach_usdt(prog, -1 /* any process */, path,
12870 						 provider, name, NULL);
12871 		err = libbpf_get_error(*link);
12872 	}
12873 	free(path);
12874 	free(provider);
12875 	free(name);
12876 	return err;
12877 }
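
/* For example (a sketch; the probe point is one of glibc's real USDT
 * probes, but any provider:name pair follows the same shape), a program
 * auto-attaching to a libc USDT could be declared as:
 *
 *   SEC("usdt/libc.so.6:libc:memory_sbrk_more")
 *   int BPF_USDT(handle_sbrk, void *addr, size_t size) { ... }
 *
 * (BPF_USDT comes from usdt.bpf.h), while a bare SEC("usdt") requires a
 * manual bpf_program__attach_usdt() call.
 */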
12878 
12879 static int determine_tracepoint_id(const char *tp_category,
12880 				   const char *tp_name)
12881 {
12882 	char file[PATH_MAX];
12883 	int ret;
12884 
12885 	ret = snprintf(file, sizeof(file), "%s/events/%s/%s/id",
12886 		       tracefs_path(), tp_category, tp_name);
12887 	if (ret < 0)
12888 		return -errno;
12889 	if (ret >= sizeof(file)) {
12890 		pr_debug("tracepoint %s/%s path is too long\n",
12891 			 tp_category, tp_name);
12892 		return -E2BIG;
12893 	}
12894 	return parse_uint_from_file(file, "%d\n");
12895 }
12896 
12897 static int perf_event_open_tracepoint(const char *tp_category,
12898 				      const char *tp_name)
12899 {
12900 	const size_t attr_sz = sizeof(struct perf_event_attr);
12901 	struct perf_event_attr attr;
12902 	int tp_id, pfd, err;
12903 
12904 	tp_id = determine_tracepoint_id(tp_category, tp_name);
12905 	if (tp_id < 0) {
12906 		pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
12907 			tp_category, tp_name,
12908 			errstr(tp_id));
12909 		return tp_id;
12910 	}
12911 
12912 	memset(&attr, 0, attr_sz);
12913 	attr.type = PERF_TYPE_TRACEPOINT;
12914 	attr.size = attr_sz;
12915 	attr.config = tp_id;
12916 
12917 	pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
12918 		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
12919 	if (pfd < 0) {
12920 		err = -errno;
12921 		pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
12922 			tp_category, tp_name,
12923 			errstr(err));
12924 		return err;
12925 	}
12926 	return pfd;
12927 }
12928 
12929 struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
12930 						     const char *tp_category,
12931 						     const char *tp_name,
12932 						     const struct bpf_tracepoint_opts *opts)
12933 {
12934 	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
12935 	struct bpf_link *link;
12936 	int pfd, err;
12937 
12938 	if (!OPTS_VALID(opts, bpf_tracepoint_opts))
12939 		return libbpf_err_ptr(-EINVAL);
12940 
12941 	pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
12942 
12943 	pfd = perf_event_open_tracepoint(tp_category, tp_name);
12944 	if (pfd < 0) {
12945 		pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
12946 			prog->name, tp_category, tp_name,
12947 			errstr(pfd));
12948 		return libbpf_err_ptr(pfd);
12949 	}
12950 	link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
12951 	err = libbpf_get_error(link);
12952 	if (err) {
12953 		close(pfd);
12954 		pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
12955 			prog->name, tp_category, tp_name,
12956 			errstr(err));
12957 		return libbpf_err_ptr(err);
12958 	}
12959 	return link;
12960 }
12961 
12962 struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog,
12963 						const char *tp_category,
12964 						const char *tp_name)
12965 {
12966 	return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL);
12967 }
12968 
12969 static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12970 {
12971 	char *sec_name, *tp_cat, *tp_name;
12972 
12973 	*link = NULL;
12974 
12975 	/* no auto-attach for SEC("tp") or SEC("tracepoint") */
12976 	if (strcmp(prog->sec_name, "tp") == 0 || strcmp(prog->sec_name, "tracepoint") == 0)
12977 		return 0;
12978 
12979 	sec_name = strdup(prog->sec_name);
12980 	if (!sec_name)
12981 		return -ENOMEM;
12982 
12983 	/* extract "tp/<category>/<name>" or "tracepoint/<category>/<name>" */
12984 	if (str_has_pfx(prog->sec_name, "tp/"))
12985 		tp_cat = sec_name + sizeof("tp/") - 1;
12986 	else
12987 		tp_cat = sec_name + sizeof("tracepoint/") - 1;
12988 	tp_name = strchr(tp_cat, '/');
12989 	if (!tp_name) {
12990 		free(sec_name);
12991 		return -EINVAL;
12992 	}
12993 	*tp_name = '\0';
12994 	tp_name++;
12995 
12996 	*link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
12997 	free(sec_name);
12998 	return libbpf_get_error(*link);
12999 }
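
/* For example, SEC("tp/syscalls/sys_enter_openat") splits into category
 * "syscalls" and name "sys_enter_openat", matching the tracefs layout
 * <tracefs>/events/syscalls/sys_enter_openat/; SEC("tracepoint/...") is
 * an equivalent spelling.
 */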
13000 
13001 struct bpf_link *
13002 bpf_program__attach_raw_tracepoint_opts(const struct bpf_program *prog,
13003 					const char *tp_name,
13004 					struct bpf_raw_tracepoint_opts *opts)
13005 {
13006 	LIBBPF_OPTS(bpf_raw_tp_opts, raw_opts);
13007 	struct bpf_link *link;
13008 	int prog_fd, pfd;
13009 
13010 	if (!OPTS_VALID(opts, bpf_raw_tracepoint_opts))
13011 		return libbpf_err_ptr(-EINVAL);
13012 
13013 	prog_fd = bpf_program__fd(prog);
13014 	if (prog_fd < 0) {
13015 		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
13016 		return libbpf_err_ptr(-EINVAL);
13017 	}
13018 
13019 	link = calloc(1, sizeof(*link));
13020 	if (!link)
13021 		return libbpf_err_ptr(-ENOMEM);
13022 	link->detach = &bpf_link__detach_fd;
13023 
13024 	raw_opts.tp_name = tp_name;
13025 	raw_opts.cookie = OPTS_GET(opts, cookie, 0);
13026 	pfd = bpf_raw_tracepoint_open_opts(prog_fd, &raw_opts);
13027 	if (pfd < 0) {
13028 		pfd = -errno;
13029 		free(link);
13030 		pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
13031 			prog->name, tp_name, errstr(pfd));
13032 		return libbpf_err_ptr(pfd);
13033 	}
13034 	link->fd = pfd;
13035 	return link;
13036 }
13037 
13038 struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
13039 						    const char *tp_name)
13040 {
13041 	return bpf_program__attach_raw_tracepoint_opts(prog, tp_name, NULL);
13042 }
13043 
13044 static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
13045 {
13046 	static const char *const prefixes[] = {
13047 		"raw_tp",
13048 		"raw_tracepoint",
13049 		"raw_tp.w",
13050 		"raw_tracepoint.w",
13051 	};
13052 	size_t i;
13053 	const char *tp_name = NULL;
13054 
13055 	*link = NULL;
13056 
13057 	for (i = 0; i < ARRAY_SIZE(prefixes); i++) {
13058 		size_t pfx_len;
13059 
13060 		if (!str_has_pfx(prog->sec_name, prefixes[i]))
13061 			continue;
13062 
13063 		pfx_len = strlen(prefixes[i]);
13064 		/* no auto-attach for the bare case, e.g., SEC("raw_tp") */
13065 		if (prog->sec_name[pfx_len] == '\0')
13066 			return 0;
13067 
13068 		if (prog->sec_name[pfx_len] != '/')
13069 			continue;
13070 
13071 		tp_name = prog->sec_name + pfx_len + 1;
13072 		break;
13073 	}
13074 
13075 	if (!tp_name) {
13076 		pr_warn("prog '%s': invalid section name '%s'\n",
13077 			prog->name, prog->sec_name);
13078 		return -EINVAL;
13079 	}
13080 
13081 	*link = bpf_program__attach_raw_tracepoint(prog, tp_name);
13082 	return libbpf_get_error(*link);
13083 }
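
/* For example, SEC("raw_tp/sched_switch") attaches to the sched_switch
 * raw tracepoint; the ".w" variants (e.g. SEC("raw_tp.w/...")) are the
 * writable-buffer flavor handled by the same prefix table above.
 */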
13084 
13085 /* Common logic for all BPF program types that attach to a btf_id */
13086 static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog,
13087 						   const struct bpf_trace_opts *opts)
13088 {
13089 	LIBBPF_OPTS(bpf_link_create_opts, link_opts);
13090 	struct bpf_link *link;
13091 	int prog_fd, pfd;
13092 
13093 	if (!OPTS_VALID(opts, bpf_trace_opts))
13094 		return libbpf_err_ptr(-EINVAL);
13095 
13096 	prog_fd = bpf_program__fd(prog);
13097 	if (prog_fd < 0) {
13098 		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
13099 		return libbpf_err_ptr(-EINVAL);
13100 	}
13101 
13102 	link = calloc(1, sizeof(*link));
13103 	if (!link)
13104 		return libbpf_err_ptr(-ENOMEM);
13105 	link->detach = &bpf_link__detach_fd;
13106 
13107 	/* libbpf is smart enough to redirect to BPF_RAW_TRACEPOINT_OPEN on old kernels */
13108 	link_opts.tracing.cookie = OPTS_GET(opts, cookie, 0);
13109 	pfd = bpf_link_create(prog_fd, 0, bpf_program__expected_attach_type(prog), &link_opts);
13110 	if (pfd < 0) {
13111 		pfd = -errno;
13112 		free(link);
13113 		pr_warn("prog '%s': failed to attach: %s\n",
13114 			prog->name, errstr(pfd));
13115 		return libbpf_err_ptr(pfd);
13116 	}
13117 	link->fd = pfd;
13118 	return link;
13119 }
13120 
13121 struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog)
13122 {
13123 	return bpf_program__attach_btf_id(prog, NULL);
13124 }
13125 
13126 struct bpf_link *bpf_program__attach_trace_opts(const struct bpf_program *prog,
13127 						const struct bpf_trace_opts *opts)
13128 {
13129 	return bpf_program__attach_btf_id(prog, opts);
13130 }
13131 
13132 struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog)
13133 {
13134 	return bpf_program__attach_btf_id(prog, NULL);
13135 }
13136 
13137 static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link)
13138 {
13139 	*link = bpf_program__attach_trace(prog);
13140 	return libbpf_get_error(*link);
13141 }
13142 
13143 static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link)
13144 {
13145 	*link = bpf_program__attach_lsm(prog);
13146 	return libbpf_get_error(*link);
13147 }
13148 
13149 static struct bpf_link *
13150 bpf_program_attach_fd(const struct bpf_program *prog,
13151 		      int target_fd, const char *target_name,
13152 		      const struct bpf_link_create_opts *opts)
13153 {
13154 	enum bpf_attach_type attach_type;
13155 	struct bpf_link *link;
13156 	int prog_fd, link_fd;
13157 
13158 	prog_fd = bpf_program__fd(prog);
13159 	if (prog_fd < 0) {
13160 		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
13161 		return libbpf_err_ptr(-EINVAL);
13162 	}
13163 
13164 	link = calloc(1, sizeof(*link));
13165 	if (!link)
13166 		return libbpf_err_ptr(-ENOMEM);
13167 	link->detach = &bpf_link__detach_fd;
13168 
13169 	attach_type = bpf_program__expected_attach_type(prog);
13170 	link_fd = bpf_link_create(prog_fd, target_fd, attach_type, opts);
13171 	if (link_fd < 0) {
13172 		link_fd = -errno;
13173 		free(link);
13174 		pr_warn("prog '%s': failed to attach to %s: %s\n",
13175 			prog->name, target_name,
13176 			errstr(link_fd));
13177 		return libbpf_err_ptr(link_fd);
13178 	}
13179 	link->fd = link_fd;
13180 	return link;
13181 }
13182 
13183 struct bpf_link *
13184 bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd)
13185 {
13186 	return bpf_program_attach_fd(prog, cgroup_fd, "cgroup", NULL);
13187 }
13188 
13189 struct bpf_link *
13190 bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd)
13191 {
13192 	return bpf_program_attach_fd(prog, netns_fd, "netns", NULL);
13193 }
13194 
13195 struct bpf_link *
13196 bpf_program__attach_sockmap(const struct bpf_program *prog, int map_fd)
13197 {
13198 	return bpf_program_attach_fd(prog, map_fd, "sockmap", NULL);
13199 }
13200 
13201 struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex)
13202 {
13203 	/* target_fd/target_ifindex use the same field in LINK_CREATE */
13204 	return bpf_program_attach_fd(prog, ifindex, "xdp", NULL);
13205 }
13206 
13207 struct bpf_link *
13208 bpf_program__attach_cgroup_opts(const struct bpf_program *prog, int cgroup_fd,
13209 				const struct bpf_cgroup_opts *opts)
13210 {
13211 	LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
13212 	__u32 relative_id;
13213 	int relative_fd;
13214 
13215 	if (!OPTS_VALID(opts, bpf_cgroup_opts))
13216 		return libbpf_err_ptr(-EINVAL);
13217 
13218 	relative_id = OPTS_GET(opts, relative_id, 0);
13219 	relative_fd = OPTS_GET(opts, relative_fd, 0);
13220 
13221 	if (relative_fd && relative_id) {
13222 		pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n",
13223 			prog->name);
13224 		return libbpf_err_ptr(-EINVAL);
13225 	}
13226 
13227 	link_create_opts.cgroup.expected_revision = OPTS_GET(opts, expected_revision, 0);
13228 	link_create_opts.cgroup.relative_fd = relative_fd;
13229 	link_create_opts.cgroup.relative_id = relative_id;
13230 	link_create_opts.flags = OPTS_GET(opts, flags, 0);
13231 
13232 	return bpf_program_attach_fd(prog, cgroup_fd, "cgroup", &link_create_opts);
13233 }
13234 
13235 struct bpf_link *
13236 bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex,
13237 			const struct bpf_tcx_opts *opts)
13238 {
13239 	LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
13240 	__u32 relative_id;
13241 	int relative_fd;
13242 
13243 	if (!OPTS_VALID(opts, bpf_tcx_opts))
13244 		return libbpf_err_ptr(-EINVAL);
13245 
13246 	relative_id = OPTS_GET(opts, relative_id, 0);
13247 	relative_fd = OPTS_GET(opts, relative_fd, 0);
13248 
13249 	/* validate we don't have unexpected combinations of non-zero fields */
13250 	if (!ifindex) {
13251 		pr_warn("prog '%s': target netdevice ifindex cannot be zero\n",
13252 			prog->name);
13253 		return libbpf_err_ptr(-EINVAL);
13254 	}
13255 	if (relative_fd && relative_id) {
13256 		pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n",
13257 			prog->name);
13258 		return libbpf_err_ptr(-EINVAL);
13259 	}
13260 
13261 	link_create_opts.tcx.expected_revision = OPTS_GET(opts, expected_revision, 0);
13262 	link_create_opts.tcx.relative_fd = relative_fd;
13263 	link_create_opts.tcx.relative_id = relative_id;
13264 	link_create_opts.flags = OPTS_GET(opts, flags, 0);
13265 
13266 	/* target_fd/target_ifindex use the same field in LINK_CREATE */
13267 	return bpf_program_attach_fd(prog, ifindex, "tcx", &link_create_opts);
13268 }
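
/* Illustrative usage sketch (not part of libbpf); ifindex and
 * anchor_prog_fd are hypothetical:
 *
 *   LIBBPF_OPTS(bpf_tcx_opts, opts,
 *           .flags = BPF_F_BEFORE,
 *           .relative_fd = anchor_prog_fd,
 *   );
 *   struct bpf_link *link;
 *
 *   link = bpf_program__attach_tcx(prog, ifindex, &opts);
 *
 * which requests insertion before the program referenced by
 * anchor_prog_fd in the device's tcx chain.
 */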
13269 
13270 struct bpf_link *
13271 bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex,
13272 			   const struct bpf_netkit_opts *opts)
13273 {
13274 	LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
13275 	__u32 relative_id;
13276 	int relative_fd;
13277 
13278 	if (!OPTS_VALID(opts, bpf_netkit_opts))
13279 		return libbpf_err_ptr(-EINVAL);
13280 
13281 	relative_id = OPTS_GET(opts, relative_id, 0);
13282 	relative_fd = OPTS_GET(opts, relative_fd, 0);
13283 
13284 	/* validate we don't have unexpected combinations of non-zero fields */
13285 	if (!ifindex) {
13286 		pr_warn("prog '%s': target netdevice ifindex cannot be zero\n",
13287 			prog->name);
13288 		return libbpf_err_ptr(-EINVAL);
13289 	}
13290 	if (relative_fd && relative_id) {
13291 		pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n",
13292 			prog->name);
13293 		return libbpf_err_ptr(-EINVAL);
13294 	}
13295 
13296 	link_create_opts.netkit.expected_revision = OPTS_GET(opts, expected_revision, 0);
13297 	link_create_opts.netkit.relative_fd = relative_fd;
13298 	link_create_opts.netkit.relative_id = relative_id;
13299 	link_create_opts.flags = OPTS_GET(opts, flags, 0);
13300 
13301 	return bpf_program_attach_fd(prog, ifindex, "netkit", &link_create_opts);
13302 }
13303 
13304 struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog,
13305 					      int target_fd,
13306 					      const char *attach_func_name)
13307 {
13308 	int btf_id;
13309 
13310 	if (!!target_fd != !!attach_func_name) {
13311 		pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
13312 			prog->name);
13313 		return libbpf_err_ptr(-EINVAL);
13314 	}
13315 
13316 	if (prog->type != BPF_PROG_TYPE_EXT) {
13317 		pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n",
13318 			prog->name);
13319 		return libbpf_err_ptr(-EINVAL);
13320 	}
13321 
13322 	if (target_fd) {
13323 		LIBBPF_OPTS(bpf_link_create_opts, target_opts);
13324 
13325 		btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd, prog->obj->token_fd);
13326 		if (btf_id < 0)
13327 			return libbpf_err_ptr(btf_id);
13328 
13329 		target_opts.target_btf_id = btf_id;
13330 
13331 		return bpf_program_attach_fd(prog, target_fd, "freplace",
13332 					     &target_opts);
13333 	} else {
13334 		/* no target, so use raw_tracepoint_open for compatibility
13335 		 * with old kernels
13336 		 */
13337 		return bpf_program__attach_trace(prog);
13338 	}
13339 }
13340 
13341 struct bpf_link *
13342 bpf_program__attach_iter(const struct bpf_program *prog,
13343 			 const struct bpf_iter_attach_opts *opts)
13344 {
13345 	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
13346 	struct bpf_link *link;
13347 	int prog_fd, link_fd;
13348 	__u32 target_fd = 0;
13349 
13350 	if (!OPTS_VALID(opts, bpf_iter_attach_opts))
13351 		return libbpf_err_ptr(-EINVAL);
13352 
13353 	link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
13354 	link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
13355 
13356 	prog_fd = bpf_program__fd(prog);
13357 	if (prog_fd < 0) {
13358 		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
13359 		return libbpf_err_ptr(-EINVAL);
13360 	}
13361 
13362 	link = calloc(1, sizeof(*link));
13363 	if (!link)
13364 		return libbpf_err_ptr(-ENOMEM);
13365 	link->detach = &bpf_link__detach_fd;
13366 
13367 	link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
13368 				  &link_create_opts);
13369 	if (link_fd < 0) {
13370 		link_fd = -errno;
13371 		free(link);
13372 		pr_warn("prog '%s': failed to attach to iterator: %s\n",
13373 			prog->name, errstr(link_fd));
13374 		return libbpf_err_ptr(link_fd);
13375 	}
13376 	link->fd = link_fd;
13377 	return link;
13378 }
13379 
13380 static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link)
13381 {
13382 	*link = bpf_program__attach_iter(prog, NULL);
13383 	return libbpf_get_error(*link);
13384 }
13385 
13386 struct bpf_link *bpf_program__attach_netfilter(const struct bpf_program *prog,
13387 					       const struct bpf_netfilter_opts *opts)
13388 {
13389 	LIBBPF_OPTS(bpf_link_create_opts, lopts);
13390 	struct bpf_link *link;
13391 	int prog_fd, link_fd;
13392 
13393 	if (!OPTS_VALID(opts, bpf_netfilter_opts))
13394 		return libbpf_err_ptr(-EINVAL);
13395 
13396 	prog_fd = bpf_program__fd(prog);
13397 	if (prog_fd < 0) {
13398 		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
13399 		return libbpf_err_ptr(-EINVAL);
13400 	}
13401 
13402 	link = calloc(1, sizeof(*link));
13403 	if (!link)
13404 		return libbpf_err_ptr(-ENOMEM);
13405 
13406 	link->detach = &bpf_link__detach_fd;
13407 
13408 	lopts.netfilter.pf = OPTS_GET(opts, pf, 0);
13409 	lopts.netfilter.hooknum = OPTS_GET(opts, hooknum, 0);
13410 	lopts.netfilter.priority = OPTS_GET(opts, priority, 0);
13411 	lopts.netfilter.flags = OPTS_GET(opts, flags, 0);
13412 
13413 	link_fd = bpf_link_create(prog_fd, 0, BPF_NETFILTER, &lopts);
13414 	if (link_fd < 0) {
13415 		link_fd = -errno;
13416 		free(link);
13417 		pr_warn("prog '%s': failed to attach to netfilter: %s\n",
13418 			prog->name, errstr(link_fd));
13419 		return libbpf_err_ptr(link_fd);
13420 	}
13421 	link->fd = link_fd;
13422 
13423 	return link;
13424 }
13425 
13426 struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
13427 {
13428 	struct bpf_link *link = NULL;
13429 	int err;
13430 
13431 	if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
13432 		return libbpf_err_ptr(-EOPNOTSUPP);
13433 
13434 	if (bpf_program__fd(prog) < 0) {
13435 		pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
13436 			prog->name);
13437 		return libbpf_err_ptr(-EINVAL);
13438 	}
13439 
13440 	err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link);
13441 	if (err)
13442 		return libbpf_err_ptr(err);
13443 
13444 	/* When calling bpf_program__attach() explicitly, auto-attach support
13445 	 * is expected to work, so NULL returned link is considered an error.
13446 	 * This is different for skeleton's attach, see comment in
13447 	 * bpf_object__attach_skeleton().
13448 	 */
13449 	if (!link)
13450 		return libbpf_err_ptr(-EOPNOTSUPP);
13451 
13452 	return link;
13453 }
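
/* Illustrative usage sketch (not part of libbpf): auto-attaching every
 * auto-attachable program in a loaded object, without a skeleton:
 *
 *   struct bpf_program *prog;
 *
 *   bpf_object__for_each_program(prog, obj) {
 *           struct bpf_link *link = bpf_program__attach(prog);
 *
 *           if (!link)
 *                   fprintf(stderr, "attach of '%s' failed: %d\n",
 *                           bpf_program__name(prog), -errno);
 *   }
 */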
13454 
13455 struct bpf_link_struct_ops {
13456 	struct bpf_link link;
13457 	int map_fd;
13458 };
13459 
13460 static int bpf_link__detach_struct_ops(struct bpf_link *link)
13461 {
13462 	struct bpf_link_struct_ops *st_link;
13463 	__u32 zero = 0;
13464 
13465 	st_link = container_of(link, struct bpf_link_struct_ops, link);
13466 
13467 	if (st_link->map_fd < 0)
13468 		/* w/o a real link */
13469 		return bpf_map_delete_elem(link->fd, &zero);
13470 
13471 	return close(link->fd);
13472 }
13473 
13474 struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
13475 {
13476 	struct bpf_link_struct_ops *link;
13477 	__u32 zero = 0;
13478 	int err, fd;
13479 
13480 	if (!bpf_map__is_struct_ops(map)) {
13481 		pr_warn("map '%s': can't attach non-struct_ops map\n", map->name);
13482 		return libbpf_err_ptr(-EINVAL);
13483 	}
13484 
13485 	if (map->fd < 0) {
13486 		pr_warn("map '%s': can't attach BPF map without FD (was it created?)\n", map->name);
13487 		return libbpf_err_ptr(-EINVAL);
13488 	}
13489 
13490 	link = calloc(1, sizeof(*link));
13491 	if (!link)
13492 		return libbpf_err_ptr(-ENOMEM);
13493 
13494 	/* kern_vdata should be prepared during the loading phase. */
13495 	err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
13496 	/* It can be -EBUSY if the map has already been used to create or
13497 	 * update a link.  We don't allow updating the value of a struct_ops
13498 	 * once it is set, which ensures that the value never changes.  So,
13499 	 * it is safe to skip -EBUSY.
13500 	 */
13501 	if (err && (!(map->def.map_flags & BPF_F_LINK) || err != -EBUSY)) {
13502 		free(link);
13503 		return libbpf_err_ptr(err);
13504 	}
13505 
13506 	link->link.detach = bpf_link__detach_struct_ops;
13507 
13508 	if (!(map->def.map_flags & BPF_F_LINK)) {
13509 		/* w/o a real link */
13510 		link->link.fd = map->fd;
13511 		link->map_fd = -1;
13512 		return &link->link;
13513 	}
13514 
13515 	fd = bpf_link_create(map->fd, 0, BPF_STRUCT_OPS, NULL);
13516 	if (fd < 0) {
13517 		free(link);
13518 		return libbpf_err_ptr(fd);
13519 	}
13520 
13521 	link->link.fd = fd;
13522 	link->map_fd = map->fd;
13523 
13524 	return &link->link;
13525 }
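
/* Illustrative usage sketch (not part of libbpf); "my_ops" is a
 * hypothetical struct_ops map defined in the BPF object:
 *
 *   struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_ops");
 *   struct bpf_link *link = bpf_map__attach_struct_ops(map);
 *
 *   if (!link)
 *           return -errno;
 *
 * For maps declared with BPF_F_LINK, the returned link wraps a real
 * kernel link that can later be re-pointed via bpf_link__update_map().
 */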
13526 
13527 /*
13528  * Swap the backing struct_ops map of a link with a new struct_ops map.
13529  */
13530 int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map)
13531 {
13532 	struct bpf_link_struct_ops *st_ops_link;
13533 	__u32 zero = 0;
13534 	int err;
13535 
13536 	if (!bpf_map__is_struct_ops(map))
13537 		return libbpf_err(-EINVAL);
13538 
13539 	if (map->fd < 0) {
13540 		pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name);
13541 		return libbpf_err(-EINVAL);
13542 	}
13543 
13544 	st_ops_link = container_of(link, struct bpf_link_struct_ops, link);
13545 	/* Ensure the type of a link is correct */
13546 	if (st_ops_link->map_fd < 0)
13547 		return libbpf_err(-EINVAL);
13548 
13549 	err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
13550 	/* It can be -EBUSY if the map has already been used to create or
13551 	 * update a link.  We don't allow updating the value of a struct_ops
13552 	 * once it is set, which ensures that the value never changes.  So,
13553 	 * it is safe to skip -EBUSY.
13554 	 */
13555 	if (err && err != -EBUSY)
13556 		return err;
13557 
13558 	err = bpf_link_update(link->fd, map->fd, NULL);
13559 	if (err < 0)
13560 		return err;
13561 
13562 	st_ops_link->map_fd = map->fd;
13563 
13564 	return 0;
13565 }
13566 
13567 typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr,
13568 							  void *private_data);
13569 
13570 static enum bpf_perf_event_ret
13571 perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
13572 		       void **copy_mem, size_t *copy_size,
13573 		       bpf_perf_event_print_t fn, void *private_data)
13574 {
13575 	struct perf_event_mmap_page *header = mmap_mem;
13576 	__u64 data_head = ring_buffer_read_head(header);
13577 	__u64 data_tail = header->data_tail;
13578 	void *base = ((__u8 *)header) + page_size;
13579 	int ret = LIBBPF_PERF_EVENT_CONT;
13580 	struct perf_event_header *ehdr;
13581 	size_t ehdr_size;
13582 
13583 	while (data_head != data_tail) {
13584 		ehdr = base + (data_tail & (mmap_size - 1));
13585 		ehdr_size = ehdr->size;
13586 
13587 		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
13588 			void *copy_start = ehdr;
13589 			size_t len_first = base + mmap_size - copy_start;
13590 			size_t len_secnd = ehdr_size - len_first;
13591 
13592 			if (*copy_size < ehdr_size) {
13593 				free(*copy_mem);
13594 				*copy_mem = malloc(ehdr_size);
13595 				if (!*copy_mem) {
13596 					*copy_size = 0;
13597 					ret = LIBBPF_PERF_EVENT_ERROR;
13598 					break;
13599 				}
13600 				*copy_size = ehdr_size;
13601 			}
13602 
13603 			memcpy(*copy_mem, copy_start, len_first);
13604 			memcpy(*copy_mem + len_first, base, len_secnd);
13605 			ehdr = *copy_mem;
13606 		}
13607 
13608 		ret = fn(ehdr, private_data);
13609 		data_tail += ehdr_size;
13610 		if (ret != LIBBPF_PERF_EVENT_CONT)
13611 			break;
13612 	}
13613 
13614 	ring_buffer_write_tail(header, data_tail);
13615 	return libbpf_err(ret);
13616 }
13617 
13618 struct perf_buffer;
13619 
13620 struct perf_buffer_params {
13621 	struct perf_event_attr *attr;
13622 	/* if event_cb is specified, it takes precedence */
13623 	perf_buffer_event_fn event_cb;
13624 	/* sample_cb and lost_cb are higher-level common-case callbacks */
13625 	perf_buffer_sample_fn sample_cb;
13626 	perf_buffer_lost_fn lost_cb;
13627 	void *ctx;
13628 	int cpu_cnt;
13629 	int *cpus;
13630 	int *map_keys;
13631 };
13632 
13633 struct perf_cpu_buf {
13634 	struct perf_buffer *pb;
13635 	void *base; /* mmap()'ed memory */
13636 	void *buf; /* for reconstructing segmented data */
13637 	size_t buf_size;
13638 	int fd;
13639 	int cpu;
13640 	int map_key;
13641 };
13642 
13643 struct perf_buffer {
13644 	perf_buffer_event_fn event_cb;
13645 	perf_buffer_sample_fn sample_cb;
13646 	perf_buffer_lost_fn lost_cb;
13647 	void *ctx; /* passed into callbacks */
13648 
13649 	size_t page_size;
13650 	size_t mmap_size;
13651 	struct perf_cpu_buf **cpu_bufs;
13652 	struct epoll_event *events;
13653 	int cpu_cnt; /* number of allocated CPU buffers */
13654 	int epoll_fd; /* epoll instance FD */
13655 	int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
13656 };
13657 
13658 static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
13659 				      struct perf_cpu_buf *cpu_buf)
13660 {
13661 	if (!cpu_buf)
13662 		return;
13663 	if (cpu_buf->base &&
13664 	    munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
13665 		pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
13666 	if (cpu_buf->fd >= 0) {
13667 		ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
13668 		close(cpu_buf->fd);
13669 	}
13670 	free(cpu_buf->buf);
13671 	free(cpu_buf);
13672 }
13673 
13674 void perf_buffer__free(struct perf_buffer *pb)
13675 {
13676 	int i;
13677 
13678 	if (IS_ERR_OR_NULL(pb))
13679 		return;
13680 	if (pb->cpu_bufs) {
13681 		for (i = 0; i < pb->cpu_cnt; i++) {
13682 			struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
13683 
13684 			if (!cpu_buf)
13685 				continue;
13686 
13687 			bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
13688 			perf_buffer__free_cpu_buf(pb, cpu_buf);
13689 		}
13690 		free(pb->cpu_bufs);
13691 	}
13692 	if (pb->epoll_fd >= 0)
13693 		close(pb->epoll_fd);
13694 	free(pb->events);
13695 	free(pb);
13696 }
13697 
13698 static struct perf_cpu_buf *
13699 perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
13700 			  int cpu, int map_key)
13701 {
13702 	struct perf_cpu_buf *cpu_buf;
13703 	int err;
13704 
13705 	cpu_buf = calloc(1, sizeof(*cpu_buf));
13706 	if (!cpu_buf)
13707 		return ERR_PTR(-ENOMEM);
13708 
13709 	cpu_buf->pb = pb;
13710 	cpu_buf->cpu = cpu;
13711 	cpu_buf->map_key = map_key;
13712 
13713 	cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
13714 			      -1, PERF_FLAG_FD_CLOEXEC);
13715 	if (cpu_buf->fd < 0) {
13716 		err = -errno;
13717 		pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
13718 			cpu, errstr(err));
13719 		goto error;
13720 	}
13721 
13722 	cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
13723 			     PROT_READ | PROT_WRITE, MAP_SHARED,
13724 			     cpu_buf->fd, 0);
13725 	if (cpu_buf->base == MAP_FAILED) {
13726 		cpu_buf->base = NULL;
13727 		err = -errno;
13728 		pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
13729 			cpu, errstr(err));
13730 		goto error;
13731 	}
13732 
13733 	if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
13734 		err = -errno;
13735 		pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
13736 			cpu, errstr(err));
13737 		goto error;
13738 	}
13739 
13740 	return cpu_buf;
13741 
13742 error:
13743 	perf_buffer__free_cpu_buf(pb, cpu_buf);
13744 	return (struct perf_cpu_buf *)ERR_PTR(err);
13745 }
13746 
13747 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
13748 					      struct perf_buffer_params *p);
13749 
13750 struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
13751 				     perf_buffer_sample_fn sample_cb,
13752 				     perf_buffer_lost_fn lost_cb,
13753 				     void *ctx,
13754 				     const struct perf_buffer_opts *opts)
13755 {
13756 	const size_t attr_sz = sizeof(struct perf_event_attr);
13757 	struct perf_buffer_params p = {};
13758 	struct perf_event_attr attr;
13759 	__u32 sample_period;
13760 
13761 	if (!OPTS_VALID(opts, perf_buffer_opts))
13762 		return libbpf_err_ptr(-EINVAL);
13763 
13764 	sample_period = OPTS_GET(opts, sample_period, 1);
13765 	if (!sample_period)
13766 		sample_period = 1;
13767 
13768 	memset(&attr, 0, attr_sz);
13769 	attr.size = attr_sz;
13770 	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
13771 	attr.type = PERF_TYPE_SOFTWARE;
13772 	attr.sample_type = PERF_SAMPLE_RAW;
13773 	attr.wakeup_events = sample_period;
13774 
13775 	p.attr = &attr;
13776 	p.sample_cb = sample_cb;
13777 	p.lost_cb = lost_cb;
13778 	p.ctx = ctx;
13779 
13780 	return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
13781 }
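
/* Illustrative usage sketch (not part of libbpf); handle_sample and the
 * perf event array map are hypothetical:
 *
 *   static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
 *   {
 *           // consume one raw sample emitted via bpf_perf_event_output()
 *   }
 *
 *   struct perf_buffer *pb;
 *
 *   pb = perf_buffer__new(bpf_map__fd(map), 8, handle_sample, NULL, NULL, NULL);
 *   if (!pb)
 *           return -errno;
 *   while (perf_buffer__poll(pb, 100) >= 0)   // 100ms timeout per poll
 *           ;
 *   perf_buffer__free(pb);
 */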
13782 
13783 struct perf_buffer *perf_buffer__new_raw(int map_fd, size_t page_cnt,
13784 					 struct perf_event_attr *attr,
13785 					 perf_buffer_event_fn event_cb, void *ctx,
13786 					 const struct perf_buffer_raw_opts *opts)
13787 {
13788 	struct perf_buffer_params p = {};
13789 
13790 	if (!attr)
13791 		return libbpf_err_ptr(-EINVAL);
13792 
13793 	if (!OPTS_VALID(opts, perf_buffer_raw_opts))
13794 		return libbpf_err_ptr(-EINVAL);
13795 
13796 	p.attr = attr;
13797 	p.event_cb = event_cb;
13798 	p.ctx = ctx;
13799 	p.cpu_cnt = OPTS_GET(opts, cpu_cnt, 0);
13800 	p.cpus = OPTS_GET(opts, cpus, NULL);
13801 	p.map_keys = OPTS_GET(opts, map_keys, NULL);
13802 
13803 	return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
13804 }
13805 
13806 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
13807 					      struct perf_buffer_params *p)
13808 {
13809 	const char *online_cpus_file = "/sys/devices/system/cpu/online";
13810 	struct bpf_map_info map;
13811 	struct perf_buffer *pb;
13812 	bool *online = NULL;
13813 	__u32 map_info_len;
13814 	int err, i, j, n;
13815 
13816 	if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) {
13817 		pr_warn("page count should be power of two, but is %zu\n",
13818 			page_cnt);
13819 		return ERR_PTR(-EINVAL);
13820 	}
13821 
13822 	/* best-effort sanity checks */
13823 	memset(&map, 0, sizeof(map));
13824 	map_info_len = sizeof(map);
13825 	err = bpf_map_get_info_by_fd(map_fd, &map, &map_info_len);
13826 	if (err) {
13827 		err = -errno;
13828 		/* if BPF_OBJ_GET_INFO_BY_FD is supported, it will return
13829 		 * -EBADFD, -EFAULT, or -E2BIG on a real error
13830 		 */
13831 		if (err != -EINVAL) {
13832 			pr_warn("failed to get map info for map FD %d: %s\n",
13833 				map_fd, errstr(err));
13834 			return ERR_PTR(err);
13835 		}
13836 		pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
13837 			 map_fd);
13838 	} else {
13839 		if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
13840 			pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
13841 				map.name);
13842 			return ERR_PTR(-EINVAL);
13843 		}
13844 	}
13845 
13846 	pb = calloc(1, sizeof(*pb));
13847 	if (!pb)
13848 		return ERR_PTR(-ENOMEM);
13849 
13850 	pb->event_cb = p->event_cb;
13851 	pb->sample_cb = p->sample_cb;
13852 	pb->lost_cb = p->lost_cb;
13853 	pb->ctx = p->ctx;
13854 
13855 	pb->page_size = getpagesize();
13856 	pb->mmap_size = pb->page_size * page_cnt;
13857 	pb->map_fd = map_fd;
13858 
13859 	pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
13860 	if (pb->epoll_fd < 0) {
13861 		err = -errno;
13862 		pr_warn("failed to create epoll instance: %s\n",
13863 			errstr(err));
13864 		goto error;
13865 	}
13866 
13867 	if (p->cpu_cnt > 0) {
13868 		pb->cpu_cnt = p->cpu_cnt;
13869 	} else {
13870 		pb->cpu_cnt = libbpf_num_possible_cpus();
13871 		if (pb->cpu_cnt < 0) {
13872 			err = pb->cpu_cnt;
13873 			goto error;
13874 		}
13875 		if (map.max_entries && map.max_entries < pb->cpu_cnt)
13876 			pb->cpu_cnt = map.max_entries;
13877 	}
13878 
13879 	pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
13880 	if (!pb->events) {
13881 		err = -ENOMEM;
13882 		pr_warn("failed to allocate events: out of memory\n");
13883 		goto error;
13884 	}
13885 	pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
13886 	if (!pb->cpu_bufs) {
13887 		err = -ENOMEM;
13888 		pr_warn("failed to allocate buffers: out of memory\n");
13889 		goto error;
13890 	}
13891 
13892 	err = parse_cpu_mask_file(online_cpus_file, &online, &n);
13893 	if (err) {
13894 		pr_warn("failed to get online CPU mask: %s\n", errstr(err));
13895 		goto error;
13896 	}
13897 
13898 	for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
13899 		struct perf_cpu_buf *cpu_buf;
13900 		int cpu, map_key;
13901 
13902 		cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
13903 		map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
13904 
13905 		/* if the user didn't explicitly request particular CPUs to
13906 		 * attach to, skip offline/not-present CPUs
13907 		 */
13908 		if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
13909 			continue;
13910 
13911 		cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
13912 		if (IS_ERR(cpu_buf)) {
13913 			err = PTR_ERR(cpu_buf);
13914 			goto error;
13915 		}
13916 
13917 		pb->cpu_bufs[j] = cpu_buf;
13918 
13919 		err = bpf_map_update_elem(pb->map_fd, &map_key,
13920 					  &cpu_buf->fd, 0);
13921 		if (err) {
13922 			err = -errno;
13923 			pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
13924 				cpu, map_key, cpu_buf->fd,
13925 				errstr(err));
13926 			goto error;
13927 		}
13928 
13929 		pb->events[j].events = EPOLLIN;
13930 		pb->events[j].data.ptr = cpu_buf;
13931 		if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
13932 			      &pb->events[j]) < 0) {
13933 			err = -errno;
13934 			pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
13935 				cpu, cpu_buf->fd,
13936 				errstr(err));
13937 			goto error;
13938 		}
13939 		j++;
13940 	}
13941 	pb->cpu_cnt = j;
13942 	free(online);
13943 
13944 	return pb;
13945 
13946 error:
13947 	free(online);
13948 	if (pb)
13949 		perf_buffer__free(pb);
13950 	return ERR_PTR(err);
13951 }
13952 
13953 struct perf_sample_raw {
13954 	struct perf_event_header header;
13955 	uint32_t size;
13956 	char data[];
13957 };
13958 
13959 struct perf_sample_lost {
13960 	struct perf_event_header header;
13961 	uint64_t id;
13962 	uint64_t lost;
13963 	uint64_t sample_id;
13964 };
13965 
13966 static enum bpf_perf_event_ret
13967 perf_buffer__process_record(struct perf_event_header *e, void *ctx)
13968 {
13969 	struct perf_cpu_buf *cpu_buf = ctx;
13970 	struct perf_buffer *pb = cpu_buf->pb;
13971 	void *data = e;
13972 
13973 	/* user wants full control over parsing perf event */
13974 	if (pb->event_cb)
13975 		return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
13976 
13977 	switch (e->type) {
13978 	case PERF_RECORD_SAMPLE: {
13979 		struct perf_sample_raw *s = data;
13980 
13981 		if (pb->sample_cb)
13982 			pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
13983 		break;
13984 	}
13985 	case PERF_RECORD_LOST: {
13986 		struct perf_sample_lost *s = data;
13987 
13988 		if (pb->lost_cb)
13989 			pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
13990 		break;
13991 	}
13992 	default:
13993 		pr_warn("unknown perf sample type %d\n", e->type);
13994 		return LIBBPF_PERF_EVENT_ERROR;
13995 	}
13996 	return LIBBPF_PERF_EVENT_CONT;
13997 }
13998 
13999 static int perf_buffer__process_records(struct perf_buffer *pb,
14000 					struct perf_cpu_buf *cpu_buf)
14001 {
14002 	enum bpf_perf_event_ret ret;
14003 
14004 	ret = perf_event_read_simple(cpu_buf->base, pb->mmap_size,
14005 				     pb->page_size, &cpu_buf->buf,
14006 				     &cpu_buf->buf_size,
14007 				     perf_buffer__process_record, cpu_buf);
14008 	if (ret != LIBBPF_PERF_EVENT_CONT)
14009 		return ret;
14010 	return 0;
14011 }
14012 
14013 int perf_buffer__epoll_fd(const struct perf_buffer *pb)
14014 {
14015 	return pb->epoll_fd;
14016 }
14017 
14018 int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
14019 {
14020 	int i, cnt, err;
14021 
14022 	cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
14023 	if (cnt < 0)
14024 		return -errno;
14025 
14026 	for (i = 0; i < cnt; i++) {
14027 		struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
14028 
14029 		err = perf_buffer__process_records(pb, cpu_buf);
14030 		if (err) {
14031 			pr_warn("error while processing records: %s\n", errstr(err));
14032 			return libbpf_err(err);
14033 		}
14034 	}
14035 	return cnt;
14036 }
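
/* Typical polling loop (sketch; the exiting flag is a placeholder for
 * application-specific termination logic). perf_buffer__poll() returns
 * the number of buffers that had data, 0 on timeout (100 ms here), or a
 * negative error:
 *
 *	while (!exiting) {
 *		int n = perf_buffer__poll(pb, 100);
 *
 *		if (n < 0 && n != -EINTR)
 *			break;
 *	}
 */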
14037 
14038 /* Return the number of PERF_EVENT_ARRAY map slots set up by this
14039  * perf_buffer manager.
14040  */
14041 size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
14042 {
14043 	return pb->cpu_cnt;
14044 }
14045 
14046 /*
14047  * Return the perf_event FD of the ring buffer in slot *buf_idx* of the
14048  * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using the
14049  * select()/poll()/epoll() Linux syscalls.
14050  */
14051 int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
14052 {
14053 	struct perf_cpu_buf *cpu_buf;
14054 
14055 	if (buf_idx >= pb->cpu_cnt)
14056 		return libbpf_err(-EINVAL);
14057 
14058 	cpu_buf = pb->cpu_bufs[buf_idx];
14059 	if (!cpu_buf)
14060 		return libbpf_err(-ENOENT);
14061 
14062 	return cpu_buf->fd;
14063 }
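
/* Sketch of integrating a single ring into a caller-owned epoll set
 * (my_epfd and idx are placeholders). Once the FD signals readiness,
 * that ring can be drained with perf_buffer__consume_buffer(pb, idx):
 *
 *	struct epoll_event ev = { .events = EPOLLIN, .data.u64 = idx };
 *	int fd = perf_buffer__buffer_fd(pb, idx);
 *
 *	if (fd >= 0 && epoll_ctl(my_epfd, EPOLL_CTL_ADD, fd, &ev) < 0)
 *		err = -errno;
 */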
14064 
14065 int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf, size_t *buf_size)
14066 {
14067 	struct perf_cpu_buf *cpu_buf;
14068 
14069 	if (buf_idx >= pb->cpu_cnt)
14070 		return libbpf_err(-EINVAL);
14071 
14072 	cpu_buf = pb->cpu_bufs[buf_idx];
14073 	if (!cpu_buf)
14074 		return libbpf_err(-ENOENT);
14075 
14076 	*buf = cpu_buf->base;
14077 	*buf_size = pb->mmap_size;
14078 	return 0;
14079 }
14080 
14081 /*
14082  * Consume data from perf ring buffer corresponding to slot *buf_idx* in
14083  * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
14084  * consume, do nothing and return success.
14085  * Returns:
14086  *   - 0 on success;
14087  *   - <0 on failure.
14088  */
14089 int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
14090 {
14091 	struct perf_cpu_buf *cpu_buf;
14092 
14093 	if (buf_idx >= pb->cpu_cnt)
14094 		return libbpf_err(-EINVAL);
14095 
14096 	cpu_buf = pb->cpu_bufs[buf_idx];
14097 	if (!cpu_buf)
14098 		return libbpf_err(-ENOENT);
14099 
14100 	return perf_buffer__process_records(pb, cpu_buf);
14101 }
14102 
14103 int perf_buffer__consume(struct perf_buffer *pb)
14104 {
14105 	int i, err;
14106 
14107 	for (i = 0; i < pb->cpu_cnt; i++) {
14108 		struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
14109 
14110 		if (!cpu_buf)
14111 			continue;
14112 
14113 		err = perf_buffer__process_records(pb, cpu_buf);
14114 		if (err) {
14115 			pr_warn("perf_buffer: failed to process records in buffer #%d: %s\n",
14116 				i, errstr(err));
14117 			return libbpf_err(err);
14118 		}
14119 	}
14120 	return 0;
14121 }
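
/* Non-blocking drain sketch: consume whatever is pending in every ring,
 * e.g. one final sweep before tearing the buffer down:
 *
 *	err = perf_buffer__consume(pb);
 *	perf_buffer__free(pb);
 */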
14122 
14123 int bpf_program__set_attach_target(struct bpf_program *prog,
14124 				   int attach_prog_fd,
14125 				   const char *attach_func_name)
14126 {
14127 	int btf_obj_fd = 0, btf_id = 0, err;
14128 
14129 	if (!prog || attach_prog_fd < 0)
14130 		return libbpf_err(-EINVAL);
14131 
14132 	if (prog->obj->state >= OBJ_LOADED)
14133 		return libbpf_err(-EINVAL);
14134 
14135 	if (attach_prog_fd && !attach_func_name) {
14136 		/* Store attach_prog_fd. The BTF ID will be resolved later during
14137 		 * the normal object/program load phase.
14138 		 */
14139 		prog->attach_prog_fd = attach_prog_fd;
14140 		return 0;
14141 	}
14142 
14143 	if (attach_prog_fd) {
14144 		btf_id = libbpf_find_prog_btf_id(attach_func_name,
14145 						 attach_prog_fd, prog->obj->token_fd);
14146 		if (btf_id < 0)
14147 			return libbpf_err(btf_id);
14148 	} else {
14149 		if (!attach_func_name)
14150 			return libbpf_err(-EINVAL);
14151 
14152 		/* load btf_vmlinux, if not loaded yet */
14153 		err = bpf_object__load_vmlinux_btf(prog->obj, true);
14154 		if (err)
14155 			return libbpf_err(err);
14156 		err = find_kernel_btf_id(prog->obj, attach_func_name,
14157 					 prog->expected_attach_type,
14158 					 &btf_obj_fd, &btf_id);
14159 		if (err)
14160 			return libbpf_err(err);
14161 	}
14162 
14163 	prog->attach_btf_id = btf_id;
14164 	prog->attach_btf_obj_fd = btf_obj_fd;
14165 	prog->attach_prog_fd = attach_prog_fd;
14166 	return 0;
14167 }
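
/* Usage sketch for a program (e.g. freplace or fentry) whose attach
 * target is only known at runtime; obj, target_fd, and the program and
 * function names are placeholders. This must be called after open, but
 * before load:
 *
 *	prog = bpf_object__find_program_by_name(obj, "new_impl");
 *	err = bpf_program__set_attach_target(prog, target_fd, "target_func");
 *	if (!err)
 *		err = bpf_object__load(obj);
 */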
14168 
14169 int bpf_program__assoc_struct_ops(struct bpf_program *prog, struct bpf_map *map,
14170 				  struct bpf_prog_assoc_struct_ops_opts *opts)
14171 {
14172 	int prog_fd, map_fd;
14173 
14174 	prog_fd = bpf_program__fd(prog);
14175 	if (prog_fd < 0) {
14176 		pr_warn("prog '%s': can't associate BPF program without FD (was it loaded?)\n",
14177 			prog->name);
14178 		return libbpf_err(-EINVAL);
14179 	}
14180 
14181 	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) {
14182 		pr_warn("prog '%s': can't associate struct_ops program\n", prog->name);
14183 		return libbpf_err(-EINVAL);
14184 	}
14185 
14186 	map_fd = bpf_map__fd(map);
14187 	if (map_fd < 0) {
14188 		pr_warn("map '%s': can't associate BPF map without FD (was it created?)\n", map->name);
14189 		return libbpf_err(-EINVAL);
14190 	}
14191 
14192 	if (!bpf_map__is_struct_ops(map)) {
14193 		pr_warn("map '%s': can't associate non-struct_ops map\n", map->name);
14194 		return libbpf_err(-EINVAL);
14195 	}
14196 
14197 	return bpf_prog_assoc_struct_ops(prog_fd, map_fd, opts);
14198 }
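
/* Minimal sketch, assuming prog has already been loaded, map is an
 * already-created struct_ops map, and that NULL is acceptable for the
 * OPTS-style opts argument (requesting default behavior):
 *
 *	err = bpf_program__assoc_struct_ops(prog, map, NULL);
 */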
14199 
14200 int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
14201 {
14202 	int err = 0, n, len, start, end = -1;
14203 	bool *tmp;
14204 
14205 	*mask = NULL;
14206 	*mask_sz = 0;
14207 
14208 	/* Each substring separated by ',' has the format \d+-\d+ or \d+ */
14209 	while (*s) {
14210 		if (*s == ',' || *s == '\n') {
14211 			s++;
14212 			continue;
14213 		}
14214 		n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
14215 		if (n <= 0 || n > 2) {
14216 			pr_warn("Failed to get CPU range %s: %d\n", s, n);
14217 			err = -EINVAL;
14218 			goto cleanup;
14219 		} else if (n == 1) {
14220 			end = start;
14221 		}
14222 		if (start < 0 || start > end) {
14223 			pr_warn("Invalid CPU range [%d,%d] in %s\n",
14224 				start, end, s);
14225 			err = -EINVAL;
14226 			goto cleanup;
14227 		}
14228 		tmp = realloc(*mask, end + 1);
14229 		if (!tmp) {
14230 			err = -ENOMEM;
14231 			goto cleanup;
14232 		}
14233 		*mask = tmp;
14234 		memset(tmp + *mask_sz, 0, start - *mask_sz);
14235 		memset(tmp + start, 1, end - start + 1);
14236 		*mask_sz = end + 1;
14237 		s += len;
14238 	}
14239 	if (!*mask_sz) {
14240 		pr_warn("Empty CPU range\n");
14241 		return -EINVAL;
14242 	}
14243 	return 0;
14244 cleanup:
14245 	free(*mask);
14246 	*mask = NULL;
14247 	return err;
14248 }
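
/* Internal-use sketch: *mask comes back as a heap-allocated bool array
 * that the caller must free. For the string "0-2,4\n" this produces
 * mask_sz == 5 with mask[0..2] and mask[4] set:
 *
 *	bool *mask;
 *	int n, err;
 *
 *	err = parse_cpu_mask_str("0-2,4\n", &mask, &n);
 *	if (!err)
 *		free(mask);
 */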
14249 
14250 int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
14251 {
14252 	int fd, err = 0, len;
14253 	char buf[128];
14254 
14255 	fd = open(fcpu, O_RDONLY | O_CLOEXEC);
14256 	if (fd < 0) {
14257 		err = -errno;
14258 		pr_warn("Failed to open cpu mask file %s: %s\n", fcpu, errstr(err));
14259 		return err;
14260 	}
14261 	len = read(fd, buf, sizeof(buf));
14262 	close(fd);
14263 	if (len <= 0) {
14264 		err = len ? -errno : -EINVAL;
14265 		pr_warn("Failed to read cpu mask from %s: %s\n", fcpu, errstr(err));
14266 		return err;
14267 	}
14268 	if (len >= sizeof(buf)) {
14269 		pr_warn("CPU mask is too big in file %s\n", fcpu);
14270 		return -E2BIG;
14271 	}
14272 	buf[len] = '\0';
14273 
14274 	return parse_cpu_mask_str(buf, mask, mask_sz);
14275 }
14276 
14277 int libbpf_num_possible_cpus(void)
14278 {
14279 	static const char *fcpu = "/sys/devices/system/cpu/possible";
14280 	static int cpus;
14281 	int err, n, i, tmp_cpus;
14282 	bool *mask;
14283 
14284 	tmp_cpus = READ_ONCE(cpus);
14285 	if (tmp_cpus > 0)
14286 		return tmp_cpus;
14287 
14288 	err = parse_cpu_mask_file(fcpu, &mask, &n);
14289 	if (err)
14290 		return libbpf_err(err);
14291 
14292 	tmp_cpus = 0;
14293 	for (i = 0; i < n; i++) {
14294 		if (mask[i])
14295 			tmp_cpus++;
14296 	}
14297 	free(mask);
14298 
14299 	WRITE_ONCE(cpus, tmp_cpus);
14300 	return tmp_cpus;
14301 }
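
/* Typical use: sizing a value buffer for a per-CPU map lookup (map_fd
 * and key are placeholders):
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	__u64 *values;
 *
 *	if (ncpus < 0)
 *		return ncpus;
 *	values = calloc(ncpus, sizeof(*values));
 *	err = bpf_map_lookup_elem(map_fd, &key, values);
 */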
14302 
14303 static int populate_skeleton_maps(const struct bpf_object *obj,
14304 				  struct bpf_map_skeleton *maps,
14305 				  size_t map_cnt, size_t map_skel_sz)
14306 {
14307 	int i;
14308 
14309 	for (i = 0; i < map_cnt; i++) {
14310 		struct bpf_map_skeleton *map_skel = (void *)maps + i * map_skel_sz;
14311 		struct bpf_map **map = map_skel->map;
14312 		const char *name = map_skel->name;
14313 		void **mmaped = map_skel->mmaped;
14314 
14315 		*map = bpf_object__find_map_by_name(obj, name);
14316 		if (!*map) {
14317 			pr_warn("failed to find skeleton map '%s'\n", name);
14318 			return -ESRCH;
14319 		}
14320 
14321 		/* externs shouldn't be pre-initialized from user code */
14322 		if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
14323 			*mmaped = (*map)->mmaped;
14324 	}
14325 	return 0;
14326 }
14327 
14328 static int populate_skeleton_progs(const struct bpf_object *obj,
14329 				   struct bpf_prog_skeleton *progs,
14330 				   size_t prog_cnt, size_t prog_skel_sz)
14331 {
14332 	int i;
14333 
14334 	for (i = 0; i < prog_cnt; i++) {
14335 		struct bpf_prog_skeleton *prog_skel = (void *)progs + i * prog_skel_sz;
14336 		struct bpf_program **prog = prog_skel->prog;
14337 		const char *name = prog_skel->name;
14338 
14339 		*prog = bpf_object__find_program_by_name(obj, name);
14340 		if (!*prog) {
14341 			pr_warn("failed to find skeleton program '%s'\n", name);
14342 			return -ESRCH;
14343 		}
14344 	}
14345 	return 0;
14346 }
14347 
14348 int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
14349 			      const struct bpf_object_open_opts *opts)
14350 {
14351 	struct bpf_object *obj;
14352 	int err;
14353 
14354 	obj = bpf_object_open(NULL, s->data, s->data_sz, s->name, opts);
14355 	if (IS_ERR(obj)) {
14356 		err = PTR_ERR(obj);
14357 		pr_warn("failed to initialize skeleton BPF object '%s': %s\n",
14358 			s->name, errstr(err));
14359 		return libbpf_err(err);
14360 	}
14361 
14362 	*s->obj = obj;
14363 	err = populate_skeleton_maps(obj, s->maps, s->map_cnt, s->map_skel_sz);
14364 	if (err) {
14365 		pr_warn("failed to populate skeleton maps for '%s': %s\n", s->name, errstr(err));
14366 		return libbpf_err(err);
14367 	}
14368 
14369 	err = populate_skeleton_progs(obj, s->progs, s->prog_cnt, s->prog_skel_sz);
14370 	if (err) {
14371 		pr_warn("failed to populate skeleton progs for '%s': %s\n", s->name, errstr(err));
14372 		return libbpf_err(err);
14373 	}
14374 
14375 	return 0;
14376 }
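
/* The skeleton APIs are normally driven by bpftool-generated code rather
 * than called directly. For a hypothetical object "myprog", bpftool gen
 * skeleton emits wrappers along these lines (names are illustrative):
 *
 *	struct myprog_bpf *skel = myprog_bpf__open();	(bpf_object__open_skeleton)
 *	err = myprog_bpf__load(skel);			(bpf_object__load_skeleton)
 *	err = myprog_bpf__attach(skel);			(bpf_object__attach_skeleton)
 *	...
 *	myprog_bpf__destroy(skel);			(detach + destroy_skeleton)
 */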
14377 
14378 int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
14379 {
14380 	int err, len, var_idx, i;
14381 	const char *var_name;
14382 	const struct bpf_map *map;
14383 	struct btf *btf;
14384 	__u32 map_type_id;
14385 	const struct btf_type *map_type, *var_type;
14386 	const struct bpf_var_skeleton *var_skel;
14387 	struct btf_var_secinfo *var;
14388 
14389 	if (!s->obj)
14390 		return libbpf_err(-EINVAL);
14391 
14392 	btf = bpf_object__btf(s->obj);
14393 	if (!btf) {
14394 		pr_warn("subskeletons require BTF at runtime (object %s)\n",
14395 			bpf_object__name(s->obj));
14396 		return libbpf_err(-EINVAL); /* errno isn't set on this path */
14397 	}
14398 
14399 	err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt, s->map_skel_sz);
14400 	if (err) {
14401 		pr_warn("failed to populate subskeleton maps: %s\n", errstr(err));
14402 		return libbpf_err(err);
14403 	}
14404 
14405 	err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt, s->prog_skel_sz);
14406 	if (err) {
14407 		pr_warn("failed to populate subskeleton maps: %s\n", errstr(err));
14408 		return libbpf_err(err);
14409 	}
14410 
14411 	for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
14412 		var_skel = (void *)s->vars + var_idx * s->var_skel_sz;
14413 		map = *var_skel->map;
14414 		map_type_id = bpf_map__btf_value_type_id(map);
14415 		map_type = btf__type_by_id(btf, map_type_id);
14416 
14417 		if (!btf_is_datasec(map_type)) {
14418 			pr_warn("type for map '%1$s' is not a datasec: %2$s\n",
14419 				bpf_map__name(map),
14420 				__btf_kind_str(btf_kind(map_type)));
14421 			return libbpf_err(-EINVAL);
14422 		}
14423 
14424 		len = btf_vlen(map_type);
14425 		var = btf_var_secinfos(map_type);
14426 		for (i = 0; i < len; i++, var++) {
14427 			var_type = btf__type_by_id(btf, var->type);
14428 			var_name = btf__name_by_offset(btf, var_type->name_off);
14429 			if (strcmp(var_name, var_skel->name) == 0) {
14430 				*var_skel->addr = map->mmaped + var->offset;
14431 				break;
14432 			}
14433 		}
14434 	}
14435 	return 0;
14436 }
14437 
14438 void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s)
14439 {
14440 	if (!s)
14441 		return;
14442 	free(s->maps);
14443 	free(s->progs);
14444 	free(s->vars);
14445 	free(s);
14446 }
14447 
14448 int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
14449 {
14450 	int i, err;
14451 
14452 	err = bpf_object__load(*s->obj);
14453 	if (err) {
14454 		pr_warn("failed to load BPF skeleton '%s': %s\n", s->name, errstr(err));
14455 		return libbpf_err(err);
14456 	}
14457 
14458 	for (i = 0; i < s->map_cnt; i++) {
14459 		struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
14460 		struct bpf_map *map = *map_skel->map;
14461 
14462 		if (!map_skel->mmaped)
14463 			continue;
14464 
14465 		if (map->def.type == BPF_MAP_TYPE_ARENA)
14466 			*map_skel->mmaped = map->mmaped + map->obj->arena_data_off;
14467 		else
14468 			*map_skel->mmaped = map->mmaped;
14469 	}
14470 
14471 	return 0;
14472 }
14473 
14474 int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
14475 {
14476 	int i, err;
14477 
14478 	for (i = 0; i < s->prog_cnt; i++) {
14479 		struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz;
14480 		struct bpf_program *prog = *prog_skel->prog;
14481 		struct bpf_link **link = prog_skel->link;
14482 
14483 		if (!prog->autoload || !prog->autoattach)
14484 			continue;
14485 
14486 		/* auto-attaching not supported for this program */
14487 		if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
14488 			continue;
14489 
14490 		/* if user already set the link manually, don't attempt auto-attach */
14491 		if (*link)
14492 			continue;
14493 
14494 		err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, link);
14495 		if (err) {
14496 			pr_warn("prog '%s': failed to auto-attach: %s\n",
14497 				bpf_program__name(prog), errstr(err));
14498 			return libbpf_err(err);
14499 		}
14500 
14501 		/* For some SEC() definitions auto-attach is supported only in
14502 		 * certain cases (e.g., when the definition completely
14503 		 * specifies the target information). SEC("uprobe") is one
14504 		 * such case: if the user specified the target binary and
14505 		 * function name, the BPF program can be auto-attached, but
14506 		 * if not, that shouldn't make the skeleton's attach fail;
14507 		 * the program should just be skipped. attach_fn signals such
14508 		 * a case by returning 0 (no error) and setting the link to
14509 		 * NULL.
14510 		 */
14511 	}
14512 
14514 	for (i = 0; i < s->map_cnt; i++) {
14515 		struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
14516 		struct bpf_map *map = *map_skel->map;
14517 		struct bpf_link **link;
14518 
14519 		if (!map->autocreate || !map->autoattach)
14520 			continue;
14521 
14522 		/* only struct_ops maps can be attached */
14523 		if (!bpf_map__is_struct_ops(map))
14524 			continue;
14525 
14526 		/* skeleton was created with an earlier version of bpftool; notify the user */
14527 		if (s->map_skel_sz < offsetofend(struct bpf_map_skeleton, link)) {
14528 			pr_warn("map '%s': BPF skeleton version is old, skipping map auto-attachment...\n",
14529 				bpf_map__name(map));
14530 			continue;
14531 		}
14532 
14533 		link = map_skel->link;
14534 		if (!link) {
14535 			pr_warn("map '%s': BPF map skeleton link is uninitialized\n",
14536 				bpf_map__name(map));
14537 			continue;
14538 		}
14539 
14540 		if (*link)
14541 			continue;
14542 
14543 		*link = bpf_map__attach_struct_ops(map);
14544 		if (!*link) {
14545 			err = -errno;
14546 			pr_warn("map '%s': failed to auto-attach: %s\n",
14547 				bpf_map__name(map), errstr(err));
14548 			return libbpf_err(err);
14549 		}
14550 	}
14551 
14552 	return 0;
14553 }
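
/* As the comment above notes, optional auto-attach (e.g. SEC("uprobe")
 * without target info) is silently skipped, leaving the link NULL. A
 * caller can detect that and attach manually; skel and the handle_foo
 * names are placeholders:
 *
 *	if (!skel->links.handle_foo)
 *		skel->links.handle_foo =
 *			bpf_program__attach_uprobe(skel->progs.handle_foo,
 *						   false, -1, path, func_off);
 */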
14554 
14555 void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
14556 {
14557 	int i;
14558 
14559 	for (i = 0; i < s->prog_cnt; i++) {
14560 		struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz;
14561 		struct bpf_link **link = prog_skel->link;
14562 
14563 		bpf_link__destroy(*link);
14564 		*link = NULL;
14565 	}
14566 
14567 	if (s->map_skel_sz < sizeof(struct bpf_map_skeleton))
14568 		return;
14569 
14570 	for (i = 0; i < s->map_cnt; i++) {
14571 		struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
14572 		struct bpf_link **link = map_skel->link;
14573 
14574 		if (link) {
14575 			bpf_link__destroy(*link);
14576 			*link = NULL;
14577 		}
14578 	}
14579 }
14580 
14581 void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
14582 {
14583 	if (!s)
14584 		return;
14585 
14586 	bpf_object__detach_skeleton(s);
14587 	if (s->obj)
14588 		bpf_object__close(*s->obj);
14589 	free(s->maps);
14590 	free(s->progs);
14591 	free(s);
14592 }
14593