// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <ctype.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/bpf_perf_event.h>
#include <linux/ring_buffer.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <libelf.h>
#include <gelf.h>
#include <zlib.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"
#include "zip.h"

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC		0xcafe4a11
#endif

#define MAX_EVENT_NAME_LEN	64

#define BPF_FS_DEFAULT_PATH "/sys/fs/bpf"

#define BPF_INSN_SZ (sizeof(struct bpf_insn))

/* vsprintf() in __base_pr() uses nonliteral format string. It may break
 * compilation if user enables corresponding warning. Disable it explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b)	__attribute__((format(printf, a, b)))

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
static int map_set_def_max_entries(struct bpf_map *map);

static const char * const attach_type_name[] = {
	[BPF_CGROUP_INET_INGRESS] = "cgroup_inet_ingress",
	[BPF_CGROUP_INET_EGRESS] = "cgroup_inet_egress",
	[BPF_CGROUP_INET_SOCK_CREATE] = "cgroup_inet_sock_create",
	[BPF_CGROUP_INET_SOCK_RELEASE] = "cgroup_inet_sock_release",
	[BPF_CGROUP_SOCK_OPS] = "cgroup_sock_ops",
	[BPF_CGROUP_DEVICE] = "cgroup_device",
	[BPF_CGROUP_INET4_BIND] = "cgroup_inet4_bind",
	[BPF_CGROUP_INET6_BIND] = "cgroup_inet6_bind",
	[BPF_CGROUP_INET4_CONNECT] = "cgroup_inet4_connect",
	[BPF_CGROUP_INET6_CONNECT] = "cgroup_inet6_connect",
	[BPF_CGROUP_UNIX_CONNECT] = "cgroup_unix_connect",
	[BPF_CGROUP_INET4_POST_BIND] = "cgroup_inet4_post_bind",
	[BPF_CGROUP_INET6_POST_BIND] = "cgroup_inet6_post_bind",
	[BPF_CGROUP_INET4_GETPEERNAME] = "cgroup_inet4_getpeername",
	[BPF_CGROUP_INET6_GETPEERNAME] = "cgroup_inet6_getpeername",
	[BPF_CGROUP_UNIX_GETPEERNAME] = "cgroup_unix_getpeername",
	[BPF_CGROUP_INET4_GETSOCKNAME] = "cgroup_inet4_getsockname",
	[BPF_CGROUP_INET6_GETSOCKNAME] = "cgroup_inet6_getsockname",
	[BPF_CGROUP_UNIX_GETSOCKNAME] = "cgroup_unix_getsockname",
	[BPF_CGROUP_UDP4_SENDMSG] = "cgroup_udp4_sendmsg",
	[BPF_CGROUP_UDP6_SENDMSG] = "cgroup_udp6_sendmsg",
	[BPF_CGROUP_UNIX_SENDMSG] = "cgroup_unix_sendmsg",
	[BPF_CGROUP_SYSCTL] = "cgroup_sysctl",
	[BPF_CGROUP_UDP4_RECVMSG] = "cgroup_udp4_recvmsg",
	[BPF_CGROUP_UDP6_RECVMSG] = "cgroup_udp6_recvmsg",
	[BPF_CGROUP_UNIX_RECVMSG] = "cgroup_unix_recvmsg",
	[BPF_CGROUP_GETSOCKOPT] = "cgroup_getsockopt",
	[BPF_CGROUP_SETSOCKOPT] = "cgroup_setsockopt",
	[BPF_SK_SKB_STREAM_PARSER] = "sk_skb_stream_parser",
	[BPF_SK_SKB_STREAM_VERDICT] = "sk_skb_stream_verdict",
	[BPF_SK_SKB_VERDICT] = "sk_skb_verdict",
	[BPF_SK_MSG_VERDICT] = "sk_msg_verdict",
	[BPF_LIRC_MODE2] = "lirc_mode2",
	[BPF_FLOW_DISSECTOR] = "flow_dissector",
	[BPF_TRACE_RAW_TP] = "trace_raw_tp",
	[BPF_TRACE_FENTRY] = "trace_fentry",
	[BPF_TRACE_FEXIT] = "trace_fexit",
	[BPF_MODIFY_RETURN] = "modify_return",
	[BPF_TRACE_FSESSION] = "trace_fsession",
	[BPF_LSM_MAC] = "lsm_mac",
	[BPF_LSM_CGROUP] = "lsm_cgroup",
	[BPF_SK_LOOKUP] = "sk_lookup",
	[BPF_TRACE_ITER] = "trace_iter",
	[BPF_XDP_DEVMAP] = "xdp_devmap",
	[BPF_XDP_CPUMAP] = "xdp_cpumap",
	[BPF_XDP] = "xdp",
	[BPF_SK_REUSEPORT_SELECT] = "sk_reuseport_select",
	[BPF_SK_REUSEPORT_SELECT_OR_MIGRATE] = "sk_reuseport_select_or_migrate",
	[BPF_PERF_EVENT] = "perf_event",
	[BPF_TRACE_KPROBE_MULTI] = "trace_kprobe_multi",
	[BPF_STRUCT_OPS] = "struct_ops",
	[BPF_NETFILTER] = "netfilter",
	[BPF_TCX_INGRESS] = "tcx_ingress",
	[BPF_TCX_EGRESS] = "tcx_egress",
	[BPF_TRACE_UPROBE_MULTI] = "trace_uprobe_multi",
	[BPF_NETKIT_PRIMARY] = "netkit_primary",
	[BPF_NETKIT_PEER] = "netkit_peer",
	[BPF_TRACE_KPROBE_SESSION] = "trace_kprobe_session",
	[BPF_TRACE_UPROBE_SESSION] = "trace_uprobe_session",
};

static const char * const link_type_name[] = {
	[BPF_LINK_TYPE_UNSPEC] = "unspec",
	[BPF_LINK_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
	[BPF_LINK_TYPE_TRACING] = "tracing",
	[BPF_LINK_TYPE_CGROUP] = "cgroup",
"cgroup", 146 [BPF_LINK_TYPE_ITER] = "iter", 147 [BPF_LINK_TYPE_NETNS] = "netns", 148 [BPF_LINK_TYPE_XDP] = "xdp", 149 [BPF_LINK_TYPE_PERF_EVENT] = "perf_event", 150 [BPF_LINK_TYPE_KPROBE_MULTI] = "kprobe_multi", 151 [BPF_LINK_TYPE_STRUCT_OPS] = "struct_ops", 152 [BPF_LINK_TYPE_NETFILTER] = "netfilter", 153 [BPF_LINK_TYPE_TCX] = "tcx", 154 [BPF_LINK_TYPE_UPROBE_MULTI] = "uprobe_multi", 155 [BPF_LINK_TYPE_NETKIT] = "netkit", 156 [BPF_LINK_TYPE_SOCKMAP] = "sockmap", 157 }; 158 159 static const char * const map_type_name[] = { 160 [BPF_MAP_TYPE_UNSPEC] = "unspec", 161 [BPF_MAP_TYPE_HASH] = "hash", 162 [BPF_MAP_TYPE_ARRAY] = "array", 163 [BPF_MAP_TYPE_PROG_ARRAY] = "prog_array", 164 [BPF_MAP_TYPE_PERF_EVENT_ARRAY] = "perf_event_array", 165 [BPF_MAP_TYPE_PERCPU_HASH] = "percpu_hash", 166 [BPF_MAP_TYPE_PERCPU_ARRAY] = "percpu_array", 167 [BPF_MAP_TYPE_STACK_TRACE] = "stack_trace", 168 [BPF_MAP_TYPE_CGROUP_ARRAY] = "cgroup_array", 169 [BPF_MAP_TYPE_LRU_HASH] = "lru_hash", 170 [BPF_MAP_TYPE_LRU_PERCPU_HASH] = "lru_percpu_hash", 171 [BPF_MAP_TYPE_LPM_TRIE] = "lpm_trie", 172 [BPF_MAP_TYPE_ARRAY_OF_MAPS] = "array_of_maps", 173 [BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps", 174 [BPF_MAP_TYPE_DEVMAP] = "devmap", 175 [BPF_MAP_TYPE_DEVMAP_HASH] = "devmap_hash", 176 [BPF_MAP_TYPE_SOCKMAP] = "sockmap", 177 [BPF_MAP_TYPE_CPUMAP] = "cpumap", 178 [BPF_MAP_TYPE_XSKMAP] = "xskmap", 179 [BPF_MAP_TYPE_SOCKHASH] = "sockhash", 180 [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage", 181 [BPF_MAP_TYPE_REUSEPORT_SOCKARRAY] = "reuseport_sockarray", 182 [BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE] = "percpu_cgroup_storage", 183 [BPF_MAP_TYPE_QUEUE] = "queue", 184 [BPF_MAP_TYPE_STACK] = "stack", 185 [BPF_MAP_TYPE_SK_STORAGE] = "sk_storage", 186 [BPF_MAP_TYPE_STRUCT_OPS] = "struct_ops", 187 [BPF_MAP_TYPE_RINGBUF] = "ringbuf", 188 [BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage", 189 [BPF_MAP_TYPE_TASK_STORAGE] = "task_storage", 190 [BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter", 191 [BPF_MAP_TYPE_USER_RINGBUF] = "user_ringbuf", 192 [BPF_MAP_TYPE_CGRP_STORAGE] = "cgrp_storage", 193 [BPF_MAP_TYPE_ARENA] = "arena", 194 [BPF_MAP_TYPE_INSN_ARRAY] = "insn_array", 195 }; 196 197 static const char * const prog_type_name[] = { 198 [BPF_PROG_TYPE_UNSPEC] = "unspec", 199 [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter", 200 [BPF_PROG_TYPE_KPROBE] = "kprobe", 201 [BPF_PROG_TYPE_SCHED_CLS] = "sched_cls", 202 [BPF_PROG_TYPE_SCHED_ACT] = "sched_act", 203 [BPF_PROG_TYPE_TRACEPOINT] = "tracepoint", 204 [BPF_PROG_TYPE_XDP] = "xdp", 205 [BPF_PROG_TYPE_PERF_EVENT] = "perf_event", 206 [BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb", 207 [BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock", 208 [BPF_PROG_TYPE_LWT_IN] = "lwt_in", 209 [BPF_PROG_TYPE_LWT_OUT] = "lwt_out", 210 [BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit", 211 [BPF_PROG_TYPE_SOCK_OPS] = "sock_ops", 212 [BPF_PROG_TYPE_SK_SKB] = "sk_skb", 213 [BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device", 214 [BPF_PROG_TYPE_SK_MSG] = "sk_msg", 215 [BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint", 216 [BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr", 217 [BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local", 218 [BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2", 219 [BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport", 220 [BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector", 221 [BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl", 222 [BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable", 223 [BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt", 224 [BPF_PROG_TYPE_TRACING] = "tracing", 225 [BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops", 226 
	[BPF_PROG_TYPE_EXT] = "ext",
	[BPF_PROG_TYPE_LSM] = "lsm",
	[BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup",
	[BPF_PROG_TYPE_SYSCALL] = "syscall",
	[BPF_PROG_TYPE_NETFILTER] = "netfilter",
};

static int __base_pr(enum libbpf_print_level level, const char *format,
		     va_list args)
{
	const char *env_var = "LIBBPF_LOG_LEVEL";
	static enum libbpf_print_level min_level = LIBBPF_INFO;
	static bool initialized;

	if (!initialized) {
		char *verbosity;

		initialized = true;
		verbosity = getenv(env_var);
		if (verbosity) {
			if (strcasecmp(verbosity, "warn") == 0)
				min_level = LIBBPF_WARN;
			else if (strcasecmp(verbosity, "debug") == 0)
				min_level = LIBBPF_DEBUG;
			else if (strcasecmp(verbosity, "info") == 0)
				min_level = LIBBPF_INFO;
			else
				fprintf(stderr, "libbpf: unrecognized '%s' envvar value: '%s', should be one of 'warn', 'debug', or 'info'.\n",
					env_var, verbosity);
		}
	}

	/* if too verbose, skip logging */
	if (level > min_level)
		return 0;

	return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
	libbpf_print_fn_t old_print_fn;

	old_print_fn = __atomic_exchange_n(&__libbpf_pr, fn, __ATOMIC_RELAXED);

	return old_print_fn;
}

__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;
	int old_errno;
	libbpf_print_fn_t print_fn;

	print_fn = __atomic_load_n(&__libbpf_pr, __ATOMIC_RELAXED);
	if (!print_fn)
		return;

	old_errno = errno;

	va_start(args, format);
	print_fn(level, format, args);
	va_end(args);

	errno = old_errno;
}

static void pr_perm_msg(int err)
{
	struct rlimit limit;
	char buf[100];

	if (err != -EPERM || geteuid() != 0)
		return;

	err = getrlimit(RLIMIT_MEMLOCK, &limit);
	if (err)
		return;

	if (limit.rlim_cur == RLIM_INFINITY)
		return;

	if (limit.rlim_cur < 1024)
		snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
	else if (limit.rlim_cur < 1024*1024)
		snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
	else
		snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));

	pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
		buf);
}

/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

int libbpf_set_strict_mode(enum libbpf_strict_mode mode)
{
	/* as of v1.0 libbpf_set_strict_mode() is a no-op */
	return 0;
}

__u32 libbpf_major_version(void)
{
	return LIBBPF_MAJOR_VERSION;
}

__u32 libbpf_minor_version(void)
{
	return LIBBPF_MINOR_VERSION;
}

const char *libbpf_version_string(void)
{
#define __S(X) #X
#define _S(X) __S(X)
	return "v" _S(LIBBPF_MAJOR_VERSION) "." _S(LIBBPF_MINOR_VERSION);
#undef _S
#undef __S
}
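
/* Illustrative only (not part of libbpf): a sketch of typical application-side
 * use of the logging and version APIs defined above. The callback name
 * my_print is made up for this example.
 *
 *	static int my_print(enum libbpf_print_level lvl, const char *fmt, va_list ap)
 *	{
 *		return lvl <= LIBBPF_WARN ? vfprintf(stderr, fmt, ap) : 0;
 *	}
 *	...
 *	libbpf_set_print(my_print);
 *	printf("using libbpf %s\n", libbpf_version_string());
 *
 * Alternatively, the default logger's verbosity can be tuned at run time
 * through the LIBBPF_LOG_LEVEL environment variable ("warn", "info" or
 * "debug"), as handled in __base_pr() above.
 */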

enum reloc_type {
	RELO_LD64,
	RELO_CALL,
	RELO_DATA,
	RELO_EXTERN_LD64,
	RELO_EXTERN_CALL,
	RELO_SUBPROG_ADDR,
	RELO_CORE,
	RELO_INSN_ARRAY,
};

struct reloc_desc {
	enum reloc_type type;
	int insn_idx;
	union {
		const struct bpf_core_relo *core_relo; /* used when type == RELO_CORE */
		struct {
			int map_idx;
			unsigned int sym_off;
			/*
			 * The following two fields can be unionized, as the
			 * ext_idx field is used for extern symbols, and the
			 * sym_size is used for jump tables, which are never
			 * extern
			 */
			union {
				int ext_idx;
				int sym_size;
			};
		};
	};
};

/* stored as sec_def->cookie for all libbpf-supported SEC()s */
enum sec_def_flags {
	SEC_NONE = 0,
	/* expected_attach_type is optional, if kernel doesn't support that */
	SEC_EXP_ATTACH_OPT = 1,
	/* legacy, only used by libbpf_get_type_names() and
	 * libbpf_attach_type_by_name(), not used by libbpf itself at all.
	 * This used to be associated with cgroup (and few other) BPF programs
	 * that were attachable through BPF_PROG_ATTACH command. Pretty
	 * meaningless nowadays, though.
	 */
	SEC_ATTACHABLE = 2,
	SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT,
	/* attachment target is specified through BTF ID in either kernel or
	 * other BPF program's BTF object
	 */
	SEC_ATTACH_BTF = 4,
	/* BPF program type allows sleeping/blocking in kernel */
	SEC_SLEEPABLE = 8,
	/* BPF program supports non-linear XDP buffers */
	SEC_XDP_FRAGS = 16,
	/* Setup proper attach type for usdt probes. */
	SEC_USDT = 32,
};

struct bpf_sec_def {
	char *sec;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	long cookie;
	int handler_id;

	libbpf_prog_setup_fn_t prog_setup_fn;
	libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
	libbpf_prog_attach_fn_t prog_attach_fn;
};

struct bpf_light_subprog {
	__u32 sec_insn_off;
	__u32 sub_insn_off;
};

/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	char *name;
	char *sec_name;
	size_t sec_idx;
	const struct bpf_sec_def *sec_def;
	/* this program's instruction offset (in number of instructions)
	 * within its containing ELF section
	 */
	size_t sec_insn_off;
	/* number of original instructions in ELF section belonging to this
	 * program, not taking into account subprogram instructions possibly
	 * appended later during relocation
	 */
	size_t sec_insn_cnt;
	/* Offset (in number of instructions) of the start of instructions
	 * belonging to this BPF program within its containing main BPF
	 * program. For the entry-point (main) BPF program, this is always
	 * zero. For a sub-program, this gets reset before each of the main
	 * BPF programs is processed and relocated and is used to determine
	 * whether sub-program was already appended to the main program, and
	 * if yes, at which instruction offset.
	 */
	size_t sub_insn_off;

	/* instructions that belong to BPF program; insns[0] is located at
	 * sec_insn_off instruction within its ELF section in ELF file, so
	 * when mapping ELF file instruction index to the local instruction,
	 * one needs to subtract sec_insn_off; and vice versa.
	 */
	struct bpf_insn *insns;
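	/* Worked example (illustrative): if this program starts 128 bytes into
	 * its ELF section, sec_insn_off is 128 / BPF_INSN_SZ == 16, and ELF
	 * section instruction #20 maps to local insns[20 - 16] == insns[4].
	 */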
	/* actual number of instructions in this BPF program's image; for
	 * entry-point BPF programs this includes the size of the main program
	 * itself plus all the used sub-programs, appended at the end
	 */
	size_t insns_cnt;

	struct reloc_desc *reloc_desc;
	int nr_reloc;

	/* BPF verifier log settings */
	char *log_buf;
	size_t log_size;
	__u32 log_level;

	struct bpf_object *obj;

	int fd;
	bool autoload;
	bool autoattach;
	bool sym_global;
	bool mark_btf_static;
	enum bpf_prog_type type;
	enum bpf_attach_type expected_attach_type;
	int exception_cb_idx;

	int prog_ifindex;
	__u32 attach_btf_obj_fd;
	__u32 attach_btf_id;
	__u32 attach_prog_fd;

	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
	__u32 prog_flags;
	__u8 hash[SHA256_DIGEST_LENGTH];

	struct bpf_light_subprog *subprogs;
	__u32 subprog_cnt;
};

struct bpf_struct_ops {
	struct bpf_program **progs;
	__u32 *kern_func_off;
	/* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
	void *data;
	/* e.g. struct bpf_struct_ops_tcp_congestion_ops in
	 * btf_vmlinux's format.
	 * struct bpf_struct_ops_tcp_congestion_ops {
	 *	[... some other kernel fields ...]
	 *	struct tcp_congestion_ops data;
	 * }
	 * kern_vdata-size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
	 * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
	 * from "data".
	 */
	void *kern_vdata;
	__u32 type_id;
};

#define DATA_SEC ".data"
#define BSS_SEC ".bss"
#define RODATA_SEC ".rodata"
#define KCONFIG_SEC ".kconfig"
#define KSYMS_SEC ".ksyms"
#define STRUCT_OPS_SEC ".struct_ops"
#define STRUCT_OPS_LINK_SEC ".struct_ops.link"
#define ARENA_SEC ".addr_space.1"

enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,
	LIBBPF_MAP_BSS,
	LIBBPF_MAP_RODATA,
	LIBBPF_MAP_KCONFIG,
};

struct bpf_map_def {
	unsigned int type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
	unsigned int map_flags;
};

struct bpf_map {
	struct bpf_object *obj;
	char *name;
	/* real_name is defined for special internal maps (.rodata*,
	 * .data*, .bss, .kconfig) and preserves their original ELF section
	 * name. This is important to be able to find corresponding BTF
	 * DATASEC information.
	 */
	char *real_name;
	int fd;
	int sec_idx;
	size_t sec_offset;
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 numa_node;
	__u32 btf_var_idx;
	int mod_btf_fd;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	__u32 btf_vmlinux_value_type_id;
	enum libbpf_map_type libbpf_type;
	void *mmaped;
	struct bpf_struct_ops *st_ops;
	struct bpf_map *inner_map;
	void **init_slots;
	int init_slots_sz;
	char *pin_path;
	bool pinned;
	bool reused;
	bool autocreate;
	bool autoattach;
	__u64 map_extra;
	struct bpf_program *excl_prog;
};

enum extern_type {
	EXT_UNKNOWN,
	EXT_KCFG,
	EXT_KSYM,
};

enum kcfg_type {
	KCFG_UNKNOWN,
	KCFG_CHAR,
	KCFG_BOOL,
	KCFG_INT,
	KCFG_TRISTATE,
	KCFG_CHAR_ARR,
};

struct extern_desc {
	enum extern_type type;
	int sym_idx;
	int btf_id;
	int sec_btf_id;
	char *name;
	char *essent_name;
	bool is_set;
	bool is_weak;
	union {
		struct {
			enum kcfg_type type;
			int sz;
			int align;
			int data_off;
			bool is_signed;
		} kcfg;
		struct {
			unsigned long long addr;

			/* target btf_id of the corresponding kernel var. */
			int kernel_btf_obj_fd;
			int kernel_btf_id;

			/* local btf_id of the ksym extern's type. */
			__u32 type_id;
			/* BTF fd index to be patched in for insn->off, this is
			 * 0 for vmlinux BTF, index in obj->fd_array for module
			 * BTF
			 */
			__s16 btf_fd_idx;
		} ksym;
	};
};

struct module_btf {
	struct btf *btf;
	char *name;
	__u32 id;
	int fd;
	int fd_array_idx;
};

enum sec_type {
	SEC_UNUSED = 0,
	SEC_RELO,
	SEC_BSS,
	SEC_DATA,
	SEC_RODATA,
	SEC_ST_OPS,
};

struct elf_sec_desc {
	enum sec_type sec_type;
	Elf64_Shdr *shdr;
	Elf_Data *data;
};

struct elf_state {
	int fd;
	const void *obj_buf;
	size_t obj_buf_sz;
	Elf *elf;
	Elf64_Ehdr *ehdr;
	Elf_Data *symbols;
	Elf_Data *arena_data;
	size_t shstrndx; /* section index for section name strings */
	size_t strtabidx;
	struct elf_sec_desc *secs;
	size_t sec_cnt;
	int btf_maps_shndx;
	__u32 btf_maps_sec_btf_id;
	int text_shndx;
	int symbols_shndx;
	bool has_st_ops;
	int arena_data_shndx;
	int jumptables_data_shndx;
};

struct usdt_manager;

enum bpf_object_state {
	OBJ_OPEN,
	OBJ_PREPARED,
	OBJ_LOADED,
};

struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	enum bpf_object_state state;
	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	size_t maps_cap;

	char *kconfig;
	struct extern_desc *externs;
	int nr_extern;
	int kconfig_map_idx;

	bool has_subcalls;
	bool has_rodata;

	struct bpf_gen *gen_loader;

	/* Information when doing ELF related work. Only valid if efile.elf is not NULL */
	struct elf_state efile;

	unsigned char byteorder;

	struct btf *btf;
	struct btf_ext *btf_ext;

	/* Parse and load BTF vmlinux if any of the programs in the object need
	 * it at load time.
	 */
	struct btf *btf_vmlinux;
	/* Path to the custom BTF to be used for BPF CO-RE relocations as an
	 * override for vmlinux BTF.
	 */
	char *btf_custom_path;
	/* vmlinux BTF override for CO-RE relocations */
	struct btf *btf_vmlinux_override;
	/* Lazily initialized kernel module BTFs */
	struct module_btf *btf_modules;
	bool btf_modules_loaded;
	size_t btf_module_cnt;
	size_t btf_module_cap;

	/* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */
	char *log_buf;
	size_t log_size;
	__u32 log_level;

	int *fd_array;
	size_t fd_array_cap;
	size_t fd_array_cnt;

	struct usdt_manager *usdt_man;

	int arena_map_idx;
	void *arena_data;
	size_t arena_data_sz;
	size_t arena_data_off;

	void *jumptables_data;
	size_t jumptables_data_sz;

	struct {
		struct bpf_program *prog;
		unsigned int sym_off;
		int fd;
	} *jumptable_maps;
	size_t jumptable_map_cnt;

	struct kern_feature_cache *feat_cache;
	char *token_path;
	int token_fd;

	char path[];
};

static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn);
static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx);
static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx);

void bpf_program__unload(struct bpf_program *prog)
{
	if (!prog)
		return;

	zclose(prog->fd);

	zfree(&prog->func_info);
	zfree(&prog->line_info);
	zfree(&prog->subprogs);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->sec_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->sec_idx = -1;
}

static bool insn_is_subprog_call(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP &&
	       BPF_OP(insn->code) == BPF_CALL &&
	       BPF_SRC(insn->code) == BPF_K &&
	       insn->src_reg == BPF_PSEUDO_CALL &&
	       insn->dst_reg == 0 &&
	       insn->off == 0;
}

static bool is_call_insn(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL);
}

static bool insn_is_pseudo_func(struct bpf_insn *insn)
{
	return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
}

static int
bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
		      const char *name, size_t sec_idx, const char *sec_name,
		      size_t sec_off, void *insn_data, size_t insn_data_sz)
{
	if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
		pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
			sec_name, name, sec_off, insn_data_sz);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));
	prog->obj = obj;

	prog->sec_idx = sec_idx;
	prog->sec_insn_off = sec_off / BPF_INSN_SZ;
	prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
	/* insns_cnt can later be increased by appending used subprograms */
	prog->insns_cnt = prog->sec_insn_cnt;

	prog->type = BPF_PROG_TYPE_UNSPEC;
	prog->fd = -1;
	prog->exception_cb_idx = -1;

	/* libbpf's convention for SEC("?abc...") is that it's just like
	 * SEC("abc...") but the corresponding bpf_program starts out with
	 * autoload set to false.
	 */
	if (sec_name[0] == '?') {
		prog->autoload = false;
		/* from now on forget there was ? in section name */
		sec_name++;
	} else {
		prog->autoload = true;
	}

	prog->autoattach = true;

	/* inherit object's log_level */
	prog->log_level = obj->log_level;

	prog->sec_name = strdup(sec_name);
	if (!prog->sec_name)
		goto errout;

	prog->name = strdup(name);
	if (!prog->name)
		goto errout;

	prog->insns = malloc(insn_data_sz);
	if (!prog->insns)
		goto errout;
	memcpy(prog->insns, insn_data, insn_data_sz);

	return 0;
errout:
	pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
	bpf_program__exit(prog);
	return -ENOMEM;
}
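
/* Go over all FUNC symbols defined within a given program ELF section and
 * instantiate one bpf_program for each of them, recording the program's
 * name, offset within the section, and its instruction data.
 */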
static int
bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
			 const char *sec_name, int sec_idx)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog, *progs;
	void *data = sec_data->d_buf;
	size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
	int nr_progs, err, i;
	const char *name;
	Elf64_Sym *sym;

	progs = obj->programs;
	nr_progs = obj->nr_programs;
	nr_syms = symbols->d_size / sizeof(Elf64_Sym);

	for (i = 0; i < nr_syms; i++) {
		sym = elf_sym_by_idx(obj, i);

		if (sym->st_shndx != sec_idx)
			continue;
		if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
			continue;

		prog_sz = sym->st_size;
		sec_off = sym->st_value;

		name = elf_sym_str(obj, sym->st_name);
		if (!name) {
			pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (sec_off + prog_sz > sec_sz || sec_off + prog_sz < sec_off) {
			pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
			pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
			return -ENOTSUP;
		}

		pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
			 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);

		progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
		if (!progs) {
			/*
			 * In this case the original obj->programs
			 * is still valid, so no special treatment is needed
			 * for bpf_close_object().
			 */
			pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
				sec_name, name);
			return -ENOMEM;
		}
		obj->programs = progs;

		prog = &progs[nr_progs];

		err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
					    sec_off, data + sec_off, prog_sz);
		if (err)
			return err;

		if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL)
			prog->sym_global = true;

		/* if function is a global/weak symbol, but has restricted
		 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
		 * as static to enable more permissive BPF verification mode
		 * with more outside context available to BPF verifier
		 */
		if (prog->sym_global && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
		    || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
			prog->mark_btf_static = true;

		nr_progs++;
		obj->nr_programs = nr_progs;
	}

	return 0;
}

static void bpf_object_bswap_progs(struct bpf_object *obj)
{
	struct bpf_program *prog = obj->programs;
	struct bpf_insn *insn;
	int p, i;

	for (p = 0; p < obj->nr_programs; p++, prog++) {
		insn = prog->insns;
		for (i = 0; i < prog->insns_cnt; i++, insn++)
			bpf_insn_bswap(insn);
	}
	pr_debug("converted %zu BPF programs to native byte order\n", obj->nr_programs);
}

static const struct btf_member *
find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (btf_member_bit_offset(t, i) == bit_offset)
			return m;
	}

	return NULL;
}

static const struct btf_member *
find_member_by_name(const struct btf *btf, const struct btf_type *t,
		    const char *name)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
			return m;
	}

	return NULL;
}

static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
			    __u16 kind, struct btf **res_btf,
			    struct module_btf **res_mod_btf);

#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
				   const char *name, __u32 kind);

static int
find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
			   struct module_btf **mod_btf,
			   const struct btf_type **type, __u32 *type_id,
			   const struct btf_type **vtype, __u32 *vtype_id,
			   const struct btf_member **data_member)
{
	const struct btf_type *kern_type, *kern_vtype;
	const struct btf_member *kern_data_member;
	struct btf *btf = NULL;
	__s32 kern_vtype_id, kern_type_id;
	char tname[192], stname[256];
	__u32 i;

	snprintf(tname, sizeof(tname), "%.*s",
		 (int)bpf_core_essential_name_len(tname_raw), tname_raw);

	snprintf(stname, sizeof(stname), "%s%s", STRUCT_OPS_VALUE_PREFIX, tname);

	/* Look for the corresponding "map_value" type that will be used
	 * in map_update(BPF_MAP_TYPE_STRUCT_OPS) first, figure out the btf
	 * and the mod_btf.
	 * For example, find "struct bpf_struct_ops_tcp_congestion_ops".
	 */
	kern_vtype_id = find_ksym_btf_id(obj, stname, BTF_KIND_STRUCT, &btf, mod_btf);
	if (kern_vtype_id < 0) {
		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", stname);
		return kern_vtype_id;
	}
	kern_vtype = btf__type_by_id(btf, kern_vtype_id);

	kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
	if (kern_type_id < 0) {
		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", tname);
		return kern_type_id;
	}
	kern_type = btf__type_by_id(btf, kern_type_id);

	/* Find "struct tcp_congestion_ops" from
	 * struct bpf_struct_ops_tcp_congestion_ops {
	 *	[ ... ]
	 *	struct tcp_congestion_ops data;
	 * }
	 */
	kern_data_member = btf_members(kern_vtype);
	for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
		if (kern_data_member->type == kern_type_id)
			break;
	}
	if (i == btf_vlen(kern_vtype)) {
		pr_warn("struct_ops init_kern: struct %s data is not found in struct %s\n",
			tname, stname);
		return -EINVAL;
	}

	*type = kern_type;
	*type_id = kern_type_id;
	*vtype = kern_vtype;
	*vtype_id = kern_vtype_id;
	*data_member = kern_data_member;

	return 0;
}

static bool bpf_map__is_struct_ops(const struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
}

static bool is_valid_st_ops_program(struct bpf_object *obj,
				    const struct bpf_program *prog)
{
	int i;

	for (i = 0; i < obj->nr_programs; i++) {
		if (&obj->programs[i] == prog)
			return prog->type == BPF_PROG_TYPE_STRUCT_OPS;
	}

	return false;
}

/* For each struct_ops program P, referenced from some struct_ops map M,
 * enable P.autoload if there are Ms for which M.autocreate is true,
 * disable P.autoload if for all Ms M.autocreate is false.
 * Don't change P.autoload for programs that are not referenced from any maps.
 */
static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj)
{
	struct bpf_program *prog, *slot_prog;
	struct bpf_map *map;
	int i, j, k, vlen;

	for (i = 0; i < obj->nr_programs; ++i) {
		int should_load = false;
		int use_cnt = 0;

		prog = &obj->programs[i];
		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS)
			continue;

		for (j = 0; j < obj->nr_maps; ++j) {
			const struct btf_type *type;

			map = &obj->maps[j];
			if (!bpf_map__is_struct_ops(map))
				continue;

			type = btf__type_by_id(obj->btf, map->st_ops->type_id);
			vlen = btf_vlen(type);
			for (k = 0; k < vlen; ++k) {
				slot_prog = map->st_ops->progs[k];
				if (prog != slot_prog)
					continue;

				use_cnt++;
				if (map->autocreate)
					should_load = true;
			}
		}
		if (use_cnt)
			prog->autoload = should_load;
	}

	return 0;
}

/* Init the map's fields that depend on kern_btf */
static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
{
	const struct btf_member *member, *kern_member, *kern_data_member;
	const struct btf_type *type, *kern_type, *kern_vtype;
	__u32 i, kern_type_id, kern_vtype_id, kern_data_off;
	struct bpf_object *obj = map->obj;
	const struct btf *btf = obj->btf;
	struct bpf_struct_ops *st_ops;
	const struct btf *kern_btf;
	struct module_btf *mod_btf = NULL;
	void *data, *kern_data;
	const char *tname;
	int err;

	st_ops = map->st_ops;
	type = btf__type_by_id(btf, st_ops->type_id);
	tname = btf__name_by_offset(btf, type->name_off);
	err = find_struct_ops_kern_types(obj, tname, &mod_btf,
					 &kern_type, &kern_type_id,
					 &kern_vtype, &kern_vtype_id,
					 &kern_data_member);
	if (err)
		return err;

	kern_btf = mod_btf ? mod_btf->btf : obj->btf_vmlinux;

	pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
		 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);

	map->mod_btf_fd = mod_btf ? mod_btf->fd : -1;
	map->def.value_size = kern_vtype->size;
	map->btf_vmlinux_value_type_id = kern_vtype_id;

	st_ops->kern_vdata = calloc(1, kern_vtype->size);
	if (!st_ops->kern_vdata)
		return -ENOMEM;

	data = st_ops->data;
	kern_data_off = kern_data_member->offset / 8;
	kern_data = st_ops->kern_vdata + kern_data_off;

	member = btf_members(type);
	for (i = 0; i < btf_vlen(type); i++, member++) {
		const struct btf_type *mtype, *kern_mtype;
		__u32 mtype_id, kern_mtype_id;
		void *mdata, *kern_mdata;
		struct bpf_program *prog;
		__s64 msize, kern_msize;
		__u32 moff, kern_moff;
		__u32 kern_member_idx;
		const char *mname;

		mname = btf__name_by_offset(btf, member->name_off);
		moff = member->offset / 8;
		mdata = data + moff;
		msize = btf__resolve_size(btf, member->type);
		if (msize < 0) {
			pr_warn("struct_ops init_kern %s: failed to resolve the size of member %s\n",
				map->name, mname);
			return msize;
		}

		kern_member = find_member_by_name(kern_btf, kern_type, mname);
		if (!kern_member) {
			if (!libbpf_is_mem_zeroed(mdata, msize)) {
				pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
					map->name, mname);
				return -ENOTSUP;
			}

			if (st_ops->progs[i]) {
				/* If we had declaratively set struct_ops callback, we need to
				 * force its autoload to false, because it doesn't have
				 * a chance of succeeding from POV of the current struct_ops map.
				 * If this program is still referenced somewhere else, though,
				 * then bpf_object_adjust_struct_ops_autoload() will update its
				 * autoload accordingly.
				 */
				st_ops->progs[i]->autoload = false;
				st_ops->progs[i] = NULL;
			}

			/* Skip all-zero/NULL fields if they are not present in the kernel BTF */
			pr_info("struct_ops %s: member %s not found in kernel, skipping it as it's set to zero\n",
				map->name, mname);
			continue;
		}

		kern_member_idx = kern_member - btf_members(kern_type);
		if (btf_member_bitfield_size(type, i) ||
		    btf_member_bitfield_size(kern_type, kern_member_idx)) {
			pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
				map->name, mname);
			return -ENOTSUP;
		}

		kern_moff = kern_member->offset / 8;
		kern_mdata = kern_data + kern_moff;

		mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
		kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
						    &kern_mtype_id);
		if (BTF_INFO_KIND(mtype->info) !=
		    BTF_INFO_KIND(kern_mtype->info)) {
			pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
				map->name, mname, BTF_INFO_KIND(mtype->info),
				BTF_INFO_KIND(kern_mtype->info));
			return -ENOTSUP;
		}

		if (btf_is_ptr(mtype)) {
			prog = *(void **)mdata;
			/* just like for !kern_member case above, reset declaratively
			 * set (at compile time) program's autload to false,
			 * if user replaced it with another program or NULL
			 */
			if (st_ops->progs[i] && st_ops->progs[i] != prog)
				st_ops->progs[i]->autoload = false;

			/* Update the value from the shadow type */
			st_ops->progs[i] = prog;
			if (!prog)
				continue;

			if (!is_valid_st_ops_program(obj, prog)) {
				pr_warn("struct_ops init_kern %s: member %s is not a struct_ops program\n",
					map->name, mname);
				return -ENOTSUP;
			}

			kern_mtype = skip_mods_and_typedefs(kern_btf,
							    kern_mtype->type,
							    &kern_mtype_id);

			/* mtype->type must be a func_proto which was
			 * guaranteed in bpf_object__collect_st_ops_relos(),
			 * so only check kern_mtype for func_proto here.
			 */
			if (!btf_is_func_proto(kern_mtype)) {
				pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
					map->name, mname);
				return -ENOTSUP;
			}

			if (mod_btf)
				prog->attach_btf_obj_fd = mod_btf->fd;

			/* if we haven't yet processed this BPF program, record proper
			 * attach_btf_id and member_idx
			 */
			if (!prog->attach_btf_id) {
				prog->attach_btf_id = kern_type_id;
				prog->expected_attach_type = kern_member_idx;
			}

			/* struct_ops BPF prog can be re-used between multiple
			 * .struct_ops & .struct_ops.link as long as it's the
			 * same struct_ops struct definition and the same
			 * function pointer field
			 */
			if (prog->attach_btf_id != kern_type_id) {
				pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: attach_btf_id %u != kern_type_id %u\n",
					map->name, mname, prog->name, prog->sec_name, prog->type,
					prog->attach_btf_id, kern_type_id);
				return -EINVAL;
			}
			if (prog->expected_attach_type != kern_member_idx) {
				pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: expected_attach_type %u != kern_member_idx %u\n",
					map->name, mname, prog->name, prog->sec_name, prog->type,
					prog->expected_attach_type, kern_member_idx);
				return -EINVAL;
			}

			st_ops->kern_func_off[i] = kern_data_off + kern_moff;

			pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
				 map->name, mname, prog->name, moff,
				 kern_moff);

			continue;
		}

		kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
		if (kern_msize < 0 || msize != kern_msize) {
			pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
				map->name, mname, (ssize_t)msize,
				(ssize_t)kern_msize);
			return -ENOTSUP;
		}

		pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
			 map->name, mname, (unsigned int)msize,
			 moff, kern_moff);
		memcpy(kern_mdata, mdata, msize);
	}

	return 0;
}

static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
{
	struct bpf_map *map;
	size_t i;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		map = &obj->maps[i];

		if (!bpf_map__is_struct_ops(map))
			continue;

		if (!map->autocreate)
			continue;

		err = bpf_map__init_kern_struct_ops(map);
		if (err)
			return err;
	}

	return 0;
}

static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
				int shndx, Elf_Data *data)
{
	const struct btf_type *type, *datasec;
	const struct btf_var_secinfo *vsi;
	struct bpf_struct_ops *st_ops;
	const char *tname, *var_name;
	__s32 type_id, datasec_id;
	const struct btf *btf;
	struct bpf_map *map;
	__u32 i;

	if (shndx == -1)
		return 0;

	btf = obj->btf;
	datasec_id = btf__find_by_name_kind(btf, sec_name,
					    BTF_KIND_DATASEC);
	if (datasec_id < 0) {
		pr_warn("struct_ops init: DATASEC %s not found\n",
			sec_name);
		return -EINVAL;
	}

	datasec = btf__type_by_id(btf, datasec_id);
	vsi = btf_var_secinfos(datasec);
	for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
		type = btf__type_by_id(obj->btf, vsi->type);
		var_name = btf__name_by_offset(obj->btf, type->name_off);

		type_id = btf__resolve_type(obj->btf, vsi->type);
		if (type_id < 0) {
			pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
				vsi->type, sec_name);
			return -EINVAL;
		}

		type = btf__type_by_id(obj->btf, type_id);
		tname = btf__name_by_offset(obj->btf, type->name_off);
		if (!tname[0]) {
			pr_warn("struct_ops init: anonymous type is not supported\n");
			return -ENOTSUP;
		}
		if (!btf_is_struct(type)) {
			pr_warn("struct_ops init: %s is not a struct\n", tname);
			return -EINVAL;
		}

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map->sec_idx = shndx;
		map->sec_offset = vsi->offset;
		map->name = strdup(var_name);
		if (!map->name)
			return -ENOMEM;
		map->btf_value_type_id = type_id;

		/* Follow same convention as for programs autoload:
		 * SEC("?.struct_ops") means map is not created by default.
		 */
		if (sec_name[0] == '?') {
			map->autocreate = false;
			/* from now on forget there was ? in section name */
			sec_name++;
		}

		map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
		map->def.key_size = sizeof(int);
		map->def.value_size = type->size;
		map->def.max_entries = 1;
		map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0;
		map->autoattach = true;

		map->st_ops = calloc(1, sizeof(*map->st_ops));
		if (!map->st_ops)
			return -ENOMEM;
		st_ops = map->st_ops;
		st_ops->data = malloc(type->size);
		st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
		st_ops->kern_func_off = malloc(btf_vlen(type) *
					       sizeof(*st_ops->kern_func_off));
		if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
			return -ENOMEM;

		if (vsi->offset + type->size > data->d_size) {
			pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
				var_name, sec_name);
			return -EINVAL;
		}

		memcpy(st_ops->data,
		       data->d_buf + vsi->offset,
		       type->size);
		st_ops->type_id = type_id;

		pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
			 tname, type_id, var_name, vsi->offset);
	}

	return 0;
}

static int bpf_object_init_struct_ops(struct bpf_object *obj)
{
	const char *sec_name;
	int sec_idx, err;

	for (sec_idx = 0; sec_idx < obj->efile.sec_cnt; ++sec_idx) {
		struct elf_sec_desc *desc = &obj->efile.secs[sec_idx];

		if (desc->sec_type != SEC_ST_OPS)
			continue;

		sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
		if (!sec_name)
			return -LIBBPF_ERRNO__FORMAT;

		err = init_struct_ops_maps(obj, sec_name, sec_idx, desc->data);
		if (err)
			return err;
	}

	return 0;
}

static struct bpf_object *bpf_object__new(const char *path,
					  const void *obj_buf,
					  size_t obj_buf_sz,
					  const char *obj_name)
{
	struct bpf_object *obj;
	char *end;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warn("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	if (obj_name) {
		libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name));
	} else {
		/* Using basename() GNU version which doesn't modify arg. */
		libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name));
		end = strchr(obj->name, '.');
		if (end)
			*end = 0;
	}

	obj->efile.fd = -1;
	/*
	 * The caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to the user. If not, we would have to duplicate the
	 * buffer to avoid the user freeing it before ELF processing
	 * is finished.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.btf_maps_shndx = -1;
	obj->kconfig_map_idx = -1;
	obj->arena_map_idx = -1;

	obj->kern_version = get_kernel_version();
	obj->state = OBJ_OPEN;

	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj->efile.elf)
		return;

	elf_end(obj->efile.elf);
	obj->efile.elf = NULL;
	obj->efile.ehdr = NULL;
	obj->efile.symbols = NULL;
	obj->efile.arena_data = NULL;

	zfree(&obj->efile.secs);
	obj->efile.sec_cnt = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

static int bpf_object__elf_init(struct bpf_object *obj)
{
	Elf64_Ehdr *ehdr;
	int err = 0;
	Elf *elf;

	if (obj->efile.elf) {
		pr_warn("elf: init internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/* obj_buf should have been validated by bpf_object__open_mem(). */
		elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC);
		if (obj->efile.fd < 0) {
			err = -errno;
			pr_warn("elf: failed to open %s: %s\n", obj->path, errstr(err));
			return err;
		}

		elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
	}

	if (!elf) {
		pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	obj->efile.elf = elf;

	if (elf_kind(elf) != ELF_K_ELF) {
		err = -LIBBPF_ERRNO__FORMAT;
		pr_warn("elf: '%s' is not a proper ELF object\n", obj->path);
		goto errout;
	}

	if (gelf_getclass(elf) != ELFCLASS64) {
		err = -LIBBPF_ERRNO__FORMAT;
		pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path);
		goto errout;
	}

	obj->efile.ehdr = ehdr = elf64_getehdr(elf);
	if (!obj->efile.ehdr) {
		pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	/* Validate ELF object endianness... */
	if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB &&
	    ehdr->e_ident[EI_DATA] != ELFDATA2MSB) {
		err = -LIBBPF_ERRNO__ENDIAN;
		pr_warn("elf: '%s' has unknown byte order\n", obj->path);
		goto errout;
	}
	/* and save after bpf_object_open() frees ELF data */
	obj->byteorder = ehdr->e_ident[EI_DATA];

	if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
		pr_warn("elf: failed to get section names section index for %s: %s\n",
			obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	/* ELF is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) {
		pr_warn("elf: failed to get section names strings from %s: %s\n",
			obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	/* Old LLVM set e_machine to EM_NONE */
	if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) {
		pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}

static bool is_native_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return obj->byteorder == ELFDATA2LSB;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return obj->byteorder == ELFDATA2MSB;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
}

static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
	if (!data) {
		pr_warn("invalid license section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	/* libbpf_strlcpy() only copies first N - 1 bytes, so size + 1 won't
	 * go over allowed ELF data section buffer
	 */
	libbpf_strlcpy(obj->license, data, min(size + 1, sizeof(obj->license)));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
	__u32 kver;

	if (!data || size != sizeof(kver)) {
		pr_warn("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
	return 0;
}

static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
		return true;
	return false;
}

static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size)
{
	Elf_Data *data;
	Elf_Scn *scn;

	if (!name)
		return -EINVAL;

	scn = elf_sec_by_name(obj, name);
	data = elf_sec_data(obj, scn);
	if (data) {
		*size = data->d_size;
		return 0; /* found it */
	}

	return -ENOENT;
}

static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *name)
{
	Elf_Data *symbols = obj->efile.symbols;
	const char *sname;
	size_t si;

	for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
		Elf64_Sym *sym = elf_sym_by_idx(obj, si);

		if (ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
			continue;

		if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
		    ELF64_ST_BIND(sym->st_info) != STB_WEAK)
			continue;

		sname = elf_sym_str(obj, sym->st_name);
		if (!sname) {
			pr_warn("failed to get sym name string for var %s\n", name);
			return ERR_PTR(-EIO);
		}
		if (strcmp(name, sname) == 0)
			return sym;
	}

	return ERR_PTR(-ENOENT);
}

#ifndef MFD_CLOEXEC
#define MFD_CLOEXEC 0x0001U
#endif
#ifndef MFD_NOEXEC_SEAL
#define MFD_NOEXEC_SEAL 0x0008U
#endif
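
/* Allocate a stable placeholder FD, backed by an anonymous memfd. The FD
 * number can be handed out early (e.g. embedded into relocated instructions)
 * and is later made to refer to the actual BPF map once it is created in the
 * kernel; see the comment in bpf_object__add_map() below.
 */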
"libbpf-placeholder-fd"; 1772 int fd; 1773 1774 fd = ensure_good_fd(sys_memfd_create(name, flags)); 1775 if (fd >= 0) 1776 return fd; 1777 else if (errno != EINVAL) 1778 return -errno; 1779 1780 /* Possibly running on kernel without MFD_NOEXEC_SEAL */ 1781 fd = ensure_good_fd(sys_memfd_create(name, flags & ~MFD_NOEXEC_SEAL)); 1782 if (fd < 0) 1783 return -errno; 1784 return fd; 1785 } 1786 1787 static struct bpf_map *bpf_object__add_map(struct bpf_object *obj) 1788 { 1789 struct bpf_map *map; 1790 int err; 1791 1792 err = libbpf_ensure_mem((void **)&obj->maps, &obj->maps_cap, 1793 sizeof(*obj->maps), obj->nr_maps + 1); 1794 if (err) 1795 return ERR_PTR(err); 1796 1797 map = &obj->maps[obj->nr_maps++]; 1798 map->obj = obj; 1799 /* Preallocate map FD without actually creating BPF map just yet. 1800 * These map FD "placeholders" will be reused later without changing 1801 * FD value when map is actually created in the kernel. 1802 * 1803 * This is useful to be able to perform BPF program relocations 1804 * without having to create BPF maps before that step. This allows us 1805 * to finalize and load BTF very late in BPF object's loading phase, 1806 * right before BPF maps have to be created and BPF programs have to 1807 * be loaded. By having these map FD placeholders we can perform all 1808 * the sanitizations, relocations, and any other adjustments before we 1809 * start creating actual BPF kernel objects (BTF, maps, progs). 1810 */ 1811 map->fd = create_placeholder_fd(); 1812 if (map->fd < 0) 1813 return ERR_PTR(map->fd); 1814 map->inner_map_fd = -1; 1815 map->autocreate = true; 1816 1817 return map; 1818 } 1819 1820 static size_t array_map_mmap_sz(unsigned int value_sz, unsigned int max_entries) 1821 { 1822 const long page_sz = sysconf(_SC_PAGE_SIZE); 1823 size_t map_sz; 1824 1825 map_sz = (size_t)roundup(value_sz, 8) * max_entries; 1826 map_sz = roundup(map_sz, page_sz); 1827 return map_sz; 1828 } 1829 1830 static size_t bpf_map_mmap_sz(const struct bpf_map *map) 1831 { 1832 const long page_sz = sysconf(_SC_PAGE_SIZE); 1833 1834 switch (map->def.type) { 1835 case BPF_MAP_TYPE_ARRAY: 1836 return array_map_mmap_sz(map->def.value_size, map->def.max_entries); 1837 case BPF_MAP_TYPE_ARENA: 1838 return page_sz * map->def.max_entries; 1839 default: 1840 return 0; /* not supported */ 1841 } 1842 } 1843 1844 static int bpf_map_mmap_resize(struct bpf_map *map, size_t old_sz, size_t new_sz) 1845 { 1846 void *mmaped; 1847 1848 if (!map->mmaped) 1849 return -EINVAL; 1850 1851 if (old_sz == new_sz) 1852 return 0; 1853 1854 mmaped = mmap(NULL, new_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0); 1855 if (mmaped == MAP_FAILED) 1856 return -errno; 1857 1858 memcpy(mmaped, map->mmaped, min(old_sz, new_sz)); 1859 munmap(map->mmaped, old_sz); 1860 map->mmaped = mmaped; 1861 return 0; 1862 } 1863 1864 static char *internal_map_name(struct bpf_object *obj, const char *real_name) 1865 { 1866 char map_name[BPF_OBJ_NAME_LEN], *p; 1867 int pfx_len, sfx_len = max((size_t)7, strlen(real_name)); 1868 1869 /* This is one of the more confusing parts of libbpf for various 1870 * reasons, some of which are historical. The original idea for naming 1871 * internal names was to include as much of BPF object name prefix as 1872 * possible, so that it can be distinguished from similar internal 1873 * maps of a different BPF object. 1874 * As an example, let's say we have bpf_object named 'my_object_name' 1875 * and internal map corresponding to '.rodata' ELF section. 

static int bpf_map_mmap_resize(struct bpf_map *map, size_t old_sz, size_t new_sz)
{
	void *mmaped;

	if (!map->mmaped)
		return -EINVAL;

	if (old_sz == new_sz)
		return 0;

	mmaped = mmap(NULL, new_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (mmaped == MAP_FAILED)
		return -errno;

	memcpy(mmaped, map->mmaped, min(old_sz, new_sz));
	munmap(map->mmaped, old_sz);
	map->mmaped = mmaped;
	return 0;
}

static char *internal_map_name(struct bpf_object *obj, const char *real_name)
{
	char map_name[BPF_OBJ_NAME_LEN], *p;
	int pfx_len, sfx_len = max((size_t)7, strlen(real_name));

	/* This is one of the more confusing parts of libbpf for various
	 * reasons, some of which are historical. The original idea for naming
	 * internal names was to include as much of BPF object name prefix as
	 * possible, so that it can be distinguished from similar internal
	 * maps of a different BPF object.
	 * As an example, let's say we have bpf_object named 'my_object_name'
	 * and internal map corresponding to '.rodata' ELF section. The final
	 * map name advertised to user and to the kernel will be
	 * 'my_objec.rodata', taking first 8 characters of object name and
	 * entire 7 characters of '.rodata'.
	 * Somewhat confusingly, if internal map ELF section name is shorter
	 * than 7 characters, e.g., '.bss', we still reserve 7 characters
	 * for the suffix, even though we only have 4 actual characters, and
	 * resulting map will be called 'my_objec.bss', not even using all 15
	 * characters allowed by the kernel. Oh well, at least the truncated
	 * object name is somewhat consistent in this case. But if the map
	 * name is '.kconfig', we'll still have entirety of '.kconfig' added
	 * (8 chars) and thus will be left with only first 7 characters of the
	 * object name ('my_obje'). Happy guessing, user, that the final map
	 * name will be "my_obje.kconfig".
	 * Now, with libbpf starting to support arbitrarily named .rodata.*
	 * and .data.* data sections, it's possible that ELF section name is
	 * longer than allowed 15 chars, so we now need to be careful to take
	 * only up to 15 first characters of ELF name, taking no BPF object
	 * name characters at all. So '.rodata.abracadabra' will result in
	 * '.rodata.abracad' kernel and user-visible name.
	 * We need to keep this convoluted logic intact for .data, .bss and
	 * .rodata maps, but for new custom .data.custom and .rodata.custom
	 * maps we use their ELF names as is, not prepending bpf_object name
	 * in front. We still need to truncate them to 15 characters for the
	 * kernel. Full name can be recovered for such maps by using DATASEC
	 * BTF type associated with such map's value type, though.
	 */
	if (sfx_len >= BPF_OBJ_NAME_LEN)
		sfx_len = BPF_OBJ_NAME_LEN - 1;

	/* if there are two or more dots in map name, it's a custom dot map */
	if (strchr(real_name + 1, '.') != NULL)
		pfx_len = 0;
	else
		pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name));

	snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
		 sfx_len, real_name);

	/* sanitize map name to characters allowed by kernel */
	for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
		if (!isalnum(*p) && *p != '_' && *p != '.')
			*p = '_';

	return strdup(map_name);
}

static int
map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map);

/* Internal BPF map is mmap()'able only if at least one of corresponding
 * DATASEC's VARs is to be exposed through BPF skeleton. I.e., it's a GLOBAL
 * variable and it's not marked as __hidden (which turns it into, effectively,
 * a STATIC variable).
1929 */ 1930 static bool map_is_mmapable(struct bpf_object *obj, struct bpf_map *map) 1931 { 1932 const struct btf_type *t, *vt; 1933 struct btf_var_secinfo *vsi; 1934 int i, n; 1935 1936 if (!map->btf_value_type_id) 1937 return false; 1938 1939 t = btf__type_by_id(obj->btf, map->btf_value_type_id); 1940 if (!btf_is_datasec(t)) 1941 return false; 1942 1943 vsi = btf_var_secinfos(t); 1944 for (i = 0, n = btf_vlen(t); i < n; i++, vsi++) { 1945 vt = btf__type_by_id(obj->btf, vsi->type); 1946 if (!btf_is_var(vt)) 1947 continue; 1948 1949 if (btf_var(vt)->linkage != BTF_VAR_STATIC) 1950 return true; 1951 } 1952 1953 return false; 1954 } 1955 1956 static int 1957 bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type, 1958 const char *real_name, int sec_idx, void *data, size_t data_sz) 1959 { 1960 struct bpf_map_def *def; 1961 struct bpf_map *map; 1962 size_t mmap_sz; 1963 int err; 1964 1965 map = bpf_object__add_map(obj); 1966 if (IS_ERR(map)) 1967 return PTR_ERR(map); 1968 1969 map->libbpf_type = type; 1970 map->sec_idx = sec_idx; 1971 map->sec_offset = 0; 1972 map->real_name = strdup(real_name); 1973 map->name = internal_map_name(obj, real_name); 1974 if (!map->real_name || !map->name) { 1975 zfree(&map->real_name); 1976 zfree(&map->name); 1977 return -ENOMEM; 1978 } 1979 1980 def = &map->def; 1981 def->type = BPF_MAP_TYPE_ARRAY; 1982 def->key_size = sizeof(int); 1983 def->value_size = data_sz; 1984 def->max_entries = 1; 1985 def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG 1986 ? BPF_F_RDONLY_PROG : 0; 1987 1988 /* failures are fine because of maps like .rodata.str1.1 */ 1989 (void) map_fill_btf_type_info(obj, map); 1990 1991 if (map_is_mmapable(obj, map)) 1992 def->map_flags |= BPF_F_MMAPABLE; 1993 1994 pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n", 1995 map->name, map->sec_idx, map->sec_offset, def->map_flags); 1996 1997 mmap_sz = bpf_map_mmap_sz(map); 1998 map->mmaped = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, 1999 MAP_SHARED | MAP_ANONYMOUS, -1, 0); 2000 if (map->mmaped == MAP_FAILED) { 2001 err = -errno; 2002 map->mmaped = NULL; 2003 pr_warn("failed to alloc map '%s' content buffer: %s\n", map->name, errstr(err)); 2004 zfree(&map->real_name); 2005 zfree(&map->name); 2006 return err; 2007 } 2008 2009 if (data) 2010 memcpy(map->mmaped, data, data_sz); 2011 2012 pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name); 2013 return 0; 2014 } 2015 2016 static int bpf_object__init_global_data_maps(struct bpf_object *obj) 2017 { 2018 struct elf_sec_desc *sec_desc; 2019 const char *sec_name; 2020 int err = 0, sec_idx; 2021 2022 /* 2023 * Populate obj->maps with libbpf internal maps. 2024 */ 2025 for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) { 2026 sec_desc = &obj->efile.secs[sec_idx]; 2027 2028 /* Skip recognized sections with size 0. 
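 * (for example, a .data or .bss section that ended up empty); there is
 * nothing to put into an internal map in that case, so none is created.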
*/ 2029 if (!sec_desc->data || sec_desc->data->d_size == 0) 2030 continue; 2031 2032 switch (sec_desc->sec_type) { 2033 case SEC_DATA: 2034 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); 2035 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA, 2036 sec_name, sec_idx, 2037 sec_desc->data->d_buf, 2038 sec_desc->data->d_size); 2039 break; 2040 case SEC_RODATA: 2041 obj->has_rodata = true; 2042 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); 2043 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA, 2044 sec_name, sec_idx, 2045 sec_desc->data->d_buf, 2046 sec_desc->data->d_size); 2047 break; 2048 case SEC_BSS: 2049 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); 2050 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS, 2051 sec_name, sec_idx, 2052 NULL, 2053 sec_desc->data->d_size); 2054 break; 2055 default: 2056 /* skip */ 2057 break; 2058 } 2059 if (err) 2060 return err; 2061 } 2062 return 0; 2063 } 2064 2065 2066 static struct extern_desc *find_extern_by_name(const struct bpf_object *obj, 2067 const void *name) 2068 { 2069 int i; 2070 2071 for (i = 0; i < obj->nr_extern; i++) { 2072 if (strcmp(obj->externs[i].name, name) == 0) 2073 return &obj->externs[i]; 2074 } 2075 return NULL; 2076 } 2077 2078 static struct extern_desc *find_extern_by_name_with_len(const struct bpf_object *obj, 2079 const void *name, int len) 2080 { 2081 const char *ext_name; 2082 int i; 2083 2084 for (i = 0; i < obj->nr_extern; i++) { 2085 ext_name = obj->externs[i].name; 2086 if (strlen(ext_name) == len && strncmp(ext_name, name, len) == 0) 2087 return &obj->externs[i]; 2088 } 2089 return NULL; 2090 } 2091 2092 static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val, 2093 char value) 2094 { 2095 switch (ext->kcfg.type) { 2096 case KCFG_BOOL: 2097 if (value == 'm') { 2098 pr_warn("extern (kcfg) '%s': value '%c' implies tristate or char type\n", 2099 ext->name, value); 2100 return -EINVAL; 2101 } 2102 *(bool *)ext_val = value == 'y' ? 
true : false; 2103 break; 2104 case KCFG_TRISTATE: 2105 if (value == 'y') 2106 *(enum libbpf_tristate *)ext_val = TRI_YES; 2107 else if (value == 'm') 2108 *(enum libbpf_tristate *)ext_val = TRI_MODULE; 2109 else /* value == 'n' */ 2110 *(enum libbpf_tristate *)ext_val = TRI_NO; 2111 break; 2112 case KCFG_CHAR: 2113 *(char *)ext_val = value; 2114 break; 2115 case KCFG_UNKNOWN: 2116 case KCFG_INT: 2117 case KCFG_CHAR_ARR: 2118 default: 2119 pr_warn("extern (kcfg) '%s': value '%c' implies bool, tristate, or char type\n", 2120 ext->name, value); 2121 return -EINVAL; 2122 } 2123 ext->is_set = true; 2124 return 0; 2125 } 2126 2127 static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val, 2128 const char *value) 2129 { 2130 size_t len; 2131 2132 if (ext->kcfg.type != KCFG_CHAR_ARR) { 2133 pr_warn("extern (kcfg) '%s': value '%s' implies char array type\n", 2134 ext->name, value); 2135 return -EINVAL; 2136 } 2137 2138 len = strlen(value); 2139 if (len < 2 || value[len - 1] != '"') { 2140 pr_warn("extern (kcfg) '%s': invalid string config '%s'\n", 2141 ext->name, value); 2142 return -EINVAL; 2143 } 2144 2145 /* strip quotes */ 2146 len -= 2; 2147 if (len >= ext->kcfg.sz) { 2148 pr_warn("extern (kcfg) '%s': long string '%s' of (%zu bytes) truncated to %d bytes\n", 2149 ext->name, value, len, ext->kcfg.sz - 1); 2150 len = ext->kcfg.sz - 1; 2151 } 2152 memcpy(ext_val, value + 1, len); 2153 ext_val[len] = '\0'; 2154 ext->is_set = true; 2155 return 0; 2156 } 2157 2158 static int parse_u64(const char *value, __u64 *res) 2159 { 2160 char *value_end; 2161 int err; 2162 2163 errno = 0; 2164 *res = strtoull(value, &value_end, 0); 2165 if (errno) { 2166 err = -errno; 2167 pr_warn("failed to parse '%s': %s\n", value, errstr(err)); 2168 return err; 2169 } 2170 if (*value_end) { 2171 pr_warn("failed to parse '%s' as integer completely\n", value); 2172 return -EINVAL; 2173 } 2174 return 0; 2175 } 2176 2177 static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v) 2178 { 2179 int bit_sz = ext->kcfg.sz * 8; 2180 2181 if (ext->kcfg.sz == 8) 2182 return true; 2183 2184 /* Validate that value stored in u64 fits in integer of `ext->sz` 2185 * bytes size without any loss of information. If the target integer 2186 * is signed, we rely on the following limits of integer type of 2187 * Y bits and subsequent transformation: 2188 * 2189 * -2^(Y-1) <= X <= 2^(Y-1) - 1 2190 * 0 <= X + 2^(Y-1) <= 2^Y - 1 2191 * 0 <= X + 2^(Y-1) < 2^Y 2192 * 2193 * For unsigned target integer, check that all the (64 - Y) bits are 2194 * zero. 
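 * As a worked example (illustrative): for a 1-byte signed target, Y = 8,
 * so -128..127 map (mod 2^64) to 0..255 and pass the "< 2^8" check below,
 * while 128 becomes 256 and -129 becomes 2^64 - 1, so both are rejected.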
2195 */ 2196 if (ext->kcfg.is_signed) 2197 return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz); 2198 else 2199 return (v >> bit_sz) == 0; 2200 } 2201 2202 static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val, 2203 __u64 value) 2204 { 2205 if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR && 2206 ext->kcfg.type != KCFG_BOOL) { 2207 pr_warn("extern (kcfg) '%s': value '%llu' implies integer, char, or boolean type\n", 2208 ext->name, (unsigned long long)value); 2209 return -EINVAL; 2210 } 2211 if (ext->kcfg.type == KCFG_BOOL && value > 1) { 2212 pr_warn("extern (kcfg) '%s': value '%llu' isn't boolean compatible\n", 2213 ext->name, (unsigned long long)value); 2214 return -EINVAL; 2215 2216 } 2217 if (!is_kcfg_value_in_range(ext, value)) { 2218 pr_warn("extern (kcfg) '%s': value '%llu' doesn't fit in %d bytes\n", 2219 ext->name, (unsigned long long)value, ext->kcfg.sz); 2220 return -ERANGE; 2221 } 2222 switch (ext->kcfg.sz) { 2223 case 1: 2224 *(__u8 *)ext_val = value; 2225 break; 2226 case 2: 2227 *(__u16 *)ext_val = value; 2228 break; 2229 case 4: 2230 *(__u32 *)ext_val = value; 2231 break; 2232 case 8: 2233 *(__u64 *)ext_val = value; 2234 break; 2235 default: 2236 return -EINVAL; 2237 } 2238 ext->is_set = true; 2239 return 0; 2240 } 2241 2242 static int bpf_object__process_kconfig_line(struct bpf_object *obj, 2243 char *buf, void *data) 2244 { 2245 struct extern_desc *ext; 2246 char *sep, *value; 2247 int len, err = 0; 2248 void *ext_val; 2249 __u64 num; 2250 2251 if (!str_has_pfx(buf, "CONFIG_")) 2252 return 0; 2253 2254 sep = strchr(buf, '='); 2255 if (!sep) { 2256 pr_warn("failed to parse '%s': no separator\n", buf); 2257 return -EINVAL; 2258 } 2259 2260 /* Trim ending '\n' */ 2261 len = strlen(buf); 2262 if (buf[len - 1] == '\n') 2263 buf[len - 1] = '\0'; 2264 /* Split on '=' and ensure that a value is present. */ 2265 *sep = '\0'; 2266 if (!sep[1]) { 2267 *sep = '='; 2268 pr_warn("failed to parse '%s': no value\n", buf); 2269 return -EINVAL; 2270 } 2271 2272 ext = find_extern_by_name(obj, buf); 2273 if (!ext || ext->is_set) 2274 return 0; 2275 2276 ext_val = data + ext->kcfg.data_off; 2277 value = sep + 1; 2278 2279 switch (*value) { 2280 case 'y': case 'n': case 'm': 2281 err = set_kcfg_value_tri(ext, ext_val, *value); 2282 break; 2283 case '"': 2284 err = set_kcfg_value_str(ext, ext_val, value); 2285 break; 2286 default: 2287 /* assume integer */ 2288 err = parse_u64(value, &num); 2289 if (err) { 2290 pr_warn("extern (kcfg) '%s': value '%s' isn't a valid integer\n", ext->name, value); 2291 return err; 2292 } 2293 if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) { 2294 pr_warn("extern (kcfg) '%s': value '%s' implies integer type\n", ext->name, value); 2295 return -EINVAL; 2296 } 2297 err = set_kcfg_value_num(ext, ext_val, num); 2298 break; 2299 } 2300 if (err) 2301 return err; 2302 pr_debug("extern (kcfg) '%s': set to %s\n", ext->name, value); 2303 return 0; 2304 } 2305 2306 static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data) 2307 { 2308 char buf[PATH_MAX]; 2309 struct utsname uts; 2310 int len, err = 0; 2311 gzFile file; 2312 2313 uname(&uts); 2314 len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release); 2315 if (len < 0) 2316 return -EINVAL; 2317 else if (len >= PATH_MAX) 2318 return -ENAMETOOLONG; 2319 2320 /* gzopen also accepts uncompressed files. 
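 * That is why the plain-text /boot/config-<release> tried first below is
 * read through the same gz* calls as the compressed /proc/config.gz.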
*/ 2321 file = gzopen(buf, "re"); 2322 if (!file) 2323 file = gzopen("/proc/config.gz", "re"); 2324 2325 if (!file) { 2326 pr_warn("failed to open system Kconfig\n"); 2327 return -ENOENT; 2328 } 2329 2330 while (gzgets(file, buf, sizeof(buf))) { 2331 err = bpf_object__process_kconfig_line(obj, buf, data); 2332 if (err) { 2333 pr_warn("error parsing system Kconfig line '%s': %s\n", 2334 buf, errstr(err)); 2335 goto out; 2336 } 2337 } 2338 2339 out: 2340 gzclose(file); 2341 return err; 2342 } 2343 2344 static int bpf_object__read_kconfig_mem(struct bpf_object *obj, 2345 const char *config, void *data) 2346 { 2347 char buf[PATH_MAX]; 2348 int err = 0; 2349 FILE *file; 2350 2351 file = fmemopen((void *)config, strlen(config), "r"); 2352 if (!file) { 2353 err = -errno; 2354 pr_warn("failed to open in-memory Kconfig: %s\n", errstr(err)); 2355 return err; 2356 } 2357 2358 while (fgets(buf, sizeof(buf), file)) { 2359 err = bpf_object__process_kconfig_line(obj, buf, data); 2360 if (err) { 2361 pr_warn("error parsing in-memory Kconfig line '%s': %s\n", 2362 buf, errstr(err)); 2363 break; 2364 } 2365 } 2366 2367 fclose(file); 2368 return err; 2369 } 2370 2371 static int bpf_object__init_kconfig_map(struct bpf_object *obj) 2372 { 2373 struct extern_desc *last_ext = NULL, *ext; 2374 size_t map_sz; 2375 int i, err; 2376 2377 for (i = 0; i < obj->nr_extern; i++) { 2378 ext = &obj->externs[i]; 2379 if (ext->type == EXT_KCFG) 2380 last_ext = ext; 2381 } 2382 2383 if (!last_ext) 2384 return 0; 2385 2386 map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz; 2387 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG, 2388 ".kconfig", obj->efile.symbols_shndx, 2389 NULL, map_sz); 2390 if (err) 2391 return err; 2392 2393 obj->kconfig_map_idx = obj->nr_maps - 1; 2394 2395 return 0; 2396 } 2397 2398 const struct btf_type * 2399 skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id) 2400 { 2401 const struct btf_type *t = btf__type_by_id(btf, id); 2402 2403 if (res_id) 2404 *res_id = id; 2405 2406 while (btf_is_mod(t) || btf_is_typedef(t)) { 2407 if (res_id) 2408 *res_id = t->type; 2409 t = btf__type_by_id(btf, t->type); 2410 } 2411 2412 return t; 2413 } 2414 2415 static const struct btf_type * 2416 resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id) 2417 { 2418 const struct btf_type *t; 2419 2420 t = skip_mods_and_typedefs(btf, id, NULL); 2421 if (!btf_is_ptr(t)) 2422 return NULL; 2423 2424 t = skip_mods_and_typedefs(btf, t->type, res_id); 2425 2426 return btf_is_func_proto(t) ? 
t : NULL; 2427 } 2428 2429 static const char *__btf_kind_str(__u16 kind) 2430 { 2431 switch (kind) { 2432 case BTF_KIND_UNKN: return "void"; 2433 case BTF_KIND_INT: return "int"; 2434 case BTF_KIND_PTR: return "ptr"; 2435 case BTF_KIND_ARRAY: return "array"; 2436 case BTF_KIND_STRUCT: return "struct"; 2437 case BTF_KIND_UNION: return "union"; 2438 case BTF_KIND_ENUM: return "enum"; 2439 case BTF_KIND_FWD: return "fwd"; 2440 case BTF_KIND_TYPEDEF: return "typedef"; 2441 case BTF_KIND_VOLATILE: return "volatile"; 2442 case BTF_KIND_CONST: return "const"; 2443 case BTF_KIND_RESTRICT: return "restrict"; 2444 case BTF_KIND_FUNC: return "func"; 2445 case BTF_KIND_FUNC_PROTO: return "func_proto"; 2446 case BTF_KIND_VAR: return "var"; 2447 case BTF_KIND_DATASEC: return "datasec"; 2448 case BTF_KIND_FLOAT: return "float"; 2449 case BTF_KIND_DECL_TAG: return "decl_tag"; 2450 case BTF_KIND_TYPE_TAG: return "type_tag"; 2451 case BTF_KIND_ENUM64: return "enum64"; 2452 default: return "unknown"; 2453 } 2454 } 2455 2456 const char *btf_kind_str(const struct btf_type *t) 2457 { 2458 return __btf_kind_str(btf_kind(t)); 2459 } 2460 2461 /* 2462 * Fetch integer attribute of BTF map definition. Such attributes are 2463 * represented using a pointer to an array, in which dimensionality of array 2464 * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY]; 2465 * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF 2466 * type definition, while using only sizeof(void *) space in ELF data section. 2467 */ 2468 static bool get_map_field_int(const char *map_name, const struct btf *btf, 2469 const struct btf_member *m, __u32 *res) 2470 { 2471 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL); 2472 const char *name = btf__name_by_offset(btf, m->name_off); 2473 const struct btf_array *arr_info; 2474 const struct btf_type *arr_t; 2475 2476 if (!btf_is_ptr(t)) { 2477 pr_warn("map '%s': attr '%s': expected PTR, got %s.\n", 2478 map_name, name, btf_kind_str(t)); 2479 return false; 2480 } 2481 2482 arr_t = btf__type_by_id(btf, t->type); 2483 if (!arr_t) { 2484 pr_warn("map '%s': attr '%s': type [%u] not found.\n", 2485 map_name, name, t->type); 2486 return false; 2487 } 2488 if (!btf_is_array(arr_t)) { 2489 pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n", 2490 map_name, name, btf_kind_str(arr_t)); 2491 return false; 2492 } 2493 arr_info = btf_array(arr_t); 2494 *res = arr_info->nelems; 2495 return true; 2496 } 2497 2498 static bool get_map_field_long(const char *map_name, const struct btf *btf, 2499 const struct btf_member *m, __u64 *res) 2500 { 2501 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL); 2502 const char *name = btf__name_by_offset(btf, m->name_off); 2503 2504 if (btf_is_ptr(t)) { 2505 __u32 res32; 2506 bool ret; 2507 2508 ret = get_map_field_int(map_name, btf, m, &res32); 2509 if (ret) 2510 *res = (__u64)res32; 2511 return ret; 2512 } 2513 2514 if (!btf_is_enum(t) && !btf_is_enum64(t)) { 2515 pr_warn("map '%s': attr '%s': expected ENUM or ENUM64, got %s.\n", 2516 map_name, name, btf_kind_str(t)); 2517 return false; 2518 } 2519 2520 if (btf_vlen(t) != 1) { 2521 pr_warn("map '%s': attr '%s': invalid __ulong\n", 2522 map_name, name); 2523 return false; 2524 } 2525 2526 if (btf_is_enum(t)) { 2527 const struct btf_enum *e = btf_enum(t); 2528 2529 *res = e->val; 2530 } else { 2531 const struct btf_enum64 *e = btf_enum64(t); 2532 2533 *res = btf_enum64_value(e); 2534 } 2535 return true; 2536 } 2537 2538 static int 
pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name) 2539 { 2540 int len; 2541 2542 len = snprintf(buf, buf_sz, "%s/%s", path, name); 2543 if (len < 0) 2544 return -EINVAL; 2545 if (len >= buf_sz) 2546 return -ENAMETOOLONG; 2547 2548 return 0; 2549 } 2550 2551 static int build_map_pin_path(struct bpf_map *map, const char *path) 2552 { 2553 char buf[PATH_MAX]; 2554 int err; 2555 2556 if (!path) 2557 path = BPF_FS_DEFAULT_PATH; 2558 2559 err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map)); 2560 if (err) 2561 return err; 2562 2563 return bpf_map__set_pin_path(map, buf); 2564 } 2565 2566 /* should match definition in bpf_helpers.h */ 2567 enum libbpf_pin_type { 2568 LIBBPF_PIN_NONE, 2569 /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */ 2570 LIBBPF_PIN_BY_NAME, 2571 }; 2572 2573 int parse_btf_map_def(const char *map_name, struct btf *btf, 2574 const struct btf_type *def_t, bool strict, 2575 struct btf_map_def *map_def, struct btf_map_def *inner_def) 2576 { 2577 const struct btf_type *t; 2578 const struct btf_member *m; 2579 bool is_inner = inner_def == NULL; 2580 int vlen, i; 2581 2582 vlen = btf_vlen(def_t); 2583 m = btf_members(def_t); 2584 for (i = 0; i < vlen; i++, m++) { 2585 const char *name = btf__name_by_offset(btf, m->name_off); 2586 2587 if (!name) { 2588 pr_warn("map '%s': invalid field #%d.\n", map_name, i); 2589 return -EINVAL; 2590 } 2591 if (strcmp(name, "type") == 0) { 2592 if (!get_map_field_int(map_name, btf, m, &map_def->map_type)) 2593 return -EINVAL; 2594 map_def->parts |= MAP_DEF_MAP_TYPE; 2595 } else if (strcmp(name, "max_entries") == 0) { 2596 if (!get_map_field_int(map_name, btf, m, &map_def->max_entries)) 2597 return -EINVAL; 2598 map_def->parts |= MAP_DEF_MAX_ENTRIES; 2599 } else if (strcmp(name, "map_flags") == 0) { 2600 if (!get_map_field_int(map_name, btf, m, &map_def->map_flags)) 2601 return -EINVAL; 2602 map_def->parts |= MAP_DEF_MAP_FLAGS; 2603 } else if (strcmp(name, "numa_node") == 0) { 2604 if (!get_map_field_int(map_name, btf, m, &map_def->numa_node)) 2605 return -EINVAL; 2606 map_def->parts |= MAP_DEF_NUMA_NODE; 2607 } else if (strcmp(name, "key_size") == 0) { 2608 __u32 sz; 2609 2610 if (!get_map_field_int(map_name, btf, m, &sz)) 2611 return -EINVAL; 2612 if (map_def->key_size && map_def->key_size != sz) { 2613 pr_warn("map '%s': conflicting key size %u != %u.\n", 2614 map_name, map_def->key_size, sz); 2615 return -EINVAL; 2616 } 2617 map_def->key_size = sz; 2618 map_def->parts |= MAP_DEF_KEY_SIZE; 2619 } else if (strcmp(name, "key") == 0) { 2620 __s64 sz; 2621 2622 t = btf__type_by_id(btf, m->type); 2623 if (!t) { 2624 pr_warn("map '%s': key type [%d] not found.\n", 2625 map_name, m->type); 2626 return -EINVAL; 2627 } 2628 if (!btf_is_ptr(t)) { 2629 pr_warn("map '%s': key spec is not PTR: %s.\n", 2630 map_name, btf_kind_str(t)); 2631 return -EINVAL; 2632 } 2633 sz = btf__resolve_size(btf, t->type); 2634 if (sz < 0) { 2635 pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n", 2636 map_name, t->type, (ssize_t)sz); 2637 return sz; 2638 } 2639 if (map_def->key_size && map_def->key_size != sz) { 2640 pr_warn("map '%s': conflicting key size %u != %zd.\n", 2641 map_name, map_def->key_size, (ssize_t)sz); 2642 return -EINVAL; 2643 } 2644 map_def->key_size = sz; 2645 map_def->key_type_id = t->type; 2646 map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE; 2647 } else if (strcmp(name, "value_size") == 0) { 2648 __u32 sz; 2649 2650 if (!get_map_field_int(map_name, btf, m, &sz)) 2651 return -EINVAL; 
2652 if (map_def->value_size && map_def->value_size != sz) { 2653 pr_warn("map '%s': conflicting value size %u != %u.\n", 2654 map_name, map_def->value_size, sz); 2655 return -EINVAL; 2656 } 2657 map_def->value_size = sz; 2658 map_def->parts |= MAP_DEF_VALUE_SIZE; 2659 } else if (strcmp(name, "value") == 0) { 2660 __s64 sz; 2661 2662 t = btf__type_by_id(btf, m->type); 2663 if (!t) { 2664 pr_warn("map '%s': value type [%d] not found.\n", 2665 map_name, m->type); 2666 return -EINVAL; 2667 } 2668 if (!btf_is_ptr(t)) { 2669 pr_warn("map '%s': value spec is not PTR: %s.\n", 2670 map_name, btf_kind_str(t)); 2671 return -EINVAL; 2672 } 2673 sz = btf__resolve_size(btf, t->type); 2674 if (sz < 0) { 2675 pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n", 2676 map_name, t->type, (ssize_t)sz); 2677 return sz; 2678 } 2679 if (map_def->value_size && map_def->value_size != sz) { 2680 pr_warn("map '%s': conflicting value size %u != %zd.\n", 2681 map_name, map_def->value_size, (ssize_t)sz); 2682 return -EINVAL; 2683 } 2684 map_def->value_size = sz; 2685 map_def->value_type_id = t->type; 2686 map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE; 2687 } 2688 else if (strcmp(name, "values") == 0) { 2689 bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type); 2690 bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY; 2691 const char *desc = is_map_in_map ? "map-in-map inner" : "prog-array value"; 2692 char inner_map_name[128]; 2693 int err; 2694 2695 if (is_inner) { 2696 pr_warn("map '%s': multi-level inner maps not supported.\n", 2697 map_name); 2698 return -ENOTSUP; 2699 } 2700 if (i != vlen - 1) { 2701 pr_warn("map '%s': '%s' member should be last.\n", 2702 map_name, name); 2703 return -EINVAL; 2704 } 2705 if (!is_map_in_map && !is_prog_array) { 2706 pr_warn("map '%s': should be map-in-map or prog-array.\n", 2707 map_name); 2708 return -ENOTSUP; 2709 } 2710 if (map_def->value_size && map_def->value_size != 4) { 2711 pr_warn("map '%s': conflicting value size %u != 4.\n", 2712 map_name, map_def->value_size); 2713 return -EINVAL; 2714 } 2715 map_def->value_size = 4; 2716 t = btf__type_by_id(btf, m->type); 2717 if (!t) { 2718 pr_warn("map '%s': %s type [%d] not found.\n", 2719 map_name, desc, m->type); 2720 return -EINVAL; 2721 } 2722 if (!btf_is_array(t) || btf_array(t)->nelems) { 2723 pr_warn("map '%s': %s spec is not a zero-sized array.\n", 2724 map_name, desc); 2725 return -EINVAL; 2726 } 2727 t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL); 2728 if (!btf_is_ptr(t)) { 2729 pr_warn("map '%s': %s def is of unexpected kind %s.\n", 2730 map_name, desc, btf_kind_str(t)); 2731 return -EINVAL; 2732 } 2733 t = skip_mods_and_typedefs(btf, t->type, NULL); 2734 if (is_prog_array) { 2735 if (!btf_is_func_proto(t)) { 2736 pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n", 2737 map_name, btf_kind_str(t)); 2738 return -EINVAL; 2739 } 2740 continue; 2741 } 2742 if (!btf_is_struct(t)) { 2743 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", 2744 map_name, btf_kind_str(t)); 2745 return -EINVAL; 2746 } 2747 2748 snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name); 2749 err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL); 2750 if (err) 2751 return err; 2752 2753 map_def->parts |= MAP_DEF_INNER_MAP; 2754 } else if (strcmp(name, "pinning") == 0) { 2755 __u32 val; 2756 2757 if (is_inner) { 2758 pr_warn("map '%s': inner def can't be pinned.\n", map_name); 2759 return -EINVAL; 2760 } 2761 if 
(!get_map_field_int(map_name, btf, m, &val)) 2762 return -EINVAL; 2763 if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) { 2764 pr_warn("map '%s': invalid pinning value %u.\n", 2765 map_name, val); 2766 return -EINVAL; 2767 } 2768 map_def->pinning = val; 2769 map_def->parts |= MAP_DEF_PINNING; 2770 } else if (strcmp(name, "map_extra") == 0) { 2771 __u64 map_extra; 2772 2773 if (!get_map_field_long(map_name, btf, m, &map_extra)) 2774 return -EINVAL; 2775 map_def->map_extra = map_extra; 2776 map_def->parts |= MAP_DEF_MAP_EXTRA; 2777 } else { 2778 if (strict) { 2779 pr_warn("map '%s': unknown field '%s'.\n", map_name, name); 2780 return -ENOTSUP; 2781 } 2782 pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name); 2783 } 2784 } 2785 2786 if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) { 2787 pr_warn("map '%s': map type isn't specified.\n", map_name); 2788 return -EINVAL; 2789 } 2790 2791 return 0; 2792 } 2793 2794 static size_t adjust_ringbuf_sz(size_t sz) 2795 { 2796 __u32 page_sz = sysconf(_SC_PAGE_SIZE); 2797 __u32 mul; 2798 2799 /* if user forgot to set any size, make sure they see error */ 2800 if (sz == 0) 2801 return 0; 2802 /* Kernel expects BPF_MAP_TYPE_RINGBUF's max_entries to be 2803 * a power-of-2 multiple of kernel's page size. If user diligently 2804 * satisified these conditions, pass the size through. 2805 */ 2806 if ((sz % page_sz) == 0 && is_pow_of_2(sz / page_sz)) 2807 return sz; 2808 2809 /* Otherwise find closest (page_sz * power_of_2) product bigger than 2810 * user-set size to satisfy both user size request and kernel 2811 * requirements and substitute correct max_entries for map creation. 2812 */ 2813 for (mul = 1; mul <= UINT_MAX / page_sz; mul <<= 1) { 2814 if (mul * page_sz > sz) 2815 return mul * page_sz; 2816 } 2817 2818 /* if it's impossible to satisfy the conditions (i.e., user size is 2819 * very close to UINT_MAX but is not a power-of-2 multiple of 2820 * page_size) then just return original size and let kernel reject it 2821 */ 2822 return sz; 2823 } 2824 2825 static bool map_is_ringbuf(const struct bpf_map *map) 2826 { 2827 return map->def.type == BPF_MAP_TYPE_RINGBUF || 2828 map->def.type == BPF_MAP_TYPE_USER_RINGBUF; 2829 } 2830 2831 static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def) 2832 { 2833 map->def.type = def->map_type; 2834 map->def.key_size = def->key_size; 2835 map->def.value_size = def->value_size; 2836 map->def.max_entries = def->max_entries; 2837 map->def.map_flags = def->map_flags; 2838 map->map_extra = def->map_extra; 2839 2840 map->numa_node = def->numa_node; 2841 map->btf_key_type_id = def->key_type_id; 2842 map->btf_value_type_id = def->value_type_id; 2843 2844 /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */ 2845 if (map_is_ringbuf(map)) 2846 map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries); 2847 2848 if (def->parts & MAP_DEF_MAP_TYPE) 2849 pr_debug("map '%s': found type = %u.\n", map->name, def->map_type); 2850 2851 if (def->parts & MAP_DEF_KEY_TYPE) 2852 pr_debug("map '%s': found key [%u], sz = %u.\n", 2853 map->name, def->key_type_id, def->key_size); 2854 else if (def->parts & MAP_DEF_KEY_SIZE) 2855 pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size); 2856 2857 if (def->parts & MAP_DEF_VALUE_TYPE) 2858 pr_debug("map '%s': found value [%u], sz = %u.\n", 2859 map->name, def->value_type_id, def->value_size); 2860 else if (def->parts & MAP_DEF_VALUE_SIZE) 2861 pr_debug("map '%s': found value_size = %u.\n", map->name, 
def->value_size); 2862 2863 if (def->parts & MAP_DEF_MAX_ENTRIES) 2864 pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries); 2865 if (def->parts & MAP_DEF_MAP_FLAGS) 2866 pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags); 2867 if (def->parts & MAP_DEF_MAP_EXTRA) 2868 pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name, 2869 (unsigned long long)def->map_extra); 2870 if (def->parts & MAP_DEF_PINNING) 2871 pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning); 2872 if (def->parts & MAP_DEF_NUMA_NODE) 2873 pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node); 2874 2875 if (def->parts & MAP_DEF_INNER_MAP) 2876 pr_debug("map '%s': found inner map definition.\n", map->name); 2877 } 2878 2879 static const char *btf_var_linkage_str(__u32 linkage) 2880 { 2881 switch (linkage) { 2882 case BTF_VAR_STATIC: return "static"; 2883 case BTF_VAR_GLOBAL_ALLOCATED: return "global"; 2884 case BTF_VAR_GLOBAL_EXTERN: return "extern"; 2885 default: return "unknown"; 2886 } 2887 } 2888 2889 static int bpf_object__init_user_btf_map(struct bpf_object *obj, 2890 const struct btf_type *sec, 2891 int var_idx, int sec_idx, 2892 const Elf_Data *data, bool strict, 2893 const char *pin_root_path) 2894 { 2895 struct btf_map_def map_def = {}, inner_def = {}; 2896 const struct btf_type *var, *def; 2897 const struct btf_var_secinfo *vi; 2898 const struct btf_var *var_extra; 2899 const char *map_name; 2900 struct bpf_map *map; 2901 int err; 2902 2903 vi = btf_var_secinfos(sec) + var_idx; 2904 var = btf__type_by_id(obj->btf, vi->type); 2905 var_extra = btf_var(var); 2906 map_name = btf__name_by_offset(obj->btf, var->name_off); 2907 2908 if (str_is_empty(map_name)) { 2909 pr_warn("map #%d: empty name.\n", var_idx); 2910 return -EINVAL; 2911 } 2912 if ((__u64)vi->offset + vi->size > data->d_size) { 2913 pr_warn("map '%s' BTF data is corrupted.\n", map_name); 2914 return -EINVAL; 2915 } 2916 if (!btf_is_var(var)) { 2917 pr_warn("map '%s': unexpected var kind %s.\n", 2918 map_name, btf_kind_str(var)); 2919 return -EINVAL; 2920 } 2921 if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) { 2922 pr_warn("map '%s': unsupported map linkage %s.\n", 2923 map_name, btf_var_linkage_str(var_extra->linkage)); 2924 return -EOPNOTSUPP; 2925 } 2926 2927 def = skip_mods_and_typedefs(obj->btf, var->type, NULL); 2928 if (!btf_is_struct(def)) { 2929 pr_warn("map '%s': unexpected def kind %s.\n", 2930 map_name, btf_kind_str(var)); 2931 return -EINVAL; 2932 } 2933 if (def->size > vi->size) { 2934 pr_warn("map '%s': invalid def size.\n", map_name); 2935 return -EINVAL; 2936 } 2937 2938 map = bpf_object__add_map(obj); 2939 if (IS_ERR(map)) 2940 return PTR_ERR(map); 2941 map->name = strdup(map_name); 2942 if (!map->name) { 2943 pr_warn("map '%s': failed to alloc map name.\n", map_name); 2944 return -ENOMEM; 2945 } 2946 map->libbpf_type = LIBBPF_MAP_UNSPEC; 2947 map->def.type = BPF_MAP_TYPE_UNSPEC; 2948 map->sec_idx = sec_idx; 2949 map->sec_offset = vi->offset; 2950 map->btf_var_idx = var_idx; 2951 pr_debug("map '%s': at sec_idx %d, offset %zu.\n", 2952 map_name, map->sec_idx, map->sec_offset); 2953 2954 err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def); 2955 if (err) 2956 return err; 2957 2958 fill_map_from_def(map, &map_def); 2959 2960 if (map_def.pinning == LIBBPF_PIN_BY_NAME) { 2961 err = build_map_pin_path(map, pin_root_path); 2962 if (err) { 2963 pr_warn("map '%s': couldn't build pin path.\n", map->name); 2964 return err; 2965 } 
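		/* e.g. (illustrative): with the default pin root, a map named
		 * "my_map" would get the pin path /sys/fs/bpf/my_map
		 */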
2966 } 2967 2968 if (map_def.parts & MAP_DEF_INNER_MAP) { 2969 map->inner_map = calloc(1, sizeof(*map->inner_map)); 2970 if (!map->inner_map) 2971 return -ENOMEM; 2972 map->inner_map->fd = create_placeholder_fd(); 2973 if (map->inner_map->fd < 0) 2974 return map->inner_map->fd; 2975 map->inner_map->sec_idx = sec_idx; 2976 map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1); 2977 if (!map->inner_map->name) 2978 return -ENOMEM; 2979 sprintf(map->inner_map->name, "%s.inner", map_name); 2980 2981 fill_map_from_def(map->inner_map, &inner_def); 2982 } 2983 2984 err = map_fill_btf_type_info(obj, map); 2985 if (err) 2986 return err; 2987 2988 return 0; 2989 } 2990 2991 static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map, 2992 const char *sec_name, int sec_idx, 2993 void *data, size_t data_sz) 2994 { 2995 const long page_sz = sysconf(_SC_PAGE_SIZE); 2996 const size_t data_alloc_sz = roundup(data_sz, page_sz); 2997 size_t mmap_sz; 2998 2999 mmap_sz = bpf_map_mmap_sz(map); 3000 if (data_alloc_sz > mmap_sz) { 3001 pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n", 3002 sec_name, mmap_sz, data_sz); 3003 return -E2BIG; 3004 } 3005 3006 obj->arena_data = malloc(data_sz); 3007 if (!obj->arena_data) 3008 return -ENOMEM; 3009 memcpy(obj->arena_data, data, data_sz); 3010 obj->arena_data_sz = data_sz; 3011 3012 /* place globals at the end of the arena */ 3013 obj->arena_data_off = mmap_sz - data_alloc_sz; 3014 3015 /* make bpf_map__init_value() work for ARENA maps */ 3016 map->mmaped = obj->arena_data; 3017 3018 return 0; 3019 } 3020 3021 static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict, 3022 const char *pin_root_path) 3023 { 3024 const struct btf_type *sec = NULL; 3025 int nr_types, i, vlen, err; 3026 const struct btf_type *t; 3027 const char *name; 3028 Elf_Data *data; 3029 Elf_Scn *scn; 3030 3031 if (obj->efile.btf_maps_shndx < 0) 3032 return 0; 3033 3034 scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx); 3035 data = elf_sec_data(obj, scn); 3036 if (!data) { 3037 pr_warn("elf: failed to get %s map definitions for %s\n", 3038 MAPS_ELF_SEC, obj->path); 3039 return -EINVAL; 3040 } 3041 3042 nr_types = btf__type_cnt(obj->btf); 3043 for (i = 1; i < nr_types; i++) { 3044 t = btf__type_by_id(obj->btf, i); 3045 if (!btf_is_datasec(t)) 3046 continue; 3047 name = btf__name_by_offset(obj->btf, t->name_off); 3048 if (strcmp(name, MAPS_ELF_SEC) == 0) { 3049 sec = t; 3050 obj->efile.btf_maps_sec_btf_id = i; 3051 break; 3052 } 3053 } 3054 3055 if (!sec) { 3056 pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC); 3057 return -ENOENT; 3058 } 3059 3060 vlen = btf_vlen(sec); 3061 for (i = 0; i < vlen; i++) { 3062 err = bpf_object__init_user_btf_map(obj, sec, i, 3063 obj->efile.btf_maps_shndx, 3064 data, strict, 3065 pin_root_path); 3066 if (err) 3067 return err; 3068 } 3069 3070 for (i = 0; i < obj->nr_maps; i++) { 3071 struct bpf_map *map = &obj->maps[i]; 3072 3073 if (map->def.type != BPF_MAP_TYPE_ARENA) 3074 continue; 3075 3076 if (obj->arena_map_idx >= 0) { 3077 pr_warn("map '%s': only single ARENA map is supported (map '%s' is also ARENA)\n", 3078 map->name, obj->maps[obj->arena_map_idx].name); 3079 return -EINVAL; 3080 } 3081 obj->arena_map_idx = i; 3082 3083 if (obj->efile.arena_data) { 3084 err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx, 3085 obj->efile.arena_data->d_buf, 3086 obj->efile.arena_data->d_size); 3087 if (err) 3088 return err; 3089 } 3090 } 
3091 if (obj->efile.arena_data && obj->arena_map_idx < 0) { 3092 pr_warn("elf: sec '%s': to use global __arena variables the ARENA map should be explicitly declared in SEC(\".maps\")\n", 3093 ARENA_SEC); 3094 return -ENOENT; 3095 } 3096 3097 return 0; 3098 } 3099 3100 static int bpf_object__init_maps(struct bpf_object *obj, 3101 const struct bpf_object_open_opts *opts) 3102 { 3103 const char *pin_root_path; 3104 bool strict; 3105 int err = 0; 3106 3107 strict = !OPTS_GET(opts, relaxed_maps, false); 3108 pin_root_path = OPTS_GET(opts, pin_root_path, NULL); 3109 3110 err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path); 3111 err = err ?: bpf_object__init_global_data_maps(obj); 3112 err = err ?: bpf_object__init_kconfig_map(obj); 3113 err = err ?: bpf_object_init_struct_ops(obj); 3114 3115 return err; 3116 } 3117 3118 static bool section_have_execinstr(struct bpf_object *obj, int idx) 3119 { 3120 Elf64_Shdr *sh; 3121 3122 sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx)); 3123 if (!sh) 3124 return false; 3125 3126 return sh->sh_flags & SHF_EXECINSTR; 3127 } 3128 3129 static bool starts_with_qmark(const char *s) 3130 { 3131 return s && s[0] == '?'; 3132 } 3133 3134 static bool btf_needs_sanitization(struct bpf_object *obj) 3135 { 3136 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC); 3137 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC); 3138 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT); 3139 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC); 3140 bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG); 3141 bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG); 3142 bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64); 3143 bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC); 3144 3145 return !has_func || !has_datasec || !has_func_global || !has_float || 3146 !has_decl_tag || !has_type_tag || !has_enum64 || !has_qmark_datasec; 3147 } 3148 3149 static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf) 3150 { 3151 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC); 3152 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC); 3153 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT); 3154 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC); 3155 bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG); 3156 bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG); 3157 bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64); 3158 bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC); 3159 int enum64_placeholder_id = 0; 3160 struct btf_type *t; 3161 int i, j, vlen; 3162 3163 for (i = 1; i < btf__type_cnt(btf); i++) { 3164 t = (struct btf_type *)btf__type_by_id(btf, i); 3165 3166 if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) { 3167 /* replace VAR/DECL_TAG with INT */ 3168 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0); 3169 /* 3170 * using size = 1 is the safest choice, 4 will be too 3171 * big and cause kernel BTF validation failure if 3172 * original variable took less than 4 bytes 3173 */ 3174 t->size = 1; 3175 *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8); 3176 } else if (!has_datasec && btf_is_datasec(t)) { 3177 /* replace DATASEC with STRUCT */ 3178 const struct btf_var_secinfo *v = btf_var_secinfos(t); 3179 struct btf_member *m = btf_members(t); 3180 struct btf_type *vt; 3181 char *name; 3182 3183 name = (char *)btf__name_by_offset(btf, t->name_off); 3184 while (*name) { 3185 if (*name == '.' 
|| *name == '?') 3186 *name = '_'; 3187 name++; 3188 } 3189 3190 vlen = btf_vlen(t); 3191 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen); 3192 for (j = 0; j < vlen; j++, v++, m++) { 3193 /* order of field assignments is important */ 3194 m->offset = v->offset * 8; 3195 m->type = v->type; 3196 /* preserve variable name as member name */ 3197 vt = (void *)btf__type_by_id(btf, v->type); 3198 m->name_off = vt->name_off; 3199 } 3200 } else if (!has_qmark_datasec && btf_is_datasec(t) && 3201 starts_with_qmark(btf__name_by_offset(btf, t->name_off))) { 3202 /* replace '?' prefix with '_' for DATASEC names */ 3203 char *name; 3204 3205 name = (char *)btf__name_by_offset(btf, t->name_off); 3206 if (name[0] == '?') 3207 name[0] = '_'; 3208 } else if (!has_func && btf_is_func_proto(t)) { 3209 /* replace FUNC_PROTO with ENUM */ 3210 vlen = btf_vlen(t); 3211 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen); 3212 t->size = sizeof(__u32); /* kernel enforced */ 3213 } else if (!has_func && btf_is_func(t)) { 3214 /* replace FUNC with TYPEDEF */ 3215 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0); 3216 } else if (!has_func_global && btf_is_func(t)) { 3217 /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */ 3218 t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0); 3219 } else if (!has_float && btf_is_float(t)) { 3220 /* replace FLOAT with an equally-sized empty STRUCT; 3221 * since C compilers do not accept e.g. "float" as a 3222 * valid struct name, make it anonymous 3223 */ 3224 t->name_off = 0; 3225 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0); 3226 } else if (!has_type_tag && btf_is_type_tag(t)) { 3227 /* replace TYPE_TAG with a CONST */ 3228 t->name_off = 0; 3229 t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0); 3230 } else if (!has_enum64 && btf_is_enum(t)) { 3231 /* clear the kflag */ 3232 t->info = btf_type_info(btf_kind(t), btf_vlen(t), false); 3233 } else if (!has_enum64 && btf_is_enum64(t)) { 3234 /* replace ENUM64 with a union */ 3235 struct btf_member *m; 3236 3237 if (enum64_placeholder_id == 0) { 3238 enum64_placeholder_id = btf__add_int(btf, "enum64_placeholder", 1, 0); 3239 if (enum64_placeholder_id < 0) 3240 return enum64_placeholder_id; 3241 3242 t = (struct btf_type *)btf__type_by_id(btf, i); 3243 } 3244 3245 m = btf_members(t); 3246 vlen = btf_vlen(t); 3247 t->info = BTF_INFO_ENC(BTF_KIND_UNION, 0, vlen); 3248 for (j = 0; j < vlen; j++, m++) { 3249 m->type = enum64_placeholder_id; 3250 m->offset = 0; 3251 } 3252 } 3253 } 3254 3255 return 0; 3256 } 3257 3258 static bool libbpf_needs_btf(const struct bpf_object *obj) 3259 { 3260 return obj->efile.btf_maps_shndx >= 0 || 3261 obj->efile.has_st_ops || 3262 obj->nr_extern > 0; 3263 } 3264 3265 static bool kernel_needs_btf(const struct bpf_object *obj) 3266 { 3267 return obj->efile.has_st_ops; 3268 } 3269 3270 static int bpf_object__init_btf(struct bpf_object *obj, 3271 Elf_Data *btf_data, 3272 Elf_Data *btf_ext_data) 3273 { 3274 int err = -ENOENT; 3275 3276 if (btf_data) { 3277 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size); 3278 err = libbpf_get_error(obj->btf); 3279 if (err) { 3280 obj->btf = NULL; 3281 pr_warn("Error loading ELF section %s: %s.\n", BTF_ELF_SEC, errstr(err)); 3282 goto out; 3283 } 3284 /* enforce 8-byte pointers for BPF-targeted BTFs */ 3285 btf__set_pointer_size(obj->btf, 8); 3286 } 3287 if (btf_ext_data) { 3288 struct btf_ext_info *ext_segs[3]; 3289 int seg_num, sec_num; 3290 3291 if (!obj->btf) { 3292 pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n", 3293 BTF_EXT_ELF_SEC, BTF_ELF_SEC); 3294 
goto out; 3295 } 3296 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size); 3297 err = libbpf_get_error(obj->btf_ext); 3298 if (err) { 3299 pr_warn("Error loading ELF section %s: %s. Ignored and continue.\n", 3300 BTF_EXT_ELF_SEC, errstr(err)); 3301 obj->btf_ext = NULL; 3302 goto out; 3303 } 3304 3305 /* setup .BTF.ext to ELF section mapping */ 3306 ext_segs[0] = &obj->btf_ext->func_info; 3307 ext_segs[1] = &obj->btf_ext->line_info; 3308 ext_segs[2] = &obj->btf_ext->core_relo_info; 3309 for (seg_num = 0; seg_num < ARRAY_SIZE(ext_segs); seg_num++) { 3310 struct btf_ext_info *seg = ext_segs[seg_num]; 3311 const struct btf_ext_info_sec *sec; 3312 const char *sec_name; 3313 Elf_Scn *scn; 3314 3315 if (seg->sec_cnt == 0) 3316 continue; 3317 3318 seg->sec_idxs = calloc(seg->sec_cnt, sizeof(*seg->sec_idxs)); 3319 if (!seg->sec_idxs) { 3320 err = -ENOMEM; 3321 goto out; 3322 } 3323 3324 sec_num = 0; 3325 for_each_btf_ext_sec(seg, sec) { 3326 /* preventively increment index to avoid doing 3327 * this before every continue below 3328 */ 3329 sec_num++; 3330 3331 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); 3332 if (str_is_empty(sec_name)) 3333 continue; 3334 scn = elf_sec_by_name(obj, sec_name); 3335 if (!scn) 3336 continue; 3337 3338 seg->sec_idxs[sec_num - 1] = elf_ndxscn(scn); 3339 } 3340 } 3341 } 3342 out: 3343 if (err && libbpf_needs_btf(obj)) { 3344 pr_warn("BTF is required, but is missing or corrupted.\n"); 3345 return err; 3346 } 3347 return 0; 3348 } 3349 3350 static int compare_vsi_off(const void *_a, const void *_b) 3351 { 3352 const struct btf_var_secinfo *a = _a; 3353 const struct btf_var_secinfo *b = _b; 3354 3355 return a->offset - b->offset; 3356 } 3357 3358 static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf, 3359 struct btf_type *t) 3360 { 3361 __u32 size = 0, i, vars = btf_vlen(t); 3362 const char *sec_name = btf__name_by_offset(btf, t->name_off); 3363 struct btf_var_secinfo *vsi; 3364 bool fixup_offsets = false; 3365 int err; 3366 3367 if (!sec_name) { 3368 pr_debug("No name found in string section for DATASEC kind.\n"); 3369 return -ENOENT; 3370 } 3371 3372 /* Extern-backing datasecs (.ksyms, .kconfig) have their size and 3373 * variable offsets set at the previous step. Further, not every 3374 * extern BTF VAR has corresponding ELF symbol preserved, so we skip 3375 * all fixups altogether for such sections and go straight to sorting 3376 * VARs within their DATASEC. 3377 */ 3378 if (strcmp(sec_name, KCONFIG_SEC) == 0 || strcmp(sec_name, KSYMS_SEC) == 0) 3379 goto sort_vars; 3380 3381 /* Clang leaves DATASEC size and VAR offsets as zeroes, so we need to 3382 * fix this up. But BPF static linker already fixes this up and fills 3383 * all the sizes and offsets during static linking. So this step has 3384 * to be optional. But the STV_HIDDEN handling is non-optional for any 3385 * non-extern DATASEC, so the variable fixup loop below handles both 3386 * functions at the same time, paying the cost of BTF VAR <-> ELF 3387 * symbol matching just once. 
3388 */ 3389 if (t->size == 0) { 3390 err = find_elf_sec_sz(obj, sec_name, &size); 3391 if (err || !size) { 3392 pr_debug("sec '%s': failed to determine size from ELF: size %u, err %s\n", 3393 sec_name, size, errstr(err)); 3394 return -ENOENT; 3395 } 3396 3397 t->size = size; 3398 fixup_offsets = true; 3399 } 3400 3401 for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) { 3402 const struct btf_type *t_var; 3403 struct btf_var *var; 3404 const char *var_name; 3405 Elf64_Sym *sym; 3406 3407 t_var = btf__type_by_id(btf, vsi->type); 3408 if (!t_var || !btf_is_var(t_var)) { 3409 pr_debug("sec '%s': unexpected non-VAR type found\n", sec_name); 3410 return -EINVAL; 3411 } 3412 3413 var = btf_var(t_var); 3414 if (var->linkage == BTF_VAR_STATIC || var->linkage == BTF_VAR_GLOBAL_EXTERN) 3415 continue; 3416 3417 var_name = btf__name_by_offset(btf, t_var->name_off); 3418 if (!var_name) { 3419 pr_debug("sec '%s': failed to find name of DATASEC's member #%d\n", 3420 sec_name, i); 3421 return -ENOENT; 3422 } 3423 3424 sym = find_elf_var_sym(obj, var_name); 3425 if (IS_ERR(sym)) { 3426 pr_debug("sec '%s': failed to find ELF symbol for VAR '%s'\n", 3427 sec_name, var_name); 3428 return -ENOENT; 3429 } 3430 3431 if (fixup_offsets) 3432 vsi->offset = sym->st_value; 3433 3434 /* if variable is a global/weak symbol, but has restricted 3435 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF VAR 3436 * as static. This follows similar logic for functions (BPF 3437 * subprogs) and influences libbpf's further decisions about 3438 * whether to make global data BPF array maps as 3439 * BPF_F_MMAPABLE. 3440 */ 3441 if (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN 3442 || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL) 3443 var->linkage = BTF_VAR_STATIC; 3444 } 3445 3446 sort_vars: 3447 qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off); 3448 return 0; 3449 } 3450 3451 static int bpf_object_fixup_btf(struct bpf_object *obj) 3452 { 3453 int i, n, err = 0; 3454 3455 if (!obj->btf) 3456 return 0; 3457 3458 n = btf__type_cnt(obj->btf); 3459 for (i = 1; i < n; i++) { 3460 struct btf_type *t = btf_type_by_id(obj->btf, i); 3461 3462 /* Loader needs to fix up some of the things compiler 3463 * couldn't get its hands on while emitting BTF. This 3464 * is section size and global variable offset. We use 3465 * the info from the ELF itself for this purpose. 
3466 */ 3467 if (btf_is_datasec(t)) { 3468 err = btf_fixup_datasec(obj, obj->btf, t); 3469 if (err) 3470 return err; 3471 } 3472 } 3473 3474 return 0; 3475 } 3476 3477 static bool prog_needs_vmlinux_btf(struct bpf_program *prog) 3478 { 3479 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS || 3480 prog->type == BPF_PROG_TYPE_LSM) 3481 return true; 3482 3483 /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs 3484 * also need vmlinux BTF 3485 */ 3486 if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd) 3487 return true; 3488 3489 return false; 3490 } 3491 3492 static bool map_needs_vmlinux_btf(struct bpf_map *map) 3493 { 3494 return bpf_map__is_struct_ops(map); 3495 } 3496 3497 static bool obj_needs_vmlinux_btf(const struct bpf_object *obj) 3498 { 3499 struct bpf_program *prog; 3500 struct bpf_map *map; 3501 int i; 3502 3503 /* CO-RE relocations need kernel BTF, only when btf_custom_path 3504 * is not specified 3505 */ 3506 if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path) 3507 return true; 3508 3509 /* Support for typed ksyms needs kernel BTF */ 3510 for (i = 0; i < obj->nr_extern; i++) { 3511 const struct extern_desc *ext; 3512 3513 ext = &obj->externs[i]; 3514 if (ext->type == EXT_KSYM && ext->ksym.type_id) 3515 return true; 3516 } 3517 3518 bpf_object__for_each_program(prog, obj) { 3519 if (!prog->autoload) 3520 continue; 3521 if (prog_needs_vmlinux_btf(prog)) 3522 return true; 3523 } 3524 3525 bpf_object__for_each_map(map, obj) { 3526 if (map_needs_vmlinux_btf(map)) 3527 return true; 3528 } 3529 3530 return false; 3531 } 3532 3533 static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force) 3534 { 3535 int err; 3536 3537 /* btf_vmlinux could be loaded earlier */ 3538 if (obj->btf_vmlinux || obj->gen_loader) 3539 return 0; 3540 3541 if (!force && !obj_needs_vmlinux_btf(obj)) 3542 return 0; 3543 3544 obj->btf_vmlinux = btf__load_vmlinux_btf(); 3545 err = libbpf_get_error(obj->btf_vmlinux); 3546 if (err) { 3547 pr_warn("Error loading vmlinux BTF: %s\n", errstr(err)); 3548 obj->btf_vmlinux = NULL; 3549 return err; 3550 } 3551 return 0; 3552 } 3553 3554 static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) 3555 { 3556 struct btf *kern_btf = obj->btf; 3557 bool btf_mandatory, sanitize; 3558 int i, err = 0; 3559 3560 if (!obj->btf) 3561 return 0; 3562 3563 if (!kernel_supports(obj, FEAT_BTF)) { 3564 if (kernel_needs_btf(obj)) { 3565 err = -EOPNOTSUPP; 3566 goto report; 3567 } 3568 pr_debug("Kernel doesn't support BTF, skipping uploading it.\n"); 3569 return 0; 3570 } 3571 3572 /* Even though some subprogs are global/weak, user might prefer more 3573 * permissive BPF verification process that BPF verifier performs for 3574 * static functions, taking into account more context from the caller 3575 * functions. In such case, they need to mark such subprogs with 3576 * __attribute__((visibility("hidden"))) and libbpf will adjust 3577 * corresponding FUNC BTF type to be marked as static and trigger more 3578 * involved BPF verification process. 
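 * As an illustration (a sketch with made-up names, not code from this
 * file), such a subprog could be declared roughly as:
 *
 *   __hidden int my_subprog(struct my_ctx *ctx) { ... }
 *
 * where __hidden is the bpf_helpers.h convenience macro expanding to
 * __attribute__((visibility("hidden"))); the loop below then rewrites the
 * matching FUNC BTF type's linkage to static.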
3579 */ 3580 for (i = 0; i < obj->nr_programs; i++) { 3581 struct bpf_program *prog = &obj->programs[i]; 3582 struct btf_type *t; 3583 const char *name; 3584 int j, n; 3585 3586 if (!prog->mark_btf_static || !prog_is_subprog(obj, prog)) 3587 continue; 3588 3589 n = btf__type_cnt(obj->btf); 3590 for (j = 1; j < n; j++) { 3591 t = btf_type_by_id(obj->btf, j); 3592 if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL) 3593 continue; 3594 3595 name = btf__str_by_offset(obj->btf, t->name_off); 3596 if (strcmp(name, prog->name) != 0) 3597 continue; 3598 3599 t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0); 3600 break; 3601 } 3602 } 3603 3604 sanitize = btf_needs_sanitization(obj); 3605 if (sanitize) { 3606 const void *raw_data; 3607 __u32 sz; 3608 3609 /* clone BTF to sanitize a copy and leave the original intact */ 3610 raw_data = btf__raw_data(obj->btf, &sz); 3611 kern_btf = btf__new(raw_data, sz); 3612 err = libbpf_get_error(kern_btf); 3613 if (err) 3614 return err; 3615 3616 /* enforce 8-byte pointers for BPF-targeted BTFs */ 3617 btf__set_pointer_size(obj->btf, 8); 3618 err = bpf_object__sanitize_btf(obj, kern_btf); 3619 if (err) 3620 return err; 3621 } 3622 3623 if (obj->gen_loader) { 3624 __u32 raw_size = 0; 3625 const void *raw_data = btf__raw_data(kern_btf, &raw_size); 3626 3627 if (!raw_data) 3628 return -ENOMEM; 3629 bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size); 3630 /* Pretend to have valid FD to pass various fd >= 0 checks. 3631 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually. 3632 */ 3633 btf__set_fd(kern_btf, 0); 3634 } else { 3635 /* currently BPF_BTF_LOAD only supports log_level 1 */ 3636 err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size, 3637 obj->log_level ? 1 : 0, obj->token_fd); 3638 } 3639 if (sanitize) { 3640 if (!err) { 3641 /* move fd to libbpf's BTF */ 3642 btf__set_fd(obj->btf, btf__fd(kern_btf)); 3643 btf__set_fd(kern_btf, -1); 3644 } 3645 btf__free(kern_btf); 3646 } 3647 report: 3648 if (err) { 3649 btf_mandatory = kernel_needs_btf(obj); 3650 if (btf_mandatory) { 3651 pr_warn("Error loading .BTF into kernel: %s. BTF is mandatory, can't proceed.\n", 3652 errstr(err)); 3653 } else { 3654 pr_info("Error loading .BTF into kernel: %s. 
BTF is optional, ignoring.\n", 3655 errstr(err)); 3656 err = 0; 3657 } 3658 } 3659 return err; 3660 } 3661 3662 static const char *elf_sym_str(const struct bpf_object *obj, size_t off) 3663 { 3664 const char *name; 3665 3666 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off); 3667 if (!name) { 3668 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n", 3669 off, obj->path, elf_errmsg(-1)); 3670 return NULL; 3671 } 3672 3673 return name; 3674 } 3675 3676 static const char *elf_sec_str(const struct bpf_object *obj, size_t off) 3677 { 3678 const char *name; 3679 3680 name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off); 3681 if (!name) { 3682 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n", 3683 off, obj->path, elf_errmsg(-1)); 3684 return NULL; 3685 } 3686 3687 return name; 3688 } 3689 3690 static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx) 3691 { 3692 Elf_Scn *scn; 3693 3694 scn = elf_getscn(obj->efile.elf, idx); 3695 if (!scn) { 3696 pr_warn("elf: failed to get section(%zu) from %s: %s\n", 3697 idx, obj->path, elf_errmsg(-1)); 3698 return NULL; 3699 } 3700 return scn; 3701 } 3702 3703 static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name) 3704 { 3705 Elf_Scn *scn = NULL; 3706 Elf *elf = obj->efile.elf; 3707 const char *sec_name; 3708 3709 while ((scn = elf_nextscn(elf, scn)) != NULL) { 3710 sec_name = elf_sec_name(obj, scn); 3711 if (!sec_name) 3712 return NULL; 3713 3714 if (strcmp(sec_name, name) != 0) 3715 continue; 3716 3717 return scn; 3718 } 3719 return NULL; 3720 } 3721 3722 static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn) 3723 { 3724 Elf64_Shdr *shdr; 3725 3726 if (!scn) 3727 return NULL; 3728 3729 shdr = elf64_getshdr(scn); 3730 if (!shdr) { 3731 pr_warn("elf: failed to get section(%zu) header from %s: %s\n", 3732 elf_ndxscn(scn), obj->path, elf_errmsg(-1)); 3733 return NULL; 3734 } 3735 3736 return shdr; 3737 } 3738 3739 static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn) 3740 { 3741 const char *name; 3742 Elf64_Shdr *sh; 3743 3744 if (!scn) 3745 return NULL; 3746 3747 sh = elf_sec_hdr(obj, scn); 3748 if (!sh) 3749 return NULL; 3750 3751 name = elf_sec_str(obj, sh->sh_name); 3752 if (!name) { 3753 pr_warn("elf: failed to get section(%zu) name from %s: %s\n", 3754 elf_ndxscn(scn), obj->path, elf_errmsg(-1)); 3755 return NULL; 3756 } 3757 3758 return name; 3759 } 3760 3761 static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn) 3762 { 3763 Elf_Data *data; 3764 3765 if (!scn) 3766 return NULL; 3767 3768 data = elf_getdata(scn, 0); 3769 if (!data) { 3770 pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n", 3771 elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>", 3772 obj->path, elf_errmsg(-1)); 3773 return NULL; 3774 } 3775 3776 return data; 3777 } 3778 3779 static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx) 3780 { 3781 if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym)) 3782 return NULL; 3783 3784 return (Elf64_Sym *)obj->efile.symbols->d_buf + idx; 3785 } 3786 3787 static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx) 3788 { 3789 if (idx >= data->d_size / sizeof(Elf64_Rel)) 3790 return NULL; 3791 3792 return (Elf64_Rel *)data->d_buf + idx; 3793 } 3794 3795 static bool is_sec_name_dwarf(const char *name) 3796 { 3797 /* approximation, but the actual list is too long */ 3798 return str_has_pfx(name, ".debug_"); 3799 } 3800 3801 static bool 
ignore_elf_section(Elf64_Shdr *hdr, const char *name) 3802 { 3803 /* no special handling of .strtab */ 3804 if (hdr->sh_type == SHT_STRTAB) 3805 return true; 3806 3807 /* ignore .llvm_addrsig section as well */ 3808 if (hdr->sh_type == SHT_LLVM_ADDRSIG) 3809 return true; 3810 3811 /* no subprograms will lead to an empty .text section, ignore it */ 3812 if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 && 3813 strcmp(name, ".text") == 0) 3814 return true; 3815 3816 /* DWARF sections */ 3817 if (is_sec_name_dwarf(name)) 3818 return true; 3819 3820 if (str_has_pfx(name, ".rel")) { 3821 name += sizeof(".rel") - 1; 3822 /* DWARF section relocations */ 3823 if (is_sec_name_dwarf(name)) 3824 return true; 3825 3826 /* .BTF and .BTF.ext don't need relocations */ 3827 if (strcmp(name, BTF_ELF_SEC) == 0 || 3828 strcmp(name, BTF_EXT_ELF_SEC) == 0) 3829 return true; 3830 } 3831 3832 return false; 3833 } 3834 3835 static int cmp_progs(const void *_a, const void *_b) 3836 { 3837 const struct bpf_program *a = _a; 3838 const struct bpf_program *b = _b; 3839 3840 if (a->sec_idx != b->sec_idx) 3841 return a->sec_idx < b->sec_idx ? -1 : 1; 3842 3843 /* sec_insn_off can't be the same within the section */ 3844 return a->sec_insn_off < b->sec_insn_off ? -1 : 1; 3845 } 3846 3847 static int bpf_object__elf_collect(struct bpf_object *obj) 3848 { 3849 struct elf_sec_desc *sec_desc; 3850 Elf *elf = obj->efile.elf; 3851 Elf_Data *btf_ext_data = NULL; 3852 Elf_Data *btf_data = NULL; 3853 int idx = 0, err = 0; 3854 const char *name; 3855 Elf_Data *data; 3856 Elf_Scn *scn; 3857 Elf64_Shdr *sh; 3858 3859 /* ELF section indices are 0-based, but sec #0 is special "invalid" 3860 * section. Since section count retrieved by elf_getshdrnum() does 3861 * include sec #0, it is already the necessary size of an array to keep 3862 * all the sections. 
3863 */ 3864 if (elf_getshdrnum(obj->efile.elf, &obj->efile.sec_cnt)) { 3865 pr_warn("elf: failed to get the number of sections for %s: %s\n", 3866 obj->path, elf_errmsg(-1)); 3867 return -LIBBPF_ERRNO__FORMAT; 3868 } 3869 obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs)); 3870 if (!obj->efile.secs) 3871 return -ENOMEM; 3872 3873 /* a bunch of ELF parsing functionality depends on processing symbols, 3874 * so do the first pass and find the symbol table 3875 */ 3876 scn = NULL; 3877 while ((scn = elf_nextscn(elf, scn)) != NULL) { 3878 sh = elf_sec_hdr(obj, scn); 3879 if (!sh) 3880 return -LIBBPF_ERRNO__FORMAT; 3881 3882 if (sh->sh_type == SHT_SYMTAB) { 3883 if (obj->efile.symbols) { 3884 pr_warn("elf: multiple symbol tables in %s\n", obj->path); 3885 return -LIBBPF_ERRNO__FORMAT; 3886 } 3887 3888 data = elf_sec_data(obj, scn); 3889 if (!data) 3890 return -LIBBPF_ERRNO__FORMAT; 3891 3892 idx = elf_ndxscn(scn); 3893 3894 obj->efile.symbols = data; 3895 obj->efile.symbols_shndx = idx; 3896 obj->efile.strtabidx = sh->sh_link; 3897 } 3898 } 3899 3900 if (!obj->efile.symbols) { 3901 pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n", 3902 obj->path); 3903 return -ENOENT; 3904 } 3905 3906 scn = NULL; 3907 while ((scn = elf_nextscn(elf, scn)) != NULL) { 3908 idx = elf_ndxscn(scn); 3909 sec_desc = &obj->efile.secs[idx]; 3910 3911 sh = elf_sec_hdr(obj, scn); 3912 if (!sh) 3913 return -LIBBPF_ERRNO__FORMAT; 3914 3915 name = elf_sec_str(obj, sh->sh_name); 3916 if (!name) 3917 return -LIBBPF_ERRNO__FORMAT; 3918 3919 if (ignore_elf_section(sh, name)) 3920 continue; 3921 3922 data = elf_sec_data(obj, scn); 3923 if (!data) 3924 return -LIBBPF_ERRNO__FORMAT; 3925 3926 pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n", 3927 idx, name, (unsigned long)data->d_size, 3928 (int)sh->sh_link, (unsigned long)sh->sh_flags, 3929 (int)sh->sh_type); 3930 3931 if (strcmp(name, "license") == 0) { 3932 err = bpf_object__init_license(obj, data->d_buf, data->d_size); 3933 if (err) 3934 return err; 3935 } else if (strcmp(name, "version") == 0) { 3936 err = bpf_object__init_kversion(obj, data->d_buf, data->d_size); 3937 if (err) 3938 return err; 3939 } else if (strcmp(name, "maps") == 0) { 3940 pr_warn("elf: legacy map definitions in 'maps' section are not supported by libbpf v1.0+\n"); 3941 return -ENOTSUP; 3942 } else if (strcmp(name, MAPS_ELF_SEC) == 0) { 3943 obj->efile.btf_maps_shndx = idx; 3944 } else if (strcmp(name, BTF_ELF_SEC) == 0) { 3945 if (sh->sh_type != SHT_PROGBITS) 3946 return -LIBBPF_ERRNO__FORMAT; 3947 btf_data = data; 3948 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) { 3949 if (sh->sh_type != SHT_PROGBITS) 3950 return -LIBBPF_ERRNO__FORMAT; 3951 btf_ext_data = data; 3952 } else if (sh->sh_type == SHT_SYMTAB) { 3953 /* already processed during the first pass above */ 3954 } else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) { 3955 if (sh->sh_flags & SHF_EXECINSTR) { 3956 if (strcmp(name, ".text") == 0) 3957 obj->efile.text_shndx = idx; 3958 err = bpf_object__add_programs(obj, data, name, idx); 3959 if (err) 3960 return err; 3961 } else if (strcmp(name, DATA_SEC) == 0 || 3962 str_has_pfx(name, DATA_SEC ".")) { 3963 sec_desc->sec_type = SEC_DATA; 3964 sec_desc->shdr = sh; 3965 sec_desc->data = data; 3966 } else if (strcmp(name, RODATA_SEC) == 0 || 3967 str_has_pfx(name, RODATA_SEC ".")) { 3968 sec_desc->sec_type = SEC_RODATA; 3969 sec_desc->shdr = sh; 3970 sec_desc->data = data; 3971 } else if (strcmp(name, STRUCT_OPS_SEC) == 0 || 3972 
strcmp(name, STRUCT_OPS_LINK_SEC) == 0 || 3973 strcmp(name, "?" STRUCT_OPS_SEC) == 0 || 3974 strcmp(name, "?" STRUCT_OPS_LINK_SEC) == 0) { 3975 sec_desc->sec_type = SEC_ST_OPS; 3976 sec_desc->shdr = sh; 3977 sec_desc->data = data; 3978 obj->efile.has_st_ops = true; 3979 } else if (strcmp(name, ARENA_SEC) == 0) { 3980 obj->efile.arena_data = data; 3981 obj->efile.arena_data_shndx = idx; 3982 } else if (strcmp(name, JUMPTABLES_SEC) == 0) { 3983 obj->jumptables_data = malloc(data->d_size); 3984 if (!obj->jumptables_data) 3985 return -ENOMEM; 3986 memcpy(obj->jumptables_data, data->d_buf, data->d_size); 3987 obj->jumptables_data_sz = data->d_size; 3988 obj->efile.jumptables_data_shndx = idx; 3989 } else { 3990 pr_info("elf: skipping unrecognized data section(%d) %s\n", 3991 idx, name); 3992 } 3993 } else if (sh->sh_type == SHT_REL) { 3994 int targ_sec_idx = sh->sh_info; /* points to other section */ 3995 3996 if (sh->sh_entsize != sizeof(Elf64_Rel) || 3997 targ_sec_idx >= obj->efile.sec_cnt) 3998 return -LIBBPF_ERRNO__FORMAT; 3999 4000 /* Only do relo for section with exec instructions */ 4001 if (!section_have_execinstr(obj, targ_sec_idx) && 4002 strcmp(name, ".rel" STRUCT_OPS_SEC) && 4003 strcmp(name, ".rel" STRUCT_OPS_LINK_SEC) && 4004 strcmp(name, ".rel?" STRUCT_OPS_SEC) && 4005 strcmp(name, ".rel?" STRUCT_OPS_LINK_SEC) && 4006 strcmp(name, ".rel" MAPS_ELF_SEC)) { 4007 pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n", 4008 idx, name, targ_sec_idx, 4009 elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: "<?>"); 4010 continue; 4011 } 4012 4013 sec_desc->sec_type = SEC_RELO; 4014 sec_desc->shdr = sh; 4015 sec_desc->data = data; 4016 } else if (sh->sh_type == SHT_NOBITS && (strcmp(name, BSS_SEC) == 0 || 4017 str_has_pfx(name, BSS_SEC "."))) { 4018 sec_desc->sec_type = SEC_BSS; 4019 sec_desc->shdr = sh; 4020 sec_desc->data = data; 4021 } else { 4022 pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name, 4023 (size_t)sh->sh_size); 4024 } 4025 } 4026 4027 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) { 4028 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path); 4029 return -LIBBPF_ERRNO__FORMAT; 4030 } 4031 4032 /* change BPF program insns to native endianness for introspection */ 4033 if (!is_native_endianness(obj)) 4034 bpf_object_bswap_progs(obj); 4035 4036 /* sort BPF programs by section name and in-section instruction offset 4037 * for faster search 4038 */ 4039 if (obj->nr_programs) 4040 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs); 4041 4042 return bpf_object__init_btf(obj, btf_data, btf_ext_data); 4043 } 4044 4045 static bool sym_is_extern(const Elf64_Sym *sym) 4046 { 4047 int bind = ELF64_ST_BIND(sym->st_info); 4048 /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */ 4049 return sym->st_shndx == SHN_UNDEF && 4050 (bind == STB_GLOBAL || bind == STB_WEAK) && 4051 ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE; 4052 } 4053 4054 static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx) 4055 { 4056 int bind = ELF64_ST_BIND(sym->st_info); 4057 int type = ELF64_ST_TYPE(sym->st_info); 4058 4059 /* in .text section */ 4060 if (sym->st_shndx != text_shndx) 4061 return false; 4062 4063 /* local function */ 4064 if (bind == STB_LOCAL && type == STT_SECTION) 4065 return true; 4066 4067 /* global function */ 4068 return (bind == STB_GLOBAL || bind == STB_WEAK) && type == STT_FUNC; 4069 } 4070 4071 static int find_extern_btf_id(const struct btf *btf, const char *ext_name) 4072 { 4073 const 
struct btf_type *t; 4074 const char *tname; 4075 int i, n; 4076 4077 if (!btf) 4078 return -ESRCH; 4079 4080 n = btf__type_cnt(btf); 4081 for (i = 1; i < n; i++) { 4082 t = btf__type_by_id(btf, i); 4083 4084 if (!btf_is_var(t) && !btf_is_func(t)) 4085 continue; 4086 4087 tname = btf__name_by_offset(btf, t->name_off); 4088 if (strcmp(tname, ext_name)) 4089 continue; 4090 4091 if (btf_is_var(t) && 4092 btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN) 4093 return -EINVAL; 4094 4095 if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN) 4096 return -EINVAL; 4097 4098 return i; 4099 } 4100 4101 return -ENOENT; 4102 } 4103 4104 static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id) { 4105 const struct btf_var_secinfo *vs; 4106 const struct btf_type *t; 4107 int i, j, n; 4108 4109 if (!btf) 4110 return -ESRCH; 4111 4112 n = btf__type_cnt(btf); 4113 for (i = 1; i < n; i++) { 4114 t = btf__type_by_id(btf, i); 4115 4116 if (!btf_is_datasec(t)) 4117 continue; 4118 4119 vs = btf_var_secinfos(t); 4120 for (j = 0; j < btf_vlen(t); j++, vs++) { 4121 if (vs->type == ext_btf_id) 4122 return i; 4123 } 4124 } 4125 4126 return -ENOENT; 4127 } 4128 4129 static enum kcfg_type find_kcfg_type(const struct btf *btf, int id, 4130 bool *is_signed) 4131 { 4132 const struct btf_type *t; 4133 const char *name; 4134 4135 t = skip_mods_and_typedefs(btf, id, NULL); 4136 name = btf__name_by_offset(btf, t->name_off); 4137 4138 if (is_signed) 4139 *is_signed = false; 4140 switch (btf_kind(t)) { 4141 case BTF_KIND_INT: { 4142 int enc = btf_int_encoding(t); 4143 4144 if (enc & BTF_INT_BOOL) 4145 return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN; 4146 if (is_signed) 4147 *is_signed = enc & BTF_INT_SIGNED; 4148 if (t->size == 1) 4149 return KCFG_CHAR; 4150 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1))) 4151 return KCFG_UNKNOWN; 4152 return KCFG_INT; 4153 } 4154 case BTF_KIND_ENUM: 4155 if (t->size != 4) 4156 return KCFG_UNKNOWN; 4157 if (strcmp(name, "libbpf_tristate")) 4158 return KCFG_UNKNOWN; 4159 return KCFG_TRISTATE; 4160 case BTF_KIND_ENUM64: 4161 if (strcmp(name, "libbpf_tristate")) 4162 return KCFG_UNKNOWN; 4163 return KCFG_TRISTATE; 4164 case BTF_KIND_ARRAY: 4165 if (btf_array(t)->nelems == 0) 4166 return KCFG_UNKNOWN; 4167 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR) 4168 return KCFG_UNKNOWN; 4169 return KCFG_CHAR_ARR; 4170 default: 4171 return KCFG_UNKNOWN; 4172 } 4173 } 4174 4175 static int cmp_externs(const void *_a, const void *_b) 4176 { 4177 const struct extern_desc *a = _a; 4178 const struct extern_desc *b = _b; 4179 4180 if (a->type != b->type) 4181 return a->type < b->type ? -1 : 1; 4182 4183 if (a->type == EXT_KCFG) { 4184 /* descending order by alignment requirements */ 4185 if (a->kcfg.align != b->kcfg.align) 4186 return a->kcfg.align > b->kcfg.align ? -1 : 1; 4187 /* ascending order by size, within same alignment class */ 4188 if (a->kcfg.sz != b->kcfg.sz) 4189 return a->kcfg.sz < b->kcfg.sz ? 
-1 : 1; 4190 } 4191 4192 /* resolve ties by name */ 4193 return strcmp(a->name, b->name); 4194 } 4195 4196 static int find_int_btf_id(const struct btf *btf) 4197 { 4198 const struct btf_type *t; 4199 int i, n; 4200 4201 n = btf__type_cnt(btf); 4202 for (i = 1; i < n; i++) { 4203 t = btf__type_by_id(btf, i); 4204 4205 if (btf_is_int(t) && btf_int_bits(t) == 32) 4206 return i; 4207 } 4208 4209 return 0; 4210 } 4211 4212 static int add_dummy_ksym_var(struct btf *btf) 4213 { 4214 int i, int_btf_id, sec_btf_id, dummy_var_btf_id; 4215 const struct btf_var_secinfo *vs; 4216 const struct btf_type *sec; 4217 4218 if (!btf) 4219 return 0; 4220 4221 sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC, 4222 BTF_KIND_DATASEC); 4223 if (sec_btf_id < 0) 4224 return 0; 4225 4226 sec = btf__type_by_id(btf, sec_btf_id); 4227 vs = btf_var_secinfos(sec); 4228 for (i = 0; i < btf_vlen(sec); i++, vs++) { 4229 const struct btf_type *vt; 4230 4231 vt = btf__type_by_id(btf, vs->type); 4232 if (btf_is_func(vt)) 4233 break; 4234 } 4235 4236 /* No func in ksyms sec. No need to add dummy var. */ 4237 if (i == btf_vlen(sec)) 4238 return 0; 4239 4240 int_btf_id = find_int_btf_id(btf); 4241 dummy_var_btf_id = btf__add_var(btf, 4242 "dummy_ksym", 4243 BTF_VAR_GLOBAL_ALLOCATED, 4244 int_btf_id); 4245 if (dummy_var_btf_id < 0) 4246 pr_warn("cannot create a dummy_ksym var\n"); 4247 4248 return dummy_var_btf_id; 4249 } 4250 4251 static int bpf_object__collect_externs(struct bpf_object *obj) 4252 { 4253 struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL; 4254 const struct btf_type *t; 4255 struct extern_desc *ext; 4256 int i, n, off, dummy_var_btf_id; 4257 const char *ext_name, *sec_name; 4258 size_t ext_essent_len; 4259 Elf_Scn *scn; 4260 Elf64_Shdr *sh; 4261 4262 if (!obj->efile.symbols) 4263 return 0; 4264 4265 scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx); 4266 sh = elf_sec_hdr(obj, scn); 4267 if (!sh || sh->sh_entsize != sizeof(Elf64_Sym)) 4268 return -LIBBPF_ERRNO__FORMAT; 4269 4270 dummy_var_btf_id = add_dummy_ksym_var(obj->btf); 4271 if (dummy_var_btf_id < 0) 4272 return dummy_var_btf_id; 4273 4274 n = sh->sh_size / sh->sh_entsize; 4275 pr_debug("looking for externs among %d symbols...\n", n); 4276 4277 for (i = 0; i < n; i++) { 4278 Elf64_Sym *sym = elf_sym_by_idx(obj, i); 4279 4280 if (!sym) 4281 return -LIBBPF_ERRNO__FORMAT; 4282 if (!sym_is_extern(sym)) 4283 continue; 4284 ext_name = elf_sym_str(obj, sym->st_name); 4285 if (str_is_empty(ext_name)) 4286 continue; 4287 4288 ext = obj->externs; 4289 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext)); 4290 if (!ext) 4291 return -ENOMEM; 4292 obj->externs = ext; 4293 ext = &ext[obj->nr_extern]; 4294 memset(ext, 0, sizeof(*ext)); 4295 obj->nr_extern++; 4296 4297 ext->btf_id = find_extern_btf_id(obj->btf, ext_name); 4298 if (ext->btf_id <= 0) { 4299 pr_warn("failed to find BTF for extern '%s': %d\n", 4300 ext_name, ext->btf_id); 4301 return ext->btf_id; 4302 } 4303 t = btf__type_by_id(obj->btf, ext->btf_id); 4304 ext->name = strdup(btf__name_by_offset(obj->btf, t->name_off)); 4305 if (!ext->name) 4306 return -ENOMEM; 4307 ext->sym_idx = i; 4308 ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK; 4309 4310 ext_essent_len = bpf_core_essential_name_len(ext->name); 4311 ext->essent_name = NULL; 4312 if (ext_essent_len != strlen(ext->name)) { 4313 ext->essent_name = strndup(ext->name, ext_essent_len); 4314 if (!ext->essent_name) 4315 return -ENOMEM; 4316 } 4317 4318 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id); 4319 if 
(ext->sec_btf_id <= 0) { 4320 pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n", 4321 ext_name, ext->btf_id, ext->sec_btf_id); 4322 return ext->sec_btf_id; 4323 } 4324 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id); 4325 sec_name = btf__name_by_offset(obj->btf, sec->name_off); 4326 4327 if (strcmp(sec_name, KCONFIG_SEC) == 0) { 4328 if (btf_is_func(t)) { 4329 pr_warn("extern function %s is unsupported under %s section\n", 4330 ext->name, KCONFIG_SEC); 4331 return -ENOTSUP; 4332 } 4333 kcfg_sec = sec; 4334 ext->type = EXT_KCFG; 4335 ext->kcfg.sz = btf__resolve_size(obj->btf, t->type); 4336 if (ext->kcfg.sz <= 0) { 4337 pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n", 4338 ext_name, ext->kcfg.sz); 4339 return ext->kcfg.sz; 4340 } 4341 ext->kcfg.align = btf__align_of(obj->btf, t->type); 4342 if (ext->kcfg.align <= 0) { 4343 pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n", 4344 ext_name, ext->kcfg.align); 4345 return -EINVAL; 4346 } 4347 ext->kcfg.type = find_kcfg_type(obj->btf, t->type, 4348 &ext->kcfg.is_signed); 4349 if (ext->kcfg.type == KCFG_UNKNOWN) { 4350 pr_warn("extern (kcfg) '%s': type is unsupported\n", ext_name); 4351 return -ENOTSUP; 4352 } 4353 } else if (strcmp(sec_name, KSYMS_SEC) == 0) { 4354 ksym_sec = sec; 4355 ext->type = EXT_KSYM; 4356 skip_mods_and_typedefs(obj->btf, t->type, 4357 &ext->ksym.type_id); 4358 } else { 4359 pr_warn("unrecognized extern section '%s'\n", sec_name); 4360 return -ENOTSUP; 4361 } 4362 } 4363 pr_debug("collected %d externs total\n", obj->nr_extern); 4364 4365 if (!obj->nr_extern) 4366 return 0; 4367 4368 /* sort externs by type, for kcfg ones also by (align, size, name) */ 4369 qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs); 4370 4371 /* for .ksyms section, we need to turn all externs into allocated 4372 * variables in BTF to pass kernel verification; we do this by 4373 * pretending that each extern is a 8-byte variable 4374 */ 4375 if (ksym_sec) { 4376 /* find existing 4-byte integer type in BTF to use for fake 4377 * extern variables in DATASEC 4378 */ 4379 int int_btf_id = find_int_btf_id(obj->btf); 4380 /* For extern function, a dummy_var added earlier 4381 * will be used to replace the vs->type and 4382 * its name string will be used to refill 4383 * the missing param's name. 4384 */ 4385 const struct btf_type *dummy_var; 4386 4387 dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id); 4388 for (i = 0; i < obj->nr_extern; i++) { 4389 ext = &obj->externs[i]; 4390 if (ext->type != EXT_KSYM) 4391 continue; 4392 pr_debug("extern (ksym) #%d: symbol %d, name %s\n", 4393 i, ext->sym_idx, ext->name); 4394 } 4395 4396 sec = ksym_sec; 4397 n = btf_vlen(sec); 4398 for (i = 0, off = 0; i < n; i++, off += sizeof(int)) { 4399 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i; 4400 struct btf_type *vt; 4401 4402 vt = (void *)btf__type_by_id(obj->btf, vs->type); 4403 ext_name = btf__name_by_offset(obj->btf, vt->name_off); 4404 ext = find_extern_by_name(obj, ext_name); 4405 if (!ext) { 4406 pr_warn("failed to find extern definition for BTF %s '%s'\n", 4407 btf_kind_str(vt), ext_name); 4408 return -ESRCH; 4409 } 4410 if (btf_is_func(vt)) { 4411 const struct btf_type *func_proto; 4412 struct btf_param *param; 4413 int j; 4414 4415 func_proto = btf__type_by_id(obj->btf, 4416 vt->type); 4417 param = btf_params(func_proto); 4418 /* Reuse the dummy_var string if the 4419 * func proto does not have param name. 
4420 */ 4421 for (j = 0; j < btf_vlen(func_proto); j++) 4422 if (param[j].type && !param[j].name_off) 4423 param[j].name_off = 4424 dummy_var->name_off; 4425 vs->type = dummy_var_btf_id; 4426 vt->info &= ~0xffff; 4427 vt->info |= BTF_FUNC_GLOBAL; 4428 } else { 4429 btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED; 4430 vt->type = int_btf_id; 4431 } 4432 vs->offset = off; 4433 vs->size = sizeof(int); 4434 } 4435 sec->size = off; 4436 } 4437 4438 if (kcfg_sec) { 4439 sec = kcfg_sec; 4440 /* for kcfg externs calculate their offsets within a .kconfig map */ 4441 off = 0; 4442 for (i = 0; i < obj->nr_extern; i++) { 4443 ext = &obj->externs[i]; 4444 if (ext->type != EXT_KCFG) 4445 continue; 4446 4447 ext->kcfg.data_off = roundup(off, ext->kcfg.align); 4448 off = ext->kcfg.data_off + ext->kcfg.sz; 4449 pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n", 4450 i, ext->sym_idx, ext->kcfg.data_off, ext->name); 4451 } 4452 sec->size = off; 4453 n = btf_vlen(sec); 4454 for (i = 0; i < n; i++) { 4455 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i; 4456 4457 t = btf__type_by_id(obj->btf, vs->type); 4458 ext_name = btf__name_by_offset(obj->btf, t->name_off); 4459 ext = find_extern_by_name(obj, ext_name); 4460 if (!ext) { 4461 pr_warn("failed to find extern definition for BTF var '%s'\n", 4462 ext_name); 4463 return -ESRCH; 4464 } 4465 btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED; 4466 vs->offset = ext->kcfg.data_off; 4467 } 4468 } 4469 return 0; 4470 } 4471 4472 static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog) 4473 { 4474 return prog->sec_idx == obj->efile.text_shndx; 4475 } 4476 4477 struct bpf_program * 4478 bpf_object__find_program_by_name(const struct bpf_object *obj, 4479 const char *name) 4480 { 4481 struct bpf_program *prog; 4482 4483 bpf_object__for_each_program(prog, obj) { 4484 if (prog_is_subprog(obj, prog)) 4485 continue; 4486 if (!strcmp(prog->name, name)) 4487 return prog; 4488 } 4489 return errno = ENOENT, NULL; 4490 } 4491 4492 static bool bpf_object__shndx_is_data(const struct bpf_object *obj, 4493 int shndx) 4494 { 4495 switch (obj->efile.secs[shndx].sec_type) { 4496 case SEC_BSS: 4497 case SEC_DATA: 4498 case SEC_RODATA: 4499 return true; 4500 default: 4501 return false; 4502 } 4503 } 4504 4505 static bool bpf_object__shndx_is_maps(const struct bpf_object *obj, 4506 int shndx) 4507 { 4508 return shndx == obj->efile.btf_maps_shndx; 4509 } 4510 4511 static enum libbpf_map_type 4512 bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx) 4513 { 4514 if (shndx == obj->efile.symbols_shndx) 4515 return LIBBPF_MAP_KCONFIG; 4516 4517 switch (obj->efile.secs[shndx].sec_type) { 4518 case SEC_BSS: 4519 return LIBBPF_MAP_BSS; 4520 case SEC_DATA: 4521 return LIBBPF_MAP_DATA; 4522 case SEC_RODATA: 4523 return LIBBPF_MAP_RODATA; 4524 default: 4525 return LIBBPF_MAP_UNSPEC; 4526 } 4527 } 4528 4529 static int bpf_prog_compute_hash(struct bpf_program *prog) 4530 { 4531 struct bpf_insn *purged; 4532 int i, err = 0; 4533 4534 purged = calloc(prog->insns_cnt, BPF_INSN_SZ); 4535 if (!purged) 4536 return -ENOMEM; 4537 4538 /* If relocations have been done, the map_fd needs to be 4539 * discarded for the digest calculation. 
4540 */ 4541 for (i = 0; i < prog->insns_cnt; i++) { 4542 purged[i] = prog->insns[i]; 4543 if (purged[i].code == (BPF_LD | BPF_IMM | BPF_DW) && 4544 (purged[i].src_reg == BPF_PSEUDO_MAP_FD || 4545 purged[i].src_reg == BPF_PSEUDO_MAP_VALUE)) { 4546 purged[i].imm = 0; 4547 i++; 4548 if (i >= prog->insns_cnt || 4549 prog->insns[i].code != 0 || 4550 prog->insns[i].dst_reg != 0 || 4551 prog->insns[i].src_reg != 0 || 4552 prog->insns[i].off != 0) { 4553 err = -EINVAL; 4554 goto out; 4555 } 4556 purged[i] = prog->insns[i]; 4557 purged[i].imm = 0; 4558 } 4559 } 4560 libbpf_sha256(purged, prog->insns_cnt * sizeof(struct bpf_insn), 4561 prog->hash); 4562 out: 4563 free(purged); 4564 return err; 4565 } 4566 4567 static int bpf_program__record_reloc(struct bpf_program *prog, 4568 struct reloc_desc *reloc_desc, 4569 __u32 insn_idx, const char *sym_name, 4570 const Elf64_Sym *sym, const Elf64_Rel *rel) 4571 { 4572 struct bpf_insn *insn = &prog->insns[insn_idx]; 4573 size_t map_idx, nr_maps = prog->obj->nr_maps; 4574 struct bpf_object *obj = prog->obj; 4575 __u32 shdr_idx = sym->st_shndx; 4576 enum libbpf_map_type type; 4577 const char *sym_sec_name; 4578 struct bpf_map *map; 4579 4580 if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) { 4581 pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n", 4582 prog->name, sym_name, insn_idx, insn->code); 4583 return -LIBBPF_ERRNO__RELOC; 4584 } 4585 4586 if (sym_is_extern(sym)) { 4587 int sym_idx = ELF64_R_SYM(rel->r_info); 4588 int i, n = obj->nr_extern; 4589 struct extern_desc *ext; 4590 4591 for (i = 0; i < n; i++) { 4592 ext = &obj->externs[i]; 4593 if (ext->sym_idx == sym_idx) 4594 break; 4595 } 4596 if (i >= n) { 4597 pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n", 4598 prog->name, sym_name, sym_idx); 4599 return -LIBBPF_ERRNO__RELOC; 4600 } 4601 pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n", 4602 prog->name, i, ext->name, ext->sym_idx, insn_idx); 4603 if (insn->code == (BPF_JMP | BPF_CALL)) 4604 reloc_desc->type = RELO_EXTERN_CALL; 4605 else 4606 reloc_desc->type = RELO_EXTERN_LD64; 4607 reloc_desc->insn_idx = insn_idx; 4608 reloc_desc->ext_idx = i; 4609 return 0; 4610 } 4611 4612 /* sub-program call relocation */ 4613 if (is_call_insn(insn)) { 4614 if (insn->src_reg != BPF_PSEUDO_CALL) { 4615 pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name); 4616 return -LIBBPF_ERRNO__RELOC; 4617 } 4618 /* text_shndx can be 0, if no default "main" program exists */ 4619 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) { 4620 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx)); 4621 pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n", 4622 prog->name, sym_name, sym_sec_name); 4623 return -LIBBPF_ERRNO__RELOC; 4624 } 4625 if (sym->st_value % BPF_INSN_SZ) { 4626 pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n", 4627 prog->name, sym_name, (size_t)sym->st_value); 4628 return -LIBBPF_ERRNO__RELOC; 4629 } 4630 reloc_desc->type = RELO_CALL; 4631 reloc_desc->insn_idx = insn_idx; 4632 reloc_desc->sym_off = sym->st_value; 4633 return 0; 4634 } 4635 4636 if (!shdr_idx || shdr_idx >= SHN_LORESERVE) { 4637 pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n", 4638 prog->name, sym_name, shdr_idx); 4639 return -LIBBPF_ERRNO__RELOC; 4640 } 4641 4642 /* loading subprog addresses */ 4643 if (sym_is_subprog(sym, obj->efile.text_shndx)) { 4644 /* global_func: sym->st_value = offset in the section, insn->imm = 0. 
4645 * local_func: sym->st_value = 0, insn->imm = offset in the section. 4646 */ 4647 if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) { 4648 pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n", 4649 prog->name, sym_name, (size_t)sym->st_value, insn->imm); 4650 return -LIBBPF_ERRNO__RELOC; 4651 } 4652 4653 reloc_desc->type = RELO_SUBPROG_ADDR; 4654 reloc_desc->insn_idx = insn_idx; 4655 reloc_desc->sym_off = sym->st_value; 4656 return 0; 4657 } 4658 4659 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx); 4660 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx)); 4661 4662 /* arena data relocation */ 4663 if (shdr_idx == obj->efile.arena_data_shndx) { 4664 if (obj->arena_map_idx < 0) { 4665 pr_warn("prog '%s': bad arena data relocation at insn %u, no arena maps defined\n", 4666 prog->name, insn_idx); 4667 return -LIBBPF_ERRNO__RELOC; 4668 } 4669 reloc_desc->type = RELO_DATA; 4670 reloc_desc->insn_idx = insn_idx; 4671 reloc_desc->map_idx = obj->arena_map_idx; 4672 reloc_desc->sym_off = sym->st_value + obj->arena_data_off; 4673 4674 map = &obj->maps[obj->arena_map_idx]; 4675 pr_debug("prog '%s': found arena map %d (%s, sec %d, off %zu) for insn %u\n", 4676 prog->name, obj->arena_map_idx, map->name, map->sec_idx, 4677 map->sec_offset, insn_idx); 4678 return 0; 4679 } 4680 4681 /* jump table data relocation */ 4682 if (shdr_idx == obj->efile.jumptables_data_shndx) { 4683 reloc_desc->type = RELO_INSN_ARRAY; 4684 reloc_desc->insn_idx = insn_idx; 4685 reloc_desc->map_idx = -1; 4686 reloc_desc->sym_off = sym->st_value; 4687 reloc_desc->sym_size = sym->st_size; 4688 return 0; 4689 } 4690 4691 /* generic map reference relocation */ 4692 if (type == LIBBPF_MAP_UNSPEC) { 4693 if (!bpf_object__shndx_is_maps(obj, shdr_idx)) { 4694 pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n", 4695 prog->name, sym_name, sym_sec_name); 4696 return -LIBBPF_ERRNO__RELOC; 4697 } 4698 for (map_idx = 0; map_idx < nr_maps; map_idx++) { 4699 map = &obj->maps[map_idx]; 4700 if (map->libbpf_type != type || 4701 map->sec_idx != sym->st_shndx || 4702 map->sec_offset != sym->st_value) 4703 continue; 4704 pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n", 4705 prog->name, map_idx, map->name, map->sec_idx, 4706 map->sec_offset, insn_idx); 4707 break; 4708 } 4709 if (map_idx >= nr_maps) { 4710 pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n", 4711 prog->name, sym_sec_name, (size_t)sym->st_value); 4712 return -LIBBPF_ERRNO__RELOC; 4713 } 4714 reloc_desc->type = RELO_LD64; 4715 reloc_desc->insn_idx = insn_idx; 4716 reloc_desc->map_idx = map_idx; 4717 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */ 4718 return 0; 4719 } 4720 4721 /* global data map relocation */ 4722 if (!bpf_object__shndx_is_data(obj, shdr_idx)) { 4723 pr_warn("prog '%s': bad data relo against section '%s'\n", 4724 prog->name, sym_sec_name); 4725 return -LIBBPF_ERRNO__RELOC; 4726 } 4727 for (map_idx = 0; map_idx < nr_maps; map_idx++) { 4728 map = &obj->maps[map_idx]; 4729 if (map->libbpf_type != type || map->sec_idx != sym->st_shndx) 4730 continue; 4731 pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n", 4732 prog->name, map_idx, map->name, map->sec_idx, 4733 map->sec_offset, insn_idx); 4734 break; 4735 } 4736 if (map_idx >= nr_maps) { 4737 pr_warn("prog '%s': data relo failed to find map for section '%s'\n", 4738 prog->name, sym_sec_name); 4739 return -LIBBPF_ERRNO__RELOC; 4740 } 4741 4742 
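	/* e.g., a global variable emitted by the compiler into .rodata resolves
	 * here to the internal LIBBPF_MAP_RODATA map found by section index in
	 * the loop above; sym->st_value recorded below is that variable's byte
	 * offset within the section, fixed up against the map at relocation time.
	 */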
reloc_desc->type = RELO_DATA; 4743 reloc_desc->insn_idx = insn_idx; 4744 reloc_desc->map_idx = map_idx; 4745 reloc_desc->sym_off = sym->st_value; 4746 return 0; 4747 } 4748 4749 static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx) 4750 { 4751 return insn_idx >= prog->sec_insn_off && 4752 insn_idx < prog->sec_insn_off + prog->sec_insn_cnt; 4753 } 4754 4755 static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj, 4756 size_t sec_idx, size_t insn_idx) 4757 { 4758 int l = 0, r = obj->nr_programs - 1, m; 4759 struct bpf_program *prog; 4760 4761 if (!obj->nr_programs) 4762 return NULL; 4763 4764 while (l < r) { 4765 m = l + (r - l + 1) / 2; 4766 prog = &obj->programs[m]; 4767 4768 if (prog->sec_idx < sec_idx || 4769 (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx)) 4770 l = m; 4771 else 4772 r = m - 1; 4773 } 4774 /* matching program could be at index l, but it still might be the 4775 * wrong one, so we need to double check conditions for the last time 4776 */ 4777 prog = &obj->programs[l]; 4778 if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx)) 4779 return prog; 4780 return NULL; 4781 } 4782 4783 static int 4784 bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data) 4785 { 4786 const char *relo_sec_name, *sec_name; 4787 size_t sec_idx = shdr->sh_info, sym_idx; 4788 struct bpf_program *prog; 4789 struct reloc_desc *relos; 4790 int err, i, nrels; 4791 const char *sym_name; 4792 __u32 insn_idx; 4793 Elf_Scn *scn; 4794 Elf_Data *scn_data; 4795 Elf64_Sym *sym; 4796 Elf64_Rel *rel; 4797 4798 if (sec_idx >= obj->efile.sec_cnt) 4799 return -EINVAL; 4800 4801 scn = elf_sec_by_idx(obj, sec_idx); 4802 scn_data = elf_sec_data(obj, scn); 4803 if (!scn_data) 4804 return -LIBBPF_ERRNO__FORMAT; 4805 4806 relo_sec_name = elf_sec_str(obj, shdr->sh_name); 4807 sec_name = elf_sec_name(obj, scn); 4808 if (!relo_sec_name || !sec_name) 4809 return -EINVAL; 4810 4811 pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n", 4812 relo_sec_name, sec_idx, sec_name); 4813 nrels = shdr->sh_size / shdr->sh_entsize; 4814 4815 for (i = 0; i < nrels; i++) { 4816 rel = elf_rel_by_idx(data, i); 4817 if (!rel) { 4818 pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i); 4819 return -LIBBPF_ERRNO__FORMAT; 4820 } 4821 4822 sym_idx = ELF64_R_SYM(rel->r_info); 4823 sym = elf_sym_by_idx(obj, sym_idx); 4824 if (!sym) { 4825 pr_warn("sec '%s': symbol #%zu not found for relo #%d\n", 4826 relo_sec_name, sym_idx, i); 4827 return -LIBBPF_ERRNO__FORMAT; 4828 } 4829 4830 if (sym->st_shndx >= obj->efile.sec_cnt) { 4831 pr_warn("sec '%s': corrupted symbol #%zu pointing to invalid section #%zu for relo #%d\n", 4832 relo_sec_name, sym_idx, (size_t)sym->st_shndx, i); 4833 return -LIBBPF_ERRNO__FORMAT; 4834 } 4835 4836 if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) { 4837 pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n", 4838 relo_sec_name, (size_t)rel->r_offset, i); 4839 return -LIBBPF_ERRNO__FORMAT; 4840 } 4841 4842 insn_idx = rel->r_offset / BPF_INSN_SZ; 4843 /* relocations against static functions are recorded as 4844 * relocations against the section that contains a function; 4845 * in such case, symbol will be STT_SECTION and sym.st_name 4846 * will point to empty string (0), so fetch section name 4847 * instead 4848 */ 4849 if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0) 4850 sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx)); 4851 else 4852 
sym_name = elf_sym_str(obj, sym->st_name); 4853 sym_name = sym_name ?: "<?"; 4854 4855 pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n", 4856 relo_sec_name, i, insn_idx, sym_name); 4857 4858 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx); 4859 if (!prog) { 4860 pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n", 4861 relo_sec_name, i, sec_name, insn_idx); 4862 continue; 4863 } 4864 4865 relos = libbpf_reallocarray(prog->reloc_desc, 4866 prog->nr_reloc + 1, sizeof(*relos)); 4867 if (!relos) 4868 return -ENOMEM; 4869 prog->reloc_desc = relos; 4870 4871 /* adjust insn_idx to local BPF program frame of reference */ 4872 insn_idx -= prog->sec_insn_off; 4873 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc], 4874 insn_idx, sym_name, sym, rel); 4875 if (err) 4876 return err; 4877 4878 prog->nr_reloc++; 4879 } 4880 return 0; 4881 } 4882 4883 static int map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map) 4884 { 4885 int id; 4886 4887 if (!obj->btf) 4888 return -ENOENT; 4889 4890 /* if it's BTF-defined map, we don't need to search for type IDs. 4891 * For struct_ops map, it does not need btf_key_type_id and 4892 * btf_value_type_id. 4893 */ 4894 if (map->sec_idx == obj->efile.btf_maps_shndx || bpf_map__is_struct_ops(map)) 4895 return 0; 4896 4897 /* 4898 * LLVM annotates global data differently in BTF, that is, 4899 * only as '.data', '.bss' or '.rodata'. 4900 */ 4901 if (!bpf_map__is_internal(map)) 4902 return -ENOENT; 4903 4904 id = btf__find_by_name(obj->btf, map->real_name); 4905 if (id < 0) 4906 return id; 4907 4908 map->btf_key_type_id = 0; 4909 map->btf_value_type_id = id; 4910 return 0; 4911 } 4912 4913 static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info) 4914 { 4915 char file[PATH_MAX], buff[4096]; 4916 FILE *fp; 4917 __u32 val; 4918 int err; 4919 4920 snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd); 4921 memset(info, 0, sizeof(*info)); 4922 4923 fp = fopen(file, "re"); 4924 if (!fp) { 4925 err = -errno; 4926 pr_warn("failed to open %s: %s. 
No procfs support?\n", file, 4927 errstr(err)); 4928 return err; 4929 } 4930 4931 while (fgets(buff, sizeof(buff), fp)) { 4932 if (sscanf(buff, "map_type:\t%u", &val) == 1) 4933 info->type = val; 4934 else if (sscanf(buff, "key_size:\t%u", &val) == 1) 4935 info->key_size = val; 4936 else if (sscanf(buff, "value_size:\t%u", &val) == 1) 4937 info->value_size = val; 4938 else if (sscanf(buff, "max_entries:\t%u", &val) == 1) 4939 info->max_entries = val; 4940 else if (sscanf(buff, "map_flags:\t%i", &val) == 1) 4941 info->map_flags = val; 4942 } 4943 4944 fclose(fp); 4945 4946 return 0; 4947 } 4948 4949 static bool map_is_created(const struct bpf_map *map) 4950 { 4951 return map->obj->state >= OBJ_PREPARED || map->reused; 4952 } 4953 4954 bool bpf_map__autocreate(const struct bpf_map *map) 4955 { 4956 return map->autocreate; 4957 } 4958 4959 int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate) 4960 { 4961 if (map_is_created(map)) 4962 return libbpf_err(-EBUSY); 4963 4964 map->autocreate = autocreate; 4965 return 0; 4966 } 4967 4968 int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach) 4969 { 4970 if (!bpf_map__is_struct_ops(map)) 4971 return libbpf_err(-EINVAL); 4972 4973 map->autoattach = autoattach; 4974 return 0; 4975 } 4976 4977 bool bpf_map__autoattach(const struct bpf_map *map) 4978 { 4979 return map->autoattach; 4980 } 4981 4982 int bpf_map__reuse_fd(struct bpf_map *map, int fd) 4983 { 4984 struct bpf_map_info info; 4985 __u32 len = sizeof(info), name_len; 4986 int new_fd, err; 4987 char *new_name; 4988 4989 memset(&info, 0, len); 4990 err = bpf_map_get_info_by_fd(fd, &info, &len); 4991 if (err && errno == EINVAL) 4992 err = bpf_get_map_info_from_fdinfo(fd, &info); 4993 if (err) 4994 return libbpf_err(err); 4995 4996 name_len = strlen(info.name); 4997 if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0) 4998 new_name = strdup(map->name); 4999 else 5000 new_name = strdup(info.name); 5001 5002 if (!new_name) 5003 return libbpf_err(-errno); 5004 5005 /* 5006 * Like dup(), but make sure new FD is >= 3 and has O_CLOEXEC set. 5007 * This is similar to what we do in ensure_good_fd(), but without 5008 * closing original FD. 
5009 */ 5010 new_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3); 5011 if (new_fd < 0) { 5012 err = -errno; 5013 goto err_free_new_name; 5014 } 5015 5016 err = reuse_fd(map->fd, new_fd); 5017 if (err) 5018 goto err_free_new_name; 5019 5020 free(map->name); 5021 5022 map->name = new_name; 5023 map->def.type = info.type; 5024 map->def.key_size = info.key_size; 5025 map->def.value_size = info.value_size; 5026 map->def.max_entries = info.max_entries; 5027 map->def.map_flags = info.map_flags; 5028 map->btf_key_type_id = info.btf_key_type_id; 5029 map->btf_value_type_id = info.btf_value_type_id; 5030 map->reused = true; 5031 map->map_extra = info.map_extra; 5032 5033 return 0; 5034 5035 err_free_new_name: 5036 free(new_name); 5037 return libbpf_err(err); 5038 } 5039 5040 __u32 bpf_map__max_entries(const struct bpf_map *map) 5041 { 5042 return map->def.max_entries; 5043 } 5044 5045 struct bpf_map *bpf_map__inner_map(struct bpf_map *map) 5046 { 5047 if (!bpf_map_type__is_map_in_map(map->def.type)) 5048 return errno = EINVAL, NULL; 5049 5050 return map->inner_map; 5051 } 5052 5053 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries) 5054 { 5055 if (map_is_created(map)) 5056 return libbpf_err(-EBUSY); 5057 5058 map->def.max_entries = max_entries; 5059 5060 /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */ 5061 if (map_is_ringbuf(map)) 5062 map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries); 5063 5064 return 0; 5065 } 5066 5067 static int bpf_object_prepare_token(struct bpf_object *obj) 5068 { 5069 const char *bpffs_path; 5070 int bpffs_fd = -1, token_fd, err; 5071 bool mandatory; 5072 enum libbpf_print_level level; 5073 5074 /* token is explicitly prevented */ 5075 if (obj->token_path && obj->token_path[0] == '\0') { 5076 pr_debug("object '%s': token is prevented, skipping...\n", obj->name); 5077 return 0; 5078 } 5079 5080 mandatory = obj->token_path != NULL; 5081 level = mandatory ? LIBBPF_WARN : LIBBPF_DEBUG; 5082 5083 bpffs_path = obj->token_path ?: BPF_FS_DEFAULT_PATH; 5084 bpffs_fd = open(bpffs_path, O_DIRECTORY, O_RDWR); 5085 if (bpffs_fd < 0) { 5086 err = -errno; 5087 __pr(level, "object '%s': failed (%s) to open BPF FS mount at '%s'%s\n", 5088 obj->name, errstr(err), bpffs_path, 5089 mandatory ? "" : ", skipping optional step..."); 5090 return mandatory ? err : 0; 5091 } 5092 5093 token_fd = bpf_token_create(bpffs_fd, 0); 5094 close(bpffs_fd); 5095 if (token_fd < 0) { 5096 if (!mandatory && token_fd == -ENOENT) { 5097 pr_debug("object '%s': BPF FS at '%s' doesn't have BPF token delegation set up, skipping...\n", 5098 obj->name, bpffs_path); 5099 return 0; 5100 } 5101 __pr(level, "object '%s': failed (%d) to create BPF token from '%s'%s\n", 5102 obj->name, token_fd, bpffs_path, 5103 mandatory ? "" : ", skipping optional step..."); 5104 return mandatory ? token_fd : 0; 5105 } 5106 5107 obj->feat_cache = calloc(1, sizeof(*obj->feat_cache)); 5108 if (!obj->feat_cache) { 5109 close(token_fd); 5110 return -ENOMEM; 5111 } 5112 5113 obj->token_fd = token_fd; 5114 obj->feat_cache->token_fd = token_fd; 5115 5116 return 0; 5117 } 5118 5119 static int 5120 bpf_object__probe_loading(struct bpf_object *obj) 5121 { 5122 struct bpf_insn insns[] = { 5123 BPF_MOV64_IMM(BPF_REG_0, 0), 5124 BPF_EXIT_INSN(), 5125 }; 5126 int ret, insn_cnt = ARRAY_SIZE(insns); 5127 LIBBPF_OPTS(bpf_prog_load_opts, opts, 5128 .token_fd = obj->token_fd, 5129 .prog_flags = obj->token_fd ? 
BPF_F_TOKEN_FD : 0, 5130 ); 5131 5132 if (obj->gen_loader) 5133 return 0; 5134 5135 ret = bump_rlimit_memlock(); 5136 if (ret) 5137 pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %s), you might need to do it explicitly!\n", 5138 errstr(ret)); 5139 5140 /* make sure basic loading works */ 5141 ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &opts); 5142 if (ret < 0) 5143 ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts); 5144 if (ret < 0) { 5145 ret = errno; 5146 pr_warn("Error in %s(): %s. Couldn't load trivial BPF program. Make sure your kernel supports BPF (CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is set to big enough value.\n", 5147 __func__, errstr(ret)); 5148 return -ret; 5149 } 5150 close(ret); 5151 5152 return 0; 5153 } 5154 5155 bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id) 5156 { 5157 if (obj->gen_loader) 5158 /* To generate loader program assume the latest kernel 5159 * to avoid doing extra prog_load, map_create syscalls. 5160 */ 5161 return true; 5162 5163 if (obj->token_fd) 5164 return feat_supported(obj->feat_cache, feat_id); 5165 5166 return feat_supported(NULL, feat_id); 5167 } 5168 5169 static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd) 5170 { 5171 struct bpf_map_info map_info; 5172 __u32 map_info_len = sizeof(map_info); 5173 int err; 5174 5175 memset(&map_info, 0, map_info_len); 5176 err = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len); 5177 if (err && errno == EINVAL) 5178 err = bpf_get_map_info_from_fdinfo(map_fd, &map_info); 5179 if (err) { 5180 pr_warn("failed to get map info for map FD %d: %s\n", map_fd, 5181 errstr(err)); 5182 return false; 5183 } 5184 5185 /* 5186 * bpf_get_map_info_by_fd() for DEVMAP will always return flags with 5187 * BPF_F_RDONLY_PROG set, but it generally is not set at map creation time. 5188 * Thus, ignore the BPF_F_RDONLY_PROG flag in the flags returned from 5189 * bpf_get_map_info_by_fd() when checking for compatibility with an 5190 * existing DEVMAP. 
5191 */ 5192 if (map->def.type == BPF_MAP_TYPE_DEVMAP || map->def.type == BPF_MAP_TYPE_DEVMAP_HASH) 5193 map_info.map_flags &= ~BPF_F_RDONLY_PROG; 5194 5195 return (map_info.type == map->def.type && 5196 map_info.key_size == map->def.key_size && 5197 map_info.value_size == map->def.value_size && 5198 map_info.max_entries == map->def.max_entries && 5199 map_info.map_flags == map->def.map_flags && 5200 map_info.map_extra == map->map_extra); 5201 } 5202 5203 static int 5204 bpf_object__reuse_map(struct bpf_map *map) 5205 { 5206 int err, pin_fd; 5207 5208 pin_fd = bpf_obj_get(map->pin_path); 5209 if (pin_fd < 0) { 5210 err = -errno; 5211 if (err == -ENOENT) { 5212 pr_debug("found no pinned map to reuse at '%s'\n", 5213 map->pin_path); 5214 return 0; 5215 } 5216 5217 pr_warn("couldn't retrieve pinned map '%s': %s\n", 5218 map->pin_path, errstr(err)); 5219 return err; 5220 } 5221 5222 if (!map_is_reuse_compat(map, pin_fd)) { 5223 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n", 5224 map->pin_path); 5225 close(pin_fd); 5226 return -EINVAL; 5227 } 5228 5229 err = bpf_map__reuse_fd(map, pin_fd); 5230 close(pin_fd); 5231 if (err) 5232 return err; 5233 5234 map->pinned = true; 5235 pr_debug("reused pinned map at '%s'\n", map->pin_path); 5236 5237 return 0; 5238 } 5239 5240 static int 5241 bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) 5242 { 5243 enum libbpf_map_type map_type = map->libbpf_type; 5244 int err, zero = 0; 5245 size_t mmap_sz; 5246 5247 if (obj->gen_loader) { 5248 bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps, 5249 map->mmaped, map->def.value_size); 5250 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) 5251 bpf_gen__map_freeze(obj->gen_loader, map - obj->maps); 5252 return 0; 5253 } 5254 5255 err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0); 5256 if (err) { 5257 err = -errno; 5258 pr_warn("map '%s': failed to set initial contents: %s\n", 5259 bpf_map__name(map), errstr(err)); 5260 return err; 5261 } 5262 5263 /* Freeze .rodata and .kconfig map as read-only from syscall side. */ 5264 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) { 5265 err = bpf_map_freeze(map->fd); 5266 if (err) { 5267 err = -errno; 5268 pr_warn("map '%s': failed to freeze as read-only: %s\n", 5269 bpf_map__name(map), errstr(err)); 5270 return err; 5271 } 5272 } 5273 5274 /* Remap anonymous mmap()-ed "map initialization image" as 5275 * a BPF map-backed mmap()-ed memory, but preserving the same 5276 * memory address. This will cause kernel to change process' 5277 * page table to point to a different piece of kernel memory, 5278 * but from userspace point of view memory address (and its 5279 * contents, being identical at this point) will stay the 5280 * same. This mapping will be released by bpf_object__close() 5281 * as per normal clean up procedure. 
5282 */ 5283 mmap_sz = bpf_map_mmap_sz(map); 5284 if (map->def.map_flags & BPF_F_MMAPABLE) { 5285 void *mmaped; 5286 int prot; 5287 5288 if (map->def.map_flags & BPF_F_RDONLY_PROG) 5289 prot = PROT_READ; 5290 else 5291 prot = PROT_READ | PROT_WRITE; 5292 mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map->fd, 0); 5293 if (mmaped == MAP_FAILED) { 5294 err = -errno; 5295 pr_warn("map '%s': failed to re-mmap() contents: %s\n", 5296 bpf_map__name(map), errstr(err)); 5297 return err; 5298 } 5299 map->mmaped = mmaped; 5300 } else if (map->mmaped) { 5301 munmap(map->mmaped, mmap_sz); 5302 map->mmaped = NULL; 5303 } 5304 5305 return 0; 5306 } 5307 5308 static void bpf_map__destroy(struct bpf_map *map); 5309 5310 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner) 5311 { 5312 LIBBPF_OPTS(bpf_map_create_opts, create_attr); 5313 struct bpf_map_def *def = &map->def; 5314 const char *map_name = NULL; 5315 int err = 0, map_fd; 5316 5317 if (kernel_supports(obj, FEAT_PROG_NAME)) 5318 map_name = map->name; 5319 create_attr.map_ifindex = map->map_ifindex; 5320 create_attr.map_flags = def->map_flags; 5321 create_attr.numa_node = map->numa_node; 5322 create_attr.map_extra = map->map_extra; 5323 create_attr.token_fd = obj->token_fd; 5324 if (obj->token_fd) 5325 create_attr.map_flags |= BPF_F_TOKEN_FD; 5326 if (map->excl_prog) { 5327 err = bpf_prog_compute_hash(map->excl_prog); 5328 if (err) 5329 return err; 5330 5331 create_attr.excl_prog_hash = map->excl_prog->hash; 5332 create_attr.excl_prog_hash_size = SHA256_DIGEST_LENGTH; 5333 } 5334 5335 if (bpf_map__is_struct_ops(map)) { 5336 create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; 5337 if (map->mod_btf_fd >= 0) { 5338 create_attr.value_type_btf_obj_fd = map->mod_btf_fd; 5339 create_attr.map_flags |= BPF_F_VTYPE_BTF_OBJ_FD; 5340 } 5341 } 5342 5343 if (obj->btf && btf__fd(obj->btf) >= 0) { 5344 create_attr.btf_fd = btf__fd(obj->btf); 5345 create_attr.btf_key_type_id = map->btf_key_type_id; 5346 create_attr.btf_value_type_id = map->btf_value_type_id; 5347 } 5348 5349 if (bpf_map_type__is_map_in_map(def->type)) { 5350 if (map->inner_map) { 5351 err = map_set_def_max_entries(map->inner_map); 5352 if (err) 5353 return err; 5354 err = bpf_object__create_map(obj, map->inner_map, true); 5355 if (err) { 5356 pr_warn("map '%s': failed to create inner map: %s\n", 5357 map->name, errstr(err)); 5358 return err; 5359 } 5360 map->inner_map_fd = map->inner_map->fd; 5361 } 5362 if (map->inner_map_fd >= 0) 5363 create_attr.inner_map_fd = map->inner_map_fd; 5364 } 5365 5366 switch (def->type) { 5367 case BPF_MAP_TYPE_PERF_EVENT_ARRAY: 5368 case BPF_MAP_TYPE_CGROUP_ARRAY: 5369 case BPF_MAP_TYPE_STACK_TRACE: 5370 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 5371 case BPF_MAP_TYPE_HASH_OF_MAPS: 5372 case BPF_MAP_TYPE_DEVMAP: 5373 case BPF_MAP_TYPE_DEVMAP_HASH: 5374 case BPF_MAP_TYPE_CPUMAP: 5375 case BPF_MAP_TYPE_XSKMAP: 5376 case BPF_MAP_TYPE_SOCKMAP: 5377 case BPF_MAP_TYPE_SOCKHASH: 5378 case BPF_MAP_TYPE_QUEUE: 5379 case BPF_MAP_TYPE_STACK: 5380 case BPF_MAP_TYPE_ARENA: 5381 create_attr.btf_fd = 0; 5382 create_attr.btf_key_type_id = 0; 5383 create_attr.btf_value_type_id = 0; 5384 map->btf_key_type_id = 0; 5385 map->btf_value_type_id = 0; 5386 break; 5387 case BPF_MAP_TYPE_STRUCT_OPS: 5388 create_attr.btf_value_type_id = 0; 5389 break; 5390 default: 5391 break; 5392 } 5393 5394 if (obj->gen_loader) { 5395 bpf_gen__map_create(obj->gen_loader, def->type, map_name, 5396 def->key_size, def->value_size, 
def->max_entries, 5397 &create_attr, is_inner ? -1 : map - obj->maps); 5398 /* We keep pretenting we have valid FD to pass various fd >= 0 5399 * checks by just keeping original placeholder FDs in place. 5400 * See bpf_object__add_map() comment. 5401 * This placeholder fd will not be used with any syscall and 5402 * will be reset to -1 eventually. 5403 */ 5404 map_fd = map->fd; 5405 } else { 5406 map_fd = bpf_map_create(def->type, map_name, 5407 def->key_size, def->value_size, 5408 def->max_entries, &create_attr); 5409 } 5410 if (map_fd < 0 && (create_attr.btf_key_type_id || create_attr.btf_value_type_id)) { 5411 err = -errno; 5412 pr_warn("Error in bpf_create_map_xattr(%s): %s. Retrying without BTF.\n", 5413 map->name, errstr(err)); 5414 create_attr.btf_fd = 0; 5415 create_attr.btf_key_type_id = 0; 5416 create_attr.btf_value_type_id = 0; 5417 map->btf_key_type_id = 0; 5418 map->btf_value_type_id = 0; 5419 map_fd = bpf_map_create(def->type, map_name, 5420 def->key_size, def->value_size, 5421 def->max_entries, &create_attr); 5422 } 5423 5424 if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) { 5425 if (obj->gen_loader) 5426 map->inner_map->fd = -1; 5427 bpf_map__destroy(map->inner_map); 5428 zfree(&map->inner_map); 5429 } 5430 5431 if (map_fd < 0) 5432 return map_fd; 5433 5434 /* obj->gen_loader case, prevent reuse_fd() from closing map_fd */ 5435 if (map->fd == map_fd) 5436 return 0; 5437 5438 /* Keep placeholder FD value but now point it to the BPF map object. 5439 * This way everything that relied on this map's FD (e.g., relocated 5440 * ldimm64 instructions) will stay valid and won't need adjustments. 5441 * map->fd stays valid but now point to what map_fd points to. 5442 */ 5443 return reuse_fd(map->fd, map_fd); 5444 } 5445 5446 static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map) 5447 { 5448 const struct bpf_map *targ_map; 5449 unsigned int i; 5450 int fd, err = 0; 5451 5452 for (i = 0; i < map->init_slots_sz; i++) { 5453 if (!map->init_slots[i]) 5454 continue; 5455 5456 targ_map = map->init_slots[i]; 5457 fd = targ_map->fd; 5458 5459 if (obj->gen_loader) { 5460 bpf_gen__populate_outer_map(obj->gen_loader, 5461 map - obj->maps, i, 5462 targ_map - obj->maps); 5463 } else { 5464 err = bpf_map_update_elem(map->fd, &i, &fd, 0); 5465 } 5466 if (err) { 5467 err = -errno; 5468 pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %s\n", 5469 map->name, i, targ_map->name, fd, errstr(err)); 5470 return err; 5471 } 5472 pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n", 5473 map->name, i, targ_map->name, fd); 5474 } 5475 5476 zfree(&map->init_slots); 5477 map->init_slots_sz = 0; 5478 5479 return 0; 5480 } 5481 5482 static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map) 5483 { 5484 const struct bpf_program *targ_prog; 5485 unsigned int i; 5486 int fd, err; 5487 5488 if (obj->gen_loader) 5489 return -ENOTSUP; 5490 5491 for (i = 0; i < map->init_slots_sz; i++) { 5492 if (!map->init_slots[i]) 5493 continue; 5494 5495 targ_prog = map->init_slots[i]; 5496 fd = bpf_program__fd(targ_prog); 5497 5498 err = bpf_map_update_elem(map->fd, &i, &fd, 0); 5499 if (err) { 5500 err = -errno; 5501 pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %s\n", 5502 map->name, i, targ_prog->name, fd, errstr(err)); 5503 return err; 5504 } 5505 pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n", 5506 map->name, i, targ_prog->name, fd); 5507 } 5508 5509 zfree(&map->init_slots); 5510 map->init_slots_sz = 0; 5511 5512 return 
0; 5513 } 5514 5515 static int bpf_object_init_prog_arrays(struct bpf_object *obj) 5516 { 5517 struct bpf_map *map; 5518 int i, err; 5519 5520 for (i = 0; i < obj->nr_maps; i++) { 5521 map = &obj->maps[i]; 5522 5523 if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY) 5524 continue; 5525 5526 err = init_prog_array_slots(obj, map); 5527 if (err < 0) 5528 return err; 5529 } 5530 return 0; 5531 } 5532 5533 static int map_set_def_max_entries(struct bpf_map *map) 5534 { 5535 if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) { 5536 int nr_cpus; 5537 5538 nr_cpus = libbpf_num_possible_cpus(); 5539 if (nr_cpus < 0) { 5540 pr_warn("map '%s': failed to determine number of system CPUs: %d\n", 5541 map->name, nr_cpus); 5542 return nr_cpus; 5543 } 5544 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus); 5545 map->def.max_entries = nr_cpus; 5546 } 5547 5548 return 0; 5549 } 5550 5551 static int 5552 bpf_object__create_maps(struct bpf_object *obj) 5553 { 5554 struct bpf_map *map; 5555 unsigned int i, j; 5556 int err; 5557 bool retried; 5558 5559 for (i = 0; i < obj->nr_maps; i++) { 5560 map = &obj->maps[i]; 5561 5562 /* To support old kernels, we skip creating global data maps 5563 * (.rodata, .data, .kconfig, etc); later on, during program 5564 * loading, if we detect that at least one of the to-be-loaded 5565 * programs is referencing any global data map, we'll error 5566 * out with program name and relocation index logged. 5567 * This approach allows to accommodate Clang emitting 5568 * unnecessary .rodata.str1.1 sections for string literals, 5569 * but also it allows to have CO-RE applications that use 5570 * global variables in some of BPF programs, but not others. 5571 * If those global variable-using programs are not loaded at 5572 * runtime due to bpf_program__set_autoload(prog, false), 5573 * bpf_object loading will succeed just fine even on old 5574 * kernels. 5575 */ 5576 if (bpf_map__is_internal(map) && !kernel_supports(obj, FEAT_GLOBAL_DATA)) 5577 map->autocreate = false; 5578 5579 if (!map->autocreate) { 5580 pr_debug("map '%s': skipped auto-creating...\n", map->name); 5581 continue; 5582 } 5583 5584 err = map_set_def_max_entries(map); 5585 if (err) 5586 goto err_out; 5587 5588 retried = false; 5589 retry: 5590 if (map->pin_path) { 5591 err = bpf_object__reuse_map(map); 5592 if (err) { 5593 pr_warn("map '%s': error reusing pinned map\n", 5594 map->name); 5595 goto err_out; 5596 } 5597 if (retried && map->fd < 0) { 5598 pr_warn("map '%s': cannot find pinned map\n", 5599 map->name); 5600 err = -ENOENT; 5601 goto err_out; 5602 } 5603 } 5604 5605 if (map->reused) { 5606 pr_debug("map '%s': skipping creation (preset fd=%d)\n", 5607 map->name, map->fd); 5608 } else { 5609 err = bpf_object__create_map(obj, map, false); 5610 if (err) 5611 goto err_out; 5612 5613 pr_debug("map '%s': created successfully, fd=%d\n", 5614 map->name, map->fd); 5615 5616 if (bpf_map__is_internal(map)) { 5617 err = bpf_object__populate_internal_map(obj, map); 5618 if (err < 0) 5619 goto err_out; 5620 } else if (map->def.type == BPF_MAP_TYPE_ARENA) { 5621 map->mmaped = mmap((void *)(long)map->map_extra, 5622 bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE, 5623 map->map_extra ? 
MAP_SHARED | MAP_FIXED : MAP_SHARED, 5624 map->fd, 0); 5625 if (map->mmaped == MAP_FAILED) { 5626 err = -errno; 5627 map->mmaped = NULL; 5628 pr_warn("map '%s': failed to mmap arena: %s\n", 5629 map->name, errstr(err)); 5630 return err; 5631 } 5632 if (obj->arena_data) { 5633 memcpy(map->mmaped + obj->arena_data_off, obj->arena_data, 5634 obj->arena_data_sz); 5635 zfree(&obj->arena_data); 5636 } 5637 } 5638 if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) { 5639 err = init_map_in_map_slots(obj, map); 5640 if (err < 0) 5641 goto err_out; 5642 } 5643 } 5644 5645 if (map->pin_path && !map->pinned) { 5646 err = bpf_map__pin(map, NULL); 5647 if (err) { 5648 if (!retried && err == -EEXIST) { 5649 retried = true; 5650 goto retry; 5651 } 5652 pr_warn("map '%s': failed to auto-pin at '%s': %s\n", 5653 map->name, map->pin_path, errstr(err)); 5654 goto err_out; 5655 } 5656 } 5657 } 5658 5659 return 0; 5660 5661 err_out: 5662 pr_warn("map '%s': failed to create: %s\n", map->name, errstr(err)); 5663 pr_perm_msg(err); 5664 for (j = 0; j < i; j++) 5665 zclose(obj->maps[j].fd); 5666 return err; 5667 } 5668 5669 static bool bpf_core_is_flavor_sep(const char *s) 5670 { 5671 /* check X___Y name pattern, where X and Y are not underscores */ 5672 return s[0] != '_' && /* X */ 5673 s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */ 5674 s[4] != '_'; /* Y */ 5675 } 5676 5677 /* Given 'some_struct_name___with_flavor' return the length of a name prefix 5678 * before last triple underscore. Struct name part after last triple 5679 * underscore is ignored by BPF CO-RE relocation during relocation matching. 5680 */ 5681 size_t bpf_core_essential_name_len(const char *name) 5682 { 5683 size_t n = strlen(name); 5684 int i; 5685 5686 for (i = n - 5; i >= 0; i--) { 5687 if (bpf_core_is_flavor_sep(name + i)) 5688 return i + 1; 5689 } 5690 return n; 5691 } 5692 5693 void bpf_core_free_cands(struct bpf_core_cand_list *cands) 5694 { 5695 if (!cands) 5696 return; 5697 5698 free(cands->cands); 5699 free(cands); 5700 } 5701 5702 int bpf_core_add_cands(struct bpf_core_cand *local_cand, 5703 size_t local_essent_len, 5704 const struct btf *targ_btf, 5705 const char *targ_btf_name, 5706 int targ_start_id, 5707 struct bpf_core_cand_list *cands) 5708 { 5709 struct bpf_core_cand *new_cands, *cand; 5710 const struct btf_type *t, *local_t; 5711 const char *targ_name, *local_name; 5712 size_t targ_essent_len; 5713 int n, i; 5714 5715 local_t = btf__type_by_id(local_cand->btf, local_cand->id); 5716 local_name = btf__str_by_offset(local_cand->btf, local_t->name_off); 5717 5718 n = btf__type_cnt(targ_btf); 5719 for (i = targ_start_id; i < n; i++) { 5720 t = btf__type_by_id(targ_btf, i); 5721 if (!btf_kind_core_compat(t, local_t)) 5722 continue; 5723 5724 targ_name = btf__name_by_offset(targ_btf, t->name_off); 5725 if (str_is_empty(targ_name)) 5726 continue; 5727 5728 targ_essent_len = bpf_core_essential_name_len(targ_name); 5729 if (targ_essent_len != local_essent_len) 5730 continue; 5731 5732 if (strncmp(local_name, targ_name, local_essent_len) != 0) 5733 continue; 5734 5735 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n", 5736 local_cand->id, btf_kind_str(local_t), 5737 local_name, i, btf_kind_str(t), targ_name, 5738 targ_btf_name); 5739 new_cands = libbpf_reallocarray(cands->cands, cands->len + 1, 5740 sizeof(*cands->cands)); 5741 if (!new_cands) 5742 return -ENOMEM; 5743 5744 cand = &new_cands[cands->len]; 5745 cand->btf = targ_btf; 5746 cand->id = i; 5747 5748 cands->cands = 
new_cands; 5749 cands->len++; 5750 } 5751 return 0; 5752 } 5753 5754 static int load_module_btfs(struct bpf_object *obj) 5755 { 5756 struct bpf_btf_info info; 5757 struct module_btf *mod_btf; 5758 struct btf *btf; 5759 char name[64]; 5760 __u32 id = 0, len; 5761 int err, fd; 5762 5763 if (obj->btf_modules_loaded) 5764 return 0; 5765 5766 if (obj->gen_loader) 5767 return 0; 5768 5769 /* don't do this again, even if we find no module BTFs */ 5770 obj->btf_modules_loaded = true; 5771 5772 /* kernel too old to support module BTFs */ 5773 if (!kernel_supports(obj, FEAT_MODULE_BTF)) 5774 return 0; 5775 5776 while (true) { 5777 err = bpf_btf_get_next_id(id, &id); 5778 if (err && errno == ENOENT) 5779 return 0; 5780 if (err && errno == EPERM) { 5781 pr_debug("skipping module BTFs loading, missing privileges\n"); 5782 return 0; 5783 } 5784 if (err) { 5785 err = -errno; 5786 pr_warn("failed to iterate BTF objects: %s\n", errstr(err)); 5787 return err; 5788 } 5789 5790 fd = bpf_btf_get_fd_by_id(id); 5791 if (fd < 0) { 5792 if (errno == ENOENT) 5793 continue; /* expected race: BTF was unloaded */ 5794 err = -errno; 5795 pr_warn("failed to get BTF object #%d FD: %s\n", id, errstr(err)); 5796 return err; 5797 } 5798 5799 len = sizeof(info); 5800 memset(&info, 0, sizeof(info)); 5801 info.name = ptr_to_u64(name); 5802 info.name_len = sizeof(name); 5803 5804 err = bpf_btf_get_info_by_fd(fd, &info, &len); 5805 if (err) { 5806 err = -errno; 5807 pr_warn("failed to get BTF object #%d info: %s\n", id, errstr(err)); 5808 goto err_out; 5809 } 5810 5811 /* ignore non-module BTFs */ 5812 if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) { 5813 close(fd); 5814 continue; 5815 } 5816 5817 btf = btf_get_from_fd(fd, obj->btf_vmlinux); 5818 err = libbpf_get_error(btf); 5819 if (err) { 5820 pr_warn("failed to load module [%s]'s BTF object #%d: %s\n", 5821 name, id, errstr(err)); 5822 goto err_out; 5823 } 5824 5825 err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap, 5826 sizeof(*obj->btf_modules), obj->btf_module_cnt + 1); 5827 if (err) 5828 goto err_out; 5829 5830 mod_btf = &obj->btf_modules[obj->btf_module_cnt++]; 5831 5832 mod_btf->btf = btf; 5833 mod_btf->id = id; 5834 mod_btf->fd = fd; 5835 mod_btf->name = strdup(name); 5836 if (!mod_btf->name) { 5837 err = -ENOMEM; 5838 goto err_out; 5839 } 5840 continue; 5841 5842 err_out: 5843 close(fd); 5844 return err; 5845 } 5846 5847 return 0; 5848 } 5849 5850 static struct bpf_core_cand_list * 5851 bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id) 5852 { 5853 struct bpf_core_cand local_cand = {}; 5854 struct bpf_core_cand_list *cands; 5855 const struct btf *main_btf; 5856 const struct btf_type *local_t; 5857 const char *local_name; 5858 size_t local_essent_len; 5859 int err, i; 5860 5861 local_cand.btf = local_btf; 5862 local_cand.id = local_type_id; 5863 local_t = btf__type_by_id(local_btf, local_type_id); 5864 if (!local_t) 5865 return ERR_PTR(-EINVAL); 5866 5867 local_name = btf__name_by_offset(local_btf, local_t->name_off); 5868 if (str_is_empty(local_name)) 5869 return ERR_PTR(-EINVAL); 5870 local_essent_len = bpf_core_essential_name_len(local_name); 5871 5872 cands = calloc(1, sizeof(*cands)); 5873 if (!cands) 5874 return ERR_PTR(-ENOMEM); 5875 5876 /* Attempt to find target candidates in vmlinux BTF first */ 5877 main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux; 5878 err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands); 5879 if (err) 5880 goto err_out; 5881 
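/* Candidate matching is done on "essential" type names: e.g., a local flavored type 'task_struct___old' has essential name 'task_struct' and thus matches any target type whose essential name is also 'task_struct'; the '___old' flavor suffix is ignored during matching. */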
5882 /* if vmlinux BTF has any candidate, don't go for module BTFs */ 5883 if (cands->len) 5884 return cands; 5885 5886 /* if vmlinux BTF was overridden, don't attempt to load module BTFs */ 5887 if (obj->btf_vmlinux_override) 5888 return cands; 5889 5890 /* now look through module BTFs, trying to still find candidates */ 5891 err = load_module_btfs(obj); 5892 if (err) 5893 goto err_out; 5894 5895 for (i = 0; i < obj->btf_module_cnt; i++) { 5896 err = bpf_core_add_cands(&local_cand, local_essent_len, 5897 obj->btf_modules[i].btf, 5898 obj->btf_modules[i].name, 5899 btf__type_cnt(obj->btf_vmlinux), 5900 cands); 5901 if (err) 5902 goto err_out; 5903 } 5904 5905 return cands; 5906 err_out: 5907 bpf_core_free_cands(cands); 5908 return ERR_PTR(err); 5909 } 5910 5911 /* Check local and target types for compatibility. This check is used for 5912 * type-based CO-RE relocations and follows slightly different rules than 5913 * field-based relocations. This function assumes that root types were already 5914 * checked for name match. Beyond that initial root-level name check, names 5915 * are completely ignored. Compatibility rules are as follows: 5916 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but 5917 * kind should match for local and target types (i.e., STRUCT is not 5918 * compatible with UNION); 5919 * - for ENUMs, the size is ignored; 5920 * - for INT, size and signedness are ignored; 5921 * - for ARRAY, dimensionality is ignored, element types are checked for 5922 * compatibility recursively; 5923 * - CONST/VOLATILE/RESTRICT modifiers are ignored; 5924 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible; 5925 * - FUNC_PROTOs are compatible if they have compatible signatures: the same 5926 * number of input args and compatible return and argument types. 5927 * These rules are not set in stone and probably will be adjusted as we get 5928 * more experience with using BPF CO-RE relocations.
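 * For example, a local 'struct task_struct___v1' is compatible with a target 'struct task_struct' regardless of their members or sizes, an 8-bit ENUM is compatible with a 32-bit ENUM, and 'const int *' is compatible with 'int *'; a STRUCT, however, is never compatible with a UNION of the same name.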
5929 */ 5930 int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, 5931 const struct btf *targ_btf, __u32 targ_id) 5932 { 5933 return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, 32); 5934 } 5935 5936 int bpf_core_types_match(const struct btf *local_btf, __u32 local_id, 5937 const struct btf *targ_btf, __u32 targ_id) 5938 { 5939 return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false, 32); 5940 } 5941 5942 static size_t bpf_core_hash_fn(const long key, void *ctx) 5943 { 5944 return key; 5945 } 5946 5947 static bool bpf_core_equal_fn(const long k1, const long k2, void *ctx) 5948 { 5949 return k1 == k2; 5950 } 5951 5952 static int record_relo_core(struct bpf_program *prog, 5953 const struct bpf_core_relo *core_relo, int insn_idx) 5954 { 5955 struct reloc_desc *relos, *relo; 5956 5957 relos = libbpf_reallocarray(prog->reloc_desc, 5958 prog->nr_reloc + 1, sizeof(*relos)); 5959 if (!relos) 5960 return -ENOMEM; 5961 relo = &relos[prog->nr_reloc]; 5962 relo->type = RELO_CORE; 5963 relo->insn_idx = insn_idx; 5964 relo->core_relo = core_relo; 5965 prog->reloc_desc = relos; 5966 prog->nr_reloc++; 5967 return 0; 5968 } 5969 5970 static const struct bpf_core_relo *find_relo_core(struct bpf_program *prog, int insn_idx) 5971 { 5972 struct reloc_desc *relo; 5973 int i; 5974 5975 for (i = 0; i < prog->nr_reloc; i++) { 5976 relo = &prog->reloc_desc[i]; 5977 if (relo->type != RELO_CORE || relo->insn_idx != insn_idx) 5978 continue; 5979 5980 return relo->core_relo; 5981 } 5982 5983 return NULL; 5984 } 5985 5986 static int bpf_core_resolve_relo(struct bpf_program *prog, 5987 const struct bpf_core_relo *relo, 5988 int relo_idx, 5989 const struct btf *local_btf, 5990 struct hashmap *cand_cache, 5991 struct bpf_core_relo_res *targ_res) 5992 { 5993 struct bpf_core_spec specs_scratch[3] = {}; 5994 struct bpf_core_cand_list *cands = NULL; 5995 const char *prog_name = prog->name; 5996 const struct btf_type *local_type; 5997 const char *local_name; 5998 __u32 local_id = relo->type_id; 5999 int err; 6000 6001 local_type = btf__type_by_id(local_btf, local_id); 6002 if (!local_type) 6003 return -EINVAL; 6004 6005 local_name = btf__name_by_offset(local_btf, local_type->name_off); 6006 if (!local_name) 6007 return -EINVAL; 6008 6009 if (relo->kind != BPF_CORE_TYPE_ID_LOCAL && 6010 !hashmap__find(cand_cache, local_id, &cands)) { 6011 cands = bpf_core_find_cands(prog->obj, local_btf, local_id); 6012 if (IS_ERR(cands)) { 6013 pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n", 6014 prog_name, relo_idx, local_id, btf_kind_str(local_type), 6015 local_name, PTR_ERR(cands)); 6016 return PTR_ERR(cands); 6017 } 6018 err = hashmap__set(cand_cache, local_id, cands, NULL, NULL); 6019 if (err) { 6020 bpf_core_free_cands(cands); 6021 return err; 6022 } 6023 } 6024 6025 return bpf_core_calc_relo_insn(prog_name, relo, relo_idx, local_btf, cands, specs_scratch, 6026 targ_res); 6027 } 6028 6029 static int 6030 bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path) 6031 { 6032 const struct btf_ext_info_sec *sec; 6033 struct bpf_core_relo_res targ_res; 6034 const struct bpf_core_relo *rec; 6035 const struct btf_ext_info *seg; 6036 struct hashmap_entry *entry; 6037 struct hashmap *cand_cache = NULL; 6038 struct bpf_program *prog; 6039 struct bpf_insn *insn; 6040 const char *sec_name; 6041 int i, err = 0, insn_idx, sec_idx, sec_num; 6042 6043 if (obj->btf_ext->core_relo_info.len == 0) 6044 return 0; 6045 6046 if 
(targ_btf_path) { 6047 obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL); 6048 err = libbpf_get_error(obj->btf_vmlinux_override); 6049 if (err) { 6050 pr_warn("failed to parse target BTF: %s\n", errstr(err)); 6051 return err; 6052 } 6053 } 6054 6055 cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL); 6056 if (IS_ERR(cand_cache)) { 6057 err = PTR_ERR(cand_cache); 6058 goto out; 6059 } 6060 6061 seg = &obj->btf_ext->core_relo_info; 6062 sec_num = 0; 6063 for_each_btf_ext_sec(seg, sec) { 6064 sec_idx = seg->sec_idxs[sec_num]; 6065 sec_num++; 6066 6067 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); 6068 if (str_is_empty(sec_name)) { 6069 err = -EINVAL; 6070 goto out; 6071 } 6072 6073 pr_debug("sec '%s': found %d CO-RE relocations\n", sec_name, sec->num_info); 6074 6075 for_each_btf_ext_rec(seg, sec, i, rec) { 6076 if (rec->insn_off % BPF_INSN_SZ) 6077 return -EINVAL; 6078 insn_idx = rec->insn_off / BPF_INSN_SZ; 6079 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx); 6080 if (!prog) { 6081 /* When __weak subprog is "overridden" by another instance 6082 * of the subprog from a different object file, linker still 6083 * appends all the .BTF.ext info that used to belong to that 6084 * eliminated subprogram. 6085 * This is similar to what x86-64 linker does for relocations. 6086 * So just ignore such relocations just like we ignore 6087 * subprog instructions when discovering subprograms. 6088 */ 6089 pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subprogram\n", 6090 sec_name, i, insn_idx); 6091 continue; 6092 } 6093 /* no need to apply CO-RE relocation if the program is 6094 * not going to be loaded 6095 */ 6096 if (!prog->autoload) 6097 continue; 6098 6099 /* adjust insn_idx from section frame of reference to the local 6100 * program's frame of reference; (sub-)program code is not yet 6101 * relocated, so it's enough to just subtract in-section offset 6102 */ 6103 insn_idx = insn_idx - prog->sec_insn_off; 6104 if (insn_idx >= prog->insns_cnt) 6105 return -EINVAL; 6106 insn = &prog->insns[insn_idx]; 6107 6108 err = record_relo_core(prog, rec, insn_idx); 6109 if (err) { 6110 pr_warn("prog '%s': relo #%d: failed to record relocation: %s\n", 6111 prog->name, i, errstr(err)); 6112 goto out; 6113 } 6114 6115 if (prog->obj->gen_loader) 6116 continue; 6117 6118 err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res); 6119 if (err) { 6120 pr_warn("prog '%s': relo #%d: failed to relocate: %s\n", 6121 prog->name, i, errstr(err)); 6122 goto out; 6123 } 6124 6125 err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res); 6126 if (err) { 6127 pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %s\n", 6128 prog->name, i, insn_idx, errstr(err)); 6129 goto out; 6130 } 6131 } 6132 } 6133 6134 out: 6135 /* obj->btf_vmlinux and module BTFs are freed after object load */ 6136 btf__free(obj->btf_vmlinux_override); 6137 obj->btf_vmlinux_override = NULL; 6138 6139 if (!IS_ERR_OR_NULL(cand_cache)) { 6140 hashmap__for_each_entry(cand_cache, entry, i) { 6141 bpf_core_free_cands(entry->pvalue); 6142 } 6143 hashmap__free(cand_cache); 6144 } 6145 return err; 6146 } 6147 6148 /* base map load ldimm64 special constant, used also for log fixup logic */ 6149 #define POISON_LDIMM64_MAP_BASE 2001000000 6150 #define POISON_LDIMM64_MAP_PFX "200100" 6151 6152 static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx, 6153 int insn_idx, struct bpf_insn *insn, 6154 int map_idx, const struct bpf_map 
*map) 6155 { 6156 int i; 6157 6158 pr_debug("prog '%s': relo #%d: poisoning insn #%d that loads map #%d '%s'\n", 6159 prog->name, relo_idx, insn_idx, map_idx, map->name); 6160 6161 /* we turn single ldimm64 into two identical invalid calls */ 6162 for (i = 0; i < 2; i++) { 6163 insn->code = BPF_JMP | BPF_CALL; 6164 insn->dst_reg = 0; 6165 insn->src_reg = 0; 6166 insn->off = 0; 6167 /* if this instruction is reachable (not dead code), the 6168 * verifier will complain with something like: 6169 * invalid func unknown#2001000123 6170 * where lower 123 is map index into obj->maps[] array 6171 */ 6172 insn->imm = POISON_LDIMM64_MAP_BASE + map_idx; 6173 6174 insn++; 6175 } 6176 } 6177 6178 /* unresolved kfunc call special constant, used also for log fixup logic */ 6179 #define POISON_CALL_KFUNC_BASE 2002000000 6180 #define POISON_CALL_KFUNC_PFX "2002" 6181 6182 static void poison_kfunc_call(struct bpf_program *prog, int relo_idx, 6183 int insn_idx, struct bpf_insn *insn, 6184 int ext_idx, const struct extern_desc *ext) 6185 { 6186 pr_debug("prog '%s': relo #%d: poisoning insn #%d that calls kfunc '%s'\n", 6187 prog->name, relo_idx, insn_idx, ext->name); 6188 6189 /* we turn kfunc call into invalid helper call with identifiable constant */ 6190 insn->code = BPF_JMP | BPF_CALL; 6191 insn->dst_reg = 0; 6192 insn->src_reg = 0; 6193 insn->off = 0; 6194 /* if this instruction is reachable (not dead code), the 6195 * verifier will complain with something like: 6196 * invalid func unknown#2002000123 6197 * where lower 123 is extern index into obj->externs[] array 6198 */ 6199 insn->imm = POISON_CALL_KFUNC_BASE + ext_idx; 6200 } 6201 6202 static int find_jt_map(struct bpf_object *obj, struct bpf_program *prog, unsigned int sym_off) 6203 { 6204 size_t i; 6205 6206 for (i = 0; i < obj->jumptable_map_cnt; i++) { 6207 /* 6208 * It might happen that the same offset is used for two different 6209 * programs (as jump tables can be the same). However, for 6210 * different programs different maps should be created.
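 * That's why the lookup below matches on both the symbol offset and the owning program.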
6211 */ 6212 if (obj->jumptable_maps[i].sym_off == sym_off && 6213 obj->jumptable_maps[i].prog == prog) 6214 return obj->jumptable_maps[i].fd; 6215 } 6216 6217 return -ENOENT; 6218 } 6219 6220 static int add_jt_map(struct bpf_object *obj, struct bpf_program *prog, unsigned int sym_off, int map_fd) 6221 { 6222 size_t cnt = obj->jumptable_map_cnt; 6223 size_t size = sizeof(obj->jumptable_maps[0]); 6224 void *tmp; 6225 6226 tmp = libbpf_reallocarray(obj->jumptable_maps, cnt + 1, size); 6227 if (!tmp) 6228 return -ENOMEM; 6229 6230 obj->jumptable_maps = tmp; 6231 obj->jumptable_maps[cnt].prog = prog; 6232 obj->jumptable_maps[cnt].sym_off = sym_off; 6233 obj->jumptable_maps[cnt].fd = map_fd; 6234 obj->jumptable_map_cnt++; 6235 6236 return 0; 6237 } 6238 6239 static int find_subprog_idx(struct bpf_program *prog, int insn_idx) 6240 { 6241 int i; 6242 6243 for (i = prog->subprog_cnt - 1; i >= 0; i--) { 6244 if (insn_idx >= prog->subprogs[i].sub_insn_off) 6245 return i; 6246 } 6247 6248 return -1; 6249 } 6250 6251 static int create_jt_map(struct bpf_object *obj, struct bpf_program *prog, struct reloc_desc *relo) 6252 { 6253 const __u32 jt_entry_size = 8; 6254 unsigned int sym_off = relo->sym_off; 6255 int jt_size = relo->sym_size; 6256 __u32 max_entries = jt_size / jt_entry_size; 6257 __u32 value_size = sizeof(struct bpf_insn_array_value); 6258 struct bpf_insn_array_value val = {}; 6259 int subprog_idx; 6260 int map_fd, err; 6261 __u64 insn_off; 6262 __u64 *jt; 6263 __u32 i; 6264 6265 map_fd = find_jt_map(obj, prog, sym_off); 6266 if (map_fd >= 0) 6267 return map_fd; 6268 6269 if (sym_off % jt_entry_size) { 6270 pr_warn("map '.jumptables': jumptable start %u should be multiple of %u\n", 6271 sym_off, jt_entry_size); 6272 return -EINVAL; 6273 } 6274 6275 if (jt_size % jt_entry_size) { 6276 pr_warn("map '.jumptables': jumptable size %d should be multiple of %u\n", 6277 jt_size, jt_entry_size); 6278 return -EINVAL; 6279 } 6280 6281 map_fd = bpf_map_create(BPF_MAP_TYPE_INSN_ARRAY, ".jumptables", 6282 4, value_size, max_entries, NULL); 6283 if (map_fd < 0) 6284 return map_fd; 6285 6286 if (!obj->jumptables_data) { 6287 pr_warn("map '.jumptables': ELF file is missing jump table data\n"); 6288 err = -EINVAL; 6289 goto err_close; 6290 } 6291 if (sym_off + jt_size > obj->jumptables_data_sz) { 6292 pr_warn("map '.jumptables': jumptables_data size is %zd, trying to access %d\n", 6293 obj->jumptables_data_sz, sym_off + jt_size); 6294 err = -EINVAL; 6295 goto err_close; 6296 } 6297 6298 subprog_idx = -1; /* main program */ 6299 if (relo->insn_idx < 0 || relo->insn_idx >= prog->insns_cnt) { 6300 pr_warn("map '.jumptables': invalid instruction index %d\n", relo->insn_idx); 6301 err = -EINVAL; 6302 goto err_close; 6303 } 6304 if (prog->subprogs) 6305 subprog_idx = find_subprog_idx(prog, relo->insn_idx); 6306 6307 jt = (__u64 *)(obj->jumptables_data + sym_off); 6308 for (i = 0; i < max_entries; i++) { 6309 /* 6310 * The offset should be made to be relative to the beginning of 6311 * the main function, not the subfunction. 6312 */ 6313 insn_off = jt[i]/sizeof(struct bpf_insn); 6314 if (subprog_idx >= 0) { 6315 insn_off -= prog->subprogs[subprog_idx].sec_insn_off; 6316 insn_off += prog->subprogs[subprog_idx].sub_insn_off; 6317 } else { 6318 insn_off -= prog->sec_insn_off; 6319 } 6320 6321 /* 6322 * LLVM-generated jump tables contain u64 records, however 6323 * should contain values that fit in u32. 
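 * E.g., a record of 512 is byte offset 512 within the program's ELF section, i.e. instruction 64; after the adjustment above it is stored as an instruction index relative to the main program's start, and any value that doesn't fit into u32 is rejected below.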
6324 */ 6325 if (insn_off > UINT32_MAX) { 6326 pr_warn("map '.jumptables': invalid jump table value 0x%llx at offset %u\n", 6327 (long long)jt[i], sym_off + i * jt_entry_size); 6328 err = -EINVAL; 6329 goto err_close; 6330 } 6331 6332 val.orig_off = insn_off; 6333 err = bpf_map_update_elem(map_fd, &i, &val, 0); 6334 if (err) 6335 goto err_close; 6336 } 6337 6338 err = bpf_map_freeze(map_fd); 6339 if (err) 6340 goto err_close; 6341 6342 err = add_jt_map(obj, prog, sym_off, map_fd); 6343 if (err) 6344 goto err_close; 6345 6346 return map_fd; 6347 6348 err_close: 6349 close(map_fd); 6350 return err; 6351 } 6352 6353 /* Relocate data references within program code: 6354 * - map references; 6355 * - global variable references; 6356 * - extern references. 6357 */ 6358 static int 6359 bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog) 6360 { 6361 int i; 6362 6363 for (i = 0; i < prog->nr_reloc; i++) { 6364 struct reloc_desc *relo = &prog->reloc_desc[i]; 6365 struct bpf_insn *insn = &prog->insns[relo->insn_idx]; 6366 const struct bpf_map *map; 6367 struct extern_desc *ext; 6368 6369 switch (relo->type) { 6370 case RELO_LD64: 6371 map = &obj->maps[relo->map_idx]; 6372 if (obj->gen_loader) { 6373 insn[0].src_reg = BPF_PSEUDO_MAP_IDX; 6374 insn[0].imm = relo->map_idx; 6375 } else if (map->autocreate) { 6376 insn[0].src_reg = BPF_PSEUDO_MAP_FD; 6377 insn[0].imm = map->fd; 6378 } else { 6379 poison_map_ldimm64(prog, i, relo->insn_idx, insn, 6380 relo->map_idx, map); 6381 } 6382 break; 6383 case RELO_DATA: 6384 map = &obj->maps[relo->map_idx]; 6385 insn[1].imm = insn[0].imm + relo->sym_off; 6386 if (obj->gen_loader) { 6387 insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE; 6388 insn[0].imm = relo->map_idx; 6389 } else if (map->autocreate) { 6390 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; 6391 insn[0].imm = map->fd; 6392 } else { 6393 poison_map_ldimm64(prog, i, relo->insn_idx, insn, 6394 relo->map_idx, map); 6395 } 6396 break; 6397 case RELO_EXTERN_LD64: 6398 ext = &obj->externs[relo->ext_idx]; 6399 if (ext->type == EXT_KCFG) { 6400 if (obj->gen_loader) { 6401 insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE; 6402 insn[0].imm = obj->kconfig_map_idx; 6403 } else { 6404 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; 6405 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd; 6406 } 6407 insn[1].imm = ext->kcfg.data_off; 6408 } else /* EXT_KSYM */ { 6409 if (ext->ksym.type_id && ext->is_set) { /* typed ksyms */ 6410 insn[0].src_reg = BPF_PSEUDO_BTF_ID; 6411 insn[0].imm = ext->ksym.kernel_btf_id; 6412 insn[1].imm = ext->ksym.kernel_btf_obj_fd; 6413 } else { /* typeless ksyms or unresolved typed ksyms */ 6414 insn[0].imm = (__u32)ext->ksym.addr; 6415 insn[1].imm = ext->ksym.addr >> 32; 6416 } 6417 } 6418 break; 6419 case RELO_EXTERN_CALL: 6420 ext = &obj->externs[relo->ext_idx]; 6421 insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL; 6422 if (ext->is_set) { 6423 insn[0].imm = ext->ksym.kernel_btf_id; 6424 insn[0].off = ext->ksym.btf_fd_idx; 6425 } else { /* unresolved weak kfunc call */ 6426 poison_kfunc_call(prog, i, relo->insn_idx, insn, 6427 relo->ext_idx, ext); 6428 } 6429 break; 6430 case RELO_SUBPROG_ADDR: 6431 if (insn[0].src_reg != BPF_PSEUDO_FUNC) { 6432 pr_warn("prog '%s': relo #%d: bad insn\n", 6433 prog->name, i); 6434 return -EINVAL; 6435 } 6436 /* handled already */ 6437 break; 6438 case RELO_CALL: 6439 /* handled already */ 6440 break; 6441 case RELO_CORE: 6442 /* will be handled by bpf_program_record_relos() */ 6443 break; 6444 case RELO_INSN_ARRAY: { 6445 int map_fd; 6446 6447 map_fd = create_jt_map(obj, 
prog, relo); 6448 if (map_fd < 0) { 6449 pr_warn("prog '%s': relo #%d: can't create jump table: sym_off %u\n", 6450 prog->name, i, relo->sym_off); 6451 return map_fd; 6452 } 6453 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; 6454 insn->imm = map_fd; 6455 insn->off = 0; 6456 } 6457 break; 6458 default: 6459 pr_warn("prog '%s': relo #%d: bad relo type %d\n", 6460 prog->name, i, relo->type); 6461 return -EINVAL; 6462 } 6463 } 6464 6465 return 0; 6466 } 6467 6468 static int adjust_prog_btf_ext_info(const struct bpf_object *obj, 6469 const struct bpf_program *prog, 6470 const struct btf_ext_info *ext_info, 6471 void **prog_info, __u32 *prog_rec_cnt, 6472 __u32 *prog_rec_sz) 6473 { 6474 void *copy_start = NULL, *copy_end = NULL; 6475 void *rec, *rec_end, *new_prog_info; 6476 const struct btf_ext_info_sec *sec; 6477 size_t old_sz, new_sz; 6478 int i, sec_num, sec_idx, off_adj; 6479 6480 sec_num = 0; 6481 for_each_btf_ext_sec(ext_info, sec) { 6482 sec_idx = ext_info->sec_idxs[sec_num]; 6483 sec_num++; 6484 if (prog->sec_idx != sec_idx) 6485 continue; 6486 6487 for_each_btf_ext_rec(ext_info, sec, i, rec) { 6488 __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ; 6489 6490 if (insn_off < prog->sec_insn_off) 6491 continue; 6492 if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt) 6493 break; 6494 6495 if (!copy_start) 6496 copy_start = rec; 6497 copy_end = rec + ext_info->rec_size; 6498 } 6499 6500 if (!copy_start) 6501 return -ENOENT; 6502 6503 /* append func/line info of a given (sub-)program to the main 6504 * program func/line info 6505 */ 6506 old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size; 6507 new_sz = old_sz + (copy_end - copy_start); 6508 new_prog_info = realloc(*prog_info, new_sz); 6509 if (!new_prog_info) 6510 return -ENOMEM; 6511 *prog_info = new_prog_info; 6512 *prog_rec_cnt = new_sz / ext_info->rec_size; 6513 memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start); 6514 6515 /* Kernel instruction offsets are in units of 8-byte 6516 * instructions, while .BTF.ext instruction offsets generated 6517 * by Clang are in units of bytes. So convert Clang offsets 6518 * into kernel offsets and adjust offset according to program 6519 * relocated position. 
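 * For example, a record with a byte offset of 64 becomes instruction offset 8; if the (sub-)program started at sec_insn_off 5 in its section and was appended at sub_insn_off 100, the adjusted offset is 8 - 5 + 100 = 103.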
6520 */ 6521 off_adj = prog->sub_insn_off - prog->sec_insn_off; 6522 rec = new_prog_info + old_sz; 6523 rec_end = new_prog_info + new_sz; 6524 for (; rec < rec_end; rec += ext_info->rec_size) { 6525 __u32 *insn_off = rec; 6526 6527 *insn_off = *insn_off / BPF_INSN_SZ + off_adj; 6528 } 6529 *prog_rec_sz = ext_info->rec_size; 6530 return 0; 6531 } 6532 6533 return -ENOENT; 6534 } 6535 6536 static int 6537 reloc_prog_func_and_line_info(const struct bpf_object *obj, 6538 struct bpf_program *main_prog, 6539 const struct bpf_program *prog) 6540 { 6541 int err; 6542 6543 /* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't 6544 * support func/line info 6545 */ 6546 if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC)) 6547 return 0; 6548 6549 /* only attempt func info relocation if main program's func_info 6550 * relocation was successful 6551 */ 6552 if (main_prog != prog && !main_prog->func_info) 6553 goto line_info; 6554 6555 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info, 6556 &main_prog->func_info, 6557 &main_prog->func_info_cnt, 6558 &main_prog->func_info_rec_size); 6559 if (err) { 6560 if (err != -ENOENT) { 6561 pr_warn("prog '%s': error relocating .BTF.ext function info: %s\n", 6562 prog->name, errstr(err)); 6563 return err; 6564 } 6565 if (main_prog->func_info) { 6566 /* 6567 * Some info has already been found, but there was a 6568 * problem with the last btf_ext reloc. We must error out. 6569 */ 6570 pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name); 6571 return err; 6572 } 6573 /* There was a problem loading the very first info. Ignore the rest. */ 6574 pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n", 6575 prog->name); 6576 } 6577 6578 line_info: 6579 /* don't relocate line info if main program's relocation failed */ 6580 if (main_prog != prog && !main_prog->line_info) 6581 return 0; 6582 6583 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info, 6584 &main_prog->line_info, 6585 &main_prog->line_info_cnt, 6586 &main_prog->line_info_rec_size); 6587 if (err) { 6588 if (err != -ENOENT) { 6589 pr_warn("prog '%s': error relocating .BTF.ext line info: %s\n", 6590 prog->name, errstr(err)); 6591 return err; 6592 } 6593 if (main_prog->line_info) { 6594 /* 6595 * Some info has already been found, but there was a 6596 * problem with the last btf_ext reloc. We must error out. 6597 */ 6598 pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name); 6599 return err; 6600 } 6601 /* There was a problem loading the very first info. Ignore the rest. */ 6602 pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n", 6603 prog->name); 6604 } 6605 return 0; 6606 } 6607 6608 static int cmp_relo_by_insn_idx(const void *key, const void *elem) 6609 { 6610 size_t insn_idx = *(const size_t *)key; 6611 const struct reloc_desc *relo = elem; 6612 6613 if (insn_idx == relo->insn_idx) 6614 return 0; 6615 return insn_idx < relo->insn_idx ?
-1 : 1; 6616 } 6617 6618 static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx) 6619 { 6620 if (!prog->nr_reloc) 6621 return NULL; 6622 return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc, 6623 sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx); 6624 } 6625 6626 static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog) 6627 { 6628 int new_cnt = main_prog->nr_reloc + subprog->nr_reloc; 6629 struct reloc_desc *relos; 6630 int i; 6631 6632 if (main_prog == subprog) 6633 return 0; 6634 relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos)); 6635 /* if new count is zero, reallocarray can return a valid NULL result; 6636 * in this case the previous pointer will be freed, so we *have to* 6637 * reassign old pointer to the new value (even if it's NULL) 6638 */ 6639 if (!relos && new_cnt) 6640 return -ENOMEM; 6641 if (subprog->nr_reloc) 6642 memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc, 6643 sizeof(*relos) * subprog->nr_reloc); 6644 6645 for (i = main_prog->nr_reloc; i < new_cnt; i++) 6646 relos[i].insn_idx += subprog->sub_insn_off; 6647 /* After insn_idx adjustment the 'relos' array is still sorted 6648 * by insn_idx and doesn't break bsearch. 6649 */ 6650 main_prog->reloc_desc = relos; 6651 main_prog->nr_reloc = new_cnt; 6652 return 0; 6653 } 6654 6655 static int save_subprog_offsets(struct bpf_program *main_prog, struct bpf_program *subprog) 6656 { 6657 size_t size = sizeof(main_prog->subprogs[0]); 6658 int cnt = main_prog->subprog_cnt; 6659 void *tmp; 6660 6661 tmp = libbpf_reallocarray(main_prog->subprogs, cnt + 1, size); 6662 if (!tmp) 6663 return -ENOMEM; 6664 6665 main_prog->subprogs = tmp; 6666 main_prog->subprogs[cnt].sec_insn_off = subprog->sec_insn_off; 6667 main_prog->subprogs[cnt].sub_insn_off = subprog->sub_insn_off; 6668 main_prog->subprog_cnt++; 6669 6670 return 0; 6671 } 6672 6673 static int 6674 bpf_object__append_subprog_code(struct bpf_object *obj, struct bpf_program *main_prog, 6675 struct bpf_program *subprog) 6676 { 6677 struct bpf_insn *insns; 6678 size_t new_cnt; 6679 int err; 6680 6681 subprog->sub_insn_off = main_prog->insns_cnt; 6682 6683 new_cnt = main_prog->insns_cnt + subprog->insns_cnt; 6684 insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns)); 6685 if (!insns) { 6686 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name); 6687 return -ENOMEM; 6688 } 6689 main_prog->insns = insns; 6690 main_prog->insns_cnt = new_cnt; 6691 6692 memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns, 6693 subprog->insns_cnt * sizeof(*insns)); 6694 6695 pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n", 6696 main_prog->name, subprog->insns_cnt, subprog->name); 6697 6698 /* The subprog insns are now appended. Append its relos too. 
*/ 6699 err = append_subprog_relos(main_prog, subprog); 6700 if (err) 6701 return err; 6702 6703 err = save_subprog_offsets(main_prog, subprog); 6704 if (err) { 6705 pr_warn("prog '%s': failed to add subprog offsets: %s\n", 6706 main_prog->name, errstr(err)); 6707 return err; 6708 } 6709 6710 return 0; 6711 } 6712 6713 static int 6714 bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog, 6715 struct bpf_program *prog) 6716 { 6717 size_t sub_insn_idx, insn_idx; 6718 struct bpf_program *subprog; 6719 struct reloc_desc *relo; 6720 struct bpf_insn *insn; 6721 int err; 6722 6723 err = reloc_prog_func_and_line_info(obj, main_prog, prog); 6724 if (err) 6725 return err; 6726 6727 for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) { 6728 insn = &main_prog->insns[prog->sub_insn_off + insn_idx]; 6729 if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn)) 6730 continue; 6731 6732 relo = find_prog_insn_relo(prog, insn_idx); 6733 if (relo && relo->type == RELO_EXTERN_CALL) 6734 /* kfunc relocations will be handled later 6735 * in bpf_object__relocate_data() 6736 */ 6737 continue; 6738 if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) { 6739 pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n", 6740 prog->name, insn_idx, relo->type); 6741 return -LIBBPF_ERRNO__RELOC; 6742 } 6743 if (relo) { 6744 /* sub-program instruction index is a combination of 6745 * an offset of a symbol pointed to by relocation and 6746 * call instruction's imm field; for global functions, 6747 * call always has imm = -1, but for static functions 6748 * relocation is against STT_SECTION and insn->imm 6749 * points to a start of a static function 6750 * 6751 * for subprog addr relocation, the relo->sym_off + insn->imm is 6752 * the byte offset in the corresponding section. 6753 */ 6754 if (relo->type == RELO_CALL) 6755 sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1; 6756 else 6757 sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ; 6758 } else if (insn_is_pseudo_func(insn)) { 6759 /* 6760 * RELO_SUBPROG_ADDR relo is always emitted even if both 6761 * functions are in the same section, so it shouldn't reach here. 
6762 */ 6763 pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n", 6764 prog->name, insn_idx); 6765 return -LIBBPF_ERRNO__RELOC; 6766 } else { 6767 /* if subprogram call is to a static function within 6768 * the same ELF section, there won't be any relocation 6769 * emitted, but it also means there is no additional 6770 * offset necessary, insn->imm is relative to the 6771 * instruction's original position within the section 6772 */ 6773 sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1; 6774 } 6775 6776 /* we enforce that sub-programs should be in .text section */ 6777 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx); 6778 if (!subprog) { 6779 pr_warn("prog '%s': no .text section found yet sub-program call exists\n", 6780 prog->name); 6781 return -LIBBPF_ERRNO__RELOC; 6782 } 6783 6784 /* if it's the first call instruction calling into this 6785 * subprogram (meaning this subprog hasn't been processed 6786 * yet) within the context of current main program: 6787 * - append it at the end of the main program's instruction block; 6788 * - process it recursively, while the current program is put on hold; 6789 * - if that subprogram calls some other not yet processed 6790 * subprogram, the same thing will happen recursively until 6791 * there are no more unprocessed subprograms left to append 6792 * and relocate. 6793 */ 6794 if (subprog->sub_insn_off == 0) { 6795 err = bpf_object__append_subprog_code(obj, main_prog, subprog); 6796 if (err) 6797 return err; 6798 err = bpf_object__reloc_code(obj, main_prog, subprog); 6799 if (err) 6800 return err; 6801 } 6802 6803 /* main_prog->insns memory could have been re-allocated, so 6804 * calculate pointer again 6805 */ 6806 insn = &main_prog->insns[prog->sub_insn_off + insn_idx]; 6807 /* calculate correct instruction position within current main 6808 * prog; each main prog can have a different set of 6809 * subprograms appended (potentially in different order as 6810 * well), so position of any subprog can be different for 6811 * different main programs 6812 */ 6813 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1; 6814 6815 pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n", 6816 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off); 6817 } 6818 6819 return 0; 6820 } 6821 6822 /* 6823 * Relocate sub-program calls. 6824 * 6825 * Algorithm operates as follows. Each entry-point BPF program (referred to as 6826 * main prog) is processed separately. Each subprog (a non-entry function, 6827 * which can be called from either entry progs or other subprogs) gets its 6828 * sub_insn_off reset to zero. This serves as an indicator that this subprogram 6829 * hasn't yet been appended and relocated within current main prog. Once it's 6830 * relocated, sub_insn_off will point at the position within current main prog 6831 * where given subprog was appended. This will further be used to relocate all 6832 * the call instructions jumping into this subprog. 6833 * 6834 * We start with main program and process all call instructions. If the call 6835 * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off 6836 * is zero), subprog instructions are appended at the end of main program's 6837 * instruction array. Then main program is "put on hold" while we recursively 6838 * process newly appended subprogram.
If that subprogram calls into another 6839 * subprogram that hasn't been appended, new subprogram is appended again to 6840 * the *main* prog's instructions (subprog's instructions are always left 6841 * untouched, as they need to be in unmodified state for subsequent main progs 6842 * and subprog instructions are always sent only as part of a main prog) and 6843 * the process continues recursively. Once all the subprogs called from a main 6844 * prog or any of its subprogs are appended (and relocated), all their 6845 * positions within finalized instructions array are known, so it's easy to 6846 * rewrite call instructions with correct relative offsets, corresponding to 6847 * desired target subprog. 6848 * 6849 * It's important to realize that some subprogs might not be called from some 6850 * main prog and any of its called/used subprogs. Those will keep their 6851 * subprog->sub_insn_off as zero at all times and won't be appended to current 6852 * main prog and won't be relocated within the context of current main prog. 6853 * They might still be used from other main progs later. 6854 * 6855 * Visually this process can be shown as below. Suppose we have two main 6856 * programs mainA and mainB and BPF object contains three subprogs: subA, 6857 * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and 6858 * subC both call subB: 6859 * 6860 * +--------+ +-------+ 6861 * | v v | 6862 * +--+---+ +--+-+-+ +---+--+ 6863 * | subA | | subB | | subC | 6864 * +--+---+ +------+ +---+--+ 6865 * ^ ^ 6866 * | | 6867 * +---+-------+ +------+----+ 6868 * | mainA | | mainB | 6869 * +-----------+ +-----------+ 6870 * 6871 * We'll start relocating mainA, will find subA, append it and start 6872 * processing subA recursively: 6873 * 6874 * +-----------+------+ 6875 * | mainA | subA | 6876 * +-----------+------+ 6877 * 6878 * At this point we notice that subB is used from subA, so we append it and 6879 * relocate (there are no further subcalls from subB): 6880 * 6881 * +-----------+------+------+ 6882 * | mainA | subA | subB | 6883 * +-----------+------+------+ 6884 * 6885 * At this point, we relocate subA calls, then go one level up and finish with 6886 * relocating mainA calls. mainA is done. 6887 * 6888 * For mainB the process is similar but results in a different order. We start 6889 * with mainB and skip subA and subB, as mainB never calls them (at least 6890 * directly), but we see subC is needed, so we append and start processing it: 6891 * 6892 * +-----------+------+ 6893 * | mainB | subC | 6894 * +-----------+------+ 6895 * Now we see subC needs subB, so we go back to it, append and relocate it: 6896 * 6897 * +-----------+------+------+ 6898 * | mainB | subC | subB | 6899 * +-----------+------+------+ 6900 * 6901 * At this point we unwind recursion, relocate calls in subC, then in mainB.
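 * The rewritten call imm is simply the relative distance in instructions: for a call sitting at absolute instruction N within the main prog and targeting a subprog appended at offset S, imm = S - N - 1 (e.g., a call at insn #10 to a subprog appended at offset 100 gets imm = 89).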
6902 */ 6903 static int 6904 bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog) 6905 { 6906 struct bpf_program *subprog; 6907 int i, err; 6908 6909 /* mark all subprogs as not relocated (yet) within the context of 6910 * current main program 6911 */ 6912 for (i = 0; i < obj->nr_programs; i++) { 6913 subprog = &obj->programs[i]; 6914 if (!prog_is_subprog(obj, subprog)) 6915 continue; 6916 6917 subprog->sub_insn_off = 0; 6918 } 6919 6920 err = bpf_object__reloc_code(obj, prog, prog); 6921 if (err) 6922 return err; 6923 6924 return 0; 6925 } 6926 6927 static void 6928 bpf_object__free_relocs(struct bpf_object *obj) 6929 { 6930 struct bpf_program *prog; 6931 int i; 6932 6933 /* free up relocation descriptors */ 6934 for (i = 0; i < obj->nr_programs; i++) { 6935 prog = &obj->programs[i]; 6936 zfree(&prog->reloc_desc); 6937 prog->nr_reloc = 0; 6938 } 6939 } 6940 6941 static int cmp_relocs(const void *_a, const void *_b) 6942 { 6943 const struct reloc_desc *a = _a; 6944 const struct reloc_desc *b = _b; 6945 6946 if (a->insn_idx != b->insn_idx) 6947 return a->insn_idx < b->insn_idx ? -1 : 1; 6948 6949 /* no two relocations should have the same insn_idx, but ... */ 6950 if (a->type != b->type) 6951 return a->type < b->type ? -1 : 1; 6952 6953 return 0; 6954 } 6955 6956 static void bpf_object__sort_relos(struct bpf_object *obj) 6957 { 6958 int i; 6959 6960 for (i = 0; i < obj->nr_programs; i++) { 6961 struct bpf_program *p = &obj->programs[i]; 6962 6963 if (!p->nr_reloc) 6964 continue; 6965 6966 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs); 6967 } 6968 } 6969 6970 static int bpf_prog_assign_exc_cb(struct bpf_object *obj, struct bpf_program *prog) 6971 { 6972 const char *str = "exception_callback:"; 6973 size_t pfx_len = strlen(str); 6974 int i, j, n; 6975 6976 if (!obj->btf || !kernel_supports(obj, FEAT_BTF_DECL_TAG)) 6977 return 0; 6978 6979 n = btf__type_cnt(obj->btf); 6980 for (i = 1; i < n; i++) { 6981 const char *name; 6982 struct btf_type *t; 6983 6984 t = btf_type_by_id(obj->btf, i); 6985 if (!btf_is_decl_tag(t) || btf_decl_tag(t)->component_idx != -1) 6986 continue; 6987 6988 name = btf__str_by_offset(obj->btf, t->name_off); 6989 if (strncmp(name, str, pfx_len) != 0) 6990 continue; 6991 6992 t = btf_type_by_id(obj->btf, t->type); 6993 if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL) { 6994 pr_warn("prog '%s': exception_callback:<value> decl tag not applied to the main program\n", 6995 prog->name); 6996 return -EINVAL; 6997 } 6998 if (strcmp(prog->name, btf__str_by_offset(obj->btf, t->name_off)) != 0) 6999 continue; 7000 /* Multiple callbacks are specified for the same prog, 7001 * the verifier will eventually return an error for this 7002 * case, hence simply skip appending a subprog. 7003 */ 7004 if (prog->exception_cb_idx >= 0) { 7005 prog->exception_cb_idx = -1; 7006 break; 7007 } 7008 7009 name += pfx_len; 7010 if (str_is_empty(name)) { 7011 pr_warn("prog '%s': exception_callback:<value> decl tag contains empty value\n", 7012 prog->name); 7013 return -EINVAL; 7014 } 7015 7016 for (j = 0; j < obj->nr_programs; j++) { 7017 struct bpf_program *subprog = &obj->programs[j]; 7018 7019 if (!prog_is_subprog(obj, subprog)) 7020 continue; 7021 if (strcmp(name, subprog->name) != 0) 7022 continue; 7023 /* Enforce non-hidden, as from verifier point of 7024 * view it expects global functions, whereas the 7025 * mark_btf_static fixes up linkage as static. 
7026 */ 7027 if (!subprog->sym_global || subprog->mark_btf_static) { 7028 pr_warn("prog '%s': exception callback %s must be a global non-hidden function\n", 7029 prog->name, subprog->name); 7030 return -EINVAL; 7031 } 7032 /* Let's see if we already saw a static exception callback with the same name */ 7033 if (prog->exception_cb_idx >= 0) { 7034 pr_warn("prog '%s': multiple subprogs with same name as exception callback '%s'\n", 7035 prog->name, subprog->name); 7036 return -EINVAL; 7037 } 7038 prog->exception_cb_idx = j; 7039 break; 7040 } 7041 7042 if (prog->exception_cb_idx >= 0) 7043 continue; 7044 7045 pr_warn("prog '%s': cannot find exception callback '%s'\n", prog->name, name); 7046 return -ENOENT; 7047 } 7048 7049 return 0; 7050 } 7051 7052 static struct { 7053 enum bpf_prog_type prog_type; 7054 const char *ctx_name; 7055 } global_ctx_map[] = { 7056 { BPF_PROG_TYPE_CGROUP_DEVICE, "bpf_cgroup_dev_ctx" }, 7057 { BPF_PROG_TYPE_CGROUP_SKB, "__sk_buff" }, 7058 { BPF_PROG_TYPE_CGROUP_SOCK, "bpf_sock" }, 7059 { BPF_PROG_TYPE_CGROUP_SOCK_ADDR, "bpf_sock_addr" }, 7060 { BPF_PROG_TYPE_CGROUP_SOCKOPT, "bpf_sockopt" }, 7061 { BPF_PROG_TYPE_CGROUP_SYSCTL, "bpf_sysctl" }, 7062 { BPF_PROG_TYPE_FLOW_DISSECTOR, "__sk_buff" }, 7063 { BPF_PROG_TYPE_KPROBE, "bpf_user_pt_regs_t" }, 7064 { BPF_PROG_TYPE_LWT_IN, "__sk_buff" }, 7065 { BPF_PROG_TYPE_LWT_OUT, "__sk_buff" }, 7066 { BPF_PROG_TYPE_LWT_SEG6LOCAL, "__sk_buff" }, 7067 { BPF_PROG_TYPE_LWT_XMIT, "__sk_buff" }, 7068 { BPF_PROG_TYPE_NETFILTER, "bpf_nf_ctx" }, 7069 { BPF_PROG_TYPE_PERF_EVENT, "bpf_perf_event_data" }, 7070 { BPF_PROG_TYPE_RAW_TRACEPOINT, "bpf_raw_tracepoint_args" }, 7071 { BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, "bpf_raw_tracepoint_args" }, 7072 { BPF_PROG_TYPE_SCHED_ACT, "__sk_buff" }, 7073 { BPF_PROG_TYPE_SCHED_CLS, "__sk_buff" }, 7074 { BPF_PROG_TYPE_SK_LOOKUP, "bpf_sk_lookup" }, 7075 { BPF_PROG_TYPE_SK_MSG, "sk_msg_md" }, 7076 { BPF_PROG_TYPE_SK_REUSEPORT, "sk_reuseport_md" }, 7077 { BPF_PROG_TYPE_SK_SKB, "__sk_buff" }, 7078 { BPF_PROG_TYPE_SOCK_OPS, "bpf_sock_ops" }, 7079 { BPF_PROG_TYPE_SOCKET_FILTER, "__sk_buff" }, 7080 { BPF_PROG_TYPE_XDP, "xdp_md" }, 7081 /* all other program types don't have "named" context structs */ 7082 }; 7083 7084 /* forward declarations for arch-specific underlying types of bpf_user_pt_regs_t typedef, 7085 * for below __builtin_types_compatible_p() checks; 7086 * with this approach we don't need any extra arch-specific #ifdef guards 7087 */ 7088 struct pt_regs; 7089 struct user_pt_regs; 7090 struct user_regs_struct; 7091 7092 static bool need_func_arg_type_fixup(const struct btf *btf, const struct bpf_program *prog, 7093 const char *subprog_name, int arg_idx, 7094 int arg_type_id, const char *ctx_name) 7095 { 7096 const struct btf_type *t; 7097 const char *tname; 7098 7099 /* check if existing parameter already matches verifier expectations */ 7100 t = skip_mods_and_typedefs(btf, arg_type_id, NULL); 7101 if (!btf_is_ptr(t)) 7102 goto out_warn; 7103 7104 /* typedef bpf_user_pt_regs_t is a special PITA case, valid for kprobe 7105 * and perf_event programs, so check this case early on and forget 7106 * about it for subsequent checks 7107 */ 7108 while (btf_is_mod(t)) 7109 t = btf__type_by_id(btf, t->type); 7110 if (btf_is_typedef(t) && 7111 (prog->type == BPF_PROG_TYPE_KPROBE || prog->type == BPF_PROG_TYPE_PERF_EVENT)) { 7112 tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>"; 7113 if (strcmp(tname, "bpf_user_pt_regs_t") == 0) 7114 return false; /* canonical type for kprobe/perf_event */ 7115 } 7116 
7117 /* now we can ignore typedefs moving forward */ 7118 t = skip_mods_and_typedefs(btf, t->type, NULL); 7119 7120 /* if it's `void *`, definitely fix up BTF info */ 7121 if (btf_is_void(t)) 7122 return true; 7123 7124 /* if it's already proper canonical type, no need to fix up */ 7125 tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>"; 7126 if (btf_is_struct(t) && strcmp(tname, ctx_name) == 0) 7127 return false; 7128 7129 /* special cases */ 7130 switch (prog->type) { 7131 case BPF_PROG_TYPE_KPROBE: 7132 /* `struct pt_regs *` is expected, but we need to fix up */ 7133 if (btf_is_struct(t) && strcmp(tname, "pt_regs") == 0) 7134 return true; 7135 break; 7136 case BPF_PROG_TYPE_PERF_EVENT: 7137 if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct pt_regs) && 7138 btf_is_struct(t) && strcmp(tname, "pt_regs") == 0) 7139 return true; 7140 if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_pt_regs) && 7141 btf_is_struct(t) && strcmp(tname, "user_pt_regs") == 0) 7142 return true; 7143 if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_regs_struct) && 7144 btf_is_struct(t) && strcmp(tname, "user_regs_struct") == 0) 7145 return true; 7146 break; 7147 case BPF_PROG_TYPE_RAW_TRACEPOINT: 7148 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 7149 /* allow u64* as ctx */ 7150 if (btf_is_int(t) && t->size == 8) 7151 return true; 7152 break; 7153 default: 7154 break; 7155 } 7156 7157 out_warn: 7158 pr_warn("prog '%s': subprog '%s' arg#%d is expected to be of `struct %s *` type\n", 7159 prog->name, subprog_name, arg_idx, ctx_name); 7160 return false; 7161 } 7162 7163 static int clone_func_btf_info(struct btf *btf, int orig_fn_id, struct bpf_program *prog) 7164 { 7165 int fn_id, fn_proto_id, ret_type_id, orig_proto_id; 7166 int i, err, arg_cnt, fn_name_off, linkage; 7167 struct btf_type *fn_t, *fn_proto_t, *t; 7168 struct btf_param *p; 7169 7170 /* caller already validated FUNC -> FUNC_PROTO validity */ 7171 fn_t = btf_type_by_id(btf, orig_fn_id); 7172 fn_proto_t = btf_type_by_id(btf, fn_t->type); 7173 7174 /* Note that each btf__add_xxx() operation invalidates 7175 * all btf_type and string pointers, so we need to be 7176 * very careful when cloning BTF types. BTF type 7177 * pointers have to be always refetched. And to avoid 7178 * problems with invalidated string pointers, we 7179 * add empty strings initially, then just fix up 7180 * name_off offsets in place. Offsets are stable for 7181 * existing strings, so that works out. 
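 * (The invalidation happens because btf__add_*() may grow and reallocate the underlying type and string data, so any struct btf_type * or string pointer obtained before such a call may become dangling.)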
7182 */ 7183 fn_name_off = fn_t->name_off; /* we are about to invalidate fn_t */ 7184 linkage = btf_func_linkage(fn_t); 7185 orig_proto_id = fn_t->type; /* original FUNC_PROTO ID */ 7186 ret_type_id = fn_proto_t->type; /* fn_proto_t will be invalidated */ 7187 arg_cnt = btf_vlen(fn_proto_t); 7188 7189 /* clone FUNC_PROTO and its params */ 7190 fn_proto_id = btf__add_func_proto(btf, ret_type_id); 7191 if (fn_proto_id < 0) 7192 return -EINVAL; 7193 7194 for (i = 0; i < arg_cnt; i++) { 7195 int name_off; 7196 7197 /* copy original parameter data */ 7198 t = btf_type_by_id(btf, orig_proto_id); 7199 p = &btf_params(t)[i]; 7200 name_off = p->name_off; 7201 7202 err = btf__add_func_param(btf, "", p->type); 7203 if (err) 7204 return err; 7205 7206 fn_proto_t = btf_type_by_id(btf, fn_proto_id); 7207 p = &btf_params(fn_proto_t)[i]; 7208 p->name_off = name_off; /* use remembered str offset */ 7209 } 7210 7211 /* clone FUNC now, btf__add_func() enforces non-empty name, so use 7212 * entry program's name as a placeholder, which we replace immediately 7213 * with original name_off 7214 */ 7215 fn_id = btf__add_func(btf, prog->name, linkage, fn_proto_id); 7216 if (fn_id < 0) 7217 return -EINVAL; 7218 7219 fn_t = btf_type_by_id(btf, fn_id); 7220 fn_t->name_off = fn_name_off; /* reuse original string */ 7221 7222 return fn_id; 7223 } 7224 7225 /* Check if main program or global subprog's function prototype has `arg:ctx` 7226 * argument tags, and, if necessary, substitute correct type to match what BPF 7227 * verifier would expect, taking into account specific program type. This 7228 * allows to support __arg_ctx tag transparently on old kernels that don't yet 7229 * have a native support for it in the verifier, making user's life much 7230 * easier. 7231 */ 7232 static int bpf_program_fixup_func_info(struct bpf_object *obj, struct bpf_program *prog) 7233 { 7234 const char *ctx_name = NULL, *ctx_tag = "arg:ctx", *fn_name; 7235 struct bpf_func_info_min *func_rec; 7236 struct btf_type *fn_t, *fn_proto_t; 7237 struct btf *btf = obj->btf; 7238 const struct btf_type *t; 7239 struct btf_param *p; 7240 int ptr_id = 0, struct_id, tag_id, orig_fn_id; 7241 int i, n, arg_idx, arg_cnt, err, rec_idx; 7242 int *orig_ids; 7243 7244 /* no .BTF.ext, no problem */ 7245 if (!obj->btf_ext || !prog->func_info) 7246 return 0; 7247 7248 /* don't do any fix ups if kernel natively supports __arg_ctx */ 7249 if (kernel_supports(obj, FEAT_ARG_CTX_TAG)) 7250 return 0; 7251 7252 /* some BPF program types just don't have named context structs, so 7253 * this fallback mechanism doesn't work for them 7254 */ 7255 for (i = 0; i < ARRAY_SIZE(global_ctx_map); i++) { 7256 if (global_ctx_map[i].prog_type != prog->type) 7257 continue; 7258 ctx_name = global_ctx_map[i].ctx_name; 7259 break; 7260 } 7261 if (!ctx_name) 7262 return 0; 7263 7264 /* remember original func BTF IDs to detect if we already cloned them */ 7265 orig_ids = calloc(prog->func_info_cnt, sizeof(*orig_ids)); 7266 if (!orig_ids) 7267 return -ENOMEM; 7268 for (i = 0; i < prog->func_info_cnt; i++) { 7269 func_rec = prog->func_info + prog->func_info_rec_size * i; 7270 orig_ids[i] = func_rec->type_id; 7271 } 7272 7273 /* go through each DECL_TAG with "arg:ctx" and see if it points to one 7274 * of our subprogs; if yes and subprog is global and needs adjustment, 7275 * clone and adjust FUNC -> FUNC_PROTO combo 7276 */ 7277 for (i = 1, n = btf__type_cnt(btf); i < n; i++) { 7278 /* only DECL_TAG with "arg:ctx" value are interesting */ 7279 t = btf__type_by_id(btf, i); 7280 if 
(!btf_is_decl_tag(t)) 7281 continue; 7282 if (strcmp(btf__str_by_offset(btf, t->name_off), ctx_tag) != 0) 7283 continue; 7284 7285 /* only global funcs need adjustment, if at all */ 7286 orig_fn_id = t->type; 7287 fn_t = btf_type_by_id(btf, orig_fn_id); 7288 if (!btf_is_func(fn_t) || btf_func_linkage(fn_t) != BTF_FUNC_GLOBAL) 7289 continue; 7290 7291 /* sanity check FUNC -> FUNC_PROTO chain, just in case */ 7292 fn_proto_t = btf_type_by_id(btf, fn_t->type); 7293 if (!fn_proto_t || !btf_is_func_proto(fn_proto_t)) 7294 continue; 7295 7296 /* find corresponding func_info record */ 7297 func_rec = NULL; 7298 for (rec_idx = 0; rec_idx < prog->func_info_cnt; rec_idx++) { 7299 if (orig_ids[rec_idx] == t->type) { 7300 func_rec = prog->func_info + prog->func_info_rec_size * rec_idx; 7301 break; 7302 } 7303 } 7304 /* current main program doesn't call into this subprog */ 7305 if (!func_rec) 7306 continue; 7307 7308 /* some more sanity checking of DECL_TAG */ 7309 arg_cnt = btf_vlen(fn_proto_t); 7310 arg_idx = btf_decl_tag(t)->component_idx; 7311 if (arg_idx < 0 || arg_idx >= arg_cnt) 7312 continue; 7313 7314 /* check if we should fix up argument type */ 7315 p = &btf_params(fn_proto_t)[arg_idx]; 7316 fn_name = btf__str_by_offset(btf, fn_t->name_off) ?: "<anon>"; 7317 if (!need_func_arg_type_fixup(btf, prog, fn_name, arg_idx, p->type, ctx_name)) 7318 continue; 7319 7320 /* clone fn/fn_proto, unless we already did it for another arg */ 7321 if (func_rec->type_id == orig_fn_id) { 7322 int fn_id; 7323 7324 fn_id = clone_func_btf_info(btf, orig_fn_id, prog); 7325 if (fn_id < 0) { 7326 err = fn_id; 7327 goto err_out; 7328 } 7329 7330 /* point func_info record to a cloned FUNC type */ 7331 func_rec->type_id = fn_id; 7332 } 7333 7334 /* create PTR -> STRUCT type chain to mark PTR_TO_CTX argument; 7335 * we do it just once per main BPF program, as all global 7336 * funcs share the same program type, so need only PTR -> 7337 * STRUCT type chain 7338 */ 7339 if (ptr_id == 0) { 7340 struct_id = btf__add_struct(btf, ctx_name, 0); 7341 ptr_id = btf__add_ptr(btf, struct_id); 7342 if (ptr_id < 0 || struct_id < 0) { 7343 err = -EINVAL; 7344 goto err_out; 7345 } 7346 } 7347 7348 /* for completeness, clone DECL_TAG and point it to cloned param */ 7349 tag_id = btf__add_decl_tag(btf, ctx_tag, func_rec->type_id, arg_idx); 7350 if (tag_id < 0) { 7351 err = -EINVAL; 7352 goto err_out; 7353 } 7354 7355 /* all the BTF manipulations invalidated pointers, refetch them */ 7356 fn_t = btf_type_by_id(btf, func_rec->type_id); 7357 fn_proto_t = btf_type_by_id(btf, fn_t->type); 7358 7359 /* fix up type ID pointed to by param */ 7360 p = &btf_params(fn_proto_t)[arg_idx]; 7361 p->type = ptr_id; 7362 } 7363 7364 free(orig_ids); 7365 return 0; 7366 err_out: 7367 free(orig_ids); 7368 return err; 7369 } 7370 7371 static int bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path) 7372 { 7373 struct bpf_program *prog; 7374 size_t i, j; 7375 int err; 7376 7377 if (obj->btf_ext) { 7378 err = bpf_object__relocate_core(obj, targ_btf_path); 7379 if (err) { 7380 pr_warn("failed to perform CO-RE relocations: %s\n", 7381 errstr(err)); 7382 return err; 7383 } 7384 bpf_object__sort_relos(obj); 7385 } 7386 7387 /* Before relocating calls pre-process relocations and mark 7388 * few ld_imm64 instructions that points to subprogs. 7389 * Otherwise bpf_object__reloc_code() later would have to consider 7390 * all ld_imm64 insns as relocation candidates. 
That would 7391 * reduce relocation speed, since amount of find_prog_insn_relo() 7392 * would increase and most of them will fail to find a relo. 7393 */ 7394 for (i = 0; i < obj->nr_programs; i++) { 7395 prog = &obj->programs[i]; 7396 for (j = 0; j < prog->nr_reloc; j++) { 7397 struct reloc_desc *relo = &prog->reloc_desc[j]; 7398 struct bpf_insn *insn = &prog->insns[relo->insn_idx]; 7399 7400 /* mark the insn, so it's recognized by insn_is_pseudo_func() */ 7401 if (relo->type == RELO_SUBPROG_ADDR) 7402 insn[0].src_reg = BPF_PSEUDO_FUNC; 7403 } 7404 } 7405 7406 /* relocate subprogram calls and append used subprograms to main 7407 * programs; each copy of subprogram code needs to be relocated 7408 * differently for each main program, because its code location might 7409 * have changed. 7410 * Append subprog relos to main programs to allow data relos to be 7411 * processed after text is completely relocated. 7412 */ 7413 for (i = 0; i < obj->nr_programs; i++) { 7414 prog = &obj->programs[i]; 7415 /* sub-program's sub-calls are relocated within the context of 7416 * its main program only 7417 */ 7418 if (prog_is_subprog(obj, prog)) 7419 continue; 7420 if (!prog->autoload) 7421 continue; 7422 7423 err = bpf_object__relocate_calls(obj, prog); 7424 if (err) { 7425 pr_warn("prog '%s': failed to relocate calls: %s\n", 7426 prog->name, errstr(err)); 7427 return err; 7428 } 7429 7430 err = bpf_prog_assign_exc_cb(obj, prog); 7431 if (err) 7432 return err; 7433 /* Now, also append exception callback if it has not been done already. */ 7434 if (prog->exception_cb_idx >= 0) { 7435 struct bpf_program *subprog = &obj->programs[prog->exception_cb_idx]; 7436 7437 /* Calling exception callback directly is disallowed, which the 7438 * verifier will reject later. In case it was processed already, 7439 * we can skip this step, otherwise for all other valid cases we 7440 * have to append exception callback now. 
7441 */ 7442 if (subprog->sub_insn_off == 0) { 7443 err = bpf_object__append_subprog_code(obj, prog, subprog); 7444 if (err) 7445 return err; 7446 err = bpf_object__reloc_code(obj, prog, subprog); 7447 if (err) 7448 return err; 7449 } 7450 } 7451 } 7452 for (i = 0; i < obj->nr_programs; i++) { 7453 prog = &obj->programs[i]; 7454 if (prog_is_subprog(obj, prog)) 7455 continue; 7456 if (!prog->autoload) 7457 continue; 7458 7459 /* Process data relos for main programs */ 7460 err = bpf_object__relocate_data(obj, prog); 7461 if (err) { 7462 pr_warn("prog '%s': failed to relocate data references: %s\n", 7463 prog->name, errstr(err)); 7464 return err; 7465 } 7466 7467 /* Fix up .BTF.ext information, if necessary */ 7468 err = bpf_program_fixup_func_info(obj, prog); 7469 if (err) { 7470 pr_warn("prog '%s': failed to perform .BTF.ext fix ups: %s\n", 7471 prog->name, errstr(err)); 7472 return err; 7473 } 7474 } 7475 7476 return 0; 7477 } 7478 7479 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, 7480 Elf64_Shdr *shdr, Elf_Data *data); 7481 7482 static int bpf_object__collect_map_relos(struct bpf_object *obj, 7483 Elf64_Shdr *shdr, Elf_Data *data) 7484 { 7485 const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *); 7486 int i, j, nrels, new_sz; 7487 const struct btf_var_secinfo *vi = NULL; 7488 const struct btf_type *sec, *var, *def; 7489 struct bpf_map *map = NULL, *targ_map = NULL; 7490 struct bpf_program *targ_prog = NULL; 7491 bool is_prog_array, is_map_in_map; 7492 const struct btf_member *member; 7493 const char *name, *mname, *type; 7494 unsigned int moff; 7495 Elf64_Sym *sym; 7496 Elf64_Rel *rel; 7497 void *tmp; 7498 7499 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf) 7500 return -EINVAL; 7501 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id); 7502 if (!sec) 7503 return -EINVAL; 7504 7505 nrels = shdr->sh_size / shdr->sh_entsize; 7506 for (i = 0; i < nrels; i++) { 7507 rel = elf_rel_by_idx(data, i); 7508 if (!rel) { 7509 pr_warn(".maps relo #%d: failed to get ELF relo\n", i); 7510 return -LIBBPF_ERRNO__FORMAT; 7511 } 7512 7513 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info)); 7514 if (!sym) { 7515 pr_warn(".maps relo #%d: symbol %zx not found\n", 7516 i, (size_t)ELF64_R_SYM(rel->r_info)); 7517 return -LIBBPF_ERRNO__FORMAT; 7518 } 7519 name = elf_sym_str(obj, sym->st_name) ?: "<?>"; 7520 7521 pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n", 7522 i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value, 7523 (size_t)rel->r_offset, sym->st_name, name); 7524 7525 for (j = 0; j < obj->nr_maps; j++) { 7526 map = &obj->maps[j]; 7527 if (map->sec_idx != obj->efile.btf_maps_shndx) 7528 continue; 7529 7530 vi = btf_var_secinfos(sec) + map->btf_var_idx; 7531 if (vi->offset <= rel->r_offset && 7532 rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size) 7533 break; 7534 } 7535 if (j == obj->nr_maps) { 7536 pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n", 7537 i, name, (size_t)rel->r_offset); 7538 return -EINVAL; 7539 } 7540 7541 is_map_in_map = bpf_map_type__is_map_in_map(map->def.type); 7542 is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY; 7543 type = is_map_in_map ? 
"map" : "prog"; 7544 if (is_map_in_map) { 7545 if (sym->st_shndx != obj->efile.btf_maps_shndx) { 7546 pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n", 7547 i, name); 7548 return -LIBBPF_ERRNO__RELOC; 7549 } 7550 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS && 7551 map->def.key_size != sizeof(int)) { 7552 pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n", 7553 i, map->name, sizeof(int)); 7554 return -EINVAL; 7555 } 7556 targ_map = bpf_object__find_map_by_name(obj, name); 7557 if (!targ_map) { 7558 pr_warn(".maps relo #%d: '%s' isn't a valid map reference\n", 7559 i, name); 7560 return -ESRCH; 7561 } 7562 } else if (is_prog_array) { 7563 targ_prog = bpf_object__find_program_by_name(obj, name); 7564 if (!targ_prog) { 7565 pr_warn(".maps relo #%d: '%s' isn't a valid program reference\n", 7566 i, name); 7567 return -ESRCH; 7568 } 7569 if (targ_prog->sec_idx != sym->st_shndx || 7570 targ_prog->sec_insn_off * 8 != sym->st_value || 7571 prog_is_subprog(obj, targ_prog)) { 7572 pr_warn(".maps relo #%d: '%s' isn't an entry-point program\n", 7573 i, name); 7574 return -LIBBPF_ERRNO__RELOC; 7575 } 7576 } else { 7577 return -EINVAL; 7578 } 7579 7580 var = btf__type_by_id(obj->btf, vi->type); 7581 def = skip_mods_and_typedefs(obj->btf, var->type, NULL); 7582 if (btf_vlen(def) == 0) 7583 return -EINVAL; 7584 member = btf_members(def) + btf_vlen(def) - 1; 7585 mname = btf__name_by_offset(obj->btf, member->name_off); 7586 if (strcmp(mname, "values")) 7587 return -EINVAL; 7588 7589 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8; 7590 if (rel->r_offset - vi->offset < moff) 7591 return -EINVAL; 7592 7593 moff = rel->r_offset - vi->offset - moff; 7594 /* here we use BPF pointer size, which is always 64 bit, as we 7595 * are parsing ELF that was built for BPF target 7596 */ 7597 if (moff % bpf_ptr_sz) 7598 return -EINVAL; 7599 moff /= bpf_ptr_sz; 7600 if (moff >= map->init_slots_sz) { 7601 new_sz = moff + 1; 7602 tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz); 7603 if (!tmp) 7604 return -ENOMEM; 7605 map->init_slots = tmp; 7606 memset(map->init_slots + map->init_slots_sz, 0, 7607 (new_sz - map->init_slots_sz) * host_ptr_sz); 7608 map->init_slots_sz = new_sz; 7609 } 7610 map->init_slots[moff] = is_map_in_map ? 
(void *)targ_map : (void *)targ_prog; 7611 7612 pr_debug(".maps relo #%d: map '%s' slot [%d] points to %s '%s'\n", 7613 i, map->name, moff, type, name); 7614 } 7615 7616 return 0; 7617 } 7618 7619 static int bpf_object__collect_relos(struct bpf_object *obj) 7620 { 7621 int i, err; 7622 7623 for (i = 0; i < obj->efile.sec_cnt; i++) { 7624 struct elf_sec_desc *sec_desc = &obj->efile.secs[i]; 7625 Elf64_Shdr *shdr; 7626 Elf_Data *data; 7627 int idx; 7628 7629 if (sec_desc->sec_type != SEC_RELO) 7630 continue; 7631 7632 shdr = sec_desc->shdr; 7633 data = sec_desc->data; 7634 idx = shdr->sh_info; 7635 7636 if (shdr->sh_type != SHT_REL || idx < 0 || idx >= obj->efile.sec_cnt) { 7637 pr_warn("internal error at %d\n", __LINE__); 7638 return -LIBBPF_ERRNO__INTERNAL; 7639 } 7640 7641 if (obj->efile.secs[idx].sec_type == SEC_ST_OPS) 7642 err = bpf_object__collect_st_ops_relos(obj, shdr, data); 7643 else if (idx == obj->efile.btf_maps_shndx) 7644 err = bpf_object__collect_map_relos(obj, shdr, data); 7645 else 7646 err = bpf_object__collect_prog_relos(obj, shdr, data); 7647 if (err) 7648 return err; 7649 } 7650 7651 bpf_object__sort_relos(obj); 7652 return 0; 7653 } 7654 7655 static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id) 7656 { 7657 if (BPF_CLASS(insn->code) == BPF_JMP && 7658 BPF_OP(insn->code) == BPF_CALL && 7659 BPF_SRC(insn->code) == BPF_K && 7660 insn->src_reg == 0 && 7661 insn->dst_reg == 0) { 7662 *func_id = insn->imm; 7663 return true; 7664 } 7665 return false; 7666 } 7667 7668 static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog) 7669 { 7670 struct bpf_insn *insn = prog->insns; 7671 enum bpf_func_id func_id; 7672 int i; 7673 7674 if (obj->gen_loader) 7675 return 0; 7676 7677 for (i = 0; i < prog->insns_cnt; i++, insn++) { 7678 if (!insn_is_helper_call(insn, &func_id)) 7679 continue; 7680 7681 /* on kernels that don't yet support 7682 * bpf_probe_read_{kernel,user}[_str] helpers, fall back 7683 * to bpf_probe_read() which works well for old kernels 7684 */ 7685 switch (func_id) { 7686 case BPF_FUNC_probe_read_kernel: 7687 case BPF_FUNC_probe_read_user: 7688 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN)) 7689 insn->imm = BPF_FUNC_probe_read; 7690 break; 7691 case BPF_FUNC_probe_read_kernel_str: 7692 case BPF_FUNC_probe_read_user_str: 7693 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN)) 7694 insn->imm = BPF_FUNC_probe_read_str; 7695 break; 7696 default: 7697 break; 7698 } 7699 } 7700 return 0; 7701 } 7702 7703 static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name, 7704 int *btf_obj_fd, int *btf_type_id); 7705 7706 /* this is called as prog->sec_def->prog_prepare_load_fn for libbpf-supported sec_defs */ 7707 static int libbpf_prepare_prog_load(struct bpf_program *prog, 7708 struct bpf_prog_load_opts *opts, long cookie) 7709 { 7710 enum sec_def_flags def = cookie; 7711 7712 /* old kernels might not support specifying expected_attach_type */ 7713 if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE)) 7714 opts->expected_attach_type = 0; 7715 7716 if (def & SEC_SLEEPABLE) 7717 opts->prog_flags |= BPF_F_SLEEPABLE; 7718 7719 if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS)) 7720 opts->prog_flags |= BPF_F_XDP_HAS_FRAGS; 7721 7722 /* special check for usdt to use uprobe_multi link */ 7723 if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK)) { 7724 /* for BPF_TRACE_UPROBE_MULTI, user might want to query expected_attach_type 7725 * in 
prog, while the expected_attach_type we pass to the kernel comes from opts, so we 7726 * update both. 7727 */ 7728 prog->expected_attach_type = BPF_TRACE_UPROBE_MULTI; 7729 opts->expected_attach_type = BPF_TRACE_UPROBE_MULTI; 7730 } 7731 7732 if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) { 7733 int btf_obj_fd = 0, btf_type_id = 0, err; 7734 const char *attach_name; 7735 7736 attach_name = strchr(prog->sec_name, '/'); 7737 if (!attach_name) { 7738 /* if BPF program is annotated with just SEC("fentry") 7739 * (or similar) without declaratively specifying 7740 * target, then it is expected that target will be 7741 * specified with bpf_program__set_attach_target() at 7742 * runtime before BPF object load step. If not, then 7743 * there is nothing to load into the kernel as BPF 7744 * verifier won't be able to validate BPF program 7745 * correctness anyway. 7746 */ 7747 pr_warn("prog '%s': no BTF-based attach target is specified, use bpf_program__set_attach_target()\n", 7748 prog->name); 7749 return -EINVAL; 7750 } 7751 attach_name++; /* skip over / */ 7752 7753 err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id); 7754 if (err) 7755 return err; 7756 7757 /* cache resolved BTF FD and BTF type ID in the prog */ 7758 prog->attach_btf_obj_fd = btf_obj_fd; 7759 prog->attach_btf_id = btf_type_id; 7760 7761 /* but by now libbpf common logic is not utilizing 7762 * prog->attach_btf_obj_fd/prog->attach_btf_id anymore because 7763 * this callback is called after opts were populated by 7764 * libbpf, so this callback has to update opts explicitly here 7765 */ 7766 opts->attach_btf_obj_fd = btf_obj_fd; 7767 opts->attach_btf_id = btf_type_id; 7768 } 7769 return 0; 7770 } 7771 7772 static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz); 7773 7774 static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog, 7775 struct bpf_insn *insns, int insns_cnt, 7776 const char *license, __u32 kern_version, int *prog_fd) 7777 { 7778 LIBBPF_OPTS(bpf_prog_load_opts, load_attr); 7779 const char *prog_name = NULL; 7780 size_t log_buf_size = 0; 7781 char *log_buf = NULL, *tmp; 7782 bool own_log_buf = true; 7783 __u32 log_level = prog->log_level; 7784 int ret, err; 7785 7786 /* Be more helpful by rejecting programs that can't be validated early 7787 * with a more meaningful and actionable error message. 7788 */ 7789 switch (prog->type) { 7790 case BPF_PROG_TYPE_UNSPEC: 7791 /* 7792 * The program type must be set. Most likely we couldn't find a proper 7793 * section definition at load time, and thus we didn't infer the type.
7794 */ 7795 pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n", 7796 prog->name, prog->sec_name); 7797 return -EINVAL; 7798 case BPF_PROG_TYPE_STRUCT_OPS: 7799 if (prog->attach_btf_id == 0) { 7800 pr_warn("prog '%s': SEC(\"struct_ops\") program isn't referenced anywhere, did you forget to use it?\n", 7801 prog->name); 7802 return -EINVAL; 7803 } 7804 break; 7805 default: 7806 break; 7807 } 7808 7809 if (!insns || !insns_cnt) 7810 return -EINVAL; 7811 7812 if (kernel_supports(obj, FEAT_PROG_NAME)) 7813 prog_name = prog->name; 7814 load_attr.attach_prog_fd = prog->attach_prog_fd; 7815 load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd; 7816 load_attr.attach_btf_id = prog->attach_btf_id; 7817 load_attr.kern_version = kern_version; 7818 load_attr.prog_ifindex = prog->prog_ifindex; 7819 load_attr.expected_attach_type = prog->expected_attach_type; 7820 7821 /* specify func_info/line_info only if kernel supports them */ 7822 if (obj->btf && btf__fd(obj->btf) >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) { 7823 load_attr.prog_btf_fd = btf__fd(obj->btf); 7824 load_attr.func_info = prog->func_info; 7825 load_attr.func_info_rec_size = prog->func_info_rec_size; 7826 load_attr.func_info_cnt = prog->func_info_cnt; 7827 load_attr.line_info = prog->line_info; 7828 load_attr.line_info_rec_size = prog->line_info_rec_size; 7829 load_attr.line_info_cnt = prog->line_info_cnt; 7830 } 7831 load_attr.log_level = log_level; 7832 load_attr.prog_flags = prog->prog_flags; 7833 load_attr.fd_array = obj->fd_array; 7834 7835 load_attr.token_fd = obj->token_fd; 7836 if (obj->token_fd) 7837 load_attr.prog_flags |= BPF_F_TOKEN_FD; 7838 7839 /* adjust load_attr if sec_def provides custom preload callback */ 7840 if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) { 7841 err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie); 7842 if (err < 0) { 7843 pr_warn("prog '%s': failed to prepare load attributes: %s\n", 7844 prog->name, errstr(err)); 7845 return err; 7846 } 7847 insns = prog->insns; 7848 insns_cnt = prog->insns_cnt; 7849 } 7850 7851 if (obj->gen_loader) { 7852 bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name, 7853 license, insns, insns_cnt, &load_attr, 7854 prog - obj->programs); 7855 *prog_fd = -1; 7856 return 0; 7857 } 7858 7859 retry_load: 7860 /* if log_level is zero, we don't request logs initially even if 7861 * custom log_buf is specified; if the program load fails, then we'll 7862 * bump log_level to 1 and use either custom log_buf or we'll allocate 7863 * our own and retry the load to get details on what failed 7864 */ 7865 if (log_level) { 7866 if (prog->log_buf) { 7867 log_buf = prog->log_buf; 7868 log_buf_size = prog->log_size; 7869 own_log_buf = false; 7870 } else if (obj->log_buf) { 7871 log_buf = obj->log_buf; 7872 log_buf_size = obj->log_size; 7873 own_log_buf = false; 7874 } else { 7875 log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, log_buf_size * 2); 7876 tmp = realloc(log_buf, log_buf_size); 7877 if (!tmp) { 7878 ret = -ENOMEM; 7879 goto out; 7880 } 7881 log_buf = tmp; 7882 log_buf[0] = '\0'; 7883 own_log_buf = true; 7884 } 7885 } 7886 7887 load_attr.log_buf = log_buf; 7888 load_attr.log_size = log_buf_size; 7889 load_attr.log_level = log_level; 7890 7891 ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr); 7892 if (ret >= 0) { 7893 if (log_level && own_log_buf) { 7894 pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n", 7895 prog->name, log_buf); 7896 } 7897 7898 if 
(obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) { 7899 struct bpf_map *map; 7900 int i; 7901 7902 for (i = 0; i < obj->nr_maps; i++) { 7903 map = &prog->obj->maps[i]; 7904 if (map->libbpf_type != LIBBPF_MAP_RODATA) 7905 continue; 7906 7907 if (bpf_prog_bind_map(ret, map->fd, NULL)) { 7908 pr_warn("prog '%s': failed to bind map '%s': %s\n", 7909 prog->name, map->real_name, errstr(errno)); 7910 /* Don't fail hard if can't bind rodata. */ 7911 } 7912 } 7913 } 7914 7915 *prog_fd = ret; 7916 ret = 0; 7917 goto out; 7918 } 7919 7920 if (log_level == 0) { 7921 log_level = 1; 7922 goto retry_load; 7923 } 7924 /* On ENOSPC, increase log buffer size and retry, unless custom 7925 * log_buf is specified. 7926 * Be careful to not overflow u32, though. Kernel's log buf size limit 7927 * isn't part of UAPI so it can always be bumped to full 4GB. So don't 7928 * multiply by 2 unless we are sure we'll fit within 32 bits. 7929 * Currently, we'll get -EINVAL when we reach (UINT_MAX >> 2). 7930 */ 7931 if (own_log_buf && errno == ENOSPC && log_buf_size <= UINT_MAX / 2) 7932 goto retry_load; 7933 7934 ret = -errno; 7935 7936 /* post-process verifier log to improve error descriptions */ 7937 fixup_verifier_log(prog, log_buf, log_buf_size); 7938 7939 pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, errstr(errno)); 7940 pr_perm_msg(ret); 7941 7942 if (own_log_buf && log_buf && log_buf[0] != '\0') { 7943 pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n", 7944 prog->name, log_buf); 7945 } 7946 7947 out: 7948 if (own_log_buf) 7949 free(log_buf); 7950 return ret; 7951 } 7952 7953 static char *find_prev_line(char *buf, char *cur) 7954 { 7955 char *p; 7956 7957 if (cur == buf) /* end of a log buf */ 7958 return NULL; 7959 7960 p = cur - 1; 7961 while (p - 1 >= buf && *(p - 1) != '\n') 7962 p--; 7963 7964 return p; 7965 } 7966 7967 static void patch_log(char *buf, size_t buf_sz, size_t log_sz, 7968 char *orig, size_t orig_sz, const char *patch) 7969 { 7970 /* size of the remaining log content to the right from the to-be-replaced part */ 7971 size_t rem_sz = (buf + log_sz) - (orig + orig_sz); 7972 size_t patch_sz = strlen(patch); 7973 7974 if (patch_sz != orig_sz) { 7975 /* If patch line(s) are longer than original piece of verifier log, 7976 * shift log contents by (patch_sz - orig_sz) bytes to the right 7977 * starting from after to-be-replaced part of the log. 7978 * 7979 * If patch line(s) are shorter than original piece of verifier log, 7980 * shift log contents by (orig_sz - patch_sz) bytes to the left 7981 * starting from after to-be-replaced part of the log 7982 * 7983 * We need to be careful about not overflowing available 7984 * buf_sz capacity. If that's the case, we'll truncate the end 7985 * of the original log, as necessary. 
7986 */ 7987 if (patch_sz > orig_sz) { 7988 if (orig + patch_sz >= buf + buf_sz) { 7989 /* patch is big enough to cover remaining space completely */ 7990 patch_sz -= (orig + patch_sz) - (buf + buf_sz) + 1; 7991 rem_sz = 0; 7992 } else if (patch_sz - orig_sz > buf_sz - log_sz) { 7993 /* patch causes part of remaining log to be truncated */ 7994 rem_sz -= (patch_sz - orig_sz) - (buf_sz - log_sz); 7995 } 7996 } 7997 /* shift remaining log to the right by calculated amount */ 7998 memmove(orig + patch_sz, orig + orig_sz, rem_sz); 7999 } 8000 8001 memcpy(orig, patch, patch_sz); 8002 } 8003 8004 static void fixup_log_failed_core_relo(struct bpf_program *prog, 8005 char *buf, size_t buf_sz, size_t log_sz, 8006 char *line1, char *line2, char *line3) 8007 { 8008 /* Expected log for failed and not properly guarded CO-RE relocation: 8009 * line1 -> 123: (85) call unknown#195896080 8010 * line2 -> invalid func unknown#195896080 8011 * line3 -> <anything else or end of buffer> 8012 * 8013 * "123" is the index of the instruction that was poisoned. We extract 8014 * instruction index to find corresponding CO-RE relocation and 8015 * replace this part of the log with more relevant information about 8016 * failed CO-RE relocation. 8017 */ 8018 const struct bpf_core_relo *relo; 8019 struct bpf_core_spec spec; 8020 char patch[512], spec_buf[256]; 8021 int insn_idx, err, spec_len; 8022 8023 if (sscanf(line1, "%d: (%*d) call unknown#195896080\n", &insn_idx) != 1) 8024 return; 8025 8026 relo = find_relo_core(prog, insn_idx); 8027 if (!relo) 8028 return; 8029 8030 err = bpf_core_parse_spec(prog->name, prog->obj->btf, relo, &spec); 8031 if (err) 8032 return; 8033 8034 spec_len = bpf_core_format_spec(spec_buf, sizeof(spec_buf), &spec); 8035 snprintf(patch, sizeof(patch), 8036 "%d: <invalid CO-RE relocation>\n" 8037 "failed to resolve CO-RE relocation %s%s\n", 8038 insn_idx, spec_buf, spec_len >= sizeof(spec_buf) ? "..." : ""); 8039 8040 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch); 8041 } 8042 8043 static void fixup_log_missing_map_load(struct bpf_program *prog, 8044 char *buf, size_t buf_sz, size_t log_sz, 8045 char *line1, char *line2, char *line3) 8046 { 8047 /* Expected log for failed and not properly guarded map reference: 8048 * line1 -> 123: (85) call unknown#2001000345 8049 * line2 -> invalid func unknown#2001000345 8050 * line3 -> <anything else or end of buffer> 8051 * 8052 * "123" is the index of the instruction that was poisoned. 8053 * "345" in "2001000345" is a map index in obj->maps to fetch map name. 
8054 */ 8055 struct bpf_object *obj = prog->obj; 8056 const struct bpf_map *map; 8057 int insn_idx, map_idx; 8058 char patch[128]; 8059 8060 if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &map_idx) != 2) 8061 return; 8062 8063 map_idx -= POISON_LDIMM64_MAP_BASE; 8064 if (map_idx < 0 || map_idx >= obj->nr_maps) 8065 return; 8066 map = &obj->maps[map_idx]; 8067 8068 snprintf(patch, sizeof(patch), 8069 "%d: <invalid BPF map reference>\n" 8070 "BPF map '%s' is referenced but wasn't created\n", 8071 insn_idx, map->name); 8072 8073 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch); 8074 } 8075 8076 static void fixup_log_missing_kfunc_call(struct bpf_program *prog, 8077 char *buf, size_t buf_sz, size_t log_sz, 8078 char *line1, char *line2, char *line3) 8079 { 8080 /* Expected log for failed and not properly guarded kfunc call: 8081 * line1 -> 123: (85) call unknown#2002000345 8082 * line2 -> invalid func unknown#2002000345 8083 * line3 -> <anything else or end of buffer> 8084 * 8085 * "123" is the index of the instruction that was poisoned. 8086 * "345" in "2002000345" is an extern index in obj->externs to fetch kfunc name. 8087 */ 8088 struct bpf_object *obj = prog->obj; 8089 const struct extern_desc *ext; 8090 int insn_idx, ext_idx; 8091 char patch[128]; 8092 8093 if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &ext_idx) != 2) 8094 return; 8095 8096 ext_idx -= POISON_CALL_KFUNC_BASE; 8097 if (ext_idx < 0 || ext_idx >= obj->nr_extern) 8098 return; 8099 ext = &obj->externs[ext_idx]; 8100 8101 snprintf(patch, sizeof(patch), 8102 "%d: <invalid kfunc call>\n" 8103 "kfunc '%s' is referenced but wasn't resolved\n", 8104 insn_idx, ext->name); 8105 8106 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch); 8107 } 8108 8109 static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz) 8110 { 8111 /* look for familiar error patterns in last N lines of the log */ 8112 const size_t max_last_line_cnt = 10; 8113 char *prev_line, *cur_line, *next_line; 8114 size_t log_sz; 8115 int i; 8116 8117 if (!buf) 8118 return; 8119 8120 log_sz = strlen(buf) + 1; 8121 next_line = buf + log_sz - 1; 8122 8123 for (i = 0; i < max_last_line_cnt; i++, next_line = cur_line) { 8124 cur_line = find_prev_line(buf, next_line); 8125 if (!cur_line) 8126 return; 8127 8128 if (str_has_pfx(cur_line, "invalid func unknown#195896080\n")) { 8129 prev_line = find_prev_line(buf, cur_line); 8130 if (!prev_line) 8131 continue; 8132 8133 /* failed CO-RE relocation case */ 8134 fixup_log_failed_core_relo(prog, buf, buf_sz, log_sz, 8135 prev_line, cur_line, next_line); 8136 return; 8137 } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_LDIMM64_MAP_PFX)) { 8138 prev_line = find_prev_line(buf, cur_line); 8139 if (!prev_line) 8140 continue; 8141 8142 /* reference to uncreated BPF map */ 8143 fixup_log_missing_map_load(prog, buf, buf_sz, log_sz, 8144 prev_line, cur_line, next_line); 8145 return; 8146 } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_CALL_KFUNC_PFX)) { 8147 prev_line = find_prev_line(buf, cur_line); 8148 if (!prev_line) 8149 continue; 8150 8151 /* reference to unresolved kfunc */ 8152 fixup_log_missing_kfunc_call(prog, buf, buf_sz, log_sz, 8153 prev_line, cur_line, next_line); 8154 return; 8155 } 8156 } 8157 } 8158 8159 static int bpf_program_record_relos(struct bpf_program *prog) 8160 { 8161 struct bpf_object *obj = prog->obj; 8162 int i; 8163 8164 for (i = 0; i < prog->nr_reloc; i++) { 8165 struct reloc_desc *relo = &prog->reloc_desc[i]; 8166 
struct extern_desc *ext = &obj->externs[relo->ext_idx]; 8167 int kind; 8168 8169 switch (relo->type) { 8170 case RELO_EXTERN_LD64: 8171 if (ext->type != EXT_KSYM) 8172 continue; 8173 kind = btf_is_var(btf__type_by_id(obj->btf, ext->btf_id)) ? 8174 BTF_KIND_VAR : BTF_KIND_FUNC; 8175 bpf_gen__record_extern(obj->gen_loader, ext->name, 8176 ext->is_weak, !ext->ksym.type_id, 8177 true, kind, relo->insn_idx); 8178 break; 8179 case RELO_EXTERN_CALL: 8180 bpf_gen__record_extern(obj->gen_loader, ext->name, 8181 ext->is_weak, false, false, BTF_KIND_FUNC, 8182 relo->insn_idx); 8183 break; 8184 case RELO_CORE: { 8185 struct bpf_core_relo cr = { 8186 .insn_off = relo->insn_idx * 8, 8187 .type_id = relo->core_relo->type_id, 8188 .access_str_off = relo->core_relo->access_str_off, 8189 .kind = relo->core_relo->kind, 8190 }; 8191 8192 bpf_gen__record_relo_core(obj->gen_loader, &cr); 8193 break; 8194 } 8195 default: 8196 continue; 8197 } 8198 } 8199 return 0; 8200 } 8201 8202 static int 8203 bpf_object__load_progs(struct bpf_object *obj, int log_level) 8204 { 8205 struct bpf_program *prog; 8206 size_t i; 8207 int err; 8208 8209 for (i = 0; i < obj->nr_programs; i++) { 8210 prog = &obj->programs[i]; 8211 if (prog_is_subprog(obj, prog)) 8212 continue; 8213 if (!prog->autoload) { 8214 pr_debug("prog '%s': skipped loading\n", prog->name); 8215 continue; 8216 } 8217 prog->log_level |= log_level; 8218 8219 if (obj->gen_loader) 8220 bpf_program_record_relos(prog); 8221 8222 err = bpf_object_load_prog(obj, prog, prog->insns, prog->insns_cnt, 8223 obj->license, obj->kern_version, &prog->fd); 8224 if (err) { 8225 pr_warn("prog '%s': failed to load: %s\n", prog->name, errstr(err)); 8226 return err; 8227 } 8228 } 8229 8230 bpf_object__free_relocs(obj); 8231 return 0; 8232 } 8233 8234 static int bpf_object_prepare_progs(struct bpf_object *obj) 8235 { 8236 struct bpf_program *prog; 8237 size_t i; 8238 int err; 8239 8240 for (i = 0; i < obj->nr_programs; i++) { 8241 prog = &obj->programs[i]; 8242 err = bpf_object__sanitize_prog(obj, prog); 8243 if (err) 8244 return err; 8245 } 8246 return 0; 8247 } 8248 8249 static const struct bpf_sec_def *find_sec_def(const char *sec_name); 8250 8251 static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts) 8252 { 8253 struct bpf_program *prog; 8254 int err; 8255 8256 bpf_object__for_each_program(prog, obj) { 8257 prog->sec_def = find_sec_def(prog->sec_name); 8258 if (!prog->sec_def) { 8259 /* couldn't guess, but user might manually specify */ 8260 pr_debug("prog '%s': unrecognized ELF section name '%s'\n", 8261 prog->name, prog->sec_name); 8262 continue; 8263 } 8264 8265 prog->type = prog->sec_def->prog_type; 8266 prog->expected_attach_type = prog->sec_def->expected_attach_type; 8267 8268 /* sec_def can have custom callback which should be called 8269 * after bpf_program is initialized to adjust its properties 8270 */ 8271 if (prog->sec_def->prog_setup_fn) { 8272 err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie); 8273 if (err < 0) { 8274 pr_warn("prog '%s': failed to initialize: %s\n", 8275 prog->name, errstr(err)); 8276 return err; 8277 } 8278 } 8279 } 8280 8281 return 0; 8282 } 8283 8284 static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz, 8285 const char *obj_name, 8286 const struct bpf_object_open_opts *opts) 8287 { 8288 const char *kconfig, *btf_tmp_path, *token_path; 8289 struct bpf_object *obj; 8290 int err; 8291 char *log_buf; 8292 size_t log_size; 8293 __u32 log_level; 8294 
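	/* bpf_object_open() is the common backend behind bpf_object__open_file()
	 * and bpf_object__open_mem() (both defined further below). A minimal
	 * usage sketch from the caller's side, assuming the libbpf 1.x
	 * convention of returning NULL and setting errno on error
	 * ("prog.bpf.o" is just an illustrative file name):
	 *
	 *	struct bpf_object *o = bpf_object__open_file("prog.bpf.o", NULL);
	 *
	 *	if (!o)
	 *		return -errno;
	 *	if (bpf_object__load(o)) {
	 *		bpf_object__close(o);
	 *		return -1;
	 *	}
	 *	...
	 *	bpf_object__close(o);
	 *
	 * When opening from a memory buffer, path is NULL and obj_buf is set,
	 * so a caller-supplied object name is mandatory, hence the check below.
	 */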
8295 if (obj_buf && !obj_name) 8296 return ERR_PTR(-EINVAL); 8297 8298 if (elf_version(EV_CURRENT) == EV_NONE) { 8299 pr_warn("failed to init libelf for %s\n", 8300 path ? : "(mem buf)"); 8301 return ERR_PTR(-LIBBPF_ERRNO__LIBELF); 8302 } 8303 8304 if (!OPTS_VALID(opts, bpf_object_open_opts)) 8305 return ERR_PTR(-EINVAL); 8306 8307 obj_name = OPTS_GET(opts, object_name, NULL) ?: obj_name; 8308 if (obj_buf) { 8309 path = obj_name; 8310 pr_debug("loading object '%s' from buffer\n", obj_name); 8311 } else { 8312 pr_debug("loading object from %s\n", path); 8313 } 8314 8315 log_buf = OPTS_GET(opts, kernel_log_buf, NULL); 8316 log_size = OPTS_GET(opts, kernel_log_size, 0); 8317 log_level = OPTS_GET(opts, kernel_log_level, 0); 8318 if (log_size > UINT_MAX) 8319 return ERR_PTR(-EINVAL); 8320 if (log_size && !log_buf) 8321 return ERR_PTR(-EINVAL); 8322 8323 token_path = OPTS_GET(opts, bpf_token_path, NULL); 8324 /* if user didn't specify bpf_token_path explicitly, check if 8325 * LIBBPF_BPF_TOKEN_PATH envvar was set and treat it as bpf_token_path 8326 * option 8327 */ 8328 if (!token_path) 8329 token_path = getenv("LIBBPF_BPF_TOKEN_PATH"); 8330 if (token_path && strlen(token_path) >= PATH_MAX) 8331 return ERR_PTR(-ENAMETOOLONG); 8332 8333 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name); 8334 if (IS_ERR(obj)) 8335 return obj; 8336 8337 obj->log_buf = log_buf; 8338 obj->log_size = log_size; 8339 obj->log_level = log_level; 8340 8341 if (token_path) { 8342 obj->token_path = strdup(token_path); 8343 if (!obj->token_path) { 8344 err = -ENOMEM; 8345 goto out; 8346 } 8347 } 8348 8349 btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL); 8350 if (btf_tmp_path) { 8351 if (strlen(btf_tmp_path) >= PATH_MAX) { 8352 err = -ENAMETOOLONG; 8353 goto out; 8354 } 8355 obj->btf_custom_path = strdup(btf_tmp_path); 8356 if (!obj->btf_custom_path) { 8357 err = -ENOMEM; 8358 goto out; 8359 } 8360 } 8361 8362 kconfig = OPTS_GET(opts, kconfig, NULL); 8363 if (kconfig) { 8364 obj->kconfig = strdup(kconfig); 8365 if (!obj->kconfig) { 8366 err = -ENOMEM; 8367 goto out; 8368 } 8369 } 8370 8371 err = bpf_object__elf_init(obj); 8372 err = err ? : bpf_object__elf_collect(obj); 8373 err = err ? : bpf_object__collect_externs(obj); 8374 err = err ? : bpf_object_fixup_btf(obj); 8375 err = err ? : bpf_object__init_maps(obj, opts); 8376 err = err ? : bpf_object_init_progs(obj, opts); 8377 err = err ? 
: bpf_object__collect_relos(obj); 8378 if (err) 8379 goto out; 8380 8381 bpf_object__elf_finish(obj); 8382 8383 return obj; 8384 out: 8385 bpf_object__close(obj); 8386 return ERR_PTR(err); 8387 } 8388 8389 struct bpf_object * 8390 bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts) 8391 { 8392 if (!path) 8393 return libbpf_err_ptr(-EINVAL); 8394 8395 return libbpf_ptr(bpf_object_open(path, NULL, 0, NULL, opts)); 8396 } 8397 8398 struct bpf_object *bpf_object__open(const char *path) 8399 { 8400 return bpf_object__open_file(path, NULL); 8401 } 8402 8403 struct bpf_object * 8404 bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz, 8405 const struct bpf_object_open_opts *opts) 8406 { 8407 char tmp_name[64]; 8408 8409 if (!obj_buf || obj_buf_sz == 0) 8410 return libbpf_err_ptr(-EINVAL); 8411 8412 /* create a (quite useless) default "name" for this memory buffer object */ 8413 snprintf(tmp_name, sizeof(tmp_name), "%lx-%zx", (unsigned long)obj_buf, obj_buf_sz); 8414 8415 return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, tmp_name, opts)); 8416 } 8417 8418 static int bpf_object_unload(struct bpf_object *obj) 8419 { 8420 size_t i; 8421 8422 if (!obj) 8423 return libbpf_err(-EINVAL); 8424 8425 for (i = 0; i < obj->nr_maps; i++) { 8426 zclose(obj->maps[i].fd); 8427 if (obj->maps[i].st_ops) 8428 zfree(&obj->maps[i].st_ops->kern_vdata); 8429 } 8430 8431 for (i = 0; i < obj->nr_programs; i++) 8432 bpf_program__unload(&obj->programs[i]); 8433 8434 return 0; 8435 } 8436 8437 static int bpf_object__sanitize_maps(struct bpf_object *obj) 8438 { 8439 struct bpf_map *m; 8440 8441 bpf_object__for_each_map(m, obj) { 8442 if (!bpf_map__is_internal(m)) 8443 continue; 8444 if (!kernel_supports(obj, FEAT_ARRAY_MMAP)) 8445 m->def.map_flags &= ~BPF_F_MMAPABLE; 8446 } 8447 8448 return 0; 8449 } 8450 8451 typedef int (*kallsyms_cb_t)(unsigned long long sym_addr, char sym_type, 8452 const char *sym_name, void *ctx); 8453 8454 static int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx) 8455 { 8456 char sym_type, sym_name[500]; 8457 unsigned long long sym_addr; 8458 int ret, err = 0; 8459 FILE *f; 8460 8461 f = fopen("/proc/kallsyms", "re"); 8462 if (!f) { 8463 err = -errno; 8464 pr_warn("failed to open /proc/kallsyms: %s\n", errstr(err)); 8465 return err; 8466 } 8467 8468 while (true) { 8469 ret = fscanf(f, "%llx %c %499s%*[^\n]\n", 8470 &sym_addr, &sym_type, sym_name); 8471 if (ret == EOF && feof(f)) 8472 break; 8473 if (ret != 3) { 8474 pr_warn("failed to read kallsyms entry: %d\n", ret); 8475 err = -EINVAL; 8476 break; 8477 } 8478 8479 err = cb(sym_addr, sym_type, sym_name, ctx); 8480 if (err) 8481 break; 8482 } 8483 8484 fclose(f); 8485 return err; 8486 } 8487 8488 static int kallsyms_cb(unsigned long long sym_addr, char sym_type, 8489 const char *sym_name, void *ctx) 8490 { 8491 struct bpf_object *obj = ctx; 8492 const struct btf_type *t; 8493 struct extern_desc *ext; 8494 const char *res; 8495 8496 res = strstr(sym_name, ".llvm."); 8497 if (sym_type == 'd' && res) 8498 ext = find_extern_by_name_with_len(obj, sym_name, res - sym_name); 8499 else 8500 ext = find_extern_by_name(obj, sym_name); 8501 if (!ext || ext->type != EXT_KSYM) 8502 return 0; 8503 8504 t = btf__type_by_id(obj->btf, ext->btf_id); 8505 if (!btf_is_var(t)) 8506 return 0; 8507 8508 if (ext->is_set && ext->ksym.addr != sym_addr) { 8509 pr_warn("extern (ksym) '%s': resolution is ambiguous: 0x%llx or 0x%llx\n", 8510 sym_name, ext->ksym.addr, sym_addr); 8511 return -EINVAL; 8512 } 8513 if (!ext->is_set) { 
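		/* first resolution of this ksym: record the address parsed from
		 * /proc/kallsyms (a conflicting re-resolution is rejected by the
		 * ambiguity check above)
		 */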
8514 ext->is_set = true; 8515 ext->ksym.addr = sym_addr; 8516 pr_debug("extern (ksym) '%s': set to 0x%llx\n", sym_name, sym_addr); 8517 } 8518 return 0; 8519 } 8520 8521 static int bpf_object__read_kallsyms_file(struct bpf_object *obj) 8522 { 8523 return libbpf_kallsyms_parse(kallsyms_cb, obj); 8524 } 8525 8526 static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name, 8527 __u16 kind, struct btf **res_btf, 8528 struct module_btf **res_mod_btf) 8529 { 8530 struct module_btf *mod_btf; 8531 struct btf *btf; 8532 int i, id, err; 8533 8534 btf = obj->btf_vmlinux; 8535 mod_btf = NULL; 8536 id = btf__find_by_name_kind(btf, ksym_name, kind); 8537 8538 if (id == -ENOENT) { 8539 err = load_module_btfs(obj); 8540 if (err) 8541 return err; 8542 8543 for (i = 0; i < obj->btf_module_cnt; i++) { 8544 /* we assume module_btf's BTF FD is always >0 */ 8545 mod_btf = &obj->btf_modules[i]; 8546 btf = mod_btf->btf; 8547 id = btf__find_by_name_kind_own(btf, ksym_name, kind); 8548 if (id != -ENOENT) 8549 break; 8550 } 8551 } 8552 if (id <= 0) 8553 return -ESRCH; 8554 8555 *res_btf = btf; 8556 *res_mod_btf = mod_btf; 8557 return id; 8558 } 8559 8560 static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj, 8561 struct extern_desc *ext) 8562 { 8563 const struct btf_type *targ_var, *targ_type; 8564 __u32 targ_type_id, local_type_id; 8565 struct module_btf *mod_btf = NULL; 8566 const char *targ_var_name; 8567 struct btf *btf = NULL; 8568 int id, err; 8569 8570 id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf); 8571 if (id < 0) { 8572 if (id == -ESRCH && ext->is_weak) 8573 return 0; 8574 pr_warn("extern (var ksym) '%s': not found in kernel BTF\n", 8575 ext->name); 8576 return id; 8577 } 8578 8579 /* find local type_id */ 8580 local_type_id = ext->ksym.type_id; 8581 8582 /* find target type_id */ 8583 targ_var = btf__type_by_id(btf, id); 8584 targ_var_name = btf__name_by_offset(btf, targ_var->name_off); 8585 targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id); 8586 8587 err = bpf_core_types_are_compat(obj->btf, local_type_id, 8588 btf, targ_type_id); 8589 if (err <= 0) { 8590 const struct btf_type *local_type; 8591 const char *targ_name, *local_name; 8592 8593 local_type = btf__type_by_id(obj->btf, local_type_id); 8594 local_name = btf__name_by_offset(obj->btf, local_type->name_off); 8595 targ_name = btf__name_by_offset(btf, targ_type->name_off); 8596 8597 pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n", 8598 ext->name, local_type_id, 8599 btf_kind_str(local_type), local_name, targ_type_id, 8600 btf_kind_str(targ_type), targ_name); 8601 return -EINVAL; 8602 } 8603 8604 ext->is_set = true; 8605 ext->ksym.kernel_btf_obj_fd = mod_btf ? 
mod_btf->fd : 0; 8606 ext->ksym.kernel_btf_id = id; 8607 pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n", 8608 ext->name, id, btf_kind_str(targ_var), targ_var_name); 8609 8610 return 0; 8611 } 8612 8613 static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj, 8614 struct extern_desc *ext) 8615 { 8616 int local_func_proto_id, kfunc_proto_id, kfunc_id; 8617 struct module_btf *mod_btf = NULL; 8618 const struct btf_type *kern_func; 8619 struct btf *kern_btf = NULL; 8620 int ret; 8621 8622 local_func_proto_id = ext->ksym.type_id; 8623 8624 kfunc_id = find_ksym_btf_id(obj, ext->essent_name ?: ext->name, BTF_KIND_FUNC, &kern_btf, 8625 &mod_btf); 8626 if (kfunc_id < 0) { 8627 if (kfunc_id == -ESRCH && ext->is_weak) 8628 return 0; 8629 pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n", 8630 ext->name); 8631 return kfunc_id; 8632 } 8633 8634 kern_func = btf__type_by_id(kern_btf, kfunc_id); 8635 kfunc_proto_id = kern_func->type; 8636 8637 ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id, 8638 kern_btf, kfunc_proto_id); 8639 if (ret <= 0) { 8640 if (ext->is_weak) 8641 return 0; 8642 8643 pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with %s [%d]\n", 8644 ext->name, local_func_proto_id, 8645 mod_btf ? mod_btf->name : "vmlinux", kfunc_proto_id); 8646 return -EINVAL; 8647 } 8648 8649 /* set index for module BTF fd in fd_array, if unset */ 8650 if (mod_btf && !mod_btf->fd_array_idx) { 8651 /* insn->off is s16 */ 8652 if (obj->fd_array_cnt == INT16_MAX) { 8653 pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n", 8654 ext->name, mod_btf->fd_array_idx); 8655 return -E2BIG; 8656 } 8657 /* Cannot use index 0 for module BTF fd */ 8658 if (!obj->fd_array_cnt) 8659 obj->fd_array_cnt = 1; 8660 8661 ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int), 8662 obj->fd_array_cnt + 1); 8663 if (ret) 8664 return ret; 8665 mod_btf->fd_array_idx = obj->fd_array_cnt; 8666 /* we assume module BTF FD is always >0 */ 8667 obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd; 8668 } 8669 8670 ext->is_set = true; 8671 ext->ksym.kernel_btf_id = kfunc_id; 8672 ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0; 8673 /* Also set kernel_btf_obj_fd to make sure that bpf_object__relocate_data() 8674 * populates FD into ld_imm64 insn when it's used to point to kfunc. 8675 * {kernel_btf_id, btf_fd_idx} -> fixup bpf_call. 8676 * {kernel_btf_id, kernel_btf_obj_fd} -> fixup ld_imm64. 8677 */ 8678 ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0; 8679 pr_debug("extern (func ksym) '%s': resolved to %s [%d]\n", 8680 ext->name, mod_btf ? 
mod_btf->name : "vmlinux", kfunc_id); 8681 8682 return 0; 8683 } 8684 8685 static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj) 8686 { 8687 const struct btf_type *t; 8688 struct extern_desc *ext; 8689 int i, err; 8690 8691 for (i = 0; i < obj->nr_extern; i++) { 8692 ext = &obj->externs[i]; 8693 if (ext->type != EXT_KSYM || !ext->ksym.type_id) 8694 continue; 8695 8696 if (obj->gen_loader) { 8697 ext->is_set = true; 8698 ext->ksym.kernel_btf_obj_fd = 0; 8699 ext->ksym.kernel_btf_id = 0; 8700 continue; 8701 } 8702 t = btf__type_by_id(obj->btf, ext->btf_id); 8703 if (btf_is_var(t)) 8704 err = bpf_object__resolve_ksym_var_btf_id(obj, ext); 8705 else 8706 err = bpf_object__resolve_ksym_func_btf_id(obj, ext); 8707 if (err) 8708 return err; 8709 } 8710 return 0; 8711 } 8712 8713 static int bpf_object__resolve_externs(struct bpf_object *obj, 8714 const char *extra_kconfig) 8715 { 8716 bool need_config = false, need_kallsyms = false; 8717 bool need_vmlinux_btf = false; 8718 struct extern_desc *ext; 8719 void *kcfg_data = NULL; 8720 int err, i; 8721 8722 if (obj->nr_extern == 0) 8723 return 0; 8724 8725 if (obj->kconfig_map_idx >= 0) 8726 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped; 8727 8728 for (i = 0; i < obj->nr_extern; i++) { 8729 ext = &obj->externs[i]; 8730 8731 if (ext->type == EXT_KSYM) { 8732 if (ext->ksym.type_id) 8733 need_vmlinux_btf = true; 8734 else 8735 need_kallsyms = true; 8736 continue; 8737 } else if (ext->type == EXT_KCFG) { 8738 void *ext_ptr = kcfg_data + ext->kcfg.data_off; 8739 __u64 value = 0; 8740 8741 /* Kconfig externs need actual /proc/config.gz */ 8742 if (str_has_pfx(ext->name, "CONFIG_")) { 8743 need_config = true; 8744 continue; 8745 } 8746 8747 /* Virtual kcfg externs are customly handled by libbpf */ 8748 if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) { 8749 value = get_kernel_version(); 8750 if (!value) { 8751 pr_warn("extern (kcfg) '%s': failed to get kernel version\n", ext->name); 8752 return -EINVAL; 8753 } 8754 } else if (strcmp(ext->name, "LINUX_HAS_BPF_COOKIE") == 0) { 8755 value = kernel_supports(obj, FEAT_BPF_COOKIE); 8756 } else if (strcmp(ext->name, "LINUX_HAS_SYSCALL_WRAPPER") == 0) { 8757 value = kernel_supports(obj, FEAT_SYSCALL_WRAPPER); 8758 } else if (!str_has_pfx(ext->name, "LINUX_") || !ext->is_weak) { 8759 /* Currently libbpf supports only CONFIG_ and LINUX_ prefixed 8760 * __kconfig externs, where LINUX_ ones are virtual and filled out 8761 * customly by libbpf (their values don't come from Kconfig). 8762 * If LINUX_xxx variable is not recognized by libbpf, but is marked 8763 * __weak, it defaults to zero value, just like for CONFIG_xxx 8764 * externs. 
8765 */ 8766 pr_warn("extern (kcfg) '%s': unrecognized virtual extern\n", ext->name); 8767 return -EINVAL; 8768 } 8769 8770 err = set_kcfg_value_num(ext, ext_ptr, value); 8771 if (err) 8772 return err; 8773 pr_debug("extern (kcfg) '%s': set to 0x%llx\n", 8774 ext->name, (long long)value); 8775 } else { 8776 pr_warn("extern '%s': unrecognized extern kind\n", ext->name); 8777 return -EINVAL; 8778 } 8779 } 8780 if (need_config && extra_kconfig) { 8781 err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data); 8782 if (err) 8783 return -EINVAL; 8784 need_config = false; 8785 for (i = 0; i < obj->nr_extern; i++) { 8786 ext = &obj->externs[i]; 8787 if (ext->type == EXT_KCFG && !ext->is_set) { 8788 need_config = true; 8789 break; 8790 } 8791 } 8792 } 8793 if (need_config) { 8794 err = bpf_object__read_kconfig_file(obj, kcfg_data); 8795 if (err) 8796 return -EINVAL; 8797 } 8798 if (need_kallsyms) { 8799 err = bpf_object__read_kallsyms_file(obj); 8800 if (err) 8801 return -EINVAL; 8802 } 8803 if (need_vmlinux_btf) { 8804 err = bpf_object__resolve_ksyms_btf_id(obj); 8805 if (err) 8806 return -EINVAL; 8807 } 8808 for (i = 0; i < obj->nr_extern; i++) { 8809 ext = &obj->externs[i]; 8810 8811 if (!ext->is_set && !ext->is_weak) { 8812 pr_warn("extern '%s' (strong): not resolved\n", ext->name); 8813 return -ESRCH; 8814 } else if (!ext->is_set) { 8815 pr_debug("extern '%s' (weak): not resolved, defaulting to zero\n", 8816 ext->name); 8817 } 8818 } 8819 8820 return 0; 8821 } 8822 8823 static void bpf_map_prepare_vdata(const struct bpf_map *map) 8824 { 8825 const struct btf_type *type; 8826 struct bpf_struct_ops *st_ops; 8827 __u32 i; 8828 8829 st_ops = map->st_ops; 8830 type = btf__type_by_id(map->obj->btf, st_ops->type_id); 8831 for (i = 0; i < btf_vlen(type); i++) { 8832 struct bpf_program *prog = st_ops->progs[i]; 8833 void *kern_data; 8834 int prog_fd; 8835 8836 if (!prog) 8837 continue; 8838 8839 prog_fd = bpf_program__fd(prog); 8840 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i]; 8841 *(unsigned long *)kern_data = prog_fd; 8842 } 8843 } 8844 8845 static int bpf_object_prepare_struct_ops(struct bpf_object *obj) 8846 { 8847 struct bpf_map *map; 8848 int i; 8849 8850 for (i = 0; i < obj->nr_maps; i++) { 8851 map = &obj->maps[i]; 8852 8853 if (!bpf_map__is_struct_ops(map)) 8854 continue; 8855 8856 if (!map->autocreate) 8857 continue; 8858 8859 bpf_map_prepare_vdata(map); 8860 } 8861 8862 return 0; 8863 } 8864 8865 static void bpf_object_unpin(struct bpf_object *obj) 8866 { 8867 int i; 8868 8869 /* unpin any maps that were auto-pinned during load */ 8870 for (i = 0; i < obj->nr_maps; i++) 8871 if (obj->maps[i].pinned && !obj->maps[i].reused) 8872 bpf_map__unpin(&obj->maps[i], NULL); 8873 } 8874 8875 static void bpf_object_post_load_cleanup(struct bpf_object *obj) 8876 { 8877 int i; 8878 8879 /* clean up fd_array */ 8880 zfree(&obj->fd_array); 8881 8882 /* clean up module BTFs */ 8883 for (i = 0; i < obj->btf_module_cnt; i++) { 8884 close(obj->btf_modules[i].fd); 8885 btf__free(obj->btf_modules[i].btf); 8886 free(obj->btf_modules[i].name); 8887 } 8888 obj->btf_module_cnt = 0; 8889 zfree(&obj->btf_modules); 8890 8891 /* clean up vmlinux BTF */ 8892 btf__free(obj->btf_vmlinux); 8893 obj->btf_vmlinux = NULL; 8894 } 8895 8896 static int bpf_object_prepare(struct bpf_object *obj, const char *target_btf_path) 8897 { 8898 int err; 8899 8900 if (obj->state >= OBJ_PREPARED) { 8901 pr_warn("object '%s': prepare loading can't be attempted twice\n", obj->name); 8902 return -EINVAL; 8903 } 8904 8905 
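	/* the preparation steps below are chained with the `err = err ?: step()`
	 * idiom: each step runs only if every previous one succeeded, and the
	 * first error encountered is the one returned
	 */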
err = bpf_object_prepare_token(obj); 8906 err = err ? : bpf_object__probe_loading(obj); 8907 err = err ? : bpf_object__load_vmlinux_btf(obj, false); 8908 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig); 8909 err = err ? : bpf_object__sanitize_maps(obj); 8910 err = err ? : bpf_object__init_kern_struct_ops_maps(obj); 8911 err = err ? : bpf_object_adjust_struct_ops_autoload(obj); 8912 err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path); 8913 err = err ? : bpf_object__sanitize_and_load_btf(obj); 8914 err = err ? : bpf_object__create_maps(obj); 8915 err = err ? : bpf_object_prepare_progs(obj); 8916 8917 if (err) { 8918 bpf_object_unpin(obj); 8919 bpf_object_unload(obj); 8920 obj->state = OBJ_LOADED; 8921 return err; 8922 } 8923 8924 obj->state = OBJ_PREPARED; 8925 return 0; 8926 } 8927 8928 static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path) 8929 { 8930 int err; 8931 8932 if (!obj) 8933 return libbpf_err(-EINVAL); 8934 8935 if (obj->state >= OBJ_LOADED) { 8936 pr_warn("object '%s': load can't be attempted twice\n", obj->name); 8937 return libbpf_err(-EINVAL); 8938 } 8939 8940 /* Disallow kernel loading programs of non-native endianness but 8941 * permit cross-endian creation of "light skeleton". 8942 */ 8943 if (obj->gen_loader) { 8944 bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps); 8945 } else if (!is_native_endianness(obj)) { 8946 pr_warn("object '%s': loading non-native endianness is unsupported\n", obj->name); 8947 return libbpf_err(-LIBBPF_ERRNO__ENDIAN); 8948 } 8949 8950 if (obj->state < OBJ_PREPARED) { 8951 err = bpf_object_prepare(obj, target_btf_path); 8952 if (err) 8953 return libbpf_err(err); 8954 } 8955 err = bpf_object__load_progs(obj, extra_log_level); 8956 err = err ? : bpf_object_init_prog_arrays(obj); 8957 err = err ? 
: bpf_object_prepare_struct_ops(obj); 8958 8959 if (obj->gen_loader) { 8960 /* reset FDs */ 8961 if (obj->btf) 8962 btf__set_fd(obj->btf, -1); 8963 if (!err) 8964 err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps); 8965 } 8966 8967 bpf_object_post_load_cleanup(obj); 8968 obj->state = OBJ_LOADED; /* doesn't matter if successfully or not */ 8969 8970 if (err) { 8971 bpf_object_unpin(obj); 8972 bpf_object_unload(obj); 8973 pr_warn("failed to load object '%s'\n", obj->path); 8974 return libbpf_err(err); 8975 } 8976 8977 return 0; 8978 } 8979 8980 int bpf_object__prepare(struct bpf_object *obj) 8981 { 8982 return libbpf_err(bpf_object_prepare(obj, NULL)); 8983 } 8984 8985 int bpf_object__load(struct bpf_object *obj) 8986 { 8987 return bpf_object_load(obj, 0, NULL); 8988 } 8989 8990 static int make_parent_dir(const char *path) 8991 { 8992 char *dname, *dir; 8993 int err = 0; 8994 8995 dname = strdup(path); 8996 if (dname == NULL) 8997 return -ENOMEM; 8998 8999 dir = dirname(dname); 9000 if (mkdir(dir, 0700) && errno != EEXIST) 9001 err = -errno; 9002 9003 free(dname); 9004 if (err) { 9005 pr_warn("failed to mkdir %s: %s\n", path, errstr(err)); 9006 } 9007 return err; 9008 } 9009 9010 static int check_path(const char *path) 9011 { 9012 struct statfs st_fs; 9013 char *dname, *dir; 9014 int err = 0; 9015 9016 if (path == NULL) 9017 return -EINVAL; 9018 9019 dname = strdup(path); 9020 if (dname == NULL) 9021 return -ENOMEM; 9022 9023 dir = dirname(dname); 9024 if (statfs(dir, &st_fs)) { 9025 pr_warn("failed to statfs %s: %s\n", dir, errstr(errno)); 9026 err = -errno; 9027 } 9028 free(dname); 9029 9030 if (!err && st_fs.f_type != BPF_FS_MAGIC) { 9031 pr_warn("specified path %s is not on BPF FS\n", path); 9032 err = -EINVAL; 9033 } 9034 9035 return err; 9036 } 9037 9038 int bpf_program__pin(struct bpf_program *prog, const char *path) 9039 { 9040 int err; 9041 9042 if (prog->fd < 0) { 9043 pr_warn("prog '%s': can't pin program that wasn't loaded\n", prog->name); 9044 return libbpf_err(-EINVAL); 9045 } 9046 9047 err = make_parent_dir(path); 9048 if (err) 9049 return libbpf_err(err); 9050 9051 err = check_path(path); 9052 if (err) 9053 return libbpf_err(err); 9054 9055 if (bpf_obj_pin(prog->fd, path)) { 9056 err = -errno; 9057 pr_warn("prog '%s': failed to pin at '%s': %s\n", prog->name, path, errstr(err)); 9058 return libbpf_err(err); 9059 } 9060 9061 pr_debug("prog '%s': pinned at '%s'\n", prog->name, path); 9062 return 0; 9063 } 9064 9065 int bpf_program__unpin(struct bpf_program *prog, const char *path) 9066 { 9067 int err; 9068 9069 if (prog->fd < 0) { 9070 pr_warn("prog '%s': can't unpin program that wasn't loaded\n", prog->name); 9071 return libbpf_err(-EINVAL); 9072 } 9073 9074 err = check_path(path); 9075 if (err) 9076 return libbpf_err(err); 9077 9078 err = unlink(path); 9079 if (err) 9080 return libbpf_err(-errno); 9081 9082 pr_debug("prog '%s': unpinned from '%s'\n", prog->name, path); 9083 return 0; 9084 } 9085 9086 int bpf_map__pin(struct bpf_map *map, const char *path) 9087 { 9088 int err; 9089 9090 if (map == NULL) { 9091 pr_warn("invalid map pointer\n"); 9092 return libbpf_err(-EINVAL); 9093 } 9094 9095 if (map->fd < 0) { 9096 pr_warn("map '%s': can't pin BPF map without FD (was it created?)\n", map->name); 9097 return libbpf_err(-EINVAL); 9098 } 9099 9100 if (map->pin_path) { 9101 if (path && strcmp(path, map->pin_path)) { 9102 pr_warn("map '%s' already has pin path '%s' different from '%s'\n", 9103 bpf_map__name(map), map->pin_path, path); 9104 return 
libbpf_err(-EINVAL); 9105 } else if (map->pinned) { 9106 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n", 9107 bpf_map__name(map), map->pin_path); 9108 return 0; 9109 } 9110 } else { 9111 if (!path) { 9112 pr_warn("missing a path to pin map '%s' at\n", 9113 bpf_map__name(map)); 9114 return libbpf_err(-EINVAL); 9115 } else if (map->pinned) { 9116 pr_warn("map '%s' already pinned\n", bpf_map__name(map)); 9117 return libbpf_err(-EEXIST); 9118 } 9119 9120 map->pin_path = strdup(path); 9121 if (!map->pin_path) { 9122 err = -errno; 9123 goto out_err; 9124 } 9125 } 9126 9127 err = make_parent_dir(map->pin_path); 9128 if (err) 9129 return libbpf_err(err); 9130 9131 err = check_path(map->pin_path); 9132 if (err) 9133 return libbpf_err(err); 9134 9135 if (bpf_obj_pin(map->fd, map->pin_path)) { 9136 err = -errno; 9137 goto out_err; 9138 } 9139 9140 map->pinned = true; 9141 pr_debug("pinned map '%s'\n", map->pin_path); 9142 9143 return 0; 9144 9145 out_err: 9146 pr_warn("failed to pin map: %s\n", errstr(err)); 9147 return libbpf_err(err); 9148 } 9149 9150 int bpf_map__unpin(struct bpf_map *map, const char *path) 9151 { 9152 int err; 9153 9154 if (map == NULL) { 9155 pr_warn("invalid map pointer\n"); 9156 return libbpf_err(-EINVAL); 9157 } 9158 9159 if (map->pin_path) { 9160 if (path && strcmp(path, map->pin_path)) { 9161 pr_warn("map '%s' already has pin path '%s' different from '%s'\n", 9162 bpf_map__name(map), map->pin_path, path); 9163 return libbpf_err(-EINVAL); 9164 } 9165 path = map->pin_path; 9166 } else if (!path) { 9167 pr_warn("no path to unpin map '%s' from\n", 9168 bpf_map__name(map)); 9169 return libbpf_err(-EINVAL); 9170 } 9171 9172 err = check_path(path); 9173 if (err) 9174 return libbpf_err(err); 9175 9176 err = unlink(path); 9177 if (err != 0) 9178 return libbpf_err(-errno); 9179 9180 map->pinned = false; 9181 pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path); 9182 9183 return 0; 9184 } 9185 9186 int bpf_map__set_pin_path(struct bpf_map *map, const char *path) 9187 { 9188 char *new = NULL; 9189 9190 if (path) { 9191 new = strdup(path); 9192 if (!new) 9193 return libbpf_err(-errno); 9194 } 9195 9196 free(map->pin_path); 9197 map->pin_path = new; 9198 return 0; 9199 } 9200 9201 __alias(bpf_map__pin_path) 9202 const char *bpf_map__get_pin_path(const struct bpf_map *map); 9203 9204 const char *bpf_map__pin_path(const struct bpf_map *map) 9205 { 9206 return map->pin_path; 9207 } 9208 9209 bool bpf_map__is_pinned(const struct bpf_map *map) 9210 { 9211 return map->pinned; 9212 } 9213 9214 static void sanitize_pin_path(char *s) 9215 { 9216 /* bpffs disallows periods in path names */ 9217 while (*s) { 9218 if (*s == '.') 9219 *s = '_'; 9220 s++; 9221 } 9222 } 9223 9224 int bpf_object__pin_maps(struct bpf_object *obj, const char *path) 9225 { 9226 struct bpf_map *map; 9227 int err; 9228 9229 if (!obj) 9230 return libbpf_err(-ENOENT); 9231 9232 if (obj->state < OBJ_PREPARED) { 9233 pr_warn("object not yet loaded; load it first\n"); 9234 return libbpf_err(-ENOENT); 9235 } 9236 9237 bpf_object__for_each_map(map, obj) { 9238 char *pin_path = NULL; 9239 char buf[PATH_MAX]; 9240 9241 if (!map->autocreate) 9242 continue; 9243 9244 if (path) { 9245 err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map)); 9246 if (err) 9247 goto err_unpin_maps; 9248 sanitize_pin_path(buf); 9249 pin_path = buf; 9250 } else if (!map->pin_path) { 9251 continue; 9252 } 9253 9254 err = bpf_map__pin(map, pin_path); 9255 if (err) 9256 goto err_unpin_maps; 9257 } 9258 9259 return 0; 9260 
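	/* error path: best-effort unwind, unpinning the maps processed so far
	 * that have a pin path set
	 */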
9261 err_unpin_maps: 9262 while ((map = bpf_object__prev_map(obj, map))) { 9263 if (!map->pin_path) 9264 continue; 9265 9266 bpf_map__unpin(map, NULL); 9267 } 9268 9269 return libbpf_err(err); 9270 } 9271 9272 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path) 9273 { 9274 struct bpf_map *map; 9275 int err; 9276 9277 if (!obj) 9278 return libbpf_err(-ENOENT); 9279 9280 bpf_object__for_each_map(map, obj) { 9281 char *pin_path = NULL; 9282 char buf[PATH_MAX]; 9283 9284 if (path) { 9285 err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map)); 9286 if (err) 9287 return libbpf_err(err); 9288 sanitize_pin_path(buf); 9289 pin_path = buf; 9290 } else if (!map->pin_path) { 9291 continue; 9292 } 9293 9294 err = bpf_map__unpin(map, pin_path); 9295 if (err) 9296 return libbpf_err(err); 9297 } 9298 9299 return 0; 9300 } 9301 9302 int bpf_object__pin_programs(struct bpf_object *obj, const char *path) 9303 { 9304 struct bpf_program *prog; 9305 char buf[PATH_MAX]; 9306 int err; 9307 9308 if (!obj) 9309 return libbpf_err(-ENOENT); 9310 9311 if (obj->state < OBJ_LOADED) { 9312 pr_warn("object not yet loaded; load it first\n"); 9313 return libbpf_err(-ENOENT); 9314 } 9315 9316 bpf_object__for_each_program(prog, obj) { 9317 err = pathname_concat(buf, sizeof(buf), path, prog->name); 9318 if (err) 9319 goto err_unpin_programs; 9320 9321 err = bpf_program__pin(prog, buf); 9322 if (err) 9323 goto err_unpin_programs; 9324 } 9325 9326 return 0; 9327 9328 err_unpin_programs: 9329 while ((prog = bpf_object__prev_program(obj, prog))) { 9330 if (pathname_concat(buf, sizeof(buf), path, prog->name)) 9331 continue; 9332 9333 bpf_program__unpin(prog, buf); 9334 } 9335 9336 return libbpf_err(err); 9337 } 9338 9339 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path) 9340 { 9341 struct bpf_program *prog; 9342 int err; 9343 9344 if (!obj) 9345 return libbpf_err(-ENOENT); 9346 9347 bpf_object__for_each_program(prog, obj) { 9348 char buf[PATH_MAX]; 9349 9350 err = pathname_concat(buf, sizeof(buf), path, prog->name); 9351 if (err) 9352 return libbpf_err(err); 9353 9354 err = bpf_program__unpin(prog, buf); 9355 if (err) 9356 return libbpf_err(err); 9357 } 9358 9359 return 0; 9360 } 9361 9362 int bpf_object__pin(struct bpf_object *obj, const char *path) 9363 { 9364 int err; 9365 9366 err = bpf_object__pin_maps(obj, path); 9367 if (err) 9368 return libbpf_err(err); 9369 9370 err = bpf_object__pin_programs(obj, path); 9371 if (err) { 9372 bpf_object__unpin_maps(obj, path); 9373 return libbpf_err(err); 9374 } 9375 9376 return 0; 9377 } 9378 9379 int bpf_object__unpin(struct bpf_object *obj, const char *path) 9380 { 9381 int err; 9382 9383 err = bpf_object__unpin_programs(obj, path); 9384 if (err) 9385 return libbpf_err(err); 9386 9387 err = bpf_object__unpin_maps(obj, path); 9388 if (err) 9389 return libbpf_err(err); 9390 9391 return 0; 9392 } 9393 9394 static void bpf_map__destroy(struct bpf_map *map) 9395 { 9396 if (map->inner_map) { 9397 bpf_map__destroy(map->inner_map); 9398 zfree(&map->inner_map); 9399 } 9400 9401 zfree(&map->init_slots); 9402 map->init_slots_sz = 0; 9403 9404 if (map->mmaped && map->mmaped != map->obj->arena_data) 9405 munmap(map->mmaped, bpf_map_mmap_sz(map)); 9406 map->mmaped = NULL; 9407 9408 if (map->st_ops) { 9409 zfree(&map->st_ops->data); 9410 zfree(&map->st_ops->progs); 9411 zfree(&map->st_ops->kern_func_off); 9412 zfree(&map->st_ops); 9413 } 9414 9415 zfree(&map->name); 9416 zfree(&map->real_name); 9417 zfree(&map->pin_path); 9418 9419 if (map->fd >= 0) 
9420 zclose(map->fd); 9421 } 9422 9423 void bpf_object__close(struct bpf_object *obj) 9424 { 9425 size_t i; 9426 9427 if (IS_ERR_OR_NULL(obj)) 9428 return; 9429 9430 /* 9431 * if user called bpf_object__prepare() without ever getting to 9432 * bpf_object__load(), we need to clean up stuff that is normally 9433 * cleaned up at the end of loading step 9434 */ 9435 bpf_object_post_load_cleanup(obj); 9436 9437 usdt_manager_free(obj->usdt_man); 9438 obj->usdt_man = NULL; 9439 9440 bpf_gen__free(obj->gen_loader); 9441 bpf_object__elf_finish(obj); 9442 bpf_object_unload(obj); 9443 btf__free(obj->btf); 9444 btf__free(obj->btf_vmlinux); 9445 btf_ext__free(obj->btf_ext); 9446 9447 for (i = 0; i < obj->nr_maps; i++) 9448 bpf_map__destroy(&obj->maps[i]); 9449 9450 zfree(&obj->btf_custom_path); 9451 zfree(&obj->kconfig); 9452 9453 for (i = 0; i < obj->nr_extern; i++) { 9454 zfree(&obj->externs[i].name); 9455 zfree(&obj->externs[i].essent_name); 9456 } 9457 9458 zfree(&obj->externs); 9459 obj->nr_extern = 0; 9460 9461 zfree(&obj->maps); 9462 obj->nr_maps = 0; 9463 9464 if (obj->programs && obj->nr_programs) { 9465 for (i = 0; i < obj->nr_programs; i++) 9466 bpf_program__exit(&obj->programs[i]); 9467 } 9468 zfree(&obj->programs); 9469 9470 zfree(&obj->feat_cache); 9471 zfree(&obj->token_path); 9472 if (obj->token_fd > 0) 9473 close(obj->token_fd); 9474 9475 zfree(&obj->arena_data); 9476 9477 zfree(&obj->jumptables_data); 9478 obj->jumptables_data_sz = 0; 9479 9480 for (i = 0; i < obj->jumptable_map_cnt; i++) 9481 close(obj->jumptable_maps[i].fd); 9482 zfree(&obj->jumptable_maps); 9483 9484 free(obj); 9485 } 9486 9487 const char *bpf_object__name(const struct bpf_object *obj) 9488 { 9489 return obj ? obj->name : libbpf_err_ptr(-EINVAL); 9490 } 9491 9492 unsigned int bpf_object__kversion(const struct bpf_object *obj) 9493 { 9494 return obj ? obj->kern_version : 0; 9495 } 9496 9497 int bpf_object__token_fd(const struct bpf_object *obj) 9498 { 9499 return obj->token_fd ?: -1; 9500 } 9501 9502 struct btf *bpf_object__btf(const struct bpf_object *obj) 9503 { 9504 return obj ? obj->btf : NULL; 9505 } 9506 9507 int bpf_object__btf_fd(const struct bpf_object *obj) 9508 { 9509 return obj->btf ? btf__fd(obj->btf) : -1; 9510 } 9511 9512 int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version) 9513 { 9514 if (obj->state >= OBJ_LOADED) 9515 return libbpf_err(-EINVAL); 9516 9517 obj->kern_version = kern_version; 9518 9519 return 0; 9520 } 9521 9522 int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts) 9523 { 9524 struct bpf_gen *gen; 9525 9526 if (!opts) 9527 return libbpf_err(-EFAULT); 9528 if (!OPTS_VALID(opts, gen_loader_opts)) 9529 return libbpf_err(-EINVAL); 9530 gen = calloc(1, sizeof(*gen)); 9531 if (!gen) 9532 return libbpf_err(-ENOMEM); 9533 gen->opts = opts; 9534 gen->swapped_endian = !is_native_endianness(obj); 9535 obj->gen_loader = gen; 9536 return 0; 9537 } 9538 9539 static struct bpf_program * 9540 __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj, 9541 bool forward) 9542 { 9543 size_t nr_programs = obj->nr_programs; 9544 ssize_t idx; 9545 9546 if (!nr_programs) 9547 return NULL; 9548 9549 if (!p) 9550 /* Iter from the beginning */ 9551 return forward ? &obj->programs[0] : 9552 &obj->programs[nr_programs - 1]; 9553 9554 if (p->obj != obj) { 9555 pr_warn("error: program handler doesn't match object\n"); 9556 return errno = EINVAL, NULL; 9557 } 9558 9559 idx = (p - obj->programs) + (forward ? 
1 : -1); 9560 if (idx >= obj->nr_programs || idx < 0) 9561 return NULL; 9562 return &obj->programs[idx]; 9563 } 9564 9565 struct bpf_program * 9566 bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev) 9567 { 9568 struct bpf_program *prog = prev; 9569 9570 do { 9571 prog = __bpf_program__iter(prog, obj, true); 9572 } while (prog && prog_is_subprog(obj, prog)); 9573 9574 return prog; 9575 } 9576 9577 struct bpf_program * 9578 bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next) 9579 { 9580 struct bpf_program *prog = next; 9581 9582 do { 9583 prog = __bpf_program__iter(prog, obj, false); 9584 } while (prog && prog_is_subprog(obj, prog)); 9585 9586 return prog; 9587 } 9588 9589 void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex) 9590 { 9591 prog->prog_ifindex = ifindex; 9592 } 9593 9594 const char *bpf_program__name(const struct bpf_program *prog) 9595 { 9596 return prog->name; 9597 } 9598 9599 const char *bpf_program__section_name(const struct bpf_program *prog) 9600 { 9601 return prog->sec_name; 9602 } 9603 9604 bool bpf_program__autoload(const struct bpf_program *prog) 9605 { 9606 return prog->autoload; 9607 } 9608 9609 int bpf_program__set_autoload(struct bpf_program *prog, bool autoload) 9610 { 9611 if (prog->obj->state >= OBJ_LOADED) 9612 return libbpf_err(-EINVAL); 9613 9614 prog->autoload = autoload; 9615 return 0; 9616 } 9617 9618 bool bpf_program__autoattach(const struct bpf_program *prog) 9619 { 9620 return prog->autoattach; 9621 } 9622 9623 void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach) 9624 { 9625 prog->autoattach = autoattach; 9626 } 9627 9628 const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog) 9629 { 9630 return prog->insns; 9631 } 9632 9633 size_t bpf_program__insn_cnt(const struct bpf_program *prog) 9634 { 9635 return prog->insns_cnt; 9636 } 9637 9638 int bpf_program__set_insns(struct bpf_program *prog, 9639 struct bpf_insn *new_insns, size_t new_insn_cnt) 9640 { 9641 struct bpf_insn *insns; 9642 9643 if (prog->obj->state >= OBJ_LOADED) 9644 return libbpf_err(-EBUSY); 9645 9646 insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns)); 9647 /* NULL is a valid return from reallocarray if the new count is zero */ 9648 if (!insns && new_insn_cnt) { 9649 pr_warn("prog '%s': failed to realloc prog code\n", prog->name); 9650 return libbpf_err(-ENOMEM); 9651 } 9652 memcpy(insns, new_insns, new_insn_cnt * sizeof(*insns)); 9653 9654 prog->insns = insns; 9655 prog->insns_cnt = new_insn_cnt; 9656 return 0; 9657 } 9658 9659 int bpf_program__fd(const struct bpf_program *prog) 9660 { 9661 if (!prog) 9662 return libbpf_err(-EINVAL); 9663 9664 if (prog->fd < 0) 9665 return libbpf_err(-ENOENT); 9666 9667 return prog->fd; 9668 } 9669 9670 __alias(bpf_program__type) 9671 enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog); 9672 9673 enum bpf_prog_type bpf_program__type(const struct bpf_program *prog) 9674 { 9675 return prog->type; 9676 } 9677 9678 static size_t custom_sec_def_cnt; 9679 static struct bpf_sec_def *custom_sec_defs; 9680 static struct bpf_sec_def custom_fallback_def; 9681 static bool has_custom_fallback_def; 9682 static int last_custom_sec_def_handler_id; 9683 9684 int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type) 9685 { 9686 if (prog->obj->state >= OBJ_LOADED) 9687 return libbpf_err(-EBUSY); 9688 9689 /* if type is not changed, do nothing */ 9690 if (prog->type == type) 9691 return 0; 9692 9693 
prog->type = type; 9694 9695 /* If a program type was changed, we need to reset associated SEC() 9696 * handler, as it will be invalid now. The only exception is a generic 9697 * fallback handler, which by definition is program type-agnostic and 9698 * is a catch-all custom handler, optionally set by the application, 9699 * so should be able to handle any type of BPF program. 9700 */ 9701 if (prog->sec_def != &custom_fallback_def) 9702 prog->sec_def = NULL; 9703 return 0; 9704 } 9705 9706 __alias(bpf_program__expected_attach_type) 9707 enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog); 9708 9709 enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program *prog) 9710 { 9711 return prog->expected_attach_type; 9712 } 9713 9714 int bpf_program__set_expected_attach_type(struct bpf_program *prog, 9715 enum bpf_attach_type type) 9716 { 9717 if (prog->obj->state >= OBJ_LOADED) 9718 return libbpf_err(-EBUSY); 9719 9720 prog->expected_attach_type = type; 9721 return 0; 9722 } 9723 9724 __u32 bpf_program__flags(const struct bpf_program *prog) 9725 { 9726 return prog->prog_flags; 9727 } 9728 9729 int bpf_program__set_flags(struct bpf_program *prog, __u32 flags) 9730 { 9731 if (prog->obj->state >= OBJ_LOADED) 9732 return libbpf_err(-EBUSY); 9733 9734 prog->prog_flags = flags; 9735 return 0; 9736 } 9737 9738 __u32 bpf_program__log_level(const struct bpf_program *prog) 9739 { 9740 return prog->log_level; 9741 } 9742 9743 int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level) 9744 { 9745 if (prog->obj->state >= OBJ_LOADED) 9746 return libbpf_err(-EBUSY); 9747 9748 prog->log_level = log_level; 9749 return 0; 9750 } 9751 9752 const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size) 9753 { 9754 *log_size = prog->log_size; 9755 return prog->log_buf; 9756 } 9757 9758 int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size) 9759 { 9760 if (log_size && !log_buf) 9761 return libbpf_err(-EINVAL); 9762 if (prog->log_size > UINT_MAX) 9763 return libbpf_err(-EINVAL); 9764 if (prog->obj->state >= OBJ_LOADED) 9765 return libbpf_err(-EBUSY); 9766 9767 prog->log_buf = log_buf; 9768 prog->log_size = log_size; 9769 return 0; 9770 } 9771 9772 struct bpf_func_info *bpf_program__func_info(const struct bpf_program *prog) 9773 { 9774 if (prog->func_info_rec_size != sizeof(struct bpf_func_info)) 9775 return libbpf_err_ptr(-EOPNOTSUPP); 9776 return prog->func_info; 9777 } 9778 9779 __u32 bpf_program__func_info_cnt(const struct bpf_program *prog) 9780 { 9781 return prog->func_info_cnt; 9782 } 9783 9784 struct bpf_line_info *bpf_program__line_info(const struct bpf_program *prog) 9785 { 9786 if (prog->line_info_rec_size != sizeof(struct bpf_line_info)) 9787 return libbpf_err_ptr(-EOPNOTSUPP); 9788 return prog->line_info; 9789 } 9790 9791 __u32 bpf_program__line_info_cnt(const struct bpf_program *prog) 9792 { 9793 return prog->line_info_cnt; 9794 } 9795 9796 #define SEC_DEF(sec_pfx, ptype, atype, flags, ...) 
{ \ 9797 .sec = (char *)sec_pfx, \ 9798 .prog_type = BPF_PROG_TYPE_##ptype, \ 9799 .expected_attach_type = atype, \ 9800 .cookie = (long)(flags), \ 9801 .prog_prepare_load_fn = libbpf_prepare_prog_load, \ 9802 __VA_ARGS__ \ 9803 } 9804 9805 static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link); 9806 static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link); 9807 static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link); 9808 static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link); 9809 static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link); 9810 static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link); 9811 static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link); 9812 static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link); 9813 static int attach_kprobe_session(const struct bpf_program *prog, long cookie, struct bpf_link **link); 9814 static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link); 9815 static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link); 9816 static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link); 9817 9818 static const struct bpf_sec_def section_defs[] = { 9819 SEC_DEF("socket", SOCKET_FILTER, 0, SEC_NONE), 9820 SEC_DEF("sk_reuseport/migrate", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE), 9821 SEC_DEF("sk_reuseport", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE), 9822 SEC_DEF("kprobe+", KPROBE, 0, SEC_NONE, attach_kprobe), 9823 SEC_DEF("uprobe+", KPROBE, 0, SEC_NONE, attach_uprobe), 9824 SEC_DEF("uprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe), 9825 SEC_DEF("kretprobe+", KPROBE, 0, SEC_NONE, attach_kprobe), 9826 SEC_DEF("uretprobe+", KPROBE, 0, SEC_NONE, attach_uprobe), 9827 SEC_DEF("uretprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe), 9828 SEC_DEF("kprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi), 9829 SEC_DEF("kretprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi), 9830 SEC_DEF("kprobe.session+", KPROBE, BPF_TRACE_KPROBE_SESSION, SEC_NONE, attach_kprobe_session), 9831 SEC_DEF("uprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi), 9832 SEC_DEF("uretprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi), 9833 SEC_DEF("uprobe.session+", KPROBE, BPF_TRACE_UPROBE_SESSION, SEC_NONE, attach_uprobe_multi), 9834 SEC_DEF("uprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi), 9835 SEC_DEF("uretprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi), 9836 SEC_DEF("uprobe.session.s+", KPROBE, BPF_TRACE_UPROBE_SESSION, SEC_SLEEPABLE, attach_uprobe_multi), 9837 SEC_DEF("ksyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall), 9838 SEC_DEF("kretsyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall), 9839 SEC_DEF("usdt+", KPROBE, 0, SEC_USDT, attach_usdt), 9840 SEC_DEF("usdt.s+", KPROBE, 0, SEC_USDT | SEC_SLEEPABLE, attach_usdt), 9841 SEC_DEF("tc/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE), /* alias for tcx */ 9842 SEC_DEF("tc/egress", SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE), /* alias for tcx */ 9843 SEC_DEF("tcx/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE), 9844 
SEC_DEF("tcx/egress", SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE), 9845 SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */ 9846 SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */ 9847 SEC_DEF("action", SCHED_ACT, 0, SEC_NONE), /* deprecated / legacy, use tcx */ 9848 SEC_DEF("netkit/primary", SCHED_CLS, BPF_NETKIT_PRIMARY, SEC_NONE), 9849 SEC_DEF("netkit/peer", SCHED_CLS, BPF_NETKIT_PEER, SEC_NONE), 9850 SEC_DEF("tracepoint+", TRACEPOINT, 0, SEC_NONE, attach_tp), 9851 SEC_DEF("tp+", TRACEPOINT, 0, SEC_NONE, attach_tp), 9852 SEC_DEF("raw_tracepoint+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp), 9853 SEC_DEF("raw_tp+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp), 9854 SEC_DEF("raw_tracepoint.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp), 9855 SEC_DEF("raw_tp.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp), 9856 SEC_DEF("tp_btf+", TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace), 9857 SEC_DEF("fentry+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace), 9858 SEC_DEF("fmod_ret+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace), 9859 SEC_DEF("fexit+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace), 9860 SEC_DEF("fentry.s+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace), 9861 SEC_DEF("fmod_ret.s+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace), 9862 SEC_DEF("fexit.s+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace), 9863 SEC_DEF("fsession+", TRACING, BPF_TRACE_FSESSION, SEC_ATTACH_BTF, attach_trace), 9864 SEC_DEF("fsession.s+", TRACING, BPF_TRACE_FSESSION, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace), 9865 SEC_DEF("freplace+", EXT, 0, SEC_ATTACH_BTF, attach_trace), 9866 SEC_DEF("lsm+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm), 9867 SEC_DEF("lsm.s+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm), 9868 SEC_DEF("lsm_cgroup+", LSM, BPF_LSM_CGROUP, SEC_ATTACH_BTF), 9869 SEC_DEF("iter+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter), 9870 SEC_DEF("iter.s+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_iter), 9871 SEC_DEF("syscall", SYSCALL, 0, SEC_SLEEPABLE), 9872 SEC_DEF("xdp.frags/devmap", XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS), 9873 SEC_DEF("xdp/devmap", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE), 9874 SEC_DEF("xdp.frags/cpumap", XDP, BPF_XDP_CPUMAP, SEC_XDP_FRAGS), 9875 SEC_DEF("xdp/cpumap", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE), 9876 SEC_DEF("xdp.frags", XDP, BPF_XDP, SEC_XDP_FRAGS), 9877 SEC_DEF("xdp", XDP, BPF_XDP, SEC_ATTACHABLE_OPT), 9878 SEC_DEF("perf_event", PERF_EVENT, 0, SEC_NONE), 9879 SEC_DEF("lwt_in", LWT_IN, 0, SEC_NONE), 9880 SEC_DEF("lwt_out", LWT_OUT, 0, SEC_NONE), 9881 SEC_DEF("lwt_xmit", LWT_XMIT, 0, SEC_NONE), 9882 SEC_DEF("lwt_seg6local", LWT_SEG6LOCAL, 0, SEC_NONE), 9883 SEC_DEF("sockops", SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT), 9884 SEC_DEF("sk_skb/stream_parser", SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT), 9885 SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT), 9886 SEC_DEF("sk_skb/verdict", SK_SKB, BPF_SK_SKB_VERDICT, SEC_ATTACHABLE_OPT), 9887 SEC_DEF("sk_skb", SK_SKB, 0, SEC_NONE), 9888 SEC_DEF("sk_msg", SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT), 9889 SEC_DEF("lirc_mode2", LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT), 9890 SEC_DEF("flow_dissector", FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT), 9891 SEC_DEF("cgroup_skb/ingress", CGROUP_SKB, BPF_CGROUP_INET_INGRESS, 
SEC_ATTACHABLE_OPT), 9892 SEC_DEF("cgroup_skb/egress", CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT), 9893 SEC_DEF("cgroup/skb", CGROUP_SKB, 0, SEC_NONE), 9894 SEC_DEF("cgroup/sock_create", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE), 9895 SEC_DEF("cgroup/sock_release", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE), 9896 SEC_DEF("cgroup/sock", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT), 9897 SEC_DEF("cgroup/post_bind4", CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE), 9898 SEC_DEF("cgroup/post_bind6", CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE), 9899 SEC_DEF("cgroup/bind4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE), 9900 SEC_DEF("cgroup/bind6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE), 9901 SEC_DEF("cgroup/connect4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE), 9902 SEC_DEF("cgroup/connect6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE), 9903 SEC_DEF("cgroup/connect_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_CONNECT, SEC_ATTACHABLE), 9904 SEC_DEF("cgroup/sendmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE), 9905 SEC_DEF("cgroup/sendmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE), 9906 SEC_DEF("cgroup/sendmsg_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_SENDMSG, SEC_ATTACHABLE), 9907 SEC_DEF("cgroup/recvmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE), 9908 SEC_DEF("cgroup/recvmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE), 9909 SEC_DEF("cgroup/recvmsg_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_RECVMSG, SEC_ATTACHABLE), 9910 SEC_DEF("cgroup/getpeername4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE), 9911 SEC_DEF("cgroup/getpeername6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE), 9912 SEC_DEF("cgroup/getpeername_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETPEERNAME, SEC_ATTACHABLE), 9913 SEC_DEF("cgroup/getsockname4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE), 9914 SEC_DEF("cgroup/getsockname6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE), 9915 SEC_DEF("cgroup/getsockname_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETSOCKNAME, SEC_ATTACHABLE), 9916 SEC_DEF("cgroup/sysctl", CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE), 9917 SEC_DEF("cgroup/getsockopt", CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE), 9918 SEC_DEF("cgroup/setsockopt", CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE), 9919 SEC_DEF("cgroup/dev", CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT), 9920 SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE), 9921 SEC_DEF("struct_ops.s+", STRUCT_OPS, 0, SEC_SLEEPABLE), 9922 SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE), 9923 SEC_DEF("netfilter", NETFILTER, BPF_NETFILTER, SEC_NONE), 9924 }; 9925 9926 int libbpf_register_prog_handler(const char *sec, 9927 enum bpf_prog_type prog_type, 9928 enum bpf_attach_type exp_attach_type, 9929 const struct libbpf_prog_handler_opts *opts) 9930 { 9931 struct bpf_sec_def *sec_def; 9932 9933 if (!OPTS_VALID(opts, libbpf_prog_handler_opts)) 9934 return libbpf_err(-EINVAL); 9935 9936 if (last_custom_sec_def_handler_id == INT_MAX) /* prevent overflow */ 9937 return libbpf_err(-E2BIG); 9938 9939 if (sec) { 9940 sec_def = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt + 1, 9941 sizeof(*sec_def)); 9942 if (!sec_def) 9943 return libbpf_err(-ENOMEM); 9944 9945 custom_sec_defs = sec_def; 9946 sec_def = &custom_sec_defs[custom_sec_def_cnt]; 
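	/* Illustrative sketch only (not part of libbpf): an application could
	 * register a handler for its own made-up SEC("myapp") / SEC("myapp/...")
	 * programs roughly like this, and later remove it again with
	 * libbpf_unregister_prog_handler(id):
	 *
	 *	LIBBPF_OPTS(libbpf_prog_handler_opts, opts);
	 *	int id = libbpf_register_prog_handler("myapp+", BPF_PROG_TYPE_KPROBE, 0, &opts);
	 */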
9947 } else { 9948 if (has_custom_fallback_def) 9949 return libbpf_err(-EBUSY); 9950 9951 sec_def = &custom_fallback_def; 9952 } 9953 9954 sec_def->sec = sec ? strdup(sec) : NULL; 9955 if (sec && !sec_def->sec) 9956 return libbpf_err(-ENOMEM); 9957 9958 sec_def->prog_type = prog_type; 9959 sec_def->expected_attach_type = exp_attach_type; 9960 sec_def->cookie = OPTS_GET(opts, cookie, 0); 9961 9962 sec_def->prog_setup_fn = OPTS_GET(opts, prog_setup_fn, NULL); 9963 sec_def->prog_prepare_load_fn = OPTS_GET(opts, prog_prepare_load_fn, NULL); 9964 sec_def->prog_attach_fn = OPTS_GET(opts, prog_attach_fn, NULL); 9965 9966 sec_def->handler_id = ++last_custom_sec_def_handler_id; 9967 9968 if (sec) 9969 custom_sec_def_cnt++; 9970 else 9971 has_custom_fallback_def = true; 9972 9973 return sec_def->handler_id; 9974 } 9975 9976 int libbpf_unregister_prog_handler(int handler_id) 9977 { 9978 struct bpf_sec_def *sec_defs; 9979 int i; 9980 9981 if (handler_id <= 0) 9982 return libbpf_err(-EINVAL); 9983 9984 if (has_custom_fallback_def && custom_fallback_def.handler_id == handler_id) { 9985 memset(&custom_fallback_def, 0, sizeof(custom_fallback_def)); 9986 has_custom_fallback_def = false; 9987 return 0; 9988 } 9989 9990 for (i = 0; i < custom_sec_def_cnt; i++) { 9991 if (custom_sec_defs[i].handler_id == handler_id) 9992 break; 9993 } 9994 9995 if (i == custom_sec_def_cnt) 9996 return libbpf_err(-ENOENT); 9997 9998 free(custom_sec_defs[i].sec); 9999 for (i = i + 1; i < custom_sec_def_cnt; i++) 10000 custom_sec_defs[i - 1] = custom_sec_defs[i]; 10001 custom_sec_def_cnt--; 10002 10003 /* try to shrink the array, but it's ok if we couldn't */ 10004 sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs)); 10005 /* if new count is zero, reallocarray can return a valid NULL result; 10006 * in this case the previous pointer will be freed, so we *have to* 10007 * reassign old pointer to the new value (even if it's NULL) 10008 */ 10009 if (sec_defs || custom_sec_def_cnt == 0) 10010 custom_sec_defs = sec_defs; 10011 10012 return 0; 10013 } 10014 10015 static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_name) 10016 { 10017 size_t len = strlen(sec_def->sec); 10018 10019 /* "type/" always has to have proper SEC("type/extras") form */ 10020 if (sec_def->sec[len - 1] == '/') { 10021 if (str_has_pfx(sec_name, sec_def->sec)) 10022 return true; 10023 return false; 10024 } 10025 10026 /* "type+" means it can be either exact SEC("type") or 10027 * well-formed SEC("type/extras") with proper '/' separator 10028 */ 10029 if (sec_def->sec[len - 1] == '+') { 10030 len--; 10031 /* not even a prefix */ 10032 if (strncmp(sec_name, sec_def->sec, len) != 0) 10033 return false; 10034 /* exact match or has '/' separator */ 10035 if (sec_name[len] == '\0' || sec_name[len] == '/') 10036 return true; 10037 return false; 10038 } 10039 10040 return strcmp(sec_name, sec_def->sec) == 0; 10041 } 10042 10043 static const struct bpf_sec_def *find_sec_def(const char *sec_name) 10044 { 10045 const struct bpf_sec_def *sec_def; 10046 int i, n; 10047 10048 n = custom_sec_def_cnt; 10049 for (i = 0; i < n; i++) { 10050 sec_def = &custom_sec_defs[i]; 10051 if (sec_def_matches(sec_def, sec_name)) 10052 return sec_def; 10053 } 10054 10055 n = ARRAY_SIZE(section_defs); 10056 for (i = 0; i < n; i++) { 10057 sec_def = &section_defs[i]; 10058 if (sec_def_matches(sec_def, sec_name)) 10059 return sec_def; 10060 } 10061 10062 if (has_custom_fallback_def) 10063 return &custom_fallback_def; 10064 10065 return NULL;
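	/* Illustration of the matching rules implemented by sec_def_matches()
	 * above (examples only): SEC("kprobe") and SEC("kprobe/do_unlinkat")
	 * both match the "kprobe+" definition; SEC("cgroup/bind4") must match
	 * its definition exactly; and when nothing matches, the optional
	 * application-provided fallback handler (if registered) is used.
	 */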
10066 } 10067 10068 #define MAX_TYPE_NAME_SIZE 32 10069 10070 static char *libbpf_get_type_names(bool attach_type) 10071 { 10072 int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE; 10073 char *buf; 10074 10075 buf = malloc(len); 10076 if (!buf) 10077 return NULL; 10078 10079 buf[0] = '\0'; 10080 /* Forge string buf with all available names */ 10081 for (i = 0; i < ARRAY_SIZE(section_defs); i++) { 10082 const struct bpf_sec_def *sec_def = &section_defs[i]; 10083 10084 if (attach_type) { 10085 if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load) 10086 continue; 10087 10088 if (!(sec_def->cookie & SEC_ATTACHABLE)) 10089 continue; 10090 } 10091 10092 if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) { 10093 free(buf); 10094 return NULL; 10095 } 10096 strcat(buf, " "); 10097 strcat(buf, section_defs[i].sec); 10098 } 10099 10100 return buf; 10101 } 10102 10103 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, 10104 enum bpf_attach_type *expected_attach_type) 10105 { 10106 const struct bpf_sec_def *sec_def; 10107 char *type_names; 10108 10109 if (!name) 10110 return libbpf_err(-EINVAL); 10111 10112 sec_def = find_sec_def(name); 10113 if (sec_def) { 10114 *prog_type = sec_def->prog_type; 10115 *expected_attach_type = sec_def->expected_attach_type; 10116 return 0; 10117 } 10118 10119 pr_debug("failed to guess program type from ELF section '%s'\n", name); 10120 type_names = libbpf_get_type_names(false); 10121 if (type_names != NULL) { 10122 pr_debug("supported section(type) names are:%s\n", type_names); 10123 free(type_names); 10124 } 10125 10126 return libbpf_err(-ESRCH); 10127 } 10128 10129 const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t) 10130 { 10131 if (t < 0 || t >= ARRAY_SIZE(attach_type_name)) 10132 return NULL; 10133 10134 return attach_type_name[t]; 10135 } 10136 10137 const char *libbpf_bpf_link_type_str(enum bpf_link_type t) 10138 { 10139 if (t < 0 || t >= ARRAY_SIZE(link_type_name)) 10140 return NULL; 10141 10142 return link_type_name[t]; 10143 } 10144 10145 const char *libbpf_bpf_map_type_str(enum bpf_map_type t) 10146 { 10147 if (t < 0 || t >= ARRAY_SIZE(map_type_name)) 10148 return NULL; 10149 10150 return map_type_name[t]; 10151 } 10152 10153 const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t) 10154 { 10155 if (t < 0 || t >= ARRAY_SIZE(prog_type_name)) 10156 return NULL; 10157 10158 return prog_type_name[t]; 10159 } 10160 10161 static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj, 10162 int sec_idx, 10163 size_t offset) 10164 { 10165 struct bpf_map *map; 10166 size_t i; 10167 10168 for (i = 0; i < obj->nr_maps; i++) { 10169 map = &obj->maps[i]; 10170 if (!bpf_map__is_struct_ops(map)) 10171 continue; 10172 if (map->sec_idx == sec_idx && 10173 map->sec_offset <= offset && 10174 offset - map->sec_offset < map->def.value_size) 10175 return map; 10176 } 10177 10178 return NULL; 10179 } 10180 10181 /* Collect the reloc from ELF, populate the st_ops->progs[], and update 10182 * st_ops->data for shadow type.
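 *
 * As a rough illustration (a sketch only, using tcp_congestion_ops as an
 * arbitrary example of a struct_ops type), a BPF object may declare:
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops dctcp = {
 *		.init     = (void *)dctcp_init,
 *		.ssthresh = (void *)dctcp_ssthresh,
 *		.name     = "bpf_dctcp",
 *	};
 *
 * Each relocation against such a member resolves the referenced BPF program,
 * records it in st_ops->progs[], and stores its struct bpf_program pointer
 * into the matching slot of the shadow st_ops->data.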
10183 */ 10184 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, 10185 Elf64_Shdr *shdr, Elf_Data *data) 10186 { 10187 const struct btf_type *type; 10188 const struct btf_member *member; 10189 struct bpf_struct_ops *st_ops; 10190 struct bpf_program *prog; 10191 unsigned int shdr_idx; 10192 const struct btf *btf; 10193 struct bpf_map *map; 10194 unsigned int moff, insn_idx; 10195 const char *name; 10196 __u32 member_idx; 10197 Elf64_Sym *sym; 10198 Elf64_Rel *rel; 10199 int i, nrels; 10200 10201 btf = obj->btf; 10202 nrels = shdr->sh_size / shdr->sh_entsize; 10203 for (i = 0; i < nrels; i++) { 10204 rel = elf_rel_by_idx(data, i); 10205 if (!rel) { 10206 pr_warn("struct_ops reloc: failed to get %d reloc\n", i); 10207 return -LIBBPF_ERRNO__FORMAT; 10208 } 10209 10210 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info)); 10211 if (!sym) { 10212 pr_warn("struct_ops reloc: symbol %zx not found\n", 10213 (size_t)ELF64_R_SYM(rel->r_info)); 10214 return -LIBBPF_ERRNO__FORMAT; 10215 } 10216 10217 name = elf_sym_str(obj, sym->st_name) ?: "<?>"; 10218 map = find_struct_ops_map_by_offset(obj, shdr->sh_info, rel->r_offset); 10219 if (!map) { 10220 pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n", 10221 (size_t)rel->r_offset); 10222 return -EINVAL; 10223 } 10224 10225 moff = rel->r_offset - map->sec_offset; 10226 shdr_idx = sym->st_shndx; 10227 st_ops = map->st_ops; 10228 pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d (\'%s\')\n", 10229 map->name, 10230 (long long)(rel->r_info >> 32), 10231 (long long)sym->st_value, 10232 shdr_idx, (size_t)rel->r_offset, 10233 map->sec_offset, sym->st_name, name); 10234 10235 if (shdr_idx >= SHN_LORESERVE) { 10236 pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n", 10237 map->name, (size_t)rel->r_offset, shdr_idx); 10238 return -LIBBPF_ERRNO__RELOC; 10239 } 10240 if (sym->st_value % BPF_INSN_SZ) { 10241 pr_warn("struct_ops reloc %s: invalid target program offset %llu\n", 10242 map->name, (unsigned long long)sym->st_value); 10243 return -LIBBPF_ERRNO__FORMAT; 10244 } 10245 insn_idx = sym->st_value / BPF_INSN_SZ; 10246 10247 type = btf__type_by_id(btf, st_ops->type_id); 10248 member = find_member_by_offset(type, moff * 8); 10249 if (!member) { 10250 pr_warn("struct_ops reloc %s: cannot find member at moff %u\n", 10251 map->name, moff); 10252 return -EINVAL; 10253 } 10254 member_idx = member - btf_members(type); 10255 name = btf__name_by_offset(btf, member->name_off); 10256 10257 if (!resolve_func_ptr(btf, member->type, NULL)) { 10258 pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n", 10259 map->name, name); 10260 return -EINVAL; 10261 } 10262 10263 prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx); 10264 if (!prog) { 10265 pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n", 10266 map->name, shdr_idx, name); 10267 return -EINVAL; 10268 } 10269 10270 /* prevent the use of BPF prog with invalid type */ 10271 if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) { 10272 pr_warn("struct_ops reloc %s: prog %s is not struct_ops BPF program\n", 10273 map->name, prog->name); 10274 return -EINVAL; 10275 } 10276 10277 st_ops->progs[member_idx] = prog; 10278 10279 /* st_ops->data will be exposed to users, being returned by 10280 * bpf_map__initial_value() as a pointer to the shadow 10281 * type. 
All function pointers in the original struct type 10282 * should be converted to a pointer to struct bpf_program 10283 * in the shadow type. 10284 */ 10285 *((struct bpf_program **)(st_ops->data + moff)) = prog; 10286 } 10287 10288 return 0; 10289 } 10290 10291 #define BTF_TRACE_PREFIX "btf_trace_" 10292 #define BTF_LSM_PREFIX "bpf_lsm_" 10293 #define BTF_ITER_PREFIX "bpf_iter_" 10294 #define BTF_MAX_NAME_SIZE 128 10295 10296 void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type, 10297 const char **prefix, int *kind) 10298 { 10299 switch (attach_type) { 10300 case BPF_TRACE_RAW_TP: 10301 *prefix = BTF_TRACE_PREFIX; 10302 *kind = BTF_KIND_TYPEDEF; 10303 break; 10304 case BPF_LSM_MAC: 10305 case BPF_LSM_CGROUP: 10306 *prefix = BTF_LSM_PREFIX; 10307 *kind = BTF_KIND_FUNC; 10308 break; 10309 case BPF_TRACE_ITER: 10310 *prefix = BTF_ITER_PREFIX; 10311 *kind = BTF_KIND_FUNC; 10312 break; 10313 default: 10314 *prefix = ""; 10315 *kind = BTF_KIND_FUNC; 10316 } 10317 } 10318 10319 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix, 10320 const char *name, __u32 kind) 10321 { 10322 char btf_type_name[BTF_MAX_NAME_SIZE]; 10323 int ret; 10324 10325 ret = snprintf(btf_type_name, sizeof(btf_type_name), 10326 "%s%s", prefix, name); 10327 /* snprintf returns the number of characters written excluding the 10328 * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it 10329 * indicates truncation. 10330 */ 10331 if (ret < 0 || ret >= sizeof(btf_type_name)) 10332 return -ENAMETOOLONG; 10333 return btf__find_by_name_kind(btf, btf_type_name, kind); 10334 } 10335 10336 static inline int find_attach_btf_id(struct btf *btf, const char *name, 10337 enum bpf_attach_type attach_type) 10338 { 10339 const char *prefix; 10340 int kind; 10341 10342 btf_get_kernel_prefix_kind(attach_type, &prefix, &kind); 10343 return find_btf_by_prefix_kind(btf, prefix, name, kind); 10344 } 10345 10346 int libbpf_find_vmlinux_btf_id(const char *name, 10347 enum bpf_attach_type attach_type) 10348 { 10349 struct btf *btf; 10350 int err; 10351 10352 btf = btf__load_vmlinux_btf(); 10353 err = libbpf_get_error(btf); 10354 if (err) { 10355 pr_warn("vmlinux BTF is not found\n"); 10356 return libbpf_err(err); 10357 } 10358 10359 err = find_attach_btf_id(btf, name, attach_type); 10360 if (err <= 0) 10361 pr_warn("%s is not found in vmlinux BTF\n", name); 10362 10363 btf__free(btf); 10364 return libbpf_err(err); 10365 } 10366 10367 static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd, int token_fd) 10368 { 10369 struct bpf_prog_info info; 10370 __u32 info_len = sizeof(info); 10371 struct btf *btf; 10372 int err; 10373 10374 memset(&info, 0, info_len); 10375 err = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len); 10376 if (err) { 10377 pr_warn("failed bpf_prog_get_info_by_fd for FD %d: %s\n", 10378 attach_prog_fd, errstr(err)); 10379 return err; 10380 } 10381 10382 err = -EINVAL; 10383 if (!info.btf_id) { 10384 pr_warn("The target program doesn't have BTF\n"); 10385 goto out; 10386 } 10387 btf = btf_load_from_kernel(info.btf_id, NULL, token_fd); 10388 err = libbpf_get_error(btf); 10389 if (err) { 10390 pr_warn("Failed to get BTF %d of the program: %s\n", info.btf_id, errstr(err)); 10391 goto out; 10392 } 10393 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC); 10394 btf__free(btf); 10395 if (err <= 0) { 10396 pr_warn("%s is not found in prog's BTF\n", name); 10397 goto out; 10398 } 10399 out: 10400 return err; 10401 } 10402 10403 static int find_kernel_btf_id(struct 
bpf_object *obj, const char *attach_name, 10404 enum bpf_attach_type attach_type, 10405 int *btf_obj_fd, int *btf_type_id) 10406 { 10407 int ret, i, mod_len = 0; 10408 const char *fn_name, *mod_name = NULL; 10409 10410 fn_name = strchr(attach_name, ':'); 10411 if (fn_name) { 10412 mod_name = attach_name; 10413 mod_len = fn_name - mod_name; 10414 fn_name++; 10415 } 10416 10417 if (!mod_name || strncmp(mod_name, "vmlinux", mod_len) == 0) { 10418 ret = find_attach_btf_id(obj->btf_vmlinux, 10419 mod_name ? fn_name : attach_name, 10420 attach_type); 10421 if (ret > 0) { 10422 *btf_obj_fd = 0; /* vmlinux BTF */ 10423 *btf_type_id = ret; 10424 return 0; 10425 } 10426 if (ret != -ENOENT) 10427 return ret; 10428 } 10429 10430 ret = load_module_btfs(obj); 10431 if (ret) 10432 return ret; 10433 10434 for (i = 0; i < obj->btf_module_cnt; i++) { 10435 const struct module_btf *mod = &obj->btf_modules[i]; 10436 10437 if (mod_name && strncmp(mod->name, mod_name, mod_len) != 0) 10438 continue; 10439 10440 ret = find_attach_btf_id(mod->btf, 10441 mod_name ? fn_name : attach_name, 10442 attach_type); 10443 if (ret > 0) { 10444 *btf_obj_fd = mod->fd; 10445 *btf_type_id = ret; 10446 return 0; 10447 } 10448 if (ret == -ENOENT) 10449 continue; 10450 10451 return ret; 10452 } 10453 10454 return -ESRCH; 10455 } 10456 10457 static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name, 10458 int *btf_obj_fd, int *btf_type_id) 10459 { 10460 enum bpf_attach_type attach_type = prog->expected_attach_type; 10461 __u32 attach_prog_fd = prog->attach_prog_fd; 10462 int err = 0; 10463 10464 /* BPF program's BTF ID */ 10465 if (prog->type == BPF_PROG_TYPE_EXT || attach_prog_fd) { 10466 if (!attach_prog_fd) { 10467 pr_warn("prog '%s': attach program FD is not set\n", prog->name); 10468 return -EINVAL; 10469 } 10470 err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd, prog->obj->token_fd); 10471 if (err < 0) { 10472 pr_warn("prog '%s': failed to find BPF program (FD %d) BTF ID for '%s': %s\n", 10473 prog->name, attach_prog_fd, attach_name, errstr(err)); 10474 return err; 10475 } 10476 *btf_obj_fd = 0; 10477 *btf_type_id = err; 10478 return 0; 10479 } 10480 10481 /* kernel/module BTF ID */ 10482 if (prog->obj->gen_loader) { 10483 bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type); 10484 *btf_obj_fd = 0; 10485 *btf_type_id = 1; 10486 } else { 10487 err = find_kernel_btf_id(prog->obj, attach_name, 10488 attach_type, btf_obj_fd, 10489 btf_type_id); 10490 } 10491 if (err) { 10492 pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %s\n", 10493 prog->name, attach_name, errstr(err)); 10494 return err; 10495 } 10496 return 0; 10497 } 10498 10499 int libbpf_attach_type_by_name(const char *name, 10500 enum bpf_attach_type *attach_type) 10501 { 10502 char *type_names; 10503 const struct bpf_sec_def *sec_def; 10504 10505 if (!name) 10506 return libbpf_err(-EINVAL); 10507 10508 sec_def = find_sec_def(name); 10509 if (!sec_def) { 10510 pr_debug("failed to guess attach type based on ELF section name '%s'\n", name); 10511 type_names = libbpf_get_type_names(true); 10512 if (type_names != NULL) { 10513 pr_debug("attachable section(type) names are:%s\n", type_names); 10514 free(type_names); 10515 } 10516 10517 return libbpf_err(-EINVAL); 10518 } 10519 10520 if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load) 10521 return libbpf_err(-EINVAL); 10522 if (!(sec_def->cookie & SEC_ATTACHABLE)) 10523 return libbpf_err(-EINVAL); 10524 10525 *attach_type = 
sec_def->expected_attach_type; 10526 return 0; 10527 } 10528 10529 int bpf_map__fd(const struct bpf_map *map) 10530 { 10531 if (!map) 10532 return libbpf_err(-EINVAL); 10533 if (!map_is_created(map)) 10534 return -1; 10535 return map->fd; 10536 } 10537 10538 static bool map_uses_real_name(const struct bpf_map *map) 10539 { 10540 /* Since libbpf started to support custom .data.* and .rodata.* maps, 10541 * their user-visible name differs from kernel-visible name. Users see 10542 * such map's corresponding ELF section name as a map name. 10543 * This check distinguishes .data/.rodata from .data.* and .rodata.* 10544 * maps to know which name has to be returned to the user. 10545 */ 10546 if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0) 10547 return true; 10548 if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0) 10549 return true; 10550 return false; 10551 } 10552 10553 const char *bpf_map__name(const struct bpf_map *map) 10554 { 10555 if (!map) 10556 return NULL; 10557 10558 if (map_uses_real_name(map)) 10559 return map->real_name; 10560 10561 return map->name; 10562 } 10563 10564 enum bpf_map_type bpf_map__type(const struct bpf_map *map) 10565 { 10566 return map->def.type; 10567 } 10568 10569 int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type) 10570 { 10571 if (map_is_created(map)) 10572 return libbpf_err(-EBUSY); 10573 map->def.type = type; 10574 return 0; 10575 } 10576 10577 __u32 bpf_map__map_flags(const struct bpf_map *map) 10578 { 10579 return map->def.map_flags; 10580 } 10581 10582 int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags) 10583 { 10584 if (map_is_created(map)) 10585 return libbpf_err(-EBUSY); 10586 map->def.map_flags = flags; 10587 return 0; 10588 } 10589 10590 __u64 bpf_map__map_extra(const struct bpf_map *map) 10591 { 10592 return map->map_extra; 10593 } 10594 10595 int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra) 10596 { 10597 if (map_is_created(map)) 10598 return libbpf_err(-EBUSY); 10599 map->map_extra = map_extra; 10600 return 0; 10601 } 10602 10603 __u32 bpf_map__numa_node(const struct bpf_map *map) 10604 { 10605 return map->numa_node; 10606 } 10607 10608 int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node) 10609 { 10610 if (map_is_created(map)) 10611 return libbpf_err(-EBUSY); 10612 map->numa_node = numa_node; 10613 return 0; 10614 } 10615 10616 __u32 bpf_map__key_size(const struct bpf_map *map) 10617 { 10618 return map->def.key_size; 10619 } 10620 10621 int bpf_map__set_key_size(struct bpf_map *map, __u32 size) 10622 { 10623 if (map_is_created(map)) 10624 return libbpf_err(-EBUSY); 10625 map->def.key_size = size; 10626 return 0; 10627 } 10628 10629 __u32 bpf_map__value_size(const struct bpf_map *map) 10630 { 10631 return map->def.value_size; 10632 } 10633 10634 static int map_btf_datasec_resize(struct bpf_map *map, __u32 size) 10635 { 10636 struct btf *btf; 10637 struct btf_type *datasec_type, *var_type; 10638 struct btf_var_secinfo *var; 10639 const struct btf_type *array_type; 10640 const struct btf_array *array; 10641 int vlen, element_sz, new_array_id; 10642 __u32 nr_elements; 10643 10644 /* check btf existence */ 10645 btf = bpf_object__btf(map->obj); 10646 if (!btf) 10647 return -ENOENT; 10648 10649 /* verify map is datasec */ 10650 datasec_type = btf_type_by_id(btf, bpf_map__btf_value_type_id(map)); 10651 if (!btf_is_datasec(datasec_type)) { 10652 pr_warn("map '%s': cannot be resized, map value type is not a datasec\n", 10653 
bpf_map__name(map)); 10654 return -EINVAL; 10655 } 10656 10657 /* verify datasec has at least one var */ 10658 vlen = btf_vlen(datasec_type); 10659 if (vlen == 0) { 10660 pr_warn("map '%s': cannot be resized, map value datasec is empty\n", 10661 bpf_map__name(map)); 10662 return -EINVAL; 10663 } 10664 10665 /* verify last var in the datasec is an array */ 10666 var = &btf_var_secinfos(datasec_type)[vlen - 1]; 10667 var_type = btf_type_by_id(btf, var->type); 10668 array_type = skip_mods_and_typedefs(btf, var_type->type, NULL); 10669 if (!btf_is_array(array_type)) { 10670 pr_warn("map '%s': cannot be resized, last var must be an array\n", 10671 bpf_map__name(map)); 10672 return -EINVAL; 10673 } 10674 10675 /* verify request size aligns with array */ 10676 array = btf_array(array_type); 10677 element_sz = btf__resolve_size(btf, array->type); 10678 if (element_sz <= 0 || (size - var->offset) % element_sz != 0) { 10679 pr_warn("map '%s': cannot be resized, element size (%d) doesn't align with new total size (%u)\n", 10680 bpf_map__name(map), element_sz, size); 10681 return -EINVAL; 10682 } 10683 10684 /* create a new array based on the existing array, but with new length */ 10685 nr_elements = (size - var->offset) / element_sz; 10686 new_array_id = btf__add_array(btf, array->index_type, array->type, nr_elements); 10687 if (new_array_id < 0) 10688 return new_array_id; 10689 10690 /* adding a new btf type invalidates existing pointers to btf objects, 10691 * so refresh pointers before proceeding 10692 */ 10693 datasec_type = btf_type_by_id(btf, map->btf_value_type_id); 10694 var = &btf_var_secinfos(datasec_type)[vlen - 1]; 10695 var_type = btf_type_by_id(btf, var->type); 10696 10697 /* finally update btf info */ 10698 datasec_type->size = size; 10699 var->size = size - var->offset; 10700 var_type->type = new_array_id; 10701 10702 return 0; 10703 } 10704 10705 int bpf_map__set_value_size(struct bpf_map *map, __u32 size) 10706 { 10707 if (map_is_created(map)) 10708 return libbpf_err(-EBUSY); 10709 10710 if (map->mmaped) { 10711 size_t mmap_old_sz, mmap_new_sz; 10712 int err; 10713 10714 if (map->def.type != BPF_MAP_TYPE_ARRAY) 10715 return libbpf_err(-EOPNOTSUPP); 10716 10717 mmap_old_sz = bpf_map_mmap_sz(map); 10718 mmap_new_sz = array_map_mmap_sz(size, map->def.max_entries); 10719 err = bpf_map_mmap_resize(map, mmap_old_sz, mmap_new_sz); 10720 if (err) { 10721 pr_warn("map '%s': failed to resize memory-mapped region: %s\n", 10722 bpf_map__name(map), errstr(err)); 10723 return libbpf_err(err); 10724 } 10725 err = map_btf_datasec_resize(map, size); 10726 if (err && err != -ENOENT) { 10727 pr_warn("map '%s': failed to adjust resized BTF, clearing BTF key/value info: %s\n", 10728 bpf_map__name(map), errstr(err)); 10729 map->btf_value_type_id = 0; 10730 map->btf_key_type_id = 0; 10731 } 10732 } 10733 10734 map->def.value_size = size; 10735 return 0; 10736 } 10737 10738 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map) 10739 { 10740 return map ? map->btf_key_type_id : 0; 10741 } 10742 10743 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map) 10744 { 10745 return map ? 
map->btf_value_type_id : 0; 10746 } 10747 10748 int bpf_map__set_initial_value(struct bpf_map *map, 10749 const void *data, size_t size) 10750 { 10751 size_t actual_sz; 10752 10753 if (map_is_created(map)) 10754 return libbpf_err(-EBUSY); 10755 10756 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG) 10757 return libbpf_err(-EINVAL); 10758 10759 if (map->def.type == BPF_MAP_TYPE_ARENA) 10760 actual_sz = map->obj->arena_data_sz; 10761 else 10762 actual_sz = map->def.value_size; 10763 if (size != actual_sz) 10764 return libbpf_err(-EINVAL); 10765 10766 memcpy(map->mmaped, data, size); 10767 return 0; 10768 } 10769 10770 void *bpf_map__initial_value(const struct bpf_map *map, size_t *psize) 10771 { 10772 if (bpf_map__is_struct_ops(map)) { 10773 if (psize) 10774 *psize = map->def.value_size; 10775 return map->st_ops->data; 10776 } 10777 10778 if (!map->mmaped) 10779 return NULL; 10780 10781 if (map->def.type == BPF_MAP_TYPE_ARENA) 10782 *psize = map->obj->arena_data_sz; 10783 else 10784 *psize = map->def.value_size; 10785 10786 return map->mmaped; 10787 } 10788 10789 bool bpf_map__is_internal(const struct bpf_map *map) 10790 { 10791 return map->libbpf_type != LIBBPF_MAP_UNSPEC; 10792 } 10793 10794 __u32 bpf_map__ifindex(const struct bpf_map *map) 10795 { 10796 return map->map_ifindex; 10797 } 10798 10799 int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex) 10800 { 10801 if (map_is_created(map)) 10802 return libbpf_err(-EBUSY); 10803 map->map_ifindex = ifindex; 10804 return 0; 10805 } 10806 10807 int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd) 10808 { 10809 if (!bpf_map_type__is_map_in_map(map->def.type)) { 10810 pr_warn("error: unsupported map type\n"); 10811 return libbpf_err(-EINVAL); 10812 } 10813 if (map->inner_map_fd != -1) { 10814 pr_warn("error: inner_map_fd already specified\n"); 10815 return libbpf_err(-EINVAL); 10816 } 10817 if (map->inner_map) { 10818 bpf_map__destroy(map->inner_map); 10819 zfree(&map->inner_map); 10820 } 10821 map->inner_map_fd = fd; 10822 return 0; 10823 } 10824 10825 int bpf_map__set_exclusive_program(struct bpf_map *map, struct bpf_program *prog) 10826 { 10827 if (map_is_created(map)) { 10828 pr_warn("exclusive programs must be set before map creation\n"); 10829 return libbpf_err(-EINVAL); 10830 } 10831 10832 if (map->obj != prog->obj) { 10833 pr_warn("excl_prog and map must be from the same bpf object\n"); 10834 return libbpf_err(-EINVAL); 10835 } 10836 10837 map->excl_prog = prog; 10838 return 0; 10839 } 10840 10841 struct bpf_program *bpf_map__exclusive_program(struct bpf_map *map) 10842 { 10843 return map->excl_prog; 10844 } 10845 10846 static struct bpf_map * 10847 __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i) 10848 { 10849 ssize_t idx; 10850 struct bpf_map *s, *e; 10851 10852 if (!obj || !obj->maps) 10853 return errno = EINVAL, NULL; 10854 10855 s = obj->maps; 10856 e = obj->maps + obj->nr_maps; 10857 10858 if ((m < s) || (m >= e)) { 10859 pr_warn("error in %s: map handler doesn't belong to object\n", 10860 __func__); 10861 return errno = EINVAL, NULL; 10862 } 10863 10864 idx = (m - obj->maps) + i; 10865 if (idx >= obj->nr_maps || idx < 0) 10866 return NULL; 10867 return &obj->maps[idx]; 10868 } 10869 10870 struct bpf_map * 10871 bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev) 10872 { 10873 if (prev == NULL && obj != NULL) 10874 return obj->maps; 10875 10876 return __bpf_map__iter(prev, obj, 1); 10877 } 10878 10879 struct bpf_map * 10880 bpf_object__prev_map(const 
struct bpf_object *obj, const struct bpf_map *next) 10881 { 10882 if (next == NULL && obj != NULL) { 10883 if (!obj->nr_maps) 10884 return NULL; 10885 return obj->maps + obj->nr_maps - 1; 10886 } 10887 10888 return __bpf_map__iter(next, obj, -1); 10889 } 10890 10891 struct bpf_map * 10892 bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name) 10893 { 10894 struct bpf_map *pos; 10895 10896 bpf_object__for_each_map(pos, obj) { 10897 /* if it's a special internal map name (which always starts 10898 * with dot) then check if that special name matches the 10899 * real map name (ELF section name) 10900 */ 10901 if (name[0] == '.') { 10902 if (pos->real_name && strcmp(pos->real_name, name) == 0) 10903 return pos; 10904 continue; 10905 } 10906 /* otherwise map name has to be an exact match */ 10907 if (map_uses_real_name(pos)) { 10908 if (strcmp(pos->real_name, name) == 0) 10909 return pos; 10910 continue; 10911 } 10912 if (strcmp(pos->name, name) == 0) 10913 return pos; 10914 } 10915 return errno = ENOENT, NULL; 10916 } 10917 10918 int 10919 bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name) 10920 { 10921 return bpf_map__fd(bpf_object__find_map_by_name(obj, name)); 10922 } 10923 10924 static int validate_map_op(const struct bpf_map *map, size_t key_sz, 10925 size_t value_sz, bool check_value_sz, __u64 flags) 10926 { 10927 if (!map_is_created(map)) /* map is not yet created */ 10928 return -ENOENT; 10929 10930 if (map->def.key_size != key_sz) { 10931 pr_warn("map '%s': unexpected key size %zu provided, expected %u\n", 10932 map->name, key_sz, map->def.key_size); 10933 return -EINVAL; 10934 } 10935 10936 if (map->fd < 0) { 10937 pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name); 10938 return -EINVAL; 10939 } 10940 10941 if (!check_value_sz) 10942 return 0; 10943 10944 switch (map->def.type) { 10945 case BPF_MAP_TYPE_PERCPU_ARRAY: 10946 case BPF_MAP_TYPE_PERCPU_HASH: 10947 case BPF_MAP_TYPE_LRU_PERCPU_HASH: 10948 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: { 10949 int num_cpu = libbpf_num_possible_cpus(); 10950 size_t elem_sz = roundup(map->def.value_size, 8); 10951 10952 if (flags & (BPF_F_CPU | BPF_F_ALL_CPUS)) { 10953 if ((flags & BPF_F_CPU) && (flags & BPF_F_ALL_CPUS)) { 10954 pr_warn("map '%s': BPF_F_CPU and BPF_F_ALL_CPUS are mutually exclusive\n", 10955 map->name); 10956 return -EINVAL; 10957 } 10958 if (map->def.value_size != value_sz) { 10959 pr_warn("map '%s': unexpected value size %zu provided for either BPF_F_CPU or BPF_F_ALL_CPUS, expected %u\n", 10960 map->name, value_sz, map->def.value_size); 10961 return -EINVAL; 10962 } 10963 break; 10964 } 10965 10966 if (value_sz != num_cpu * elem_sz) { 10967 pr_warn("map '%s': unexpected value size %zu provided for per-CPU map, expected %d * %zu = %zd\n", 10968 map->name, value_sz, num_cpu, elem_sz, num_cpu * elem_sz); 10969 return -EINVAL; 10970 } 10971 break; 10972 } 10973 default: 10974 if (map->def.value_size != value_sz) { 10975 pr_warn("map '%s': unexpected value size %zu provided, expected %u\n", 10976 map->name, value_sz, map->def.value_size); 10977 return -EINVAL; 10978 } 10979 break; 10980 } 10981 return 0; 10982 } 10983 10984 int bpf_map__lookup_elem(const struct bpf_map *map, 10985 const void *key, size_t key_sz, 10986 void *value, size_t value_sz, __u64 flags) 10987 { 10988 int err; 10989 10990 err = validate_map_op(map, key_sz, value_sz, true, flags); 10991 if (err) 10992 return libbpf_err(err); 10993 10994 return bpf_map_lookup_elem_flags(map->fd, key, 
value, flags); 10995 } 10996 10997 int bpf_map__update_elem(const struct bpf_map *map, 10998 const void *key, size_t key_sz, 10999 const void *value, size_t value_sz, __u64 flags) 11000 { 11001 int err; 11002 11003 err = validate_map_op(map, key_sz, value_sz, true, flags); 11004 if (err) 11005 return libbpf_err(err); 11006 11007 return bpf_map_update_elem(map->fd, key, value, flags); 11008 } 11009 11010 int bpf_map__delete_elem(const struct bpf_map *map, 11011 const void *key, size_t key_sz, __u64 flags) 11012 { 11013 int err; 11014 11015 err = validate_map_op(map, key_sz, 0, false /* check_value_sz */, flags); 11016 if (err) 11017 return libbpf_err(err); 11018 11019 return bpf_map_delete_elem_flags(map->fd, key, flags); 11020 } 11021 11022 int bpf_map__lookup_and_delete_elem(const struct bpf_map *map, 11023 const void *key, size_t key_sz, 11024 void *value, size_t value_sz, __u64 flags) 11025 { 11026 int err; 11027 11028 err = validate_map_op(map, key_sz, value_sz, true, flags); 11029 if (err) 11030 return libbpf_err(err); 11031 11032 return bpf_map_lookup_and_delete_elem_flags(map->fd, key, value, flags); 11033 } 11034 11035 int bpf_map__get_next_key(const struct bpf_map *map, 11036 const void *cur_key, void *next_key, size_t key_sz) 11037 { 11038 int err; 11039 11040 err = validate_map_op(map, key_sz, 0, false /* check_value_sz */, 0); 11041 if (err) 11042 return libbpf_err(err); 11043 11044 return bpf_map_get_next_key(map->fd, cur_key, next_key); 11045 } 11046 11047 long libbpf_get_error(const void *ptr) 11048 { 11049 if (!IS_ERR_OR_NULL(ptr)) 11050 return 0; 11051 11052 if (IS_ERR(ptr)) 11053 errno = -PTR_ERR(ptr); 11054 11055 /* If ptr == NULL, then errno should be already set by the failing 11056 * API, because libbpf never returns NULL on success and it now always 11057 * sets errno on error. So no extra errno handling for ptr == NULL 11058 * case. 11059 */ 11060 return -errno; 11061 } 11062 11063 /* Replace link's underlying BPF program with the new one */ 11064 int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog) 11065 { 11066 int ret; 11067 int prog_fd = bpf_program__fd(prog); 11068 11069 if (prog_fd < 0) { 11070 pr_warn("prog '%s': can't use BPF program without FD (was it loaded?)\n", 11071 prog->name); 11072 return libbpf_err(-EINVAL); 11073 } 11074 11075 ret = bpf_link_update(bpf_link__fd(link), prog_fd, NULL); 11076 return libbpf_err_errno(ret); 11077 } 11078 11079 /* Release "ownership" of underlying BPF resource (typically, BPF program 11080 * attached to some BPF hook, e.g., tracepoint, kprobe, etc). Disconnected 11081 * link, when destructed through bpf_link__destroy() call won't attempt to 11082 * detach/unregisted that BPF resource. This is useful in situations where, 11083 * say, attached BPF program has to outlive userspace program that attached it 11084 * in the system. Depending on type of BPF program, though, there might be 11085 * additional steps (like pinning BPF program in BPF FS) necessary to ensure 11086 * exit of userspace program doesn't trigger automatic detachment and clean up 11087 * inside the kernel. 
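 *
 * A minimal illustrative sketch of that pattern (error handling omitted,
 * the pin path is made up):
 *
 *	struct bpf_link *link = bpf_program__attach(prog);
 *
 *	bpf_link__pin(link, "/sys/fs/bpf/my_link");
 *	bpf_link__disconnect(link);
 *	bpf_link__destroy(link);
 *
 * After this sequence the attachment stays active in the kernel, held alive
 * by the BPF FS pin, even once the calling process exits.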
11088 */ 11089 void bpf_link__disconnect(struct bpf_link *link) 11090 { 11091 link->disconnected = true; 11092 } 11093 11094 int bpf_link__destroy(struct bpf_link *link) 11095 { 11096 int err = 0; 11097 11098 if (IS_ERR_OR_NULL(link)) 11099 return 0; 11100 11101 if (!link->disconnected && link->detach) 11102 err = link->detach(link); 11103 if (link->pin_path) 11104 free(link->pin_path); 11105 if (link->dealloc) 11106 link->dealloc(link); 11107 else 11108 free(link); 11109 11110 return libbpf_err(err); 11111 } 11112 11113 int bpf_link__fd(const struct bpf_link *link) 11114 { 11115 return link->fd; 11116 } 11117 11118 const char *bpf_link__pin_path(const struct bpf_link *link) 11119 { 11120 return link->pin_path; 11121 } 11122 11123 static int bpf_link__detach_fd(struct bpf_link *link) 11124 { 11125 return libbpf_err_errno(close(link->fd)); 11126 } 11127 11128 struct bpf_link *bpf_link__open(const char *path) 11129 { 11130 struct bpf_link *link; 11131 int fd; 11132 11133 fd = bpf_obj_get(path); 11134 if (fd < 0) { 11135 fd = -errno; 11136 pr_warn("failed to open link at %s: %d\n", path, fd); 11137 return libbpf_err_ptr(fd); 11138 } 11139 11140 link = calloc(1, sizeof(*link)); 11141 if (!link) { 11142 close(fd); 11143 return libbpf_err_ptr(-ENOMEM); 11144 } 11145 link->detach = &bpf_link__detach_fd; 11146 link->fd = fd; 11147 11148 link->pin_path = strdup(path); 11149 if (!link->pin_path) { 11150 bpf_link__destroy(link); 11151 return libbpf_err_ptr(-ENOMEM); 11152 } 11153 11154 return link; 11155 } 11156 11157 int bpf_link__detach(struct bpf_link *link) 11158 { 11159 return bpf_link_detach(link->fd) ? -errno : 0; 11160 } 11161 11162 int bpf_link__pin(struct bpf_link *link, const char *path) 11163 { 11164 int err; 11165 11166 if (link->pin_path) 11167 return libbpf_err(-EBUSY); 11168 err = make_parent_dir(path); 11169 if (err) 11170 return libbpf_err(err); 11171 err = check_path(path); 11172 if (err) 11173 return libbpf_err(err); 11174 11175 link->pin_path = strdup(path); 11176 if (!link->pin_path) 11177 return libbpf_err(-ENOMEM); 11178 11179 if (bpf_obj_pin(link->fd, link->pin_path)) { 11180 err = -errno; 11181 zfree(&link->pin_path); 11182 return libbpf_err(err); 11183 } 11184 11185 pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path); 11186 return 0; 11187 } 11188 11189 int bpf_link__unpin(struct bpf_link *link) 11190 { 11191 int err; 11192 11193 if (!link->pin_path) 11194 return libbpf_err(-EINVAL); 11195 11196 err = unlink(link->pin_path); 11197 if (err != 0) 11198 return -errno; 11199 11200 pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path); 11201 zfree(&link->pin_path); 11202 return 0; 11203 } 11204 11205 struct bpf_link_perf { 11206 struct bpf_link link; 11207 int perf_event_fd; 11208 /* legacy kprobe support: keep track of probe identifier and type */ 11209 char *legacy_probe_name; 11210 bool legacy_is_kprobe; 11211 bool legacy_is_retprobe; 11212 }; 11213 11214 static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe); 11215 static int remove_uprobe_event_legacy(const char *probe_name, bool retprobe); 11216 11217 static int bpf_link_perf_detach(struct bpf_link *link) 11218 { 11219 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); 11220 int err = 0; 11221 11222 if (ioctl(perf_link->perf_event_fd, PERF_EVENT_IOC_DISABLE, 0) < 0) 11223 err = -errno; 11224 11225 if (perf_link->perf_event_fd != link->fd) 11226 close(perf_link->perf_event_fd); 11227 close(link->fd); 11228 11229 /* legacy uprobe/kprobe needs to 
be removed after perf event fd closure */ 11230 if (perf_link->legacy_probe_name) { 11231 if (perf_link->legacy_is_kprobe) { 11232 err = remove_kprobe_event_legacy(perf_link->legacy_probe_name, 11233 perf_link->legacy_is_retprobe); 11234 } else { 11235 err = remove_uprobe_event_legacy(perf_link->legacy_probe_name, 11236 perf_link->legacy_is_retprobe); 11237 } 11238 } 11239 11240 return err; 11241 } 11242 11243 static void bpf_link_perf_dealloc(struct bpf_link *link) 11244 { 11245 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); 11246 11247 free(perf_link->legacy_probe_name); 11248 free(perf_link); 11249 } 11250 11251 struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd, 11252 const struct bpf_perf_event_opts *opts) 11253 { 11254 struct bpf_link_perf *link; 11255 int prog_fd, link_fd = -1, err; 11256 bool force_ioctl_attach; 11257 11258 if (!OPTS_VALID(opts, bpf_perf_event_opts)) 11259 return libbpf_err_ptr(-EINVAL); 11260 11261 if (pfd < 0) { 11262 pr_warn("prog '%s': invalid perf event FD %d\n", 11263 prog->name, pfd); 11264 return libbpf_err_ptr(-EINVAL); 11265 } 11266 prog_fd = bpf_program__fd(prog); 11267 if (prog_fd < 0) { 11268 pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n", 11269 prog->name); 11270 return libbpf_err_ptr(-EINVAL); 11271 } 11272 11273 link = calloc(1, sizeof(*link)); 11274 if (!link) 11275 return libbpf_err_ptr(-ENOMEM); 11276 link->link.detach = &bpf_link_perf_detach; 11277 link->link.dealloc = &bpf_link_perf_dealloc; 11278 link->perf_event_fd = pfd; 11279 11280 force_ioctl_attach = OPTS_GET(opts, force_ioctl_attach, false); 11281 if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !force_ioctl_attach) { 11282 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts, 11283 .perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0)); 11284 11285 link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, &link_opts); 11286 if (link_fd < 0) { 11287 err = -errno; 11288 pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %s\n", 11289 prog->name, pfd, errstr(err)); 11290 goto err_out; 11291 } 11292 link->link.fd = link_fd; 11293 } else { 11294 if (OPTS_GET(opts, bpf_cookie, 0)) { 11295 pr_warn("prog '%s': user context value is not supported\n", prog->name); 11296 err = -EOPNOTSUPP; 11297 goto err_out; 11298 } 11299 11300 if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) { 11301 err = -errno; 11302 pr_warn("prog '%s': failed to attach to perf_event FD %d: %s\n", 11303 prog->name, pfd, errstr(err)); 11304 if (err == -EPROTO) 11305 pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n", 11306 prog->name, pfd); 11307 goto err_out; 11308 } 11309 link->link.fd = pfd; 11310 } 11311 11312 if (!OPTS_GET(opts, dont_enable, false)) { 11313 if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) { 11314 err = -errno; 11315 pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n", 11316 prog->name, pfd, errstr(err)); 11317 goto err_out; 11318 } 11319 } 11320 11321 return &link->link; 11322 err_out: 11323 if (link_fd >= 0) 11324 close(link_fd); 11325 free(link); 11326 return libbpf_err_ptr(err); 11327 } 11328 11329 struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd) 11330 { 11331 return bpf_program__attach_perf_event_opts(prog, pfd, NULL); 11332 } 11333 11334 /* 11335 * this function is expected to parse integer in the range of [0, 2^31-1] from 11336 * given file using scanf format string fmt. 
If actual parsed value is 11337 * negative, the result might be indistinguishable from error 11338 */ 11339 static int parse_uint_from_file(const char *file, const char *fmt) 11340 { 11341 int err, ret; 11342 FILE *f; 11343 11344 f = fopen(file, "re"); 11345 if (!f) { 11346 err = -errno; 11347 pr_debug("failed to open '%s': %s\n", file, errstr(err)); 11348 return err; 11349 } 11350 err = fscanf(f, fmt, &ret); 11351 if (err != 1) { 11352 err = err == EOF ? -EIO : -errno; 11353 pr_debug("failed to parse '%s': %s\n", file, errstr(err)); 11354 fclose(f); 11355 return err; 11356 } 11357 fclose(f); 11358 return ret; 11359 } 11360 11361 static int determine_kprobe_perf_type(void) 11362 { 11363 const char *file = "/sys/bus/event_source/devices/kprobe/type"; 11364 11365 return parse_uint_from_file(file, "%d\n"); 11366 } 11367 11368 static int determine_uprobe_perf_type(void) 11369 { 11370 const char *file = "/sys/bus/event_source/devices/uprobe/type"; 11371 11372 return parse_uint_from_file(file, "%d\n"); 11373 } 11374 11375 static int determine_kprobe_retprobe_bit(void) 11376 { 11377 const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe"; 11378 11379 return parse_uint_from_file(file, "config:%d\n"); 11380 } 11381 11382 static int determine_uprobe_retprobe_bit(void) 11383 { 11384 const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe"; 11385 11386 return parse_uint_from_file(file, "config:%d\n"); 11387 } 11388 11389 #define PERF_UPROBE_REF_CTR_OFFSET_BITS 32 11390 #define PERF_UPROBE_REF_CTR_OFFSET_SHIFT 32 11391 11392 static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name, 11393 uint64_t offset, int pid, size_t ref_ctr_off) 11394 { 11395 const size_t attr_sz = sizeof(struct perf_event_attr); 11396 struct perf_event_attr attr; 11397 int type, pfd; 11398 11399 if ((__u64)ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS)) 11400 return -EINVAL; 11401 11402 memset(&attr, 0, attr_sz); 11403 11404 type = uprobe ? determine_uprobe_perf_type() 11405 : determine_kprobe_perf_type(); 11406 if (type < 0) { 11407 pr_warn("failed to determine %s perf type: %s\n", 11408 uprobe ? "uprobe" : "kprobe", 11409 errstr(type)); 11410 return type; 11411 } 11412 if (retprobe) { 11413 int bit = uprobe ? determine_uprobe_retprobe_bit() 11414 : determine_kprobe_retprobe_bit(); 11415 11416 if (bit < 0) { 11417 pr_warn("failed to determine %s retprobe bit: %s\n", 11418 uprobe ? "uprobe" : "kprobe", 11419 errstr(bit)); 11420 return bit; 11421 } 11422 attr.config |= 1 << bit; 11423 } 11424 attr.size = attr_sz; 11425 attr.type = type; 11426 attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT; 11427 attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */ 11428 attr.config2 = offset; /* kprobe_addr or probe_offset */ 11429 11430 /* pid filter is meaningful only for uprobes */ 11431 pfd = syscall(__NR_perf_event_open, &attr, 11432 pid < 0 ? -1 : pid /* pid */, 11433 pid == -1 ? 0 : -1 /* cpu */, 11434 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); 11435 return pfd >= 0 ? pfd : -errno; 11436 } 11437 11438 static int append_to_file(const char *file, const char *fmt, ...) 
11439 { 11440 int fd, n, err = 0; 11441 va_list ap; 11442 char buf[1024]; 11443 11444 va_start(ap, fmt); 11445 n = vsnprintf(buf, sizeof(buf), fmt, ap); 11446 va_end(ap); 11447 11448 if (n < 0 || n >= sizeof(buf)) 11449 return -EINVAL; 11450 11451 fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0); 11452 if (fd < 0) 11453 return -errno; 11454 11455 if (write(fd, buf, n) < 0) 11456 err = -errno; 11457 11458 close(fd); 11459 return err; 11460 } 11461 11462 #define DEBUGFS "/sys/kernel/debug/tracing" 11463 #define TRACEFS "/sys/kernel/tracing" 11464 11465 static bool use_debugfs(void) 11466 { 11467 static int has_debugfs = -1; 11468 11469 if (has_debugfs < 0) 11470 has_debugfs = faccessat(AT_FDCWD, DEBUGFS, F_OK, AT_EACCESS) == 0; 11471 11472 return has_debugfs == 1; 11473 } 11474 11475 static const char *tracefs_path(void) 11476 { 11477 return use_debugfs() ? DEBUGFS : TRACEFS; 11478 } 11479 11480 static const char *tracefs_kprobe_events(void) 11481 { 11482 return use_debugfs() ? DEBUGFS"/kprobe_events" : TRACEFS"/kprobe_events"; 11483 } 11484 11485 static const char *tracefs_uprobe_events(void) 11486 { 11487 return use_debugfs() ? DEBUGFS"/uprobe_events" : TRACEFS"/uprobe_events"; 11488 } 11489 11490 static const char *tracefs_available_filter_functions(void) 11491 { 11492 return use_debugfs() ? DEBUGFS"/available_filter_functions" 11493 : TRACEFS"/available_filter_functions"; 11494 } 11495 11496 static const char *tracefs_available_filter_functions_addrs(void) 11497 { 11498 return use_debugfs() ? DEBUGFS"/available_filter_functions_addrs" 11499 : TRACEFS"/available_filter_functions_addrs"; 11500 } 11501 11502 static void gen_probe_legacy_event_name(char *buf, size_t buf_sz, 11503 const char *name, size_t offset) 11504 { 11505 static int index = 0; 11506 int i; 11507 11508 snprintf(buf, buf_sz, "libbpf_%u_%d_%s_0x%zx", getpid(), 11509 __sync_fetch_and_add(&index, 1), name, offset); 11510 11511 /* sanitize name in the probe name */ 11512 for (i = 0; buf[i]; i++) { 11513 if (!isalnum(buf[i])) 11514 buf[i] = '_'; 11515 } 11516 } 11517 11518 static int add_kprobe_event_legacy(const char *probe_name, bool retprobe, 11519 const char *kfunc_name, size_t offset) 11520 { 11521 return append_to_file(tracefs_kprobe_events(), "%c:%s/%s %s+0x%zx", 11522 retprobe ? 'r' : 'p', 11523 retprobe ? "kretprobes" : "kprobes", 11524 probe_name, kfunc_name, offset); 11525 } 11526 11527 static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe) 11528 { 11529 return append_to_file(tracefs_kprobe_events(), "-:%s/%s", 11530 retprobe ? "kretprobes" : "kprobes", probe_name); 11531 } 11532 11533 static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe) 11534 { 11535 char file[256]; 11536 11537 snprintf(file, sizeof(file), "%s/events/%s/%s/id", 11538 tracefs_path(), retprobe ? 
"kretprobes" : "kprobes", probe_name); 11539 11540 return parse_uint_from_file(file, "%d\n"); 11541 } 11542 11543 static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe, 11544 const char *kfunc_name, size_t offset, int pid) 11545 { 11546 const size_t attr_sz = sizeof(struct perf_event_attr); 11547 struct perf_event_attr attr; 11548 int type, pfd, err; 11549 11550 err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset); 11551 if (err < 0) { 11552 pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n", 11553 kfunc_name, offset, 11554 errstr(err)); 11555 return err; 11556 } 11557 type = determine_kprobe_perf_type_legacy(probe_name, retprobe); 11558 if (type < 0) { 11559 err = type; 11560 pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n", 11561 kfunc_name, offset, 11562 errstr(err)); 11563 goto err_clean_legacy; 11564 } 11565 11566 memset(&attr, 0, attr_sz); 11567 attr.size = attr_sz; 11568 attr.config = type; 11569 attr.type = PERF_TYPE_TRACEPOINT; 11570 11571 pfd = syscall(__NR_perf_event_open, &attr, 11572 pid < 0 ? -1 : pid, /* pid */ 11573 pid == -1 ? 0 : -1, /* cpu */ 11574 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); 11575 if (pfd < 0) { 11576 err = -errno; 11577 pr_warn("legacy kprobe perf_event_open() failed: %s\n", 11578 errstr(err)); 11579 goto err_clean_legacy; 11580 } 11581 return pfd; 11582 11583 err_clean_legacy: 11584 /* Clear the newly added legacy kprobe_event */ 11585 remove_kprobe_event_legacy(probe_name, retprobe); 11586 return err; 11587 } 11588 11589 static const char *arch_specific_syscall_pfx(void) 11590 { 11591 #if defined(__x86_64__) 11592 return "x64"; 11593 #elif defined(__i386__) 11594 return "ia32"; 11595 #elif defined(__s390x__) 11596 return "s390x"; 11597 #elif defined(__arm__) 11598 return "arm"; 11599 #elif defined(__aarch64__) 11600 return "arm64"; 11601 #elif defined(__mips__) 11602 return "mips"; 11603 #elif defined(__riscv) 11604 return "riscv"; 11605 #elif defined(__powerpc__) 11606 return "powerpc"; 11607 #elif defined(__powerpc64__) 11608 return "powerpc64"; 11609 #else 11610 return NULL; 11611 #endif 11612 } 11613 11614 int probe_kern_syscall_wrapper(int token_fd) 11615 { 11616 char syscall_name[64]; 11617 const char *ksys_pfx; 11618 11619 ksys_pfx = arch_specific_syscall_pfx(); 11620 if (!ksys_pfx) 11621 return 0; 11622 11623 snprintf(syscall_name, sizeof(syscall_name), "__%s_sys_bpf", ksys_pfx); 11624 11625 if (determine_kprobe_perf_type() >= 0) { 11626 int pfd; 11627 11628 pfd = perf_event_open_probe(false, false, syscall_name, 0, getpid(), 0); 11629 if (pfd >= 0) 11630 close(pfd); 11631 11632 return pfd >= 0 ? 
1 : 0; 11633 } else { /* legacy mode */ 11634 char probe_name[MAX_EVENT_NAME_LEN]; 11635 11636 gen_probe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0); 11637 if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0) 11638 return 0; 11639 11640 (void)remove_kprobe_event_legacy(probe_name, false); 11641 return 1; 11642 } 11643 } 11644 11645 struct bpf_link * 11646 bpf_program__attach_kprobe_opts(const struct bpf_program *prog, 11647 const char *func_name, 11648 const struct bpf_kprobe_opts *opts) 11649 { 11650 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); 11651 enum probe_attach_mode attach_mode; 11652 char *legacy_probe = NULL; 11653 struct bpf_link *link; 11654 size_t offset; 11655 bool retprobe, legacy; 11656 int pfd, err; 11657 11658 if (!OPTS_VALID(opts, bpf_kprobe_opts)) 11659 return libbpf_err_ptr(-EINVAL); 11660 11661 attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT); 11662 retprobe = OPTS_GET(opts, retprobe, false); 11663 offset = OPTS_GET(opts, offset, 0); 11664 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); 11665 11666 legacy = determine_kprobe_perf_type() < 0; 11667 switch (attach_mode) { 11668 case PROBE_ATTACH_MODE_LEGACY: 11669 legacy = true; 11670 pe_opts.force_ioctl_attach = true; 11671 break; 11672 case PROBE_ATTACH_MODE_PERF: 11673 if (legacy) 11674 return libbpf_err_ptr(-ENOTSUP); 11675 pe_opts.force_ioctl_attach = true; 11676 break; 11677 case PROBE_ATTACH_MODE_LINK: 11678 if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK)) 11679 return libbpf_err_ptr(-ENOTSUP); 11680 break; 11681 case PROBE_ATTACH_MODE_DEFAULT: 11682 break; 11683 default: 11684 return libbpf_err_ptr(-EINVAL); 11685 } 11686 11687 if (!legacy) { 11688 pfd = perf_event_open_probe(false /* uprobe */, retprobe, 11689 func_name, offset, 11690 -1 /* pid */, 0 /* ref_ctr_off */); 11691 } else { 11692 char probe_name[MAX_EVENT_NAME_LEN]; 11693 11694 gen_probe_legacy_event_name(probe_name, sizeof(probe_name), 11695 func_name, offset); 11696 11697 legacy_probe = strdup(probe_name); 11698 if (!legacy_probe) 11699 return libbpf_err_ptr(-ENOMEM); 11700 11701 pfd = perf_event_kprobe_open_legacy(legacy_probe, retprobe, func_name, 11702 offset, -1 /* pid */); 11703 } 11704 if (pfd < 0) { 11705 err = -errno; 11706 pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n", 11707 prog->name, retprobe ? "kretprobe" : "kprobe", 11708 func_name, offset, 11709 errstr(err)); 11710 goto err_out; 11711 } 11712 link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts); 11713 err = libbpf_get_error(link); 11714 if (err) { 11715 close(pfd); 11716 pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n", 11717 prog->name, retprobe ? 
"kretprobe" : "kprobe", 11718 func_name, offset, 11719 errstr(err)); 11720 goto err_clean_legacy; 11721 } 11722 if (legacy) { 11723 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); 11724 11725 perf_link->legacy_probe_name = legacy_probe; 11726 perf_link->legacy_is_kprobe = true; 11727 perf_link->legacy_is_retprobe = retprobe; 11728 } 11729 11730 return link; 11731 11732 err_clean_legacy: 11733 if (legacy) 11734 remove_kprobe_event_legacy(legacy_probe, retprobe); 11735 err_out: 11736 free(legacy_probe); 11737 return libbpf_err_ptr(err); 11738 } 11739 11740 struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog, 11741 bool retprobe, 11742 const char *func_name) 11743 { 11744 DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts, 11745 .retprobe = retprobe, 11746 ); 11747 11748 return bpf_program__attach_kprobe_opts(prog, func_name, &opts); 11749 } 11750 11751 struct bpf_link *bpf_program__attach_ksyscall(const struct bpf_program *prog, 11752 const char *syscall_name, 11753 const struct bpf_ksyscall_opts *opts) 11754 { 11755 LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts); 11756 char func_name[128]; 11757 11758 if (!OPTS_VALID(opts, bpf_ksyscall_opts)) 11759 return libbpf_err_ptr(-EINVAL); 11760 11761 if (kernel_supports(prog->obj, FEAT_SYSCALL_WRAPPER)) { 11762 /* arch_specific_syscall_pfx() should never return NULL here 11763 * because it is guarded by kernel_supports(). However, since 11764 * compiler does not know that we have an explicit conditional 11765 * as well. 11766 */ 11767 snprintf(func_name, sizeof(func_name), "__%s_sys_%s", 11768 arch_specific_syscall_pfx() ? : "", syscall_name); 11769 } else { 11770 snprintf(func_name, sizeof(func_name), "__se_sys_%s", syscall_name); 11771 } 11772 11773 kprobe_opts.retprobe = OPTS_GET(opts, retprobe, false); 11774 kprobe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); 11775 11776 return bpf_program__attach_kprobe_opts(prog, func_name, &kprobe_opts); 11777 } 11778 11779 /* Adapted from perf/util/string.c */ 11780 bool glob_match(const char *str, const char *pat) 11781 { 11782 while (*str && *pat && *pat != '*') { 11783 if (*pat == '?') { /* Matches any single character */ 11784 str++; 11785 pat++; 11786 continue; 11787 } 11788 if (*str != *pat) 11789 return false; 11790 str++; 11791 pat++; 11792 } 11793 /* Check wild card */ 11794 if (*pat == '*') { 11795 while (*pat == '*') 11796 pat++; 11797 if (!*pat) /* Tail wild card matches all */ 11798 return true; 11799 while (*str) 11800 if (glob_match(str++, pat)) 11801 return true; 11802 } 11803 return !*str && !*pat; 11804 } 11805 11806 struct kprobe_multi_resolve { 11807 const char *pattern; 11808 unsigned long *addrs; 11809 size_t cap; 11810 size_t cnt; 11811 }; 11812 11813 struct avail_kallsyms_data { 11814 char **syms; 11815 size_t cnt; 11816 struct kprobe_multi_resolve *res; 11817 }; 11818 11819 static int avail_func_cmp(const void *a, const void *b) 11820 { 11821 return strcmp(*(const char **)a, *(const char **)b); 11822 } 11823 11824 static int avail_kallsyms_cb(unsigned long long sym_addr, char sym_type, 11825 const char *sym_name, void *ctx) 11826 { 11827 struct avail_kallsyms_data *data = ctx; 11828 struct kprobe_multi_resolve *res = data->res; 11829 int err; 11830 11831 if (!glob_match(sym_name, res->pattern)) 11832 return 0; 11833 11834 if (!bsearch(&sym_name, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp)) { 11835 /* Some versions of kernel strip out .llvm.<hash> suffix from 11836 * function names reported in available_filter_functions, 
but 11837 * don't do so for kallsyms. While this is clearly a kernel 11838 * bug (fixed by [0]) we try to accommodate that in libbpf to 11839 * make multi-kprobe usability a bit better: if no match is 11840 * found, we will strip .llvm. suffix and try one more time. 11841 * 11842 * [0] fb6a421fb615 ("kallsyms: Match symbols exactly with CONFIG_LTO_CLANG") 11843 */ 11844 char sym_trim[256], *psym_trim = sym_trim; 11845 const char *sym_sfx; 11846 11847 if (!(sym_sfx = strstr(sym_name, ".llvm."))) 11848 return 0; 11849 11850 /* psym_trim vs sym_trim dance is done to avoid pointer vs array 11851 * coercion differences and get proper `const char **` pointer 11852 * which avail_func_cmp() expects 11853 */ 11854 snprintf(sym_trim, sizeof(sym_trim), "%.*s", (int)(sym_sfx - sym_name), sym_name); 11855 if (!bsearch(&psym_trim, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp)) 11856 return 0; 11857 } 11858 11859 err = libbpf_ensure_mem((void **)&res->addrs, &res->cap, sizeof(*res->addrs), res->cnt + 1); 11860 if (err) 11861 return err; 11862 11863 res->addrs[res->cnt++] = (unsigned long)sym_addr; 11864 return 0; 11865 } 11866 11867 static int libbpf_available_kallsyms_parse(struct kprobe_multi_resolve *res) 11868 { 11869 const char *available_functions_file = tracefs_available_filter_functions(); 11870 struct avail_kallsyms_data data; 11871 char sym_name[500]; 11872 FILE *f; 11873 int err = 0, ret, i; 11874 char **syms = NULL; 11875 size_t cap = 0, cnt = 0; 11876 11877 f = fopen(available_functions_file, "re"); 11878 if (!f) { 11879 err = -errno; 11880 pr_warn("failed to open %s: %s\n", available_functions_file, errstr(err)); 11881 return err; 11882 } 11883 11884 while (true) { 11885 char *name; 11886 11887 ret = fscanf(f, "%499s%*[^\n]\n", sym_name); 11888 if (ret == EOF && feof(f)) 11889 break; 11890 11891 if (ret != 1) { 11892 pr_warn("failed to parse available_filter_functions entry: %d\n", ret); 11893 err = -EINVAL; 11894 goto cleanup; 11895 } 11896 11897 if (!glob_match(sym_name, res->pattern)) 11898 continue; 11899 11900 err = libbpf_ensure_mem((void **)&syms, &cap, sizeof(*syms), cnt + 1); 11901 if (err) 11902 goto cleanup; 11903 11904 name = strdup(sym_name); 11905 if (!name) { 11906 err = -errno; 11907 goto cleanup; 11908 } 11909 11910 syms[cnt++] = name; 11911 } 11912 11913 /* no entries found, bail out */ 11914 if (cnt == 0) { 11915 err = -ENOENT; 11916 goto cleanup; 11917 } 11918 11919 /* sort available functions */ 11920 qsort(syms, cnt, sizeof(*syms), avail_func_cmp); 11921 11922 data.syms = syms; 11923 data.res = res; 11924 data.cnt = cnt; 11925 libbpf_kallsyms_parse(avail_kallsyms_cb, &data); 11926 11927 if (res->cnt == 0) 11928 err = -ENOENT; 11929 11930 cleanup: 11931 for (i = 0; i < cnt; i++) 11932 free((char *)syms[i]); 11933 free(syms); 11934 11935 fclose(f); 11936 return err; 11937 } 11938 11939 static bool has_available_filter_functions_addrs(void) 11940 { 11941 return access(tracefs_available_filter_functions_addrs(), R_OK) != -1; 11942 } 11943 11944 static int libbpf_available_kprobes_parse(struct kprobe_multi_resolve *res) 11945 { 11946 const char *available_path = tracefs_available_filter_functions_addrs(); 11947 char sym_name[500]; 11948 FILE *f; 11949 int ret, err = 0; 11950 unsigned long long sym_addr; 11951 11952 f = fopen(available_path, "re"); 11953 if (!f) { 11954 err = -errno; 11955 pr_warn("failed to open %s: %s\n", available_path, errstr(err)); 11956 return err; 11957 } 11958 11959 while (true) { 11960 ret = fscanf(f, "%llx %499s%*[^\n]\n", &sym_addr, 
sym_name); 11961 if (ret == EOF && feof(f)) 11962 break; 11963 11964 if (ret != 2) { 11965 pr_warn("failed to parse available_filter_functions_addrs entry: %d\n", 11966 ret); 11967 err = -EINVAL; 11968 goto cleanup; 11969 } 11970 11971 if (!glob_match(sym_name, res->pattern)) 11972 continue; 11973 11974 err = libbpf_ensure_mem((void **)&res->addrs, &res->cap, 11975 sizeof(*res->addrs), res->cnt + 1); 11976 if (err) 11977 goto cleanup; 11978 11979 res->addrs[res->cnt++] = (unsigned long)sym_addr; 11980 } 11981 11982 if (res->cnt == 0) 11983 err = -ENOENT; 11984 11985 cleanup: 11986 fclose(f); 11987 return err; 11988 } 11989 11990 struct bpf_link * 11991 bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, 11992 const char *pattern, 11993 const struct bpf_kprobe_multi_opts *opts) 11994 { 11995 LIBBPF_OPTS(bpf_link_create_opts, lopts); 11996 struct kprobe_multi_resolve res = { 11997 .pattern = pattern, 11998 }; 11999 enum bpf_attach_type attach_type; 12000 struct bpf_link *link = NULL; 12001 const unsigned long *addrs; 12002 int err, link_fd, prog_fd; 12003 bool retprobe, session, unique_match; 12004 const __u64 *cookies; 12005 const char **syms; 12006 size_t cnt; 12007 12008 if (!OPTS_VALID(opts, bpf_kprobe_multi_opts)) 12009 return libbpf_err_ptr(-EINVAL); 12010 12011 prog_fd = bpf_program__fd(prog); 12012 if (prog_fd < 0) { 12013 pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n", 12014 prog->name); 12015 return libbpf_err_ptr(-EINVAL); 12016 } 12017 12018 syms = OPTS_GET(opts, syms, false); 12019 addrs = OPTS_GET(opts, addrs, false); 12020 cnt = OPTS_GET(opts, cnt, false); 12021 cookies = OPTS_GET(opts, cookies, false); 12022 unique_match = OPTS_GET(opts, unique_match, false); 12023 12024 if (!pattern && !addrs && !syms) 12025 return libbpf_err_ptr(-EINVAL); 12026 if (pattern && (addrs || syms || cookies || cnt)) 12027 return libbpf_err_ptr(-EINVAL); 12028 if (!pattern && !cnt) 12029 return libbpf_err_ptr(-EINVAL); 12030 if (!pattern && unique_match) 12031 return libbpf_err_ptr(-EINVAL); 12032 if (addrs && syms) 12033 return libbpf_err_ptr(-EINVAL); 12034 12035 if (pattern) { 12036 if (has_available_filter_functions_addrs()) 12037 err = libbpf_available_kprobes_parse(&res); 12038 else 12039 err = libbpf_available_kallsyms_parse(&res); 12040 if (err) 12041 goto error; 12042 12043 if (unique_match && res.cnt != 1) { 12044 pr_warn("prog '%s': failed to find a unique match for '%s' (%zu matches)\n", 12045 prog->name, pattern, res.cnt); 12046 err = -EINVAL; 12047 goto error; 12048 } 12049 12050 addrs = res.addrs; 12051 cnt = res.cnt; 12052 } 12053 12054 retprobe = OPTS_GET(opts, retprobe, false); 12055 session = OPTS_GET(opts, session, false); 12056 12057 if (retprobe && session) 12058 return libbpf_err_ptr(-EINVAL); 12059 12060 attach_type = session ? BPF_TRACE_KPROBE_SESSION : BPF_TRACE_KPROBE_MULTI; 12061 12062 lopts.kprobe_multi.syms = syms; 12063 lopts.kprobe_multi.addrs = addrs; 12064 lopts.kprobe_multi.cookies = cookies; 12065 lopts.kprobe_multi.cnt = cnt; 12066 lopts.kprobe_multi.flags = retprobe ? 
BPF_F_KPROBE_MULTI_RETURN : 0; 12067 12068 link = calloc(1, sizeof(*link)); 12069 if (!link) { 12070 err = -ENOMEM; 12071 goto error; 12072 } 12073 link->detach = &bpf_link__detach_fd; 12074 12075 link_fd = bpf_link_create(prog_fd, 0, attach_type, &lopts); 12076 if (link_fd < 0) { 12077 err = -errno; 12078 pr_warn("prog '%s': failed to attach: %s\n", 12079 prog->name, errstr(err)); 12080 goto error; 12081 } 12082 link->fd = link_fd; 12083 free(res.addrs); 12084 return link; 12085 12086 error: 12087 free(link); 12088 free(res.addrs); 12089 return libbpf_err_ptr(err); 12090 } 12091 12092 static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link) 12093 { 12094 DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts); 12095 unsigned long offset = 0; 12096 const char *func_name; 12097 char *func; 12098 int n; 12099 12100 *link = NULL; 12101 12102 /* no auto-attach for SEC("kprobe") and SEC("kretprobe") */ 12103 if (strcmp(prog->sec_name, "kprobe") == 0 || strcmp(prog->sec_name, "kretprobe") == 0) 12104 return 0; 12105 12106 opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/"); 12107 if (opts.retprobe) 12108 func_name = prog->sec_name + sizeof("kretprobe/") - 1; 12109 else 12110 func_name = prog->sec_name + sizeof("kprobe/") - 1; 12111 12112 n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset); 12113 if (n < 1) { 12114 pr_warn("kprobe name is invalid: %s\n", func_name); 12115 return -EINVAL; 12116 } 12117 if (opts.retprobe && offset != 0) { 12118 free(func); 12119 pr_warn("kretprobes do not support offset specification\n"); 12120 return -EINVAL; 12121 } 12122 12123 opts.offset = offset; 12124 *link = bpf_program__attach_kprobe_opts(prog, func, &opts); 12125 free(func); 12126 return libbpf_get_error(*link); 12127 } 12128 12129 static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link) 12130 { 12131 LIBBPF_OPTS(bpf_ksyscall_opts, opts); 12132 const char *syscall_name; 12133 12134 *link = NULL; 12135 12136 /* no auto-attach for SEC("ksyscall") and SEC("kretsyscall") */ 12137 if (strcmp(prog->sec_name, "ksyscall") == 0 || strcmp(prog->sec_name, "kretsyscall") == 0) 12138 return 0; 12139 12140 opts.retprobe = str_has_pfx(prog->sec_name, "kretsyscall/"); 12141 if (opts.retprobe) 12142 syscall_name = prog->sec_name + sizeof("kretsyscall/") - 1; 12143 else 12144 syscall_name = prog->sec_name + sizeof("ksyscall/") - 1; 12145 12146 *link = bpf_program__attach_ksyscall(prog, syscall_name, &opts); 12147 return *link ? 
0 : -errno; 12148 } 12149 12150 static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link) 12151 { 12152 LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); 12153 const char *spec; 12154 char *pattern; 12155 int n; 12156 12157 *link = NULL; 12158 12159 /* no auto-attach for SEC("kprobe.multi") and SEC("kretprobe.multi") */ 12160 if (strcmp(prog->sec_name, "kprobe.multi") == 0 || 12161 strcmp(prog->sec_name, "kretprobe.multi") == 0) 12162 return 0; 12163 12164 opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/"); 12165 if (opts.retprobe) 12166 spec = prog->sec_name + sizeof("kretprobe.multi/") - 1; 12167 else 12168 spec = prog->sec_name + sizeof("kprobe.multi/") - 1; 12169 12170 n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern); 12171 if (n < 1) { 12172 pr_warn("kprobe multi pattern is invalid: %s\n", spec); 12173 return -EINVAL; 12174 } 12175 12176 *link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts); 12177 free(pattern); 12178 return libbpf_get_error(*link); 12179 } 12180 12181 static int attach_kprobe_session(const struct bpf_program *prog, long cookie, 12182 struct bpf_link **link) 12183 { 12184 LIBBPF_OPTS(bpf_kprobe_multi_opts, opts, .session = true); 12185 const char *spec; 12186 char *pattern; 12187 int n; 12188 12189 *link = NULL; 12190 12191 /* no auto-attach for SEC("kprobe.session") */ 12192 if (strcmp(prog->sec_name, "kprobe.session") == 0) 12193 return 0; 12194 12195 spec = prog->sec_name + sizeof("kprobe.session/") - 1; 12196 n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern); 12197 if (n < 1) { 12198 pr_warn("kprobe session pattern is invalid: %s\n", spec); 12199 return -EINVAL; 12200 } 12201 12202 *link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts); 12203 free(pattern); 12204 return *link ? 0 : -errno; 12205 } 12206 12207 static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link) 12208 { 12209 char *probe_type = NULL, *binary_path = NULL, *func_name = NULL; 12210 LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); 12211 int n, ret = -EINVAL; 12212 12213 *link = NULL; 12214 12215 n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]", 12216 &probe_type, &binary_path, &func_name); 12217 switch (n) { 12218 case 1: 12219 /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */ 12220 ret = 0; 12221 break; 12222 case 3: 12223 opts.session = str_has_pfx(probe_type, "uprobe.session"); 12224 opts.retprobe = str_has_pfx(probe_type, "uretprobe.multi"); 12225 12226 *link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts); 12227 ret = libbpf_get_error(*link); 12228 break; 12229 default: 12230 pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name, 12231 prog->sec_name); 12232 break; 12233 } 12234 free(probe_type); 12235 free(binary_path); 12236 free(func_name); 12237 return ret; 12238 } 12239 12240 static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe, 12241 const char *binary_path, size_t offset) 12242 { 12243 return append_to_file(tracefs_uprobe_events(), "%c:%s/%s %s:0x%zx", 12244 retprobe ? 'r' : 'p', 12245 retprobe ? "uretprobes" : "uprobes", 12246 probe_name, binary_path, offset); 12247 } 12248 12249 static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe) 12250 { 12251 return append_to_file(tracefs_uprobe_events(), "-:%s/%s", 12252 retprobe ? 
"uretprobes" : "uprobes", probe_name); 12253 } 12254 12255 static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe) 12256 { 12257 char file[512]; 12258 12259 snprintf(file, sizeof(file), "%s/events/%s/%s/id", 12260 tracefs_path(), retprobe ? "uretprobes" : "uprobes", probe_name); 12261 12262 return parse_uint_from_file(file, "%d\n"); 12263 } 12264 12265 static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe, 12266 const char *binary_path, size_t offset, int pid) 12267 { 12268 const size_t attr_sz = sizeof(struct perf_event_attr); 12269 struct perf_event_attr attr; 12270 int type, pfd, err; 12271 12272 err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset); 12273 if (err < 0) { 12274 pr_warn("failed to add legacy uprobe event for %s:0x%zx: %s\n", 12275 binary_path, (size_t)offset, errstr(err)); 12276 return err; 12277 } 12278 type = determine_uprobe_perf_type_legacy(probe_name, retprobe); 12279 if (type < 0) { 12280 err = type; 12281 pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %s\n", 12282 binary_path, offset, errstr(err)); 12283 goto err_clean_legacy; 12284 } 12285 12286 memset(&attr, 0, attr_sz); 12287 attr.size = attr_sz; 12288 attr.config = type; 12289 attr.type = PERF_TYPE_TRACEPOINT; 12290 12291 pfd = syscall(__NR_perf_event_open, &attr, 12292 pid < 0 ? -1 : pid, /* pid */ 12293 pid == -1 ? 0 : -1, /* cpu */ 12294 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); 12295 if (pfd < 0) { 12296 err = -errno; 12297 pr_warn("legacy uprobe perf_event_open() failed: %s\n", errstr(err)); 12298 goto err_clean_legacy; 12299 } 12300 return pfd; 12301 12302 err_clean_legacy: 12303 /* Clear the newly added legacy uprobe_event */ 12304 remove_uprobe_event_legacy(probe_name, retprobe); 12305 return err; 12306 } 12307 12308 /* Find offset of function name in archive specified by path. Currently 12309 * supported are .zip files that do not compress their contents, as used on 12310 * Android in the form of APKs, for example. "file_name" is the name of the ELF 12311 * file inside the archive. "func_name" matches symbol name or name@@LIB for 12312 * library functions. 
12313 * 12314 * An overview of the APK format specifically provided here: 12315 * https://en.wikipedia.org/w/index.php?title=Apk_(file_format)&oldid=1139099120#Package_contents 12316 */ 12317 static long elf_find_func_offset_from_archive(const char *archive_path, const char *file_name, 12318 const char *func_name) 12319 { 12320 struct zip_archive *archive; 12321 struct zip_entry entry; 12322 long ret; 12323 Elf *elf; 12324 12325 archive = zip_archive_open(archive_path); 12326 if (IS_ERR(archive)) { 12327 ret = PTR_ERR(archive); 12328 pr_warn("zip: failed to open %s: %ld\n", archive_path, ret); 12329 return ret; 12330 } 12331 12332 ret = zip_archive_find_entry(archive, file_name, &entry); 12333 if (ret) { 12334 pr_warn("zip: could not find archive member %s in %s: %ld\n", file_name, 12335 archive_path, ret); 12336 goto out; 12337 } 12338 pr_debug("zip: found entry for %s in %s at 0x%lx\n", file_name, archive_path, 12339 (unsigned long)entry.data_offset); 12340 12341 if (entry.compression) { 12342 pr_warn("zip: entry %s of %s is compressed and cannot be handled\n", file_name, 12343 archive_path); 12344 ret = -LIBBPF_ERRNO__FORMAT; 12345 goto out; 12346 } 12347 12348 elf = elf_memory((void *)entry.data, entry.data_length); 12349 if (!elf) { 12350 pr_warn("elf: could not read elf file %s from %s: %s\n", file_name, archive_path, 12351 elf_errmsg(-1)); 12352 ret = -LIBBPF_ERRNO__LIBELF; 12353 goto out; 12354 } 12355 12356 ret = elf_find_func_offset(elf, file_name, func_name); 12357 if (ret > 0) { 12358 pr_debug("elf: symbol address match for %s of %s in %s: 0x%x + 0x%lx = 0x%lx\n", 12359 func_name, file_name, archive_path, entry.data_offset, ret, 12360 ret + entry.data_offset); 12361 ret += entry.data_offset; 12362 } 12363 elf_end(elf); 12364 12365 out: 12366 zip_archive_close(archive); 12367 return ret; 12368 } 12369 12370 static const char *arch_specific_lib_paths(void) 12371 { 12372 /* 12373 * Based on https://packages.debian.org/sid/libc6. 12374 * 12375 * Assume that the traced program is built for the same architecture 12376 * as libbpf, which should cover the vast majority of cases. 12377 */ 12378 #if defined(__x86_64__) 12379 return "/lib/x86_64-linux-gnu"; 12380 #elif defined(__i386__) 12381 return "/lib/i386-linux-gnu"; 12382 #elif defined(__s390x__) 12383 return "/lib/s390x-linux-gnu"; 12384 #elif defined(__arm__) && defined(__SOFTFP__) 12385 return "/lib/arm-linux-gnueabi"; 12386 #elif defined(__arm__) && !defined(__SOFTFP__) 12387 return "/lib/arm-linux-gnueabihf"; 12388 #elif defined(__aarch64__) 12389 return "/lib/aarch64-linux-gnu"; 12390 #elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 64 12391 return "/lib/mips64el-linux-gnuabi64"; 12392 #elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 32 12393 return "/lib/mipsel-linux-gnu"; 12394 #elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 12395 return "/lib/powerpc64le-linux-gnu"; 12396 #elif defined(__sparc__) && defined(__arch64__) 12397 return "/lib/sparc64-linux-gnu"; 12398 #elif defined(__riscv) && __riscv_xlen == 64 12399 return "/lib/riscv64-linux-gnu"; 12400 #else 12401 return NULL; 12402 #endif 12403 } 12404 12405 /* Get full path to program/shared library. 
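 * Shared libraries (".so" or ".so.*" names) are searched in LD_LIBRARY_PATH,
 * then "/usr/lib64:/usr/lib", then an architecture-specific multiarch
 * directory; executables are searched in PATH, then "/usr/bin:/usr/sbin".
 * The first candidate that exists with the required permissions is used.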
*/ 12406 static int resolve_full_path(const char *file, char *result, size_t result_sz) 12407 { 12408 const char *search_paths[3] = {}; 12409 int i, perm; 12410 12411 if (str_has_sfx(file, ".so") || strstr(file, ".so.")) { 12412 search_paths[0] = getenv("LD_LIBRARY_PATH"); 12413 search_paths[1] = "/usr/lib64:/usr/lib"; 12414 search_paths[2] = arch_specific_lib_paths(); 12415 perm = R_OK; 12416 } else { 12417 search_paths[0] = getenv("PATH"); 12418 search_paths[1] = "/usr/bin:/usr/sbin"; 12419 perm = R_OK | X_OK; 12420 } 12421 12422 for (i = 0; i < ARRAY_SIZE(search_paths); i++) { 12423 const char *s; 12424 12425 if (!search_paths[i]) 12426 continue; 12427 for (s = search_paths[i]; s != NULL; s = strchr(s, ':')) { 12428 const char *next_path; 12429 int seg_len; 12430 12431 if (s[0] == ':') 12432 s++; 12433 next_path = strchr(s, ':'); 12434 seg_len = next_path ? next_path - s : strlen(s); 12435 if (!seg_len) 12436 continue; 12437 snprintf(result, result_sz, "%.*s/%s", seg_len, s, file); 12438 /* ensure it has required permissions */ 12439 if (faccessat(AT_FDCWD, result, perm, AT_EACCESS) < 0) 12440 continue; 12441 pr_debug("resolved '%s' to '%s'\n", file, result); 12442 return 0; 12443 } 12444 } 12445 return -ENOENT; 12446 } 12447 12448 struct bpf_link * 12449 bpf_program__attach_uprobe_multi(const struct bpf_program *prog, 12450 pid_t pid, 12451 const char *path, 12452 const char *func_pattern, 12453 const struct bpf_uprobe_multi_opts *opts) 12454 { 12455 const unsigned long *ref_ctr_offsets = NULL, *offsets = NULL; 12456 LIBBPF_OPTS(bpf_link_create_opts, lopts); 12457 unsigned long *resolved_offsets = NULL; 12458 enum bpf_attach_type attach_type; 12459 int err = 0, link_fd, prog_fd; 12460 struct bpf_link *link = NULL; 12461 char full_path[PATH_MAX]; 12462 bool retprobe, session; 12463 const __u64 *cookies; 12464 const char **syms; 12465 size_t cnt; 12466 12467 if (!OPTS_VALID(opts, bpf_uprobe_multi_opts)) 12468 return libbpf_err_ptr(-EINVAL); 12469 12470 prog_fd = bpf_program__fd(prog); 12471 if (prog_fd < 0) { 12472 pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n", 12473 prog->name); 12474 return libbpf_err_ptr(-EINVAL); 12475 } 12476 12477 syms = OPTS_GET(opts, syms, NULL); 12478 offsets = OPTS_GET(opts, offsets, NULL); 12479 ref_ctr_offsets = OPTS_GET(opts, ref_ctr_offsets, NULL); 12480 cookies = OPTS_GET(opts, cookies, NULL); 12481 cnt = OPTS_GET(opts, cnt, 0); 12482 retprobe = OPTS_GET(opts, retprobe, false); 12483 session = OPTS_GET(opts, session, false); 12484 12485 /* 12486 * User can specify 2 mutually exclusive set of inputs: 12487 * 12488 * 1) use only path/func_pattern/pid arguments 12489 * 12490 * 2) use path/pid with allowed combinations of: 12491 * syms/offsets/ref_ctr_offsets/cookies/cnt 12492 * 12493 * - syms and offsets are mutually exclusive 12494 * - ref_ctr_offsets and cookies are optional 12495 * 12496 * Any other usage results in error. 
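 *
 * A minimal sketch of mode 1) (illustrative only: the library path and the
 * "malloc*" glob are made-up examples, error handling is omitted):
 *
 *	LIBBPF_OPTS(bpf_uprobe_multi_opts, umopts);
 *	struct bpf_link *mlink;
 *
 *	mlink = bpf_program__attach_uprobe_multi(prog, -1,
 *						  "/usr/lib64/libc.so.6",
 *						  "malloc*", &umopts);
 *
 * Here pid == -1 means "any process". Mode 2) instead passes opts->syms or
 * opts->offsets together with opts->cnt and leaves func_pattern NULL.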
12497 */ 12498 12499 if (!path) 12500 return libbpf_err_ptr(-EINVAL); 12501 if (!func_pattern && cnt == 0) 12502 return libbpf_err_ptr(-EINVAL); 12503 12504 if (func_pattern) { 12505 if (syms || offsets || ref_ctr_offsets || cookies || cnt) 12506 return libbpf_err_ptr(-EINVAL); 12507 } else { 12508 if (!!syms == !!offsets) 12509 return libbpf_err_ptr(-EINVAL); 12510 } 12511 12512 if (retprobe && session) 12513 return libbpf_err_ptr(-EINVAL); 12514 12515 if (func_pattern) { 12516 if (!strchr(path, '/')) { 12517 err = resolve_full_path(path, full_path, sizeof(full_path)); 12518 if (err) { 12519 pr_warn("prog '%s': failed to resolve full path for '%s': %s\n", 12520 prog->name, path, errstr(err)); 12521 return libbpf_err_ptr(err); 12522 } 12523 path = full_path; 12524 } 12525 12526 err = elf_resolve_pattern_offsets(path, func_pattern, 12527 &resolved_offsets, &cnt); 12528 if (err < 0) 12529 return libbpf_err_ptr(err); 12530 offsets = resolved_offsets; 12531 } else if (syms) { 12532 err = elf_resolve_syms_offsets(path, cnt, syms, &resolved_offsets, STT_FUNC); 12533 if (err < 0) 12534 return libbpf_err_ptr(err); 12535 offsets = resolved_offsets; 12536 } 12537 12538 attach_type = session ? BPF_TRACE_UPROBE_SESSION : BPF_TRACE_UPROBE_MULTI; 12539 12540 lopts.uprobe_multi.path = path; 12541 lopts.uprobe_multi.offsets = offsets; 12542 lopts.uprobe_multi.ref_ctr_offsets = ref_ctr_offsets; 12543 lopts.uprobe_multi.cookies = cookies; 12544 lopts.uprobe_multi.cnt = cnt; 12545 lopts.uprobe_multi.flags = retprobe ? BPF_F_UPROBE_MULTI_RETURN : 0; 12546 12547 if (pid == 0) 12548 pid = getpid(); 12549 if (pid > 0) 12550 lopts.uprobe_multi.pid = pid; 12551 12552 link = calloc(1, sizeof(*link)); 12553 if (!link) { 12554 err = -ENOMEM; 12555 goto error; 12556 } 12557 link->detach = &bpf_link__detach_fd; 12558 12559 link_fd = bpf_link_create(prog_fd, 0, attach_type, &lopts); 12560 if (link_fd < 0) { 12561 err = -errno; 12562 pr_warn("prog '%s': failed to attach multi-uprobe: %s\n", 12563 prog->name, errstr(err)); 12564 goto error; 12565 } 12566 link->fd = link_fd; 12567 free(resolved_offsets); 12568 return link; 12569 12570 error: 12571 free(resolved_offsets); 12572 free(link); 12573 return libbpf_err_ptr(err); 12574 } 12575 12576 LIBBPF_API struct bpf_link * 12577 bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, 12578 const char *binary_path, size_t func_offset, 12579 const struct bpf_uprobe_opts *opts) 12580 { 12581 const char *archive_path = NULL, *archive_sep = NULL; 12582 char *legacy_probe = NULL; 12583 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); 12584 enum probe_attach_mode attach_mode; 12585 char full_path[PATH_MAX]; 12586 struct bpf_link *link; 12587 size_t ref_ctr_off; 12588 int pfd, err; 12589 bool retprobe, legacy; 12590 const char *func_name; 12591 12592 if (!OPTS_VALID(opts, bpf_uprobe_opts)) 12593 return libbpf_err_ptr(-EINVAL); 12594 12595 attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT); 12596 retprobe = OPTS_GET(opts, retprobe, false); 12597 ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0); 12598 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); 12599 12600 if (!binary_path) 12601 return libbpf_err_ptr(-EINVAL); 12602 12603 /* Check if "binary_path" refers to an archive. 
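 * Archive paths follow the "<archive>!/<path inside archive>" convention,
 * e.g. (illustrative) "/system/app/test.apk!/lib/arm64-v8a/libtest.so":
 * everything before "!/" names the zip/APK archive, everything after it
 * names the ELF member to open.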
*/ 12604 archive_sep = strstr(binary_path, "!/"); 12605 if (archive_sep) { 12606 full_path[0] = '\0'; 12607 libbpf_strlcpy(full_path, binary_path, 12608 min(sizeof(full_path), (size_t)(archive_sep - binary_path + 1))); 12609 archive_path = full_path; 12610 binary_path = archive_sep + 2; 12611 } else if (!strchr(binary_path, '/')) { 12612 err = resolve_full_path(binary_path, full_path, sizeof(full_path)); 12613 if (err) { 12614 pr_warn("prog '%s': failed to resolve full path for '%s': %s\n", 12615 prog->name, binary_path, errstr(err)); 12616 return libbpf_err_ptr(err); 12617 } 12618 binary_path = full_path; 12619 } 12620 func_name = OPTS_GET(opts, func_name, NULL); 12621 if (func_name) { 12622 long sym_off; 12623 12624 if (archive_path) { 12625 sym_off = elf_find_func_offset_from_archive(archive_path, binary_path, 12626 func_name); 12627 binary_path = archive_path; 12628 } else { 12629 sym_off = elf_find_func_offset_from_file(binary_path, func_name); 12630 } 12631 if (sym_off < 0) 12632 return libbpf_err_ptr(sym_off); 12633 func_offset += sym_off; 12634 } 12635 12636 legacy = determine_uprobe_perf_type() < 0; 12637 switch (attach_mode) { 12638 case PROBE_ATTACH_MODE_LEGACY: 12639 legacy = true; 12640 pe_opts.force_ioctl_attach = true; 12641 break; 12642 case PROBE_ATTACH_MODE_PERF: 12643 if (legacy) 12644 return libbpf_err_ptr(-ENOTSUP); 12645 pe_opts.force_ioctl_attach = true; 12646 break; 12647 case PROBE_ATTACH_MODE_LINK: 12648 if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK)) 12649 return libbpf_err_ptr(-ENOTSUP); 12650 break; 12651 case PROBE_ATTACH_MODE_DEFAULT: 12652 break; 12653 default: 12654 return libbpf_err_ptr(-EINVAL); 12655 } 12656 12657 if (!legacy) { 12658 pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path, 12659 func_offset, pid, ref_ctr_off); 12660 } else { 12661 char probe_name[MAX_EVENT_NAME_LEN]; 12662 12663 if (ref_ctr_off) 12664 return libbpf_err_ptr(-EINVAL); 12665 12666 gen_probe_legacy_event_name(probe_name, sizeof(probe_name), 12667 strrchr(binary_path, '/') ? : binary_path, 12668 func_offset); 12669 12670 legacy_probe = strdup(probe_name); 12671 if (!legacy_probe) 12672 return libbpf_err_ptr(-ENOMEM); 12673 12674 pfd = perf_event_uprobe_open_legacy(legacy_probe, retprobe, 12675 binary_path, func_offset, pid); 12676 } 12677 if (pfd < 0) { 12678 err = -errno; 12679 pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n", 12680 prog->name, retprobe ? "uretprobe" : "uprobe", 12681 binary_path, func_offset, 12682 errstr(err)); 12683 goto err_out; 12684 } 12685 12686 link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts); 12687 err = libbpf_get_error(link); 12688 if (err) { 12689 close(pfd); 12690 pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n", 12691 prog->name, retprobe ? 
"uretprobe" : "uprobe", 12692 binary_path, func_offset, 12693 errstr(err)); 12694 goto err_clean_legacy; 12695 } 12696 if (legacy) { 12697 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); 12698 12699 perf_link->legacy_probe_name = legacy_probe; 12700 perf_link->legacy_is_kprobe = false; 12701 perf_link->legacy_is_retprobe = retprobe; 12702 } 12703 return link; 12704 12705 err_clean_legacy: 12706 if (legacy) 12707 remove_uprobe_event_legacy(legacy_probe, retprobe); 12708 err_out: 12709 free(legacy_probe); 12710 return libbpf_err_ptr(err); 12711 } 12712 12713 /* Format of u[ret]probe section definition supporting auto-attach: 12714 * u[ret]probe/binary:function[+offset] 12715 * 12716 * binary can be an absolute/relative path or a filename; the latter is resolved to a 12717 * full binary path via bpf_program__attach_uprobe_opts. 12718 * 12719 * Specifying uprobe+ ensures we carry out strict matching; either "uprobe" must be 12720 * specified (and auto-attach is not possible) or the above format is specified for 12721 * auto-attach. 12722 */ 12723 static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link) 12724 { 12725 DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts); 12726 char *probe_type = NULL, *binary_path = NULL, *func_name = NULL, *func_off; 12727 int n, c, ret = -EINVAL; 12728 long offset = 0; 12729 12730 *link = NULL; 12731 12732 n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]", 12733 &probe_type, &binary_path, &func_name); 12734 switch (n) { 12735 case 1: 12736 /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */ 12737 ret = 0; 12738 break; 12739 case 2: 12740 pr_warn("prog '%s': section '%s' missing ':function[+offset]' specification\n", 12741 prog->name, prog->sec_name); 12742 break; 12743 case 3: 12744 /* check if user specifies `+offset`, if yes, this should be 12745 * the last part of the string, make sure sscanf read to EOL 12746 */ 12747 func_off = strrchr(func_name, '+'); 12748 if (func_off) { 12749 n = sscanf(func_off, "+%li%n", &offset, &c); 12750 if (n == 1 && *(func_off + c) == '\0') 12751 func_off[0] = '\0'; 12752 else 12753 offset = 0; 12754 } 12755 opts.retprobe = strcmp(probe_type, "uretprobe") == 0 || 12756 strcmp(probe_type, "uretprobe.s") == 0; 12757 if (opts.retprobe && offset != 0) { 12758 pr_warn("prog '%s': uretprobes do not support offset specification\n", 12759 prog->name); 12760 break; 12761 } 12762 opts.func_name = func_name; 12763 *link = bpf_program__attach_uprobe_opts(prog, -1, binary_path, offset, &opts); 12764 ret = libbpf_get_error(*link); 12765 break; 12766 default: 12767 pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name, 12768 prog->sec_name); 12769 break; 12770 } 12771 free(probe_type); 12772 free(binary_path); 12773 free(func_name); 12774 12775 return ret; 12776 } 12777 12778 struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog, 12779 bool retprobe, pid_t pid, 12780 const char *binary_path, 12781 size_t func_offset) 12782 { 12783 DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, .retprobe = retprobe); 12784 12785 return bpf_program__attach_uprobe_opts(prog, pid, binary_path, func_offset, &opts); 12786 } 12787 12788 struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog, 12789 pid_t pid, const char *binary_path, 12790 const char *usdt_provider, const char *usdt_name, 12791 const struct bpf_usdt_opts *opts) 12792 { 12793 char resolved_path[512]; 12794 struct bpf_object *obj = prog->obj; 12795 struct 
bpf_link *link; 12796 __u64 usdt_cookie; 12797 int err; 12798 12799 if (!OPTS_VALID(opts, bpf_uprobe_opts)) 12800 return libbpf_err_ptr(-EINVAL); 12801 12802 if (bpf_program__fd(prog) < 0) { 12803 pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n", 12804 prog->name); 12805 return libbpf_err_ptr(-EINVAL); 12806 } 12807 12808 if (!binary_path) 12809 return libbpf_err_ptr(-EINVAL); 12810 12811 if (!strchr(binary_path, '/')) { 12812 err = resolve_full_path(binary_path, resolved_path, sizeof(resolved_path)); 12813 if (err) { 12814 pr_warn("prog '%s': failed to resolve full path for '%s': %s\n", 12815 prog->name, binary_path, errstr(err)); 12816 return libbpf_err_ptr(err); 12817 } 12818 binary_path = resolved_path; 12819 } 12820 12821 /* USDT manager is instantiated lazily on first USDT attach. It will 12822 * be destroyed together with BPF object in bpf_object__close(). 12823 */ 12824 if (IS_ERR(obj->usdt_man)) 12825 return libbpf_ptr(obj->usdt_man); 12826 if (!obj->usdt_man) { 12827 obj->usdt_man = usdt_manager_new(obj); 12828 if (IS_ERR(obj->usdt_man)) 12829 return libbpf_ptr(obj->usdt_man); 12830 } 12831 12832 usdt_cookie = OPTS_GET(opts, usdt_cookie, 0); 12833 link = usdt_manager_attach_usdt(obj->usdt_man, prog, pid, binary_path, 12834 usdt_provider, usdt_name, usdt_cookie); 12835 err = libbpf_get_error(link); 12836 if (err) 12837 return libbpf_err_ptr(err); 12838 return link; 12839 } 12840 12841 static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link) 12842 { 12843 char *path = NULL, *provider = NULL, *name = NULL; 12844 const char *sec_name; 12845 int n, err; 12846 12847 sec_name = bpf_program__section_name(prog); 12848 if (strcmp(sec_name, "usdt") == 0) { 12849 /* no auto-attach for just SEC("usdt") */ 12850 *link = NULL; 12851 return 0; 12852 } 12853 12854 n = sscanf(sec_name, "usdt/%m[^:]:%m[^:]:%m[^:]", &path, &provider, &name); 12855 if (n != 3) { 12856 pr_warn("invalid section '%s', expected SEC(\"usdt/<path>:<provider>:<name>\")\n", 12857 sec_name); 12858 err = -EINVAL; 12859 } else { 12860 *link = bpf_program__attach_usdt(prog, -1 /* any process */, path, 12861 provider, name, NULL); 12862 err = libbpf_get_error(*link); 12863 } 12864 free(path); 12865 free(provider); 12866 free(name); 12867 return err; 12868 } 12869 12870 static int determine_tracepoint_id(const char *tp_category, 12871 const char *tp_name) 12872 { 12873 char file[PATH_MAX]; 12874 int ret; 12875 12876 ret = snprintf(file, sizeof(file), "%s/events/%s/%s/id", 12877 tracefs_path(), tp_category, tp_name); 12878 if (ret < 0) 12879 return -errno; 12880 if (ret >= sizeof(file)) { 12881 pr_debug("tracepoint %s/%s path is too long\n", 12882 tp_category, tp_name); 12883 return -E2BIG; 12884 } 12885 return parse_uint_from_file(file, "%d\n"); 12886 } 12887 12888 static int perf_event_open_tracepoint(const char *tp_category, 12889 const char *tp_name) 12890 { 12891 const size_t attr_sz = sizeof(struct perf_event_attr); 12892 struct perf_event_attr attr; 12893 int tp_id, pfd, err; 12894 12895 tp_id = determine_tracepoint_id(tp_category, tp_name); 12896 if (tp_id < 0) { 12897 pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n", 12898 tp_category, tp_name, 12899 errstr(tp_id)); 12900 return tp_id; 12901 } 12902 12903 memset(&attr, 0, attr_sz); 12904 attr.type = PERF_TYPE_TRACEPOINT; 12905 attr.size = attr_sz; 12906 attr.config = tp_id; 12907 12908 pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */, 12909 -1 /* group_fd */, 
PERF_FLAG_FD_CLOEXEC); 12910 if (pfd < 0) { 12911 err = -errno; 12912 pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n", 12913 tp_category, tp_name, 12914 errstr(err)); 12915 return err; 12916 } 12917 return pfd; 12918 } 12919 12920 struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *prog, 12921 const char *tp_category, 12922 const char *tp_name, 12923 const struct bpf_tracepoint_opts *opts) 12924 { 12925 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); 12926 struct bpf_link *link; 12927 int pfd, err; 12928 12929 if (!OPTS_VALID(opts, bpf_tracepoint_opts)) 12930 return libbpf_err_ptr(-EINVAL); 12931 12932 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); 12933 12934 pfd = perf_event_open_tracepoint(tp_category, tp_name); 12935 if (pfd < 0) { 12936 pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n", 12937 prog->name, tp_category, tp_name, 12938 errstr(pfd)); 12939 return libbpf_err_ptr(pfd); 12940 } 12941 link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts); 12942 err = libbpf_get_error(link); 12943 if (err) { 12944 close(pfd); 12945 pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n", 12946 prog->name, tp_category, tp_name, 12947 errstr(err)); 12948 return libbpf_err_ptr(err); 12949 } 12950 return link; 12951 } 12952 12953 struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog, 12954 const char *tp_category, 12955 const char *tp_name) 12956 { 12957 return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL); 12958 } 12959 12960 static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link) 12961 { 12962 char *sec_name, *tp_cat, *tp_name; 12963 12964 *link = NULL; 12965 12966 /* no auto-attach for SEC("tp") or SEC("tracepoint") */ 12967 if (strcmp(prog->sec_name, "tp") == 0 || strcmp(prog->sec_name, "tracepoint") == 0) 12968 return 0; 12969 12970 sec_name = strdup(prog->sec_name); 12971 if (!sec_name) 12972 return -ENOMEM; 12973 12974 /* extract "tp/<category>/<name>" or "tracepoint/<category>/<name>" */ 12975 if (str_has_pfx(prog->sec_name, "tp/")) 12976 tp_cat = sec_name + sizeof("tp/") - 1; 12977 else 12978 tp_cat = sec_name + sizeof("tracepoint/") - 1; 12979 tp_name = strchr(tp_cat, '/'); 12980 if (!tp_name) { 12981 free(sec_name); 12982 return -EINVAL; 12983 } 12984 *tp_name = '\0'; 12985 tp_name++; 12986 12987 *link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name); 12988 free(sec_name); 12989 return libbpf_get_error(*link); 12990 } 12991 12992 struct bpf_link * 12993 bpf_program__attach_raw_tracepoint_opts(const struct bpf_program *prog, 12994 const char *tp_name, 12995 struct bpf_raw_tracepoint_opts *opts) 12996 { 12997 LIBBPF_OPTS(bpf_raw_tp_opts, raw_opts); 12998 struct bpf_link *link; 12999 int prog_fd, pfd; 13000 13001 if (!OPTS_VALID(opts, bpf_raw_tracepoint_opts)) 13002 return libbpf_err_ptr(-EINVAL); 13003 13004 prog_fd = bpf_program__fd(prog); 13005 if (prog_fd < 0) { 13006 pr_warn("prog '%s': can't attach before loaded\n", prog->name); 13007 return libbpf_err_ptr(-EINVAL); 13008 } 13009 13010 link = calloc(1, sizeof(*link)); 13011 if (!link) 13012 return libbpf_err_ptr(-ENOMEM); 13013 link->detach = &bpf_link__detach_fd; 13014 13015 raw_opts.tp_name = tp_name; 13016 raw_opts.cookie = OPTS_GET(opts, cookie, 0); 13017 pfd = bpf_raw_tracepoint_open_opts(prog_fd, &raw_opts); 13018 if (pfd < 0) { 13019 pfd = -errno; 13020 free(link); 13021 pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n", 13022 
prog->name, tp_name, errstr(pfd)); 13023 return libbpf_err_ptr(pfd); 13024 } 13025 link->fd = pfd; 13026 return link; 13027 } 13028 13029 struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog, 13030 const char *tp_name) 13031 { 13032 return bpf_program__attach_raw_tracepoint_opts(prog, tp_name, NULL); 13033 } 13034 13035 static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link) 13036 { 13037 static const char *const prefixes[] = { 13038 "raw_tp", 13039 "raw_tracepoint", 13040 "raw_tp.w", 13041 "raw_tracepoint.w", 13042 }; 13043 size_t i; 13044 const char *tp_name = NULL; 13045 13046 *link = NULL; 13047 13048 for (i = 0; i < ARRAY_SIZE(prefixes); i++) { 13049 size_t pfx_len; 13050 13051 if (!str_has_pfx(prog->sec_name, prefixes[i])) 13052 continue; 13053 13054 pfx_len = strlen(prefixes[i]); 13055 /* no auto-attach case of, e.g., SEC("raw_tp") */ 13056 if (prog->sec_name[pfx_len] == '\0') 13057 return 0; 13058 13059 if (prog->sec_name[pfx_len] != '/') 13060 continue; 13061 13062 tp_name = prog->sec_name + pfx_len + 1; 13063 break; 13064 } 13065 13066 if (!tp_name) { 13067 pr_warn("prog '%s': invalid section name '%s'\n", 13068 prog->name, prog->sec_name); 13069 return -EINVAL; 13070 } 13071 13072 *link = bpf_program__attach_raw_tracepoint(prog, tp_name); 13073 return libbpf_get_error(*link); 13074 } 13075 13076 /* Common logic for all BPF program types that attach to a btf_id */ 13077 static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog, 13078 const struct bpf_trace_opts *opts) 13079 { 13080 LIBBPF_OPTS(bpf_link_create_opts, link_opts); 13081 struct bpf_link *link; 13082 int prog_fd, pfd; 13083 13084 if (!OPTS_VALID(opts, bpf_trace_opts)) 13085 return libbpf_err_ptr(-EINVAL); 13086 13087 prog_fd = bpf_program__fd(prog); 13088 if (prog_fd < 0) { 13089 pr_warn("prog '%s': can't attach before loaded\n", prog->name); 13090 return libbpf_err_ptr(-EINVAL); 13091 } 13092 13093 link = calloc(1, sizeof(*link)); 13094 if (!link) 13095 return libbpf_err_ptr(-ENOMEM); 13096 link->detach = &bpf_link__detach_fd; 13097 13098 /* libbpf is smart enough to redirect to BPF_RAW_TRACEPOINT_OPEN on old kernels */ 13099 link_opts.tracing.cookie = OPTS_GET(opts, cookie, 0); 13100 pfd = bpf_link_create(prog_fd, 0, bpf_program__expected_attach_type(prog), &link_opts); 13101 if (pfd < 0) { 13102 pfd = -errno; 13103 free(link); 13104 pr_warn("prog '%s': failed to attach: %s\n", 13105 prog->name, errstr(pfd)); 13106 return libbpf_err_ptr(pfd); 13107 } 13108 link->fd = pfd; 13109 return link; 13110 } 13111 13112 struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog) 13113 { 13114 return bpf_program__attach_btf_id(prog, NULL); 13115 } 13116 13117 struct bpf_link *bpf_program__attach_trace_opts(const struct bpf_program *prog, 13118 const struct bpf_trace_opts *opts) 13119 { 13120 return bpf_program__attach_btf_id(prog, opts); 13121 } 13122 13123 struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog) 13124 { 13125 return bpf_program__attach_btf_id(prog, NULL); 13126 } 13127 13128 static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link) 13129 { 13130 *link = bpf_program__attach_trace(prog); 13131 return libbpf_get_error(*link); 13132 } 13133 13134 static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link) 13135 { 13136 *link = bpf_program__attach_lsm(prog); 13137 return libbpf_get_error(*link); 13138 } 13139 13140 static struct 
bpf_link * 13141 bpf_program_attach_fd(const struct bpf_program *prog, 13142 int target_fd, const char *target_name, 13143 const struct bpf_link_create_opts *opts) 13144 { 13145 enum bpf_attach_type attach_type; 13146 struct bpf_link *link; 13147 int prog_fd, link_fd; 13148 13149 prog_fd = bpf_program__fd(prog); 13150 if (prog_fd < 0) { 13151 pr_warn("prog '%s': can't attach before loaded\n", prog->name); 13152 return libbpf_err_ptr(-EINVAL); 13153 } 13154 13155 link = calloc(1, sizeof(*link)); 13156 if (!link) 13157 return libbpf_err_ptr(-ENOMEM); 13158 link->detach = &bpf_link__detach_fd; 13159 13160 attach_type = bpf_program__expected_attach_type(prog); 13161 link_fd = bpf_link_create(prog_fd, target_fd, attach_type, opts); 13162 if (link_fd < 0) { 13163 link_fd = -errno; 13164 free(link); 13165 pr_warn("prog '%s': failed to attach to %s: %s\n", 13166 prog->name, target_name, 13167 errstr(link_fd)); 13168 return libbpf_err_ptr(link_fd); 13169 } 13170 link->fd = link_fd; 13171 return link; 13172 } 13173 13174 struct bpf_link * 13175 bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd) 13176 { 13177 return bpf_program_attach_fd(prog, cgroup_fd, "cgroup", NULL); 13178 } 13179 13180 struct bpf_link * 13181 bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd) 13182 { 13183 return bpf_program_attach_fd(prog, netns_fd, "netns", NULL); 13184 } 13185 13186 struct bpf_link * 13187 bpf_program__attach_sockmap(const struct bpf_program *prog, int map_fd) 13188 { 13189 return bpf_program_attach_fd(prog, map_fd, "sockmap", NULL); 13190 } 13191 13192 struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex) 13193 { 13194 /* target_fd/target_ifindex use the same field in LINK_CREATE */ 13195 return bpf_program_attach_fd(prog, ifindex, "xdp", NULL); 13196 } 13197 13198 struct bpf_link * 13199 bpf_program__attach_cgroup_opts(const struct bpf_program *prog, int cgroup_fd, 13200 const struct bpf_cgroup_opts *opts) 13201 { 13202 LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); 13203 __u32 relative_id; 13204 int relative_fd; 13205 13206 if (!OPTS_VALID(opts, bpf_cgroup_opts)) 13207 return libbpf_err_ptr(-EINVAL); 13208 13209 relative_id = OPTS_GET(opts, relative_id, 0); 13210 relative_fd = OPTS_GET(opts, relative_fd, 0); 13211 13212 if (relative_fd && relative_id) { 13213 pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n", 13214 prog->name); 13215 return libbpf_err_ptr(-EINVAL); 13216 } 13217 13218 link_create_opts.cgroup.expected_revision = OPTS_GET(opts, expected_revision, 0); 13219 link_create_opts.cgroup.relative_fd = relative_fd; 13220 link_create_opts.cgroup.relative_id = relative_id; 13221 link_create_opts.flags = OPTS_GET(opts, flags, 0); 13222 13223 return bpf_program_attach_fd(prog, cgroup_fd, "cgroup", &link_create_opts); 13224 } 13225 13226 struct bpf_link * 13227 bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex, 13228 const struct bpf_tcx_opts *opts) 13229 { 13230 LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); 13231 __u32 relative_id; 13232 int relative_fd; 13233 13234 if (!OPTS_VALID(opts, bpf_tcx_opts)) 13235 return libbpf_err_ptr(-EINVAL); 13236 13237 relative_id = OPTS_GET(opts, relative_id, 0); 13238 relative_fd = OPTS_GET(opts, relative_fd, 0); 13239 13240 /* validate we don't have unexpected combinations of non-zero fields */ 13241 if (!ifindex) { 13242 pr_warn("prog '%s': target netdevice ifindex cannot be zero\n", 13243 prog->name); 13244 return 
libbpf_err_ptr(-EINVAL); 13245 } 13246 if (relative_fd && relative_id) { 13247 pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n", 13248 prog->name); 13249 return libbpf_err_ptr(-EINVAL); 13250 } 13251 13252 link_create_opts.tcx.expected_revision = OPTS_GET(opts, expected_revision, 0); 13253 link_create_opts.tcx.relative_fd = relative_fd; 13254 link_create_opts.tcx.relative_id = relative_id; 13255 link_create_opts.flags = OPTS_GET(opts, flags, 0); 13256 13257 /* target_fd/target_ifindex use the same field in LINK_CREATE */ 13258 return bpf_program_attach_fd(prog, ifindex, "tcx", &link_create_opts); 13259 } 13260 13261 struct bpf_link * 13262 bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex, 13263 const struct bpf_netkit_opts *opts) 13264 { 13265 LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); 13266 __u32 relative_id; 13267 int relative_fd; 13268 13269 if (!OPTS_VALID(opts, bpf_netkit_opts)) 13270 return libbpf_err_ptr(-EINVAL); 13271 13272 relative_id = OPTS_GET(opts, relative_id, 0); 13273 relative_fd = OPTS_GET(opts, relative_fd, 0); 13274 13275 /* validate we don't have unexpected combinations of non-zero fields */ 13276 if (!ifindex) { 13277 pr_warn("prog '%s': target netdevice ifindex cannot be zero\n", 13278 prog->name); 13279 return libbpf_err_ptr(-EINVAL); 13280 } 13281 if (relative_fd && relative_id) { 13282 pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n", 13283 prog->name); 13284 return libbpf_err_ptr(-EINVAL); 13285 } 13286 13287 link_create_opts.netkit.expected_revision = OPTS_GET(opts, expected_revision, 0); 13288 link_create_opts.netkit.relative_fd = relative_fd; 13289 link_create_opts.netkit.relative_id = relative_id; 13290 link_create_opts.flags = OPTS_GET(opts, flags, 0); 13291 13292 return bpf_program_attach_fd(prog, ifindex, "netkit", &link_create_opts); 13293 } 13294 13295 struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog, 13296 int target_fd, 13297 const char *attach_func_name) 13298 { 13299 int btf_id; 13300 13301 if (!!target_fd != !!attach_func_name) { 13302 pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n", 13303 prog->name); 13304 return libbpf_err_ptr(-EINVAL); 13305 } 13306 13307 if (prog->type != BPF_PROG_TYPE_EXT) { 13308 pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n", 13309 prog->name); 13310 return libbpf_err_ptr(-EINVAL); 13311 } 13312 13313 if (target_fd) { 13314 LIBBPF_OPTS(bpf_link_create_opts, target_opts); 13315 13316 btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd, prog->obj->token_fd); 13317 if (btf_id < 0) 13318 return libbpf_err_ptr(btf_id); 13319 13320 target_opts.target_btf_id = btf_id; 13321 13322 return bpf_program_attach_fd(prog, target_fd, "freplace", 13323 &target_opts); 13324 } else { 13325 /* no target, so use raw_tracepoint_open for compatibility 13326 * with old kernels 13327 */ 13328 return bpf_program__attach_trace(prog); 13329 } 13330 } 13331 13332 struct bpf_link * 13333 bpf_program__attach_iter(const struct bpf_program *prog, 13334 const struct bpf_iter_attach_opts *opts) 13335 { 13336 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); 13337 struct bpf_link *link; 13338 int prog_fd, link_fd; 13339 __u32 target_fd = 0; 13340 13341 if (!OPTS_VALID(opts, bpf_iter_attach_opts)) 13342 return libbpf_err_ptr(-EINVAL); 13343 13344 link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0); 13345 link_create_opts.iter_info_len = OPTS_GET(opts, 
link_info_len, 0); 13346 13347 prog_fd = bpf_program__fd(prog); 13348 if (prog_fd < 0) { 13349 pr_warn("prog '%s': can't attach before loaded\n", prog->name); 13350 return libbpf_err_ptr(-EINVAL); 13351 } 13352 13353 link = calloc(1, sizeof(*link)); 13354 if (!link) 13355 return libbpf_err_ptr(-ENOMEM); 13356 link->detach = &bpf_link__detach_fd; 13357 13358 link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER, 13359 &link_create_opts); 13360 if (link_fd < 0) { 13361 link_fd = -errno; 13362 free(link); 13363 pr_warn("prog '%s': failed to attach to iterator: %s\n", 13364 prog->name, errstr(link_fd)); 13365 return libbpf_err_ptr(link_fd); 13366 } 13367 link->fd = link_fd; 13368 return link; 13369 } 13370 13371 static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link) 13372 { 13373 *link = bpf_program__attach_iter(prog, NULL); 13374 return libbpf_get_error(*link); 13375 } 13376 13377 struct bpf_link *bpf_program__attach_netfilter(const struct bpf_program *prog, 13378 const struct bpf_netfilter_opts *opts) 13379 { 13380 LIBBPF_OPTS(bpf_link_create_opts, lopts); 13381 struct bpf_link *link; 13382 int prog_fd, link_fd; 13383 13384 if (!OPTS_VALID(opts, bpf_netfilter_opts)) 13385 return libbpf_err_ptr(-EINVAL); 13386 13387 prog_fd = bpf_program__fd(prog); 13388 if (prog_fd < 0) { 13389 pr_warn("prog '%s': can't attach before loaded\n", prog->name); 13390 return libbpf_err_ptr(-EINVAL); 13391 } 13392 13393 link = calloc(1, sizeof(*link)); 13394 if (!link) 13395 return libbpf_err_ptr(-ENOMEM); 13396 13397 link->detach = &bpf_link__detach_fd; 13398 13399 lopts.netfilter.pf = OPTS_GET(opts, pf, 0); 13400 lopts.netfilter.hooknum = OPTS_GET(opts, hooknum, 0); 13401 lopts.netfilter.priority = OPTS_GET(opts, priority, 0); 13402 lopts.netfilter.flags = OPTS_GET(opts, flags, 0); 13403 13404 link_fd = bpf_link_create(prog_fd, 0, BPF_NETFILTER, &lopts); 13405 if (link_fd < 0) { 13406 link_fd = -errno; 13407 free(link); 13408 pr_warn("prog '%s': failed to attach to netfilter: %s\n", 13409 prog->name, errstr(link_fd)); 13410 return libbpf_err_ptr(link_fd); 13411 } 13412 link->fd = link_fd; 13413 13414 return link; 13415 } 13416 13417 struct bpf_link *bpf_program__attach(const struct bpf_program *prog) 13418 { 13419 struct bpf_link *link = NULL; 13420 int err; 13421 13422 if (!prog->sec_def || !prog->sec_def->prog_attach_fn) 13423 return libbpf_err_ptr(-EOPNOTSUPP); 13424 13425 if (bpf_program__fd(prog) < 0) { 13426 pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n", 13427 prog->name); 13428 return libbpf_err_ptr(-EINVAL); 13429 } 13430 13431 err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link); 13432 if (err) 13433 return libbpf_err_ptr(err); 13434 13435 /* When calling bpf_program__attach() explicitly, auto-attach support 13436 * is expected to work, so NULL returned link is considered an error. 13437 * This is different for skeleton's attach, see comment in 13438 * bpf_object__attach_skeleton(). 
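 *
 * An illustrative usage sketch (not from this file; 'skel' and
 * 'handle_exec' are hypothetical names from a bpftool-generated skeleton):
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach(skel->progs.handle_exec);
 *	if (!link)
 *		return -errno;	// NULL is returned and errno is set on error
 *	...
 *	bpf_link__destroy(link);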
13439 */ 13440 if (!link) 13441 return libbpf_err_ptr(-EOPNOTSUPP); 13442 13443 return link; 13444 } 13445 13446 struct bpf_link_struct_ops { 13447 struct bpf_link link; 13448 int map_fd; 13449 }; 13450 13451 static int bpf_link__detach_struct_ops(struct bpf_link *link) 13452 { 13453 struct bpf_link_struct_ops *st_link; 13454 __u32 zero = 0; 13455 13456 st_link = container_of(link, struct bpf_link_struct_ops, link); 13457 13458 if (st_link->map_fd < 0) 13459 /* w/o a real link */ 13460 return bpf_map_delete_elem(link->fd, &zero); 13461 13462 return close(link->fd); 13463 } 13464 13465 struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map) 13466 { 13467 struct bpf_link_struct_ops *link; 13468 __u32 zero = 0; 13469 int err, fd; 13470 13471 if (!bpf_map__is_struct_ops(map)) { 13472 pr_warn("map '%s': can't attach non-struct_ops map\n", map->name); 13473 return libbpf_err_ptr(-EINVAL); 13474 } 13475 13476 if (map->fd < 0) { 13477 pr_warn("map '%s': can't attach BPF map without FD (was it created?)\n", map->name); 13478 return libbpf_err_ptr(-EINVAL); 13479 } 13480 13481 link = calloc(1, sizeof(*link)); 13482 if (!link) 13483 return libbpf_err_ptr(-EINVAL); 13484 13485 /* kern_vdata should be prepared during the loading phase. */ 13486 err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0); 13487 /* It can be EBUSY if the map has been used to create or 13488 * update a link before. We don't allow updating the value of 13489 * a struct_ops once it is set. That ensures that the value 13490 * never changed. So, it is safe to skip EBUSY. 13491 */ 13492 if (err && (!(map->def.map_flags & BPF_F_LINK) || err != -EBUSY)) { 13493 free(link); 13494 return libbpf_err_ptr(err); 13495 } 13496 13497 link->link.detach = bpf_link__detach_struct_ops; 13498 13499 if (!(map->def.map_flags & BPF_F_LINK)) { 13500 /* w/o a real link */ 13501 link->link.fd = map->fd; 13502 link->map_fd = -1; 13503 return &link->link; 13504 } 13505 13506 fd = bpf_link_create(map->fd, 0, BPF_STRUCT_OPS, NULL); 13507 if (fd < 0) { 13508 free(link); 13509 return libbpf_err_ptr(fd); 13510 } 13511 13512 link->link.fd = fd; 13513 link->map_fd = map->fd; 13514 13515 return &link->link; 13516 } 13517 13518 /* 13519 * Swap the back struct_ops of a link with a new struct_ops map. 13520 */ 13521 int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map) 13522 { 13523 struct bpf_link_struct_ops *st_ops_link; 13524 __u32 zero = 0; 13525 int err; 13526 13527 if (!bpf_map__is_struct_ops(map)) 13528 return libbpf_err(-EINVAL); 13529 13530 if (map->fd < 0) { 13531 pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name); 13532 return libbpf_err(-EINVAL); 13533 } 13534 13535 st_ops_link = container_of(link, struct bpf_link_struct_ops, link); 13536 /* Ensure the type of a link is correct */ 13537 if (st_ops_link->map_fd < 0) 13538 return libbpf_err(-EINVAL); 13539 13540 err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0); 13541 /* It can be EBUSY if the map has been used to create or 13542 * update a link before. We don't allow updating the value of 13543 * a struct_ops once it is set. That ensures that the value 13544 * never changed. So, it is safe to skip EBUSY. 
13545 */ 13546 if (err && err != -EBUSY) 13547 return err; 13548 13549 err = bpf_link_update(link->fd, map->fd, NULL); 13550 if (err < 0) 13551 return err; 13552 13553 st_ops_link->map_fd = map->fd; 13554 13555 return 0; 13556 } 13557 13558 typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr, 13559 void *private_data); 13560 13561 static enum bpf_perf_event_ret 13562 perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size, 13563 void **copy_mem, size_t *copy_size, 13564 bpf_perf_event_print_t fn, void *private_data) 13565 { 13566 struct perf_event_mmap_page *header = mmap_mem; 13567 __u64 data_head = ring_buffer_read_head(header); 13568 __u64 data_tail = header->data_tail; 13569 void *base = ((__u8 *)header) + page_size; 13570 int ret = LIBBPF_PERF_EVENT_CONT; 13571 struct perf_event_header *ehdr; 13572 size_t ehdr_size; 13573 13574 while (data_head != data_tail) { 13575 ehdr = base + (data_tail & (mmap_size - 1)); 13576 ehdr_size = ehdr->size; 13577 13578 if (((void *)ehdr) + ehdr_size > base + mmap_size) { 13579 void *copy_start = ehdr; 13580 size_t len_first = base + mmap_size - copy_start; 13581 size_t len_secnd = ehdr_size - len_first; 13582 13583 if (*copy_size < ehdr_size) { 13584 free(*copy_mem); 13585 *copy_mem = malloc(ehdr_size); 13586 if (!*copy_mem) { 13587 *copy_size = 0; 13588 ret = LIBBPF_PERF_EVENT_ERROR; 13589 break; 13590 } 13591 *copy_size = ehdr_size; 13592 } 13593 13594 memcpy(*copy_mem, copy_start, len_first); 13595 memcpy(*copy_mem + len_first, base, len_secnd); 13596 ehdr = *copy_mem; 13597 } 13598 13599 ret = fn(ehdr, private_data); 13600 data_tail += ehdr_size; 13601 if (ret != LIBBPF_PERF_EVENT_CONT) 13602 break; 13603 } 13604 13605 ring_buffer_write_tail(header, data_tail); 13606 return libbpf_err(ret); 13607 } 13608 13609 struct perf_buffer; 13610 13611 struct perf_buffer_params { 13612 struct perf_event_attr *attr; 13613 /* if event_cb is specified, it takes precendence */ 13614 perf_buffer_event_fn event_cb; 13615 /* sample_cb and lost_cb are higher-level common-case callbacks */ 13616 perf_buffer_sample_fn sample_cb; 13617 perf_buffer_lost_fn lost_cb; 13618 void *ctx; 13619 int cpu_cnt; 13620 int *cpus; 13621 int *map_keys; 13622 }; 13623 13624 struct perf_cpu_buf { 13625 struct perf_buffer *pb; 13626 void *base; /* mmap()'ed memory */ 13627 void *buf; /* for reconstructing segmented data */ 13628 size_t buf_size; 13629 int fd; 13630 int cpu; 13631 int map_key; 13632 }; 13633 13634 struct perf_buffer { 13635 perf_buffer_event_fn event_cb; 13636 perf_buffer_sample_fn sample_cb; 13637 perf_buffer_lost_fn lost_cb; 13638 void *ctx; /* passed into callbacks */ 13639 13640 size_t page_size; 13641 size_t mmap_size; 13642 struct perf_cpu_buf **cpu_bufs; 13643 struct epoll_event *events; 13644 int cpu_cnt; /* number of allocated CPU buffers */ 13645 int epoll_fd; /* perf event FD */ 13646 int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */ 13647 }; 13648 13649 static void perf_buffer__free_cpu_buf(struct perf_buffer *pb, 13650 struct perf_cpu_buf *cpu_buf) 13651 { 13652 if (!cpu_buf) 13653 return; 13654 if (cpu_buf->base && 13655 munmap(cpu_buf->base, pb->mmap_size + pb->page_size)) 13656 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu); 13657 if (cpu_buf->fd >= 0) { 13658 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0); 13659 close(cpu_buf->fd); 13660 } 13661 free(cpu_buf->buf); 13662 free(cpu_buf); 13663 } 13664 13665 void perf_buffer__free(struct perf_buffer *pb) 13666 { 13667 int i; 13668 13669 if 
(IS_ERR_OR_NULL(pb)) 13670 return; 13671 if (pb->cpu_bufs) { 13672 for (i = 0; i < pb->cpu_cnt; i++) { 13673 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; 13674 13675 if (!cpu_buf) 13676 continue; 13677 13678 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key); 13679 perf_buffer__free_cpu_buf(pb, cpu_buf); 13680 } 13681 free(pb->cpu_bufs); 13682 } 13683 if (pb->epoll_fd >= 0) 13684 close(pb->epoll_fd); 13685 free(pb->events); 13686 free(pb); 13687 } 13688 13689 static struct perf_cpu_buf * 13690 perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr, 13691 int cpu, int map_key) 13692 { 13693 struct perf_cpu_buf *cpu_buf; 13694 int err; 13695 13696 cpu_buf = calloc(1, sizeof(*cpu_buf)); 13697 if (!cpu_buf) 13698 return ERR_PTR(-ENOMEM); 13699 13700 cpu_buf->pb = pb; 13701 cpu_buf->cpu = cpu; 13702 cpu_buf->map_key = map_key; 13703 13704 cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu, 13705 -1, PERF_FLAG_FD_CLOEXEC); 13706 if (cpu_buf->fd < 0) { 13707 err = -errno; 13708 pr_warn("failed to open perf buffer event on cpu #%d: %s\n", 13709 cpu, errstr(err)); 13710 goto error; 13711 } 13712 13713 cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size, 13714 PROT_READ | PROT_WRITE, MAP_SHARED, 13715 cpu_buf->fd, 0); 13716 if (cpu_buf->base == MAP_FAILED) { 13717 cpu_buf->base = NULL; 13718 err = -errno; 13719 pr_warn("failed to mmap perf buffer on cpu #%d: %s\n", 13720 cpu, errstr(err)); 13721 goto error; 13722 } 13723 13724 if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) { 13725 err = -errno; 13726 pr_warn("failed to enable perf buffer event on cpu #%d: %s\n", 13727 cpu, errstr(err)); 13728 goto error; 13729 } 13730 13731 return cpu_buf; 13732 13733 error: 13734 perf_buffer__free_cpu_buf(pb, cpu_buf); 13735 return (struct perf_cpu_buf *)ERR_PTR(err); 13736 } 13737 13738 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, 13739 struct perf_buffer_params *p); 13740 13741 struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt, 13742 perf_buffer_sample_fn sample_cb, 13743 perf_buffer_lost_fn lost_cb, 13744 void *ctx, 13745 const struct perf_buffer_opts *opts) 13746 { 13747 const size_t attr_sz = sizeof(struct perf_event_attr); 13748 struct perf_buffer_params p = {}; 13749 struct perf_event_attr attr; 13750 __u32 sample_period; 13751 13752 if (!OPTS_VALID(opts, perf_buffer_opts)) 13753 return libbpf_err_ptr(-EINVAL); 13754 13755 sample_period = OPTS_GET(opts, sample_period, 1); 13756 if (!sample_period) 13757 sample_period = 1; 13758 13759 memset(&attr, 0, attr_sz); 13760 attr.size = attr_sz; 13761 attr.config = PERF_COUNT_SW_BPF_OUTPUT; 13762 attr.type = PERF_TYPE_SOFTWARE; 13763 attr.sample_type = PERF_SAMPLE_RAW; 13764 attr.wakeup_events = sample_period; 13765 13766 p.attr = &attr; 13767 p.sample_cb = sample_cb; 13768 p.lost_cb = lost_cb; 13769 p.ctx = ctx; 13770 13771 return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p)); 13772 } 13773 13774 struct perf_buffer *perf_buffer__new_raw(int map_fd, size_t page_cnt, 13775 struct perf_event_attr *attr, 13776 perf_buffer_event_fn event_cb, void *ctx, 13777 const struct perf_buffer_raw_opts *opts) 13778 { 13779 struct perf_buffer_params p = {}; 13780 13781 if (!attr) 13782 return libbpf_err_ptr(-EINVAL); 13783 13784 if (!OPTS_VALID(opts, perf_buffer_raw_opts)) 13785 return libbpf_err_ptr(-EINVAL); 13786 13787 p.attr = attr; 13788 p.event_cb = event_cb; 13789 p.ctx = ctx; 13790 p.cpu_cnt = OPTS_GET(opts, cpu_cnt, 0); 13791 p.cpus = OPTS_GET(opts, cpus, NULL); 13792 
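	/* An illustrative consumer-side sketch for the perf_buffer API set up
	 * here, using the simpler perf_buffer__new() variant above; 'map_fd',
	 * 'handle_sample', 'handle_lost', 'ctx' and 'stop' are hypothetical,
	 * caller-provided names:
	 *
	 *	struct perf_buffer *pb;
	 *
	 *	pb = perf_buffer__new(map_fd, 64, handle_sample, handle_lost, ctx, NULL);
	 *	if (!pb)
	 *		return -errno;
	 *	while (!stop)
	 *		perf_buffer__poll(pb, 100);
	 *	perf_buffer__free(pb);
	 */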
p.map_keys = OPTS_GET(opts, map_keys, NULL); 13793 13794 return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p)); 13795 } 13796 13797 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, 13798 struct perf_buffer_params *p) 13799 { 13800 const char *online_cpus_file = "/sys/devices/system/cpu/online"; 13801 struct bpf_map_info map; 13802 struct perf_buffer *pb; 13803 bool *online = NULL; 13804 __u32 map_info_len; 13805 int err, i, j, n; 13806 13807 if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) { 13808 pr_warn("page count should be power of two, but is %zu\n", 13809 page_cnt); 13810 return ERR_PTR(-EINVAL); 13811 } 13812 13813 /* best-effort sanity checks */ 13814 memset(&map, 0, sizeof(map)); 13815 map_info_len = sizeof(map); 13816 err = bpf_map_get_info_by_fd(map_fd, &map, &map_info_len); 13817 if (err) { 13818 err = -errno; 13819 /* if BPF_OBJ_GET_INFO_BY_FD is supported, will return 13820 * -EBADFD, -EFAULT, or -E2BIG on real error 13821 */ 13822 if (err != -EINVAL) { 13823 pr_warn("failed to get map info for map FD %d: %s\n", 13824 map_fd, errstr(err)); 13825 return ERR_PTR(err); 13826 } 13827 pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n", 13828 map_fd); 13829 } else { 13830 if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) { 13831 pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n", 13832 map.name); 13833 return ERR_PTR(-EINVAL); 13834 } 13835 } 13836 13837 pb = calloc(1, sizeof(*pb)); 13838 if (!pb) 13839 return ERR_PTR(-ENOMEM); 13840 13841 pb->event_cb = p->event_cb; 13842 pb->sample_cb = p->sample_cb; 13843 pb->lost_cb = p->lost_cb; 13844 pb->ctx = p->ctx; 13845 13846 pb->page_size = getpagesize(); 13847 pb->mmap_size = pb->page_size * page_cnt; 13848 pb->map_fd = map_fd; 13849 13850 pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC); 13851 if (pb->epoll_fd < 0) { 13852 err = -errno; 13853 pr_warn("failed to create epoll instance: %s\n", 13854 errstr(err)); 13855 goto error; 13856 } 13857 13858 if (p->cpu_cnt > 0) { 13859 pb->cpu_cnt = p->cpu_cnt; 13860 } else { 13861 pb->cpu_cnt = libbpf_num_possible_cpus(); 13862 if (pb->cpu_cnt < 0) { 13863 err = pb->cpu_cnt; 13864 goto error; 13865 } 13866 if (map.max_entries && map.max_entries < pb->cpu_cnt) 13867 pb->cpu_cnt = map.max_entries; 13868 } 13869 13870 pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events)); 13871 if (!pb->events) { 13872 err = -ENOMEM; 13873 pr_warn("failed to allocate events: out of memory\n"); 13874 goto error; 13875 } 13876 pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs)); 13877 if (!pb->cpu_bufs) { 13878 err = -ENOMEM; 13879 pr_warn("failed to allocate buffers: out of memory\n"); 13880 goto error; 13881 } 13882 13883 err = parse_cpu_mask_file(online_cpus_file, &online, &n); 13884 if (err) { 13885 pr_warn("failed to get online CPU mask: %s\n", errstr(err)); 13886 goto error; 13887 } 13888 13889 for (i = 0, j = 0; i < pb->cpu_cnt; i++) { 13890 struct perf_cpu_buf *cpu_buf; 13891 int cpu, map_key; 13892 13893 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i; 13894 map_key = p->cpu_cnt > 0 ? 
p->map_keys[i] : i; 13895 13896 /* in case user didn't explicitly requested particular CPUs to 13897 * be attached to, skip offline/not present CPUs 13898 */ 13899 if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu])) 13900 continue; 13901 13902 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key); 13903 if (IS_ERR(cpu_buf)) { 13904 err = PTR_ERR(cpu_buf); 13905 goto error; 13906 } 13907 13908 pb->cpu_bufs[j] = cpu_buf; 13909 13910 err = bpf_map_update_elem(pb->map_fd, &map_key, 13911 &cpu_buf->fd, 0); 13912 if (err) { 13913 err = -errno; 13914 pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n", 13915 cpu, map_key, cpu_buf->fd, 13916 errstr(err)); 13917 goto error; 13918 } 13919 13920 pb->events[j].events = EPOLLIN; 13921 pb->events[j].data.ptr = cpu_buf; 13922 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd, 13923 &pb->events[j]) < 0) { 13924 err = -errno; 13925 pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n", 13926 cpu, cpu_buf->fd, 13927 errstr(err)); 13928 goto error; 13929 } 13930 j++; 13931 } 13932 pb->cpu_cnt = j; 13933 free(online); 13934 13935 return pb; 13936 13937 error: 13938 free(online); 13939 if (pb) 13940 perf_buffer__free(pb); 13941 return ERR_PTR(err); 13942 } 13943 13944 struct perf_sample_raw { 13945 struct perf_event_header header; 13946 uint32_t size; 13947 char data[]; 13948 }; 13949 13950 struct perf_sample_lost { 13951 struct perf_event_header header; 13952 uint64_t id; 13953 uint64_t lost; 13954 uint64_t sample_id; 13955 }; 13956 13957 static enum bpf_perf_event_ret 13958 perf_buffer__process_record(struct perf_event_header *e, void *ctx) 13959 { 13960 struct perf_cpu_buf *cpu_buf = ctx; 13961 struct perf_buffer *pb = cpu_buf->pb; 13962 void *data = e; 13963 13964 /* user wants full control over parsing perf event */ 13965 if (pb->event_cb) 13966 return pb->event_cb(pb->ctx, cpu_buf->cpu, e); 13967 13968 switch (e->type) { 13969 case PERF_RECORD_SAMPLE: { 13970 struct perf_sample_raw *s = data; 13971 13972 if (pb->sample_cb) 13973 pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size); 13974 break; 13975 } 13976 case PERF_RECORD_LOST: { 13977 struct perf_sample_lost *s = data; 13978 13979 if (pb->lost_cb) 13980 pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost); 13981 break; 13982 } 13983 default: 13984 pr_warn("unknown perf sample type %d\n", e->type); 13985 return LIBBPF_PERF_EVENT_ERROR; 13986 } 13987 return LIBBPF_PERF_EVENT_CONT; 13988 } 13989 13990 static int perf_buffer__process_records(struct perf_buffer *pb, 13991 struct perf_cpu_buf *cpu_buf) 13992 { 13993 enum bpf_perf_event_ret ret; 13994 13995 ret = perf_event_read_simple(cpu_buf->base, pb->mmap_size, 13996 pb->page_size, &cpu_buf->buf, 13997 &cpu_buf->buf_size, 13998 perf_buffer__process_record, cpu_buf); 13999 if (ret != LIBBPF_PERF_EVENT_CONT) 14000 return ret; 14001 return 0; 14002 } 14003 14004 int perf_buffer__epoll_fd(const struct perf_buffer *pb) 14005 { 14006 return pb->epoll_fd; 14007 } 14008 14009 int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms) 14010 { 14011 int i, cnt, err; 14012 14013 cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms); 14014 if (cnt < 0) 14015 return -errno; 14016 14017 for (i = 0; i < cnt; i++) { 14018 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr; 14019 14020 err = perf_buffer__process_records(pb, cpu_buf); 14021 if (err) { 14022 pr_warn("error while processing records: %s\n", errstr(err)); 14023 return libbpf_err(err); 14024 } 14025 } 14026 return cnt; 14027 } 14028 14029 /* Return number of PERF_EVENT_ARRAY 
map slots set up by this perf_buffer 14030 * manager. 14031 */ 14032 size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb) 14033 { 14034 return pb->cpu_cnt; 14035 } 14036 14037 /* 14038 * Return perf_event FD of a ring buffer in *buf_idx* slot of 14039 * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using 14040 * select()/poll()/epoll() Linux syscalls. 14041 */ 14042 int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx) 14043 { 14044 struct perf_cpu_buf *cpu_buf; 14045 14046 if (buf_idx >= pb->cpu_cnt) 14047 return libbpf_err(-EINVAL); 14048 14049 cpu_buf = pb->cpu_bufs[buf_idx]; 14050 if (!cpu_buf) 14051 return libbpf_err(-ENOENT); 14052 14053 return cpu_buf->fd; 14054 } 14055 14056 int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf, size_t *buf_size) 14057 { 14058 struct perf_cpu_buf *cpu_buf; 14059 14060 if (buf_idx >= pb->cpu_cnt) 14061 return libbpf_err(-EINVAL); 14062 14063 cpu_buf = pb->cpu_bufs[buf_idx]; 14064 if (!cpu_buf) 14065 return libbpf_err(-ENOENT); 14066 14067 *buf = cpu_buf->base; 14068 *buf_size = pb->mmap_size; 14069 return 0; 14070 } 14071 14072 /* 14073 * Consume data from perf ring buffer corresponding to slot *buf_idx* in 14074 * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to 14075 * consume, do nothing and return success. 14076 * Returns: 14077 * - 0 on success; 14078 * - <0 on failure. 14079 */ 14080 int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx) 14081 { 14082 struct perf_cpu_buf *cpu_buf; 14083 14084 if (buf_idx >= pb->cpu_cnt) 14085 return libbpf_err(-EINVAL); 14086 14087 cpu_buf = pb->cpu_bufs[buf_idx]; 14088 if (!cpu_buf) 14089 return libbpf_err(-ENOENT); 14090 14091 return perf_buffer__process_records(pb, cpu_buf); 14092 } 14093 14094 int perf_buffer__consume(struct perf_buffer *pb) 14095 { 14096 int i, err; 14097 14098 for (i = 0; i < pb->cpu_cnt; i++) { 14099 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; 14100 14101 if (!cpu_buf) 14102 continue; 14103 14104 err = perf_buffer__process_records(pb, cpu_buf); 14105 if (err) { 14106 pr_warn("perf_buffer: failed to process records in buffer #%d: %s\n", 14107 i, errstr(err)); 14108 return libbpf_err(err); 14109 } 14110 } 14111 return 0; 14112 } 14113 14114 int bpf_program__set_attach_target(struct bpf_program *prog, 14115 int attach_prog_fd, 14116 const char *attach_func_name) 14117 { 14118 int btf_obj_fd = 0, btf_id = 0, err; 14119 14120 if (!prog || attach_prog_fd < 0) 14121 return libbpf_err(-EINVAL); 14122 14123 if (prog->obj->state >= OBJ_LOADED) 14124 return libbpf_err(-EINVAL); 14125 14126 if (attach_prog_fd && !attach_func_name) { 14127 /* Store attach_prog_fd. The BTF ID will be resolved later during 14128 * the normal object/program load phase. 
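	 *
	 * An illustrative usage sketch for bpf_program__set_attach_target() as
	 * a whole (must be called before bpf_object__load(); the function and
	 * fd names below are hypothetical):
	 *
	 *	// retarget an fentry/fexit program at a kernel function
	 *	err = bpf_program__set_attach_target(prog, 0, "tcp_v4_connect");
	 *
	 *	// or point an EXT (freplace) program at a function in another program
	 *	err = bpf_program__set_attach_target(prog, target_prog_fd, "xdp_subprog");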
14129 */ 14130 prog->attach_prog_fd = attach_prog_fd; 14131 return 0; 14132 } 14133 14134 if (attach_prog_fd) { 14135 btf_id = libbpf_find_prog_btf_id(attach_func_name, 14136 attach_prog_fd, prog->obj->token_fd); 14137 if (btf_id < 0) 14138 return libbpf_err(btf_id); 14139 } else { 14140 if (!attach_func_name) 14141 return libbpf_err(-EINVAL); 14142 14143 /* load btf_vmlinux, if not yet */ 14144 err = bpf_object__load_vmlinux_btf(prog->obj, true); 14145 if (err) 14146 return libbpf_err(err); 14147 err = find_kernel_btf_id(prog->obj, attach_func_name, 14148 prog->expected_attach_type, 14149 &btf_obj_fd, &btf_id); 14150 if (err) 14151 return libbpf_err(err); 14152 } 14153 14154 prog->attach_btf_id = btf_id; 14155 prog->attach_btf_obj_fd = btf_obj_fd; 14156 prog->attach_prog_fd = attach_prog_fd; 14157 return 0; 14158 } 14159 14160 int bpf_program__assoc_struct_ops(struct bpf_program *prog, struct bpf_map *map, 14161 struct bpf_prog_assoc_struct_ops_opts *opts) 14162 { 14163 int prog_fd, map_fd; 14164 14165 prog_fd = bpf_program__fd(prog); 14166 if (prog_fd < 0) { 14167 pr_warn("prog '%s': can't associate BPF program without FD (was it loaded?)\n", 14168 prog->name); 14169 return libbpf_err(-EINVAL); 14170 } 14171 14172 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) { 14173 pr_warn("prog '%s': can't associate struct_ops program\n", prog->name); 14174 return libbpf_err(-EINVAL); 14175 } 14176 14177 map_fd = bpf_map__fd(map); 14178 if (map_fd < 0) { 14179 pr_warn("map '%s': can't associate BPF map without FD (was it created?)\n", map->name); 14180 return libbpf_err(-EINVAL); 14181 } 14182 14183 if (!bpf_map__is_struct_ops(map)) { 14184 pr_warn("map '%s': can't associate non-struct_ops map\n", map->name); 14185 return libbpf_err(-EINVAL); 14186 } 14187 14188 return bpf_prog_assoc_struct_ops(prog_fd, map_fd, opts); 14189 } 14190 14191 int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz) 14192 { 14193 int err = 0, n, len, start, end = -1; 14194 bool *tmp; 14195 14196 *mask = NULL; 14197 *mask_sz = 0; 14198 14199 /* Each sub string separated by ',' has format \d+-\d+ or \d+ */ 14200 while (*s) { 14201 if (*s == ',' || *s == '\n') { 14202 s++; 14203 continue; 14204 } 14205 n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len); 14206 if (n <= 0 || n > 2) { 14207 pr_warn("Failed to get CPU range %s: %d\n", s, n); 14208 err = -EINVAL; 14209 goto cleanup; 14210 } else if (n == 1) { 14211 end = start; 14212 } 14213 if (start < 0 || start > end) { 14214 pr_warn("Invalid CPU range [%d,%d] in %s\n", 14215 start, end, s); 14216 err = -EINVAL; 14217 goto cleanup; 14218 } 14219 tmp = realloc(*mask, end + 1); 14220 if (!tmp) { 14221 err = -ENOMEM; 14222 goto cleanup; 14223 } 14224 *mask = tmp; 14225 memset(tmp + *mask_sz, 0, start - *mask_sz); 14226 memset(tmp + start, 1, end - start + 1); 14227 *mask_sz = end + 1; 14228 s += len; 14229 } 14230 if (!*mask_sz) { 14231 pr_warn("Empty CPU range\n"); 14232 return -EINVAL; 14233 } 14234 return 0; 14235 cleanup: 14236 free(*mask); 14237 *mask = NULL; 14238 return err; 14239 } 14240 14241 int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz) 14242 { 14243 int fd, err = 0, len; 14244 char buf[128]; 14245 14246 fd = open(fcpu, O_RDONLY | O_CLOEXEC); 14247 if (fd < 0) { 14248 err = -errno; 14249 pr_warn("Failed to open cpu mask file %s: %s\n", fcpu, errstr(err)); 14250 return err; 14251 } 14252 len = read(fd, buf, sizeof(buf)); 14253 close(fd); 14254 if (len <= 0) { 14255 err = len ? 
-errno : -EINVAL; 14256 pr_warn("Failed to read cpu mask from %s: %s\n", fcpu, errstr(err)); 14257 return err; 14258 } 14259 if (len >= sizeof(buf)) { 14260 pr_warn("CPU mask is too big in file %s\n", fcpu); 14261 return -E2BIG; 14262 } 14263 buf[len] = '\0'; 14264 14265 return parse_cpu_mask_str(buf, mask, mask_sz); 14266 } 14267 14268 int libbpf_num_possible_cpus(void) 14269 { 14270 static const char *fcpu = "/sys/devices/system/cpu/possible"; 14271 static int cpus; 14272 int err, n, i, tmp_cpus; 14273 bool *mask; 14274 14275 tmp_cpus = READ_ONCE(cpus); 14276 if (tmp_cpus > 0) 14277 return tmp_cpus; 14278 14279 err = parse_cpu_mask_file(fcpu, &mask, &n); 14280 if (err) 14281 return libbpf_err(err); 14282 14283 tmp_cpus = 0; 14284 for (i = 0; i < n; i++) { 14285 if (mask[i]) 14286 tmp_cpus++; 14287 } 14288 free(mask); 14289 14290 WRITE_ONCE(cpus, tmp_cpus); 14291 return tmp_cpus; 14292 } 14293 14294 static int populate_skeleton_maps(const struct bpf_object *obj, 14295 struct bpf_map_skeleton *maps, 14296 size_t map_cnt, size_t map_skel_sz) 14297 { 14298 int i; 14299 14300 for (i = 0; i < map_cnt; i++) { 14301 struct bpf_map_skeleton *map_skel = (void *)maps + i * map_skel_sz; 14302 struct bpf_map **map = map_skel->map; 14303 const char *name = map_skel->name; 14304 void **mmaped = map_skel->mmaped; 14305 14306 *map = bpf_object__find_map_by_name(obj, name); 14307 if (!*map) { 14308 pr_warn("failed to find skeleton map '%s'\n", name); 14309 return -ESRCH; 14310 } 14311 14312 /* externs shouldn't be pre-setup from user code */ 14313 if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG) 14314 *mmaped = (*map)->mmaped; 14315 } 14316 return 0; 14317 } 14318 14319 static int populate_skeleton_progs(const struct bpf_object *obj, 14320 struct bpf_prog_skeleton *progs, 14321 size_t prog_cnt, size_t prog_skel_sz) 14322 { 14323 int i; 14324 14325 for (i = 0; i < prog_cnt; i++) { 14326 struct bpf_prog_skeleton *prog_skel = (void *)progs + i * prog_skel_sz; 14327 struct bpf_program **prog = prog_skel->prog; 14328 const char *name = prog_skel->name; 14329 14330 *prog = bpf_object__find_program_by_name(obj, name); 14331 if (!*prog) { 14332 pr_warn("failed to find skeleton program '%s'\n", name); 14333 return -ESRCH; 14334 } 14335 } 14336 return 0; 14337 } 14338 14339 int bpf_object__open_skeleton(struct bpf_object_skeleton *s, 14340 const struct bpf_object_open_opts *opts) 14341 { 14342 struct bpf_object *obj; 14343 int err; 14344 14345 obj = bpf_object_open(NULL, s->data, s->data_sz, s->name, opts); 14346 if (IS_ERR(obj)) { 14347 err = PTR_ERR(obj); 14348 pr_warn("failed to initialize skeleton BPF object '%s': %s\n", 14349 s->name, errstr(err)); 14350 return libbpf_err(err); 14351 } 14352 14353 *s->obj = obj; 14354 err = populate_skeleton_maps(obj, s->maps, s->map_cnt, s->map_skel_sz); 14355 if (err) { 14356 pr_warn("failed to populate skeleton maps for '%s': %s\n", s->name, errstr(err)); 14357 return libbpf_err(err); 14358 } 14359 14360 err = populate_skeleton_progs(obj, s->progs, s->prog_cnt, s->prog_skel_sz); 14361 if (err) { 14362 pr_warn("failed to populate skeleton progs for '%s': %s\n", s->name, errstr(err)); 14363 return libbpf_err(err); 14364 } 14365 14366 return 0; 14367 } 14368 14369 int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s) 14370 { 14371 int err, len, var_idx, i; 14372 const char *var_name; 14373 const struct bpf_map *map; 14374 struct btf *btf; 14375 __u32 map_type_id; 14376 const struct btf_type *map_type, *var_type; 14377 const struct bpf_var_skeleton 
*var_skel; 14378 struct btf_var_secinfo *var; 14379 14380 if (!s->obj) 14381 return libbpf_err(-EINVAL); 14382 14383 btf = bpf_object__btf(s->obj); 14384 if (!btf) { 14385 pr_warn("subskeletons require BTF at runtime (object %s)\n", 14386 bpf_object__name(s->obj)); 14387 return libbpf_err(-errno); 14388 } 14389 14390 err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt, s->map_skel_sz); 14391 if (err) { 14392 pr_warn("failed to populate subskeleton maps: %s\n", errstr(err)); 14393 return libbpf_err(err); 14394 } 14395 14396 err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt, s->prog_skel_sz); 14397 if (err) { 14398 pr_warn("failed to populate subskeleton maps: %s\n", errstr(err)); 14399 return libbpf_err(err); 14400 } 14401 14402 for (var_idx = 0; var_idx < s->var_cnt; var_idx++) { 14403 var_skel = (void *)s->vars + var_idx * s->var_skel_sz; 14404 map = *var_skel->map; 14405 map_type_id = bpf_map__btf_value_type_id(map); 14406 map_type = btf__type_by_id(btf, map_type_id); 14407 14408 if (!btf_is_datasec(map_type)) { 14409 pr_warn("type for map '%1$s' is not a datasec: %2$s\n", 14410 bpf_map__name(map), 14411 __btf_kind_str(btf_kind(map_type))); 14412 return libbpf_err(-EINVAL); 14413 } 14414 14415 len = btf_vlen(map_type); 14416 var = btf_var_secinfos(map_type); 14417 for (i = 0; i < len; i++, var++) { 14418 var_type = btf__type_by_id(btf, var->type); 14419 var_name = btf__name_by_offset(btf, var_type->name_off); 14420 if (strcmp(var_name, var_skel->name) == 0) { 14421 *var_skel->addr = map->mmaped + var->offset; 14422 break; 14423 } 14424 } 14425 } 14426 return 0; 14427 } 14428 14429 void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s) 14430 { 14431 if (!s) 14432 return; 14433 free(s->maps); 14434 free(s->progs); 14435 free(s->vars); 14436 free(s); 14437 } 14438 14439 int bpf_object__load_skeleton(struct bpf_object_skeleton *s) 14440 { 14441 int i, err; 14442 14443 err = bpf_object__load(*s->obj); 14444 if (err) { 14445 pr_warn("failed to load BPF skeleton '%s': %s\n", s->name, errstr(err)); 14446 return libbpf_err(err); 14447 } 14448 14449 for (i = 0; i < s->map_cnt; i++) { 14450 struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz; 14451 struct bpf_map *map = *map_skel->map; 14452 14453 if (!map_skel->mmaped) 14454 continue; 14455 14456 if (map->def.type == BPF_MAP_TYPE_ARENA) 14457 *map_skel->mmaped = map->mmaped + map->obj->arena_data_off; 14458 else 14459 *map_skel->mmaped = map->mmaped; 14460 } 14461 14462 return 0; 14463 } 14464 14465 int bpf_object__attach_skeleton(struct bpf_object_skeleton *s) 14466 { 14467 int i, err; 14468 14469 for (i = 0; i < s->prog_cnt; i++) { 14470 struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz; 14471 struct bpf_program *prog = *prog_skel->prog; 14472 struct bpf_link **link = prog_skel->link; 14473 14474 if (!prog->autoload || !prog->autoattach) 14475 continue; 14476 14477 /* auto-attaching not supported for this program */ 14478 if (!prog->sec_def || !prog->sec_def->prog_attach_fn) 14479 continue; 14480 14481 /* if user already set the link manually, don't attempt auto-attach */ 14482 if (*link) 14483 continue; 14484 14485 err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, link); 14486 if (err) { 14487 pr_warn("prog '%s': failed to auto-attach: %s\n", 14488 bpf_program__name(prog), errstr(err)); 14489 return libbpf_err(err); 14490 } 14491 14492 /* It's possible that for some SEC() definitions auto-attach 14493 * is supported in some cases (e.g., if definition 
completely 14494 * specifies target information), but is not in other cases. 14495 * SEC("uprobe") is one such case. If user specified target 14496 * binary and function name, such BPF program can be 14497 * auto-attached. But if not, it shouldn't trigger skeleton's 14498 * attach to fail. It should just be skipped. 14499 * attach_fn signals such case with returning 0 (no error) and 14500 * setting link to NULL. 14501 */ 14502 } 14503 14504 14505 for (i = 0; i < s->map_cnt; i++) { 14506 struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz; 14507 struct bpf_map *map = *map_skel->map; 14508 struct bpf_link **link; 14509 14510 if (!map->autocreate || !map->autoattach) 14511 continue; 14512 14513 /* only struct_ops maps can be attached */ 14514 if (!bpf_map__is_struct_ops(map)) 14515 continue; 14516 14517 /* skeleton is created with earlier version of bpftool, notify user */ 14518 if (s->map_skel_sz < offsetofend(struct bpf_map_skeleton, link)) { 14519 pr_warn("map '%s': BPF skeleton version is old, skipping map auto-attachment...\n", 14520 bpf_map__name(map)); 14521 continue; 14522 } 14523 14524 link = map_skel->link; 14525 if (!link) { 14526 pr_warn("map '%s': BPF map skeleton link is uninitialized\n", 14527 bpf_map__name(map)); 14528 continue; 14529 } 14530 14531 if (*link) 14532 continue; 14533 14534 *link = bpf_map__attach_struct_ops(map); 14535 if (!*link) { 14536 err = -errno; 14537 pr_warn("map '%s': failed to auto-attach: %s\n", 14538 bpf_map__name(map), errstr(err)); 14539 return libbpf_err(err); 14540 } 14541 } 14542 14543 return 0; 14544 } 14545 14546 void bpf_object__detach_skeleton(struct bpf_object_skeleton *s) 14547 { 14548 int i; 14549 14550 for (i = 0; i < s->prog_cnt; i++) { 14551 struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz; 14552 struct bpf_link **link = prog_skel->link; 14553 14554 bpf_link__destroy(*link); 14555 *link = NULL; 14556 } 14557 14558 if (s->map_skel_sz < sizeof(struct bpf_map_skeleton)) 14559 return; 14560 14561 for (i = 0; i < s->map_cnt; i++) { 14562 struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz; 14563 struct bpf_link **link = map_skel->link; 14564 14565 if (link) { 14566 bpf_link__destroy(*link); 14567 *link = NULL; 14568 } 14569 } 14570 } 14571 14572 void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s) 14573 { 14574 if (!s) 14575 return; 14576 14577 bpf_object__detach_skeleton(s); 14578 if (s->obj) 14579 bpf_object__close(*s->obj); 14580 free(s->maps); 14581 free(s->progs); 14582 free(s); 14583 } 14584
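/*
 * Illustrative end-to-end sketch of the skeleton lifecycle implemented by
 * the bpf_object__*_skeleton() functions above. The 'myprog_bpf__*' wrappers
 * are hypothetical names of the kind emitted by 'bpftool gen skeleton' and
 * simply call into these APIs:
 *
 *	struct myprog_bpf *skel;
 *	int err;
 *
 *	skel = myprog_bpf__open();		// bpf_object__open_skeleton()
 *	if (!skel)
 *		return -errno;
 *	err = myprog_bpf__load(skel);		// bpf_object__load_skeleton()
 *	if (!err)
 *		err = myprog_bpf__attach(skel);	// bpf_object__attach_skeleton()
 *	...
 *	myprog_bpf__destroy(skel);		// bpf_object__destroy_skeleton()
 */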