// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <linux/kernel.h>
#include <linux/filter.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_common.h"
#include "libbpf_internal.h"
#include "str_error.h"

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64)(unsigned long)ptr;
}
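
/* Shared helper for feature probes: a successful probe leaves behind a valid
 * FD that is no longer needed, so close it and report the outcome as
 * 1 (supported) or 0 (not supported).
 */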
int probe_fd(int fd)
{
	if (fd >= 0)
		close(fd);
	return fd >= 0;
}

static int probe_kern_prog_name(int token_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, prog_name);
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.license = ptr_to_u64("GPL");
	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = (__u32)ARRAY_SIZE(insns);
	attr.prog_token_fd = token_fd;
	if (token_fd)
		attr.prog_flags |= BPF_F_TOKEN_FD;
	libbpf_strlcpy(attr.prog_name, "libbpf_nametest", sizeof(attr.prog_name));

	/* make sure loading with name works */
	ret = sys_bpf_prog_load(&attr, attr_sz, PROG_LOAD_ATTEMPTS);
	return probe_fd(ret);
}

static int probe_kern_global_data(int token_fd)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
		BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_map_create_opts, map_opts,
		.token_fd = token_fd,
		.map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int ret, map, insn_cnt = ARRAY_SIZE(insns);

	map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, &map_opts);
	if (map < 0) {
		ret = -errno;
		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
			__func__, cp, -ret);
		return ret;
	}

	insns[0].imm = map;

	ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
	close(map);
	return probe_fd(ret);
}

static int probe_kern_btf(int token_fd)
{
	static const char strs[] = "\0int";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

static int probe_kern_btf_func(int token_fd)
{
	static const char strs[] = "\0int\0x\0a";
	/* void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),	/* [1] */
		/* FUNC_PROTO */				/* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x */					/* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

static int probe_kern_btf_func_global(int token_fd)
{
	static const char strs[] = "\0int\0x\0a";
	/* static void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),	/* [1] */
		/* FUNC_PROTO */				/* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x BTF_FUNC_GLOBAL */			/* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

static int probe_kern_btf_datasec(int token_fd)
{
	static const char strs[] = "\0x\0.data";
	/* static int a; */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),	/* [1] */
		/* VAR x */					/* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DATASEC val */				/* [3] */
		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
		BTF_VAR_SECINFO_ENC(2, 0, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

static int probe_kern_btf_qmark_datasec(int token_fd)
{
	static const char strs[] = "\0x\0?.data";
	/* static int a; */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),	/* [1] */
		/* VAR x */					/* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DATASEC ?.data */				/* [3] */
		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
		BTF_VAR_SECINFO_ENC(2, 0, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

static int probe_kern_btf_float(int token_fd)
{
	static const char strs[] = "\0float";
	__u32 types[] = {
		/* float */
		BTF_TYPE_FLOAT_ENC(1, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

static int probe_kern_btf_decl_tag(int token_fd)
{
	static const char strs[] = "\0tag";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),	/* [1] */
		/* VAR x */					/* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* attr */
		BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

static int probe_kern_btf_type_tag(int token_fd)
{
	static const char strs[] = "\0tag";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),		/* [1] */
		/* attr */
		BTF_TYPE_TYPE_TAG_ENC(1, 1),				/* [2] */
		/* ptr */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),	/* [3] */
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

static int probe_kern_array_mmap(int token_fd)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.map_flags = BPF_F_MMAPABLE | (token_fd ? BPF_F_TOKEN_FD : 0),
		.token_fd = token_fd,
	);
	int fd;

	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_mmap", sizeof(int), sizeof(int), 1, &opts);
	return probe_fd(fd);
}

static int probe_kern_exp_attach_type(int token_fd)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int fd, insn_cnt = ARRAY_SIZE(insns);

	/* use any valid combination of program type and (optional)
	 * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS)
	 * to see if kernel supports expected_attach_type field for
	 * BPF_PROG_LOAD command
	 */
	fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
	return probe_fd(fd);
}

static int probe_kern_probe_read_kernel(int token_fd)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	struct bpf_insn insns[] = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),	/* r1 = r10 (fp) */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),	/* r1 += -8 */
		BPF_MOV64_IMM(BPF_REG_2, 8),		/* r2 = 8 */
		BPF_MOV64_IMM(BPF_REG_3, 0),		/* r3 = 0 */
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
		BPF_EXIT_INSN(),
	};
	int fd, insn_cnt = ARRAY_SIZE(insns);

	fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
	return probe_fd(fd);
}

static int probe_prog_bind_map(int token_fd)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_map_create_opts, map_opts,
		.token_fd = token_fd,
		.map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);

	map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, &map_opts);
	if (map < 0) {
		ret = -errno;
		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
			__func__, cp, -ret);
		return ret;
	}

	prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
	if (prog < 0) {
		close(map);
		return 0;
	}

	ret = bpf_prog_bind_map(prog, map, NULL);

	close(map);
	close(prog);

	return ret >= 0;
}

static int probe_module_btf(int token_fd)
{
	static const char strs[] = "\0int";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
	};
	struct bpf_btf_info info;
	__u32 len = sizeof(info);
	char name[16];
	int fd, err;

	fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd);
	if (fd < 0)
		return 0; /* BTF not supported at all */

	memset(&info, 0, sizeof(info));
	info.name = ptr_to_u64(name);
	info.name_len = sizeof(name);

	/* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
	 * kernel's module BTF support coincides with support for
	 * name/name_len fields in struct bpf_btf_info.
	 */
	err = bpf_btf_get_info_by_fd(fd, &info, &len);
	close(fd);
	return !err;
}

static int probe_perf_link(int token_fd)
{
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int prog_fd, link_fd, err;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
				insns, ARRAY_SIZE(insns), &opts);
	if (prog_fd < 0)
		return -errno;

	/* use invalid perf_event FD to get EBADF, if link is supported;
	 * otherwise EINVAL should be returned
	 */
	link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
	err = -errno; /* close() can clobber errno */

	if (link_fd >= 0)
		close(link_fd);
	close(prog_fd);

	return link_fd < 0 && err == -EBADF;
}

static int probe_uprobe_multi_link(int token_fd)
{
	LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
		.expected_attach_type = BPF_TRACE_UPROBE_MULTI,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	LIBBPF_OPTS(bpf_link_create_opts, link_opts);
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int prog_fd, link_fd, err;
	unsigned long offset = 0;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL",
				insns, ARRAY_SIZE(insns), &load_opts);
	if (prog_fd < 0)
		return -errno;

	/* Creating uprobe in '/' binary should fail with -EBADF. */
	link_opts.uprobe_multi.path = "/";
	link_opts.uprobe_multi.offsets = &offset;
	link_opts.uprobe_multi.cnt = 1;

	link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
	err = -errno; /* close() can clobber errno */

	if (link_fd >= 0)
		close(link_fd);
	close(prog_fd);

	return link_fd < 0 && err == -EBADF;
}

static int probe_kern_bpf_cookie(int token_fd)
{
	struct bpf_insn insns[] = {
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int ret, insn_cnt = ARRAY_SIZE(insns);

	ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
	return probe_fd(ret);
}

static int probe_kern_btf_enum64(int token_fd)
{
	static const char strs[] = "\0enum64";
	__u32 types[] = {
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

static int probe_kern_arg_ctx_tag(int token_fd)
{
	static const char strs[] = "\0a\0b\0arg:ctx\0";
	const __u32 types[] = {
		/* [1] INT */
		BTF_TYPE_INT_ENC(1 /* "a" */, BTF_INT_SIGNED, 0, 32, 4),
		/* [2] PTR -> VOID */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0),
		/* [3] FUNC_PROTO `int(void *a)` */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
		BTF_PARAM_ENC(1 /* "a" */, 2),
		/* [4] FUNC 'a' -> FUNC_PROTO (main prog) */
		BTF_TYPE_ENC(1 /* "a" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 3),
		/* [5] FUNC_PROTO `int(void *b __arg_ctx)` */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
		BTF_PARAM_ENC(3 /* "b" */, 2),
		/* [6] FUNC 'b' -> FUNC_PROTO (subprog) */
		BTF_TYPE_ENC(3 /* "b" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 5),
		/* [7] DECL_TAG 'arg:ctx' -> func 'b' arg 'b' */
		BTF_TYPE_DECL_TAG_ENC(5 /* "arg:ctx" */, 6, 0),
	};
	const struct bpf_insn insns[] = {
		/* main prog */
		BPF_CALL_REL(+1),
		BPF_EXIT_INSN(),
		/* global subprog */
		BPF_EMIT_CALL(BPF_FUNC_get_func_ip), /* needs PTR_TO_CTX */
		BPF_EXIT_INSN(),
	};
	const struct bpf_func_info_min func_infos[] = {
		{ 0, 4 }, /* main prog -> FUNC 'a' */
		{ 2, 6 }, /* subprog -> FUNC 'b' */
	};
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int prog_fd, btf_fd, insn_cnt = ARRAY_SIZE(insns);

	btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd);
	if (btf_fd < 0)
		return 0;

	opts.prog_btf_fd = btf_fd;
	opts.func_info = &func_infos;
	opts.func_info_cnt = ARRAY_SIZE(func_infos);
	opts.func_info_rec_size = sizeof(func_infos[0]);

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, "det_arg_ctx",
				"GPL", insns, insn_cnt, &opts);
	close(btf_fd);

	return probe_fd(prog_fd);
}

typedef int (*feature_probe_fn)(int /* token_fd */);

static struct kern_feature_cache feature_cache;

static struct kern_feature_desc {
	const char *desc;
	feature_probe_fn probe;
} feature_probes[__FEAT_CNT] = {
	[FEAT_PROG_NAME] = {
		"BPF program name", probe_kern_prog_name,
	},
	[FEAT_GLOBAL_DATA] = {
		"global variables", probe_kern_global_data,
	},
	[FEAT_BTF] = {
		"minimal BTF", probe_kern_btf,
	},
	[FEAT_BTF_FUNC] = {
		"BTF functions", probe_kern_btf_func,
	},
	[FEAT_BTF_GLOBAL_FUNC] = {
		"BTF global function", probe_kern_btf_func_global,
	},
	[FEAT_BTF_DATASEC] = {
		"BTF data section and variable", probe_kern_btf_datasec,
	},
	[FEAT_ARRAY_MMAP] = {
		"ARRAY map mmap()", probe_kern_array_mmap,
	},
	[FEAT_EXP_ATTACH_TYPE] = {
		"BPF_PROG_LOAD expected_attach_type attribute",
		probe_kern_exp_attach_type,
	},
	[FEAT_PROBE_READ_KERN] = {
		"bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
	},
	[FEAT_PROG_BIND_MAP] = {
		"BPF_PROG_BIND_MAP support", probe_prog_bind_map,
	},
	[FEAT_MODULE_BTF] = {
		"module BTF support", probe_module_btf,
	},
	[FEAT_BTF_FLOAT] = {
		"BTF_KIND_FLOAT support", probe_kern_btf_float,
	},
	[FEAT_PERF_LINK] = {
		"BPF perf link support", probe_perf_link,
	},
	[FEAT_BTF_DECL_TAG] = {
		"BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
	},
	[FEAT_BTF_TYPE_TAG] = {
		"BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
	},
	[FEAT_MEMCG_ACCOUNT] = {
		"memcg-based memory accounting", probe_memcg_account,
	},
	[FEAT_BPF_COOKIE] = {
		"BPF cookie support", probe_kern_bpf_cookie,
	},
	[FEAT_BTF_ENUM64] = {
		"BTF_KIND_ENUM64 support", probe_kern_btf_enum64,
	},
	[FEAT_SYSCALL_WRAPPER] = {
		"Kernel using syscall wrapper", probe_kern_syscall_wrapper,
	},
	[FEAT_UPROBE_MULTI_LINK] = {
		"BPF multi-uprobe link support", probe_uprobe_multi_link,
	},
	[FEAT_ARG_CTX_TAG] = {
		"kernel-side __arg_ctx tag", probe_kern_arg_ctx_tag,
	},
	[FEAT_BTF_QMARK_DATASEC] = {
		"BTF DATASEC names starting from '?'", probe_kern_btf_qmark_datasec,
	},
};
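
/* Check whether the kernel supports a given feature, running the probe
 * lazily on first use and caching the result in the provided (possibly
 * token-scoped) cache. A probe error is logged and treated as "missing".
 */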
bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id)
{
	struct kern_feature_desc *feat = &feature_probes[feat_id];
	int ret;

	/* assume global feature cache, unless custom one is provided */
	if (!cache)
		cache = &feature_cache;

	if (READ_ONCE(cache->res[feat_id]) == FEAT_UNKNOWN) {
		ret = feat->probe(cache->token_fd);
		if (ret > 0) {
			WRITE_ONCE(cache->res[feat_id], FEAT_SUPPORTED);
		} else if (ret == 0) {
			WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
		} else {
			pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
			WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
		}
	}

	return READ_ONCE(cache->res[feat_id]) == FEAT_SUPPORTED;
}