// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf_perf_event.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>
#include <linux/bpf_lsm.h>
#include <linux/fprobe.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/key.h>
#include <linux/verification.h>
#include <linux/namei.h>

#include <net/bpf_sk_storage.h>

#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p) \
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#define MAX_UPROBE_MULTI_CNT (1U << 20)
#define MAX_KPROBE_MULTI_CNT (1U << 20)

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id);
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);

static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx);
static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * the kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * Since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send a kprobe event into the ring-buffer,
		 * so return zero here.
		 */
		rcu_read_lock();
		bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array));
		rcu_read_unlock();
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we do a bpf_prog_array_valid() check there
	 * to see whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If the prog_array fetched by bpf_prog_array_valid() was
	 * non-NULL, we enter trace_call_bpf() and do the actual, proper
	 * rcu_dereference() under the RCU lock.
	 * If it then turns out that the prog_array is NULL, we bail out.
	 * Conversely, if the pointer fetched by bpf_prog_array_valid()
	 * was NULL, we skip the prog_array and risk missing events that
	 * were added between that check and the rcu_dereference(); this
	 * is an accepted risk.
	 */
	rcu_read_lock();
	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
				 ctx, bpf_prog_run);
	rcu_read_unlock();

out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}
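
/*
 * Editor's note, an illustrative sketch (not part of the kernel sources):
 * a perf-attached kprobe program observes the 0/1 return contract from the
 * kdoc above. Returning 0 filters the event out, returning 1 stores the
 * kprobe event into the ring buffer. Assumes the usual libbpf headers and
 * an AT_FDCWD value of -100.
 *
 *	#include <vmlinux.h>
 *	#include <bpf/bpf_helpers.h>
 *	#include <bpf/bpf_tracing.h>
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int BPF_KPROBE(filter_openat, int dfd)
 *	{
 *		// keep the kprobe event only for AT_FDCWD-relative opens
 *		return dfd == -100 ? 1 : 0;
 *	}
 *
 *	char LICENSE[] SEC("license") = "GPL";
 */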

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func = bpf_override_return,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
};
#endif

static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func = bpf_probe_read_user,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;

	/*
	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
	 * terminator into `dst`.
	 *
	 * strncpy_from_user() does long-sized strides in the fast path. If the
	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
	 * then there could be junk after the NUL in `dst`. If user code takes
	 * `dst` and keys a hash map with it, then semantically identical
	 * strings can occupy multiple entries in the map.
	 */
	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func = bpf_probe_read_user_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};
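
/*
 * Editor's note, an illustrative sketch (not part of the kernel sources):
 * bpf_probe_read_user_str() writes only up to and including the NUL, so a
 * program that keys a hash map with the full fixed-size buffer should
 * zero-initialize it first to keep the trailing bytes deterministic.
 * Map and type names below are hypothetical.
 *
 *	struct open_key {
 *		char name[64];
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, struct open_key);
 *		__type(value, u64);
 *	} hits SEC(".maps");
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int BPF_KPROBE(count_open, int dfd, const char *filename)
 *	{
 *		struct open_key key = {};	// zero-init: stable trailing bytes
 *		u64 one = 1, *val;
 *
 *		if (bpf_probe_read_user_str(key.name, sizeof(key.name),
 *					    filename) < 0)
 *			return 0;
 *		val = bpf_map_lookup_elem(&hits, &key);
 *		if (val)
 *			__sync_fetch_and_add(val, 1);
 *		else
 *			bpf_map_update_elem(&hits, &key, &one, BPF_ANY);
 *		return 0;
 *	}
 */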

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func = bpf_probe_read_kernel,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_kernel_nofault() call will likely not fill the
	 * entire buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in the error case, so that improper users ignoring the return
	 * code altogether don't copy garbage; otherwise the length of the
	 * string is returned and can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func = bpf_probe_read_kernel_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};
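
/*
 * Editor's note, an illustrative sketch (not part of the kernel sources):
 * the positive return value of bpf_probe_read_kernel_str() includes the
 * trailing NUL, so it can bound the number of bytes forwarded to outputs
 * such as a BPF ring buffer. `my_traced_func` and its kernel-string
 * argument are hypothetical.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_RINGBUF);
 *		__uint(max_entries, 4096);
 *	} rb SEC(".maps");
 *
 *	SEC("kprobe/my_traced_func")
 *	int BPF_KPROBE(dump_kname, const char *kname)
 *	{
 *		char buf[64];
 *		long n;
 *
 *		n = bpf_probe_read_kernel_str(buf, sizeof(buf), kname);
 *		if (n > 0)
 *			bpf_ringbuf_output(&rb, buf, n, 0);
 *		return 0;
 *	}
 */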

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func = bpf_probe_read_compat,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_str_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func = bpf_probe_read_compat_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};
#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */

BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func = bpf_probe_write_user,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}
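
/*
 * Editor's note, an illustrative sketch (not part of the kernel sources),
 * shown only for the call shape: bpf_probe_write_user() is deliberately
 * gated (CAP_SYS_ADMIN plus the ratelimited warning emitted by
 * bpf_get_probe_write_proto() above), and corrupting user memory is easy.
 * Here the first byte of read(2)'s user buffer is overwritten.
 *
 *	SEC("ksyscall/read")
 *	int BPF_KSYSCALL(poke_read_buf, int fd, char *buf, size_t count)
 *	{
 *		char zero = 0;
 *
 *		if (count > 0)
 *			bpf_probe_write_user(buf, &zero, sizeof(zero));
 *		return 0;
 *	}
 */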

#define MAX_TRACE_PRINTK_VARARGS	3
#define BPF_TRACE_PRINTK_SIZE		1024

BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
	struct bpf_bprintf_data data = {
		.get_bin_args = true,
		.get_buf = true,
	};
	int ret;

	ret = bpf_bprintf_prepare(fmt, fmt_size, args,
				  MAX_TRACE_PRINTK_VARARGS, &data);
	if (ret < 0)
		return ret;

	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);

	trace_bpf_trace_printk(data.buf);

	bpf_bprintf_cleanup(&data);

	return ret;
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func = bpf_trace_printk,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type = ARG_CONST_SIZE,
};
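
/*
 * Editor's note, an illustrative sketch (not part of the kernel sources):
 * BPF programs normally reach this helper through libbpf's bpf_printk()
 * macro, which builds the format string and passes up to the three u64
 * arguments accepted here. Output appears in
 * /sys/kernel/tracing/trace_pipe once the bpf_trace_printk event is
 * enabled, which loading such a program does (see
 * __set_printk_clr_event() below).
 *
 *	SEC("tracepoint/syscalls/sys_enter_execve")
 *	int trace_exec(void *ctx)
 *	{
 *		bpf_printk("execve by pid %d",
 *			   (int)(bpf_get_current_pid_tgid() >> 32));
 *		return 0;
 *	}
 */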

static void __set_printk_clr_event(void)
{
	/*
	 * This program might be calling bpf_trace_printk,
	 * so enable the associated bpf_trace/bpf_trace_printk event.
	 * Repeat this each time as it is possible a user has
	 * disabled bpf_trace_printk events. By loading a program that
	 * calls bpf_trace_printk(), however, the user has expressed
	 * the intent to see such events.
	 */
	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
		pr_warn_ratelimited("could not enable bpf_trace_printk events");
}

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_printk_proto;
}

BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
	   u32, data_len)
{
	struct bpf_bprintf_data data = {
		.get_bin_args = true,
		.get_buf = true,
	};
	int ret, num_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !args))
		return -EINVAL;
	num_args = data_len / 8;

	ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
	if (ret < 0)
		return ret;

	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);

	trace_bpf_trace_printk(data.buf);

	bpf_bprintf_cleanup(&data);

	return ret;
}

static const struct bpf_func_proto bpf_trace_vprintk_proto = {
	.func = bpf_trace_vprintk,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg4_type = ARG_CONST_SIZE_OR_ZERO,
};

const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_vprintk_proto;
}

BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, args, u32, data_len)
{
	struct bpf_bprintf_data data = {
		.get_bin_args = true,
	};
	int err, num_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !args))
		return -EINVAL;
	num_args = data_len / 8;

	err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
	if (err < 0)
		return err;

	seq_bprintf(m, fmt, data.bin_args);

	bpf_bprintf_cleanup(&data);

	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
}

BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
	.func = bpf_seq_printf,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_seq_file_ids[0],
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE,
	.arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
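
/*
 * Editor's note, an illustrative sketch (not part of the kernel sources):
 * bpf_trace_vprintk() and bpf_seq_printf() share the packed-argument
 * convention checked above: the variadic part is an array of u64, and
 * data_len must be a multiple of 8 with at most MAX_BPRINTF_VARARGS (12)
 * entries. From a task iterator:
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *m = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *		u64 args[2];
 *
 *		if (!task)
 *			return 0;
 *		args[0] = task->tgid;
 *		args[1] = task->pid;
 *		bpf_seq_printf(m, "%d:%d\n", sizeof("%d:%d\n"),
 *			       args, sizeof(args));
 *		return 0;
 *	}
 */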

BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
	return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

static const struct bpf_func_proto bpf_seq_write_proto = {
	.func = bpf_seq_write,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_seq_file_ids[0],
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
}

static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
	.func = bpf_seq_printf_btf,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_seq_file_ids[0],
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func = bpf_perf_event_read,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func = bpf_perf_event_read_value,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_UNINIT_MEM,
	.arg4_type = ARG_CONST_SIZE,
};
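
/*
 * Editor's note, an illustrative sketch (not part of the kernel sources):
 * reading a perf counter through a BPF_MAP_TYPE_PERF_EVENT_ARRAY slot for
 * the current CPU. Userspace is expected to open one perf event per CPU
 * and store the FDs in the array; names below are hypothetical.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(u32));
 *		__uint(value_size, sizeof(u32));
 *	} cycles SEC(".maps");
 *
 *	SEC("kprobe/do_nanosleep")
 *	int BPF_KPROBE(read_cycles)
 *	{
 *		struct bpf_perf_event_value v;
 *
 *		if (!bpf_perf_event_read_value(&cycles, BPF_F_CURRENT_CPU,
 *					       &v, sizeof(v)))
 *			bpf_printk("cnt %llu en %llu run %llu",
 *				   v.counter, v.enabled, v.running);
 *		return 0;
 *	}
 */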

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}

/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output.
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds;
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int nest_level, err;

	preempt_disable();
	sds = this_cpu_ptr(&bpf_trace_sds);
	nest_level = this_cpu_inc_return(bpf_trace_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	perf_sample_save_raw_data(sd, &raw);

	err = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_trace_nest_level);
	preempt_enable();
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func = bpf_perf_event_output,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
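
/*
 * Editor's note, an illustrative sketch (not part of the kernel sources):
 * the common emit path from a kprobe, writing a fixed-size sample to the
 * current CPU's slot of a perf event array; BPF_F_CURRENT_CPU resolves to
 * this CPU's index in __bpf_perf_event_output() above.
 *
 *	struct event {
 *		u32 pid;
 *		u64 ts;
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(u32));
 *		__uint(value_size, sizeof(u32));
 *	} events SEC(".maps");
 *
 *	SEC("kprobe/wake_up_new_task")
 *	int BPF_KPROBE(on_fork)
 *	{
 *		struct event e = {
 *			.pid = bpf_get_current_pid_tgid() >> 32,
 *			.ts = bpf_ktime_get_ns(),
 *		};
 *
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      &e, sizeof(e));
 *		return 0;
 *	}
 */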

static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct perf_raw_frag frag = {
		.copy = ctx_copy,
		.size = ctx_size,
		.data = ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next = ctx_size ? &frag : NULL,
			},
			.size = meta_size,
			.data = meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	int nest_level;
	u64 ret;

	preempt_disable();
	nest_level = this_cpu_inc_return(bpf_event_output_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	perf_sample_save_raw_data(sd, &raw);

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	preempt_enable();
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

const struct bpf_func_proto bpf_get_current_task_proto = {
	.func = bpf_get_current_task,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_task_btf)
{
	return (unsigned long) current;
}

const struct bpf_func_proto bpf_get_current_task_btf_proto = {
	.func = bpf_get_current_task_btf,
	.gpl_only = true,
	.ret_type = RET_PTR_TO_BTF_ID_TRUSTED,
	.ret_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};

BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
{
	return (unsigned long) task_pt_regs(task);
}

BTF_ID_LIST(bpf_task_pt_regs_ids)
BTF_ID(struct, pt_regs)

const struct bpf_func_proto bpf_task_pt_regs_proto = {
	.func = bpf_task_pt_regs,
	.gpl_only = true,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.ret_type = RET_PTR_TO_BTF_ID,
	.ret_btf_id = &bpf_task_pt_regs_ids[0],
};
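
/*
 * Editor's note, an illustrative sketch (not part of the kernel sources):
 * in tracing programs, bpf_get_current_task_btf() yields a trusted BTF
 * pointer whose fields can be read directly, and which can be handed to
 * bpf_task_pt_regs():
 *
 *	SEC("fentry/do_nanosleep")
 *	int BPF_PROG(show_task)
 *	{
 *		struct task_struct *t = bpf_get_current_task_btf();
 *		struct pt_regs *regs = bpf_task_pt_regs(t);
 *
 *		// regs fields (e.g. the instruction pointer) can be read
 *		// directly on arches that expose them
 *		bpf_printk("comm %s pid %d", t->comm, t->pid);
 *		return 0;
 *	}
 */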

struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
	bool has_siginfo;
	struct kernel_siginfo info;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;
	struct kernel_siginfo *siginfo;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	siginfo = work->has_siginfo ? &work->info : SEND_SIG_PRIV;

	group_send_sig_info(work->sig, siginfo, work->task, work->type);
	put_task_struct(work->task);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type, struct task_struct *task, u64 value)
{
	struct send_signal_irq_work *work = NULL;
	struct kernel_siginfo info;
	struct kernel_siginfo *siginfo;

	if (!task) {
		task = current;
		siginfo = SEND_SIG_PRIV;
	} else {
		clear_siginfo(&info);
		info.si_signo = sig;
		info.si_errno = 0;
		info.si_code = SI_KERNEL;
		info.si_pid = 0;
		info.si_uid = 0;
		info.si_value.sival_ptr = (void *)(unsigned long)value;
		siginfo = &info;
	}

	/* Similar to bpf_probe_write_user, the task needs to be in a
	 * sound condition and kernel memory access must be permitted
	 * in order to send a signal to the current task.
	 */
	if (unlikely(task->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;
	/* Don't allow signaling the global init task (pid 1); a fatal
	 * signal to init would panic the kernel.
	 */
	if (unlikely(is_global_init(task)))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (irq_work_is_busy(&work->irq_work))
			return -EBUSY;

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = get_task_struct(task);
		work->has_siginfo = siginfo == &info;
		if (work->has_siginfo)
			copy_siginfo(&work->info, &info);
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, siginfo, task, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID, NULL, 0);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func = bpf_send_signal,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID, NULL, 0);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func = bpf_send_signal_thread,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};
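
/*
 * Editor's note, an illustrative sketch (not part of the kernel sources):
 * signaling the current process from a tracing program. bpf_send_signal()
 * targets the whole thread group (PIDTYPE_TGID), while
 * bpf_send_signal_thread() targets only the current thread (PIDTYPE_PID).
 * The policy below is hypothetical; 9 is SIGKILL.
 *
 *	SEC("ksyscall/setns")
 *	int BPF_KSYSCALL(deny_setns)
 *	{
 *		char comm[16];
 *
 *		bpf_get_current_comm(comm, sizeof(comm));
 *		if (comm[0] == 'b')	// placeholder for a real comm match
 *			bpf_send_signal(9);
 *		return 0;
 *	}
 */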

BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
{
	struct path copy;
	long len;
	char *p;

	if (!sz)
		return 0;

	/*
	 * The path pointer is verified as trusted and safe to use,
	 * but let's double check it's valid anyway to workaround
	 * potentially broken verifier.
	 */
	len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
	if (len < 0)
		return len;

	p = d_path(&copy, buf, sz);
	if (IS_ERR(p)) {
		len = PTR_ERR(p);
	} else {
		len = buf + sz - p;
		memmove(buf, p, len);
	}

	return len;
}

BTF_SET_START(btf_allowlist_d_path)
#ifdef CONFIG_SECURITY
BTF_ID(func, security_file_permission)
BTF_ID(func, security_inode_getattr)
BTF_ID(func, security_file_open)
#endif
#ifdef CONFIG_SECURITY_PATH
BTF_ID(func, security_path_truncate)
#endif
BTF_ID(func, vfs_truncate)
BTF_ID(func, vfs_fallocate)
BTF_ID(func, dentry_open)
BTF_ID(func, vfs_getattr)
BTF_ID(func, filp_close)
BTF_SET_END(btf_allowlist_d_path)

static bool bpf_d_path_allowed(const struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_TRACING &&
	    prog->expected_attach_type == BPF_TRACE_ITER)
		return true;

	if (prog->type == BPF_PROG_TYPE_LSM)
		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);

	return btf_id_set_contains(&btf_allowlist_d_path,
				   prog->aux->attach_btf_id);
}

BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)

static const struct bpf_func_proto bpf_d_path_proto = {
	.func = bpf_d_path,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &bpf_d_path_btf_ids[0],
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.allowed = bpf_d_path_allowed,
};

#define BTF_F_ALL (BTF_F_COMPACT | BTF_F_NONAME | \
		   BTF_F_PTR_RAW | BTF_F_ZERO)

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id)
{
	const struct btf_type *t;

	if (unlikely(flags & ~(BTF_F_ALL)))
		return -EINVAL;

	if (btf_ptr_size != sizeof(struct btf_ptr))
		return -EINVAL;

	*btf = bpf_get_btf_vmlinux();

	if (IS_ERR_OR_NULL(*btf))
		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;

	if (ptr->type_id > 0)
		*btf_id = ptr->type_id;
	else
		return -EINVAL;

	if (*btf_id > 0)
		t = btf_type_by_id(*btf, *btf_id);
	if (*btf_id <= 0 || !t)
		return -ENOENT;

	return 0;
}

BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
				      flags);
}

const struct bpf_func_proto bpf_snprintf_btf_proto = {
	.func = bpf_snprintf_btf,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg4_type = ARG_CONST_SIZE,
	.arg5_type = ARG_ANYTHING,
};
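
/*
 * Editor's note, an illustrative sketch (not part of the kernel sources):
 * pretty-printing a kernel object by BTF type ID with bpf_snprintf_btf().
 * The type ID is typically resolved at load time with libbpf's
 * bpf_core_type_id_kernel() macro.
 *
 *	SEC("iter/task")
 *	int dump_task_btf(struct bpf_iter__task *ctx)
 *	{
 *		struct task_struct *task = ctx->task;
 *		static char out[512];
 *		struct btf_ptr p = {};
 *
 *		if (!task)
 *			return 0;
 *		p.ptr = task;
 *		p.type_id = bpf_core_type_id_kernel(struct task_struct);
 *		bpf_snprintf_btf(out, sizeof(out), &p, sizeof(p), 0);
 *		return 0;
 *	}
 */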

BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-2];
}

static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
	.func = bpf_get_func_ip_tracing,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

#ifdef CONFIG_X86_KERNEL_IBT
static unsigned long get_entry_ip(unsigned long fentry_ip)
{
	u32 instr;

	/* We want to be extra safe in case entry ip is on the page edge,
	 * but otherwise we need to avoid get_kernel_nofault()'s overhead.
	 */
	if ((fentry_ip & ~PAGE_MASK) < ENDBR_INSN_SIZE) {
		if (get_kernel_nofault(instr, (u32 *)(fentry_ip - ENDBR_INSN_SIZE)))
			return fentry_ip;
	} else {
		instr = *(u32 *)(fentry_ip - ENDBR_INSN_SIZE);
	}
	if (is_endbr(instr))
		fentry_ip -= ENDBR_INSN_SIZE;
	return fentry_ip;
}
#else
#define get_entry_ip(fentry_ip) fentry_ip
#endif

BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
{
	struct bpf_trace_run_ctx *run_ctx __maybe_unused;
	struct kprobe *kp;

#ifdef CONFIG_UPROBES
	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	if (run_ctx->is_uprobe)
		return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr;
#endif

	kp = kprobe_running();

	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
		return 0;

	return get_entry_ip((uintptr_t)kp->addr);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
	.func = bpf_get_func_ip_kprobe,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
	.func = bpf_get_func_ip_kprobe_multi,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_cookie(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
	.func = bpf_get_attach_cookie_kprobe_multi,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs)
{
	return bpf_uprobe_multi_entry_ip(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi = {
	.func = bpf_get_func_ip_uprobe_multi,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs)
{
	return bpf_uprobe_multi_cookie(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti = {
	.func = bpf_get_attach_cookie_uprobe_multi,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
	.func = bpf_get_attach_cookie_trace,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};
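
/*
 * Editor's note, an illustrative sketch (not part of the kernel sources):
 * bpf_get_attach_cookie() returns the u64 supplied by userspace at attach
 * time (for example via bpf_program__attach_kprobe_multi_opts() and its
 * .cookies array), which lets one program distinguish its attach sites:
 *
 *	SEC("kprobe.multi/tcp_*")
 *	int BPF_KPROBE(which_site)
 *	{
 *		u64 cookie = bpf_get_attach_cookie(ctx);
 *		u64 ip = bpf_get_func_ip(ctx);
 *
 *		bpf_printk("cookie %llu ip %lx", cookie, ip);
 *		return 0;
 *	}
 */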

BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
{
	return ctx->event->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
	.func = bpf_get_attach_cookie_pe,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
	.func = bpf_get_attach_cookie_tracing,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
{
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	u32 entry_cnt = size / br_entry_size;

	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);

	if (unlikely(flags))
		return -EINVAL;

	if (!entry_cnt)
		return -ENOENT;

	return entry_cnt * br_entry_size;
}

static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
	.func = bpf_get_branch_snapshot,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	if ((u64) n >= nr_args)
		return -EINVAL;
	*value = ((u64 *)ctx)[n];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_arg_proto = {
	.func = get_func_arg,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
	.arg3_size = sizeof(u64),
};

BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	*value = ((u64 *)ctx)[nr_args];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_ret_proto = {
	.func = get_func_ret,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
	.arg2_size = sizeof(u64),
};

BPF_CALL_1(get_func_arg_cnt, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-1];
}

static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
	.func = get_func_arg_cnt,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};
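
/*
 * Editor's note, an illustrative sketch (not part of the kernel sources):
 * trampoline-based programs (fentry/fexit) can walk arguments and the
 * return value generically with the three helpers above:
 *
 *	SEC("fexit/vfs_read")
 *	int BPF_PROG(audit_read)
 *	{
 *		u64 nargs = bpf_get_func_arg_cnt(ctx);
 *		u64 count = 0, ret = 0;
 *
 *		bpf_get_func_arg(ctx, 2, &count);	// arg 2 of vfs_read: count
 *		bpf_get_func_ret(ctx, &ret);
 *		bpf_printk("%llu args, asked %llu, got %lld",
 *			   nargs, count, (s64)ret);
 *		return 0;
 *	}
 */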

#ifdef CONFIG_KEYS
__bpf_kfunc_start_defs();

/**
 * bpf_lookup_user_key - lookup a key by its serial
 * @serial: key handle serial number
 * @flags: lookup-specific flags
 *
 * Search a key with a given *serial* and the provided *flags*.
 * If found, increment the reference count of the key by one, and
 * return it in the bpf_key structure.
 *
 * The bpf_key structure must be passed to bpf_key_put() when done
 * with it, so that the key reference count is decremented and the
 * bpf_key structure is freed.
 *
 * Permission checks are deferred to the time the key is used by
 * one of the available key-specific kfuncs.
 *
 * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
 * special keyring (e.g. session keyring), if it doesn't yet exist.
 * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
 * for the key construction, and to retrieve uninstantiated keys (keys
 * without data attached to them).
 *
 * Return: a bpf_key pointer with a valid key pointer if the key is found, a
 * NULL pointer otherwise.
 */
__bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
{
	key_ref_t key_ref;
	struct bpf_key *bkey;

	if (flags & ~KEY_LOOKUP_ALL)
		return NULL;

	/*
	 * Permission check is deferred until the key is used, as the
	 * intent of the caller is unknown here.
	 */
	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
	if (IS_ERR(key_ref))
		return NULL;

	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
	if (!bkey) {
		key_put(key_ref_to_ptr(key_ref));
		return NULL;
	}

	bkey->key = key_ref_to_ptr(key_ref);
	bkey->has_ref = true;

	return bkey;
}

/**
 * bpf_lookup_system_key - lookup a key by a system-defined ID
 * @id: key ID
 *
 * Obtain a bpf_key structure with a key pointer set to the passed key ID.
 * The key pointer is marked as invalid, to prevent bpf_key_put() from
 * attempting to decrement the key reference count on that pointer. The key
 * pointer set in such a way is currently understood only by
 * verify_pkcs7_signature().
 *
 * Set *id* to one of the values defined in include/linux/verification.h:
 * 0 for the primary keyring (immutable keyring of system keys);
 * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
 * (where keys can be added only if they are vouched for by existing keys
 * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
 * keyring (primarily used by the integrity subsystem to verify a kexec'ed
 * kernel image and, possibly, the initramfs signature).
 *
 * Return: a bpf_key pointer with an invalid key pointer set from the
 * pre-determined ID on success, a NULL pointer otherwise
 */
__bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
{
	struct bpf_key *bkey;

	if (system_keyring_id_check(id) < 0)
		return NULL;

	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
	if (!bkey)
		return NULL;

	bkey->key = (struct key *)(unsigned long)id;
	bkey->has_ref = false;

	return bkey;
}

/**
 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
 * @bkey: bpf_key structure
 *
 * Decrement the reference count of the key inside *bkey*, if the pointer
 * is valid, and free *bkey*.
 */
__bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
{
	if (bkey->has_ref)
		key_put(bkey->key);

	kfree(bkey);
}
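
/*
 * Editor's note, an illustrative sketch (not part of the kernel sources):
 * acquire/release discipline for the key kfuncs above, from a sleepable
 * tracing program (bpf_lookup_user_key() is KF_SLEEPABLE). The serial
 * number is assumed to be filled in by the loader via a global.
 *
 *	extern struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags) __ksym;
 *	extern void bpf_key_put(struct bpf_key *bkey) __ksym;
 *
 *	const volatile u32 key_serial;	// set by the loader
 *
 *	SEC("fentry.s/security_bprm_check")
 *	int BPF_PROG(peek_key)
 *	{
 *		struct bpf_key *bkey = bpf_lookup_user_key(key_serial, 0);
 *
 *		if (!bkey)
 *			return 0;
 *		// ... hand bkey to a key-using kfunc ...
 *		bpf_key_put(bkey);
 *		return 0;
 *	}
 */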

#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
/**
 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
 * @data_p: data to verify
 * @sig_p: signature of the data
 * @trusted_keyring: keyring with keys trusted for signature verification
 *
 * Verify the PKCS#7 signature *sig_p* against the supplied *data_p*
 * with keys in a keyring referenced by *trusted_keyring*.
 *
 * Return: 0 on success, a negative value on error.
 */
__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
					   struct bpf_dynptr *sig_p,
					   struct bpf_key *trusted_keyring)
{
	struct bpf_dynptr_kern *data_ptr = (struct bpf_dynptr_kern *)data_p;
	struct bpf_dynptr_kern *sig_ptr = (struct bpf_dynptr_kern *)sig_p;
	const void *data, *sig;
	u32 data_len, sig_len;
	int ret;

	if (trusted_keyring->has_ref) {
		/*
		 * Do the permission check deferred in bpf_lookup_user_key().
		 * See bpf_lookup_user_key() for more details.
		 *
		 * A call to key_task_permission() here would be redundant, as
		 * it is already done by keyring_search() called by
		 * find_asymmetric_key().
		 */
		ret = key_validate(trusted_keyring->key);
		if (ret < 0)
			return ret;
	}

	data_len = __bpf_dynptr_size(data_ptr);
	data = __bpf_dynptr_data(data_ptr, data_len);
	sig_len = __bpf_dynptr_size(sig_ptr);
	sig = __bpf_dynptr_data(sig_ptr, sig_len);

	return verify_pkcs7_signature(data, data_len, sig, sig_len,
				      trusted_keyring->key,
				      VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
				      NULL);
}
#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(key_sig_kfunc_set)
BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
#endif
BTF_KFUNCS_END(key_sig_kfunc_set)

static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &key_sig_kfunc_set,
};

static int __init bpf_key_sig_kfuncs_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
					 &bpf_key_sig_kfunc_set);
}

late_initcall(bpf_key_sig_kfuncs_init);
#endif /* CONFIG_KEYS */
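
/*
 * Editor's note, an illustrative sketch (not part of the kernel sources)
 * of the signature-check call shape, loosely following the BPF selftests:
 * data and signature travel in dynptrs (here built from buffers `data`
 * and `sig` with lengths that are assumed to exist in the program), and
 * the keyring comes from one of the lookup kfuncs above.
 * VERIFY_USE_SECONDARY_KEYRING is from include/linux/verification.h.
 *
 *	SEC("lsm.s/bpf")
 *	int BPF_PROG(check_sig, int cmd, union bpf_attr *attr, unsigned int size)
 *	{
 *		struct bpf_dynptr data_ptr, sig_ptr;
 *		struct bpf_key *trusted;
 *		int err;
 *
 *		bpf_dynptr_from_mem(data, data_len, 0, &data_ptr);
 *		bpf_dynptr_from_mem(sig, sig_len, 0, &sig_ptr);
 *
 *		trusted = bpf_lookup_system_key(VERIFY_USE_SECONDARY_KEYRING);
 *		if (!trusted)
 *			return -ENOENT;
 *		err = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, trusted);
 *		bpf_key_put(trusted);
 *		return err;
 *	}
 */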

static const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_map_lookup_percpu_elem:
		return &bpf_map_lookup_percpu_elem_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_task_btf:
		return &bpf_get_current_task_btf_proto;
	case BPF_FUNC_task_pt_regs:
		return &bpf_task_pt_regs_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_write_user:
		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
		       NULL : bpf_get_probe_write_proto();
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	case BPF_FUNC_probe_read:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_compat_proto;
	case BPF_FUNC_probe_read_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_compat_str_proto;
#endif
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_cgrp_storage_get:
		return &bpf_cgrp_storage_get_proto;
	case BPF_FUNC_cgrp_storage_delete:
		return &bpf_cgrp_storage_delete_proto;
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	case BPF_FUNC_send_signal_thread:
		return &bpf_send_signal_thread_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_get_task_stack:
		return prog->sleepable ? &bpf_get_task_stack_sleepable_proto
				       : &bpf_get_task_stack_proto;
	case BPF_FUNC_copy_from_user:
		return &bpf_copy_from_user_proto;
	case BPF_FUNC_copy_from_user_task:
		return &bpf_copy_from_user_task_proto;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	case BPF_FUNC_task_storage_get:
		if (bpf_prog_check_recur(prog))
			return &bpf_task_storage_get_recur_proto;
		return &bpf_task_storage_get_proto;
	case BPF_FUNC_task_storage_delete:
		if (bpf_prog_check_recur(prog))
			return &bpf_task_storage_delete_recur_proto;
		return &bpf_task_storage_delete_proto;
	case BPF_FUNC_for_each_map_elem:
		return &bpf_for_each_map_elem_proto;
	case BPF_FUNC_snprintf:
		return &bpf_snprintf_proto;
	case BPF_FUNC_get_func_ip:
		return &bpf_get_func_ip_proto_tracing;
	case BPF_FUNC_get_branch_snapshot:
		return &bpf_get_branch_snapshot_proto;
	case BPF_FUNC_find_vma:
		return &bpf_find_vma_proto;
	case BPF_FUNC_trace_vprintk:
		return bpf_get_trace_vprintk_proto();
	default:
		return bpf_base_func_proto(func_id, prog);
	}
}

static bool is_kprobe_multi(const struct bpf_prog *prog)
{
	return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ||
	       prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
}

static inline bool is_kprobe_session(const struct bpf_prog *prog)
{
	return prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
}

static inline bool is_uprobe_multi(const struct bpf_prog *prog)
{
	return prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI ||
	       prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
}

static inline bool is_uprobe_session(const struct bpf_prog *prog)
{
	return prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
}

static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return prog->sleepable ? &bpf_get_stack_sleepable_proto
				       : &bpf_get_stack_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	case BPF_FUNC_get_func_ip:
		if (is_kprobe_multi(prog))
			return &bpf_get_func_ip_proto_kprobe_multi;
		if (is_uprobe_multi(prog))
			return &bpf_get_func_ip_proto_uprobe_multi;
		return &bpf_get_func_ip_proto_kprobe;
	case BPF_FUNC_get_attach_cookie:
		if (is_kprobe_multi(prog))
			return &bpf_get_attach_cookie_proto_kmulti;
		if (is_uprobe_multi(prog))
			return &bpf_get_attach_cookie_proto_umulti;
		return &bpf_get_attach_cookie_proto_trace;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func = bpf_perf_event_output_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call the raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func = bpf_get_stackid_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func = bpf_get_stack_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	case BPF_FUNC_get_attach_cookie:
		return &bpf_get_attach_cookie_proto_trace;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func = bpf_perf_prog_read_value,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	struct perf_branch_stack *br_stack = ctx->data->br_stack;
	u32 to_copy;

	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
		return -EINVAL;

	if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
		return -ENOENT;

	if (unlikely(!br_stack))
		return -ENOENT;

	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
		return br_stack->nr * br_entry_size;

	if (!buf || (size % br_entry_size != 0))
		return -EINVAL;

	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
	memcpy(buf, br_stack->entries, to_copy);

	return to_copy;
}

static const struct bpf_func_proto bpf_read_branch_records_proto = {
	.func = bpf_read_branch_records,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_MEM_OR_NULL,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};
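
/*
 * Editor's note, an illustrative sketch (not part of the kernel sources):
 * the two-step pattern for bpf_read_branch_records() from a perf_event
 * program: query the size with BPF_F_GET_BRANCH_RECORDS_SIZE, then copy
 * into a buffer whose size is a multiple of
 * sizeof(struct perf_branch_entry).
 *
 *	SEC("perf_event")
 *	int on_sample(struct bpf_perf_event_data *ctx)
 *	{
 *		struct perf_branch_entry ents[32];
 *		int sz, copied;
 *
 *		sz = bpf_read_branch_records(ctx, NULL, 0,
 *					     BPF_F_GET_BRANCH_RECORDS_SIZE);
 *		if (sz <= 0)
 *			return 0;
 *		copied = bpf_read_branch_records(ctx, ents, sizeof(ents), 0);
 *		if (copied > 0)
 *			bpf_printk("%d bytes of branch records", copied);
 *		return 0;
 *	}
 */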

static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_pe;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_pe;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	case BPF_FUNC_read_branch_records:
		return &bpf_read_branch_records_proto;
	case BPF_FUNC_get_attach_cookie:
		return &bpf_get_attach_cookie_proto_pe;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 *
 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 * in normal, irq, and nmi context.
 */
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
		this_cpu_dec(bpf_raw_tp_nest_level);
		return ERR_PTR(-EBUSY);
	}

	return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
	this_cpu_dec(bpf_raw_tp_nest_level);
}

BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func = bpf_perf_event_output_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
get_bpf_raw_tp_regs(); 1912 int ret; 1913 1914 if (IS_ERR(regs)) 1915 return PTR_ERR(regs); 1916 1917 perf_fetch_caller_regs(regs); 1918 /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */ 1919 ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map, 1920 flags, 0, 0); 1921 put_bpf_raw_tp_regs(); 1922 return ret; 1923 } 1924 1925 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = { 1926 .func = bpf_get_stackid_raw_tp, 1927 .gpl_only = true, 1928 .ret_type = RET_INTEGER, 1929 .arg1_type = ARG_PTR_TO_CTX, 1930 .arg2_type = ARG_CONST_MAP_PTR, 1931 .arg3_type = ARG_ANYTHING, 1932 }; 1933 1934 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args, 1935 void *, buf, u32, size, u64, flags) 1936 { 1937 struct pt_regs *regs = get_bpf_raw_tp_regs(); 1938 int ret; 1939 1940 if (IS_ERR(regs)) 1941 return PTR_ERR(regs); 1942 1943 perf_fetch_caller_regs(regs); 1944 ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf, 1945 (unsigned long) size, flags, 0); 1946 put_bpf_raw_tp_regs(); 1947 return ret; 1948 } 1949 1950 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = { 1951 .func = bpf_get_stack_raw_tp, 1952 .gpl_only = true, 1953 .ret_type = RET_INTEGER, 1954 .arg1_type = ARG_PTR_TO_CTX, 1955 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 1956 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1957 .arg4_type = ARG_ANYTHING, 1958 }; 1959 1960 static const struct bpf_func_proto * 1961 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1962 { 1963 switch (func_id) { 1964 case BPF_FUNC_perf_event_output: 1965 return &bpf_perf_event_output_proto_raw_tp; 1966 case BPF_FUNC_get_stackid: 1967 return &bpf_get_stackid_proto_raw_tp; 1968 case BPF_FUNC_get_stack: 1969 return &bpf_get_stack_proto_raw_tp; 1970 case BPF_FUNC_get_attach_cookie: 1971 return &bpf_get_attach_cookie_proto_tracing; 1972 default: 1973 return bpf_tracing_func_proto(func_id, prog); 1974 } 1975 } 1976 1977 const struct bpf_func_proto * 1978 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1979 { 1980 const struct bpf_func_proto *fn; 1981 1982 switch (func_id) { 1983 #ifdef CONFIG_NET 1984 case BPF_FUNC_skb_output: 1985 return &bpf_skb_output_proto; 1986 case BPF_FUNC_xdp_output: 1987 return &bpf_xdp_output_proto; 1988 case BPF_FUNC_skc_to_tcp6_sock: 1989 return &bpf_skc_to_tcp6_sock_proto; 1990 case BPF_FUNC_skc_to_tcp_sock: 1991 return &bpf_skc_to_tcp_sock_proto; 1992 case BPF_FUNC_skc_to_tcp_timewait_sock: 1993 return &bpf_skc_to_tcp_timewait_sock_proto; 1994 case BPF_FUNC_skc_to_tcp_request_sock: 1995 return &bpf_skc_to_tcp_request_sock_proto; 1996 case BPF_FUNC_skc_to_udp6_sock: 1997 return &bpf_skc_to_udp6_sock_proto; 1998 case BPF_FUNC_skc_to_unix_sock: 1999 return &bpf_skc_to_unix_sock_proto; 2000 case BPF_FUNC_skc_to_mptcp_sock: 2001 return &bpf_skc_to_mptcp_sock_proto; 2002 case BPF_FUNC_sk_storage_get: 2003 return &bpf_sk_storage_get_tracing_proto; 2004 case BPF_FUNC_sk_storage_delete: 2005 return &bpf_sk_storage_delete_tracing_proto; 2006 case BPF_FUNC_sock_from_file: 2007 return &bpf_sock_from_file_proto; 2008 case BPF_FUNC_get_socket_cookie: 2009 return &bpf_get_socket_ptr_cookie_proto; 2010 case BPF_FUNC_xdp_get_buff_len: 2011 return &bpf_xdp_get_buff_len_trace_proto; 2012 #endif 2013 case BPF_FUNC_seq_printf: 2014 return prog->expected_attach_type == BPF_TRACE_ITER ? 2015 &bpf_seq_printf_proto : 2016 NULL; 2017 case BPF_FUNC_seq_write: 2018 return prog->expected_attach_type == BPF_TRACE_ITER ? 
2019 &bpf_seq_write_proto : 2020 NULL; 2021 case BPF_FUNC_seq_printf_btf: 2022 return prog->expected_attach_type == BPF_TRACE_ITER ? 2023 &bpf_seq_printf_btf_proto : 2024 NULL; 2025 case BPF_FUNC_d_path: 2026 return &bpf_d_path_proto; 2027 case BPF_FUNC_get_func_arg: 2028 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL; 2029 case BPF_FUNC_get_func_ret: 2030 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL; 2031 case BPF_FUNC_get_func_arg_cnt: 2032 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL; 2033 case BPF_FUNC_get_attach_cookie: 2034 if (prog->type == BPF_PROG_TYPE_TRACING && 2035 prog->expected_attach_type == BPF_TRACE_RAW_TP) 2036 return &bpf_get_attach_cookie_proto_tracing; 2037 return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL; 2038 default: 2039 fn = raw_tp_prog_func_proto(func_id, prog); 2040 if (!fn && prog->expected_attach_type == BPF_TRACE_ITER) 2041 fn = bpf_iter_get_func_proto(func_id, prog); 2042 return fn; 2043 } 2044 } 2045 2046 static bool raw_tp_prog_is_valid_access(int off, int size, 2047 enum bpf_access_type type, 2048 const struct bpf_prog *prog, 2049 struct bpf_insn_access_aux *info) 2050 { 2051 return bpf_tracing_ctx_access(off, size, type); 2052 } 2053 2054 static bool tracing_prog_is_valid_access(int off, int size, 2055 enum bpf_access_type type, 2056 const struct bpf_prog *prog, 2057 struct bpf_insn_access_aux *info) 2058 { 2059 return bpf_tracing_btf_ctx_access(off, size, type, prog, info); 2060 } 2061 2062 int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog, 2063 const union bpf_attr *kattr, 2064 union bpf_attr __user *uattr) 2065 { 2066 return -ENOTSUPP; 2067 } 2068 2069 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = { 2070 .get_func_proto = raw_tp_prog_func_proto, 2071 .is_valid_access = raw_tp_prog_is_valid_access, 2072 }; 2073 2074 const struct bpf_prog_ops raw_tracepoint_prog_ops = { 2075 #ifdef CONFIG_NET 2076 .test_run = bpf_prog_test_run_raw_tp, 2077 #endif 2078 }; 2079 2080 const struct bpf_verifier_ops tracing_verifier_ops = { 2081 .get_func_proto = tracing_prog_func_proto, 2082 .is_valid_access = tracing_prog_is_valid_access, 2083 }; 2084 2085 const struct bpf_prog_ops tracing_prog_ops = { 2086 .test_run = bpf_prog_test_run_tracing, 2087 }; 2088 2089 static bool raw_tp_writable_prog_is_valid_access(int off, int size, 2090 enum bpf_access_type type, 2091 const struct bpf_prog *prog, 2092 struct bpf_insn_access_aux *info) 2093 { 2094 if (off == 0) { 2095 if (size != sizeof(u64) || type != BPF_READ) 2096 return false; 2097 info->reg_type = PTR_TO_TP_BUFFER; 2098 } 2099 return raw_tp_prog_is_valid_access(off, size, type, prog, info); 2100 } 2101 2102 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = { 2103 .get_func_proto = raw_tp_prog_func_proto, 2104 .is_valid_access = raw_tp_writable_prog_is_valid_access, 2105 }; 2106 2107 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = { 2108 }; 2109 2110 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type, 2111 const struct bpf_prog *prog, 2112 struct bpf_insn_access_aux *info) 2113 { 2114 const int size_u64 = sizeof(u64); 2115 2116 if (off < 0 || off >= sizeof(struct bpf_perf_event_data)) 2117 return false; 2118 if (type != BPF_READ) 2119 return false; 2120 if (off % size != 0) { 2121 if (sizeof(unsigned long) != 4) 2122 return false; 2123 if (size != 8) 2124 return false; 2125 if (off % size != 4) 2126 return false; 2127 } 2128 2129 
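/*
 * A note on the misaligned case above (rationale, as far as the layout
 * goes): on 32-bit archs the pt_regs portion of bpf_perf_event_data is
 * laid out in 4-byte longs, so the u64 sample_period/addr fields can end
 * up only 4-byte aligned. Permit exactly that one shape -- an 8-byte
 * read at off % 8 == 4 -- and reject every other misaligned access.
 */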
switch (off) { 2130 case bpf_ctx_range(struct bpf_perf_event_data, sample_period): 2131 bpf_ctx_record_field_size(info, size_u64); 2132 if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) 2133 return false; 2134 break; 2135 case bpf_ctx_range(struct bpf_perf_event_data, addr): 2136 bpf_ctx_record_field_size(info, size_u64); 2137 if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) 2138 return false; 2139 break; 2140 default: 2141 if (size != sizeof(long)) 2142 return false; 2143 } 2144 2145 return true; 2146 } 2147 2148 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, 2149 const struct bpf_insn *si, 2150 struct bpf_insn *insn_buf, 2151 struct bpf_prog *prog, u32 *target_size) 2152 { 2153 struct bpf_insn *insn = insn_buf; 2154 2155 switch (si->off) { 2156 case offsetof(struct bpf_perf_event_data, sample_period): 2157 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 2158 data), si->dst_reg, si->src_reg, 2159 offsetof(struct bpf_perf_event_data_kern, data)); 2160 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, 2161 bpf_target_off(struct perf_sample_data, period, 8, 2162 target_size)); 2163 break; 2164 case offsetof(struct bpf_perf_event_data, addr): 2165 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 2166 data), si->dst_reg, si->src_reg, 2167 offsetof(struct bpf_perf_event_data_kern, data)); 2168 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, 2169 bpf_target_off(struct perf_sample_data, addr, 8, 2170 target_size)); 2171 break; 2172 default: 2173 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 2174 regs), si->dst_reg, si->src_reg, 2175 offsetof(struct bpf_perf_event_data_kern, regs)); 2176 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg, 2177 si->off); 2178 break; 2179 } 2180 2181 return insn - insn_buf; 2182 } 2183 2184 const struct bpf_verifier_ops perf_event_verifier_ops = { 2185 .get_func_proto = pe_prog_func_proto, 2186 .is_valid_access = pe_prog_is_valid_access, 2187 .convert_ctx_access = pe_prog_convert_ctx_access, 2188 }; 2189 2190 const struct bpf_prog_ops perf_event_prog_ops = { 2191 }; 2192 2193 static DEFINE_MUTEX(bpf_event_mutex); 2194 2195 #define BPF_TRACE_MAX_PROGS 64 2196 2197 int perf_event_attach_bpf_prog(struct perf_event *event, 2198 struct bpf_prog *prog, 2199 u64 bpf_cookie) 2200 { 2201 struct bpf_prog_array *old_array; 2202 struct bpf_prog_array *new_array; 2203 int ret = -EEXIST; 2204 2205 /* 2206 * Kprobe override only works if they are on the function entry, 2207 * and only if they are on the opt-in list. 
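* (Functions land on that opt-in list via the ALLOW_ERROR_INJECTION()
* annotation; trace_kprobe_error_injectable() below checks it.)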
2208 */ 2209 if (prog->kprobe_override && 2210 (!trace_kprobe_on_func_entry(event->tp_event) || 2211 !trace_kprobe_error_injectable(event->tp_event))) 2212 return -EINVAL; 2213 2214 mutex_lock(&bpf_event_mutex); 2215 2216 if (event->prog) 2217 goto unlock; 2218 2219 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); 2220 if (old_array && 2221 bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) { 2222 ret = -E2BIG; 2223 goto unlock; 2224 } 2225 2226 ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array); 2227 if (ret < 0) 2228 goto unlock; 2229 2230 /* set the new array to event->tp_event and set event->prog */ 2231 event->prog = prog; 2232 event->bpf_cookie = bpf_cookie; 2233 rcu_assign_pointer(event->tp_event->prog_array, new_array); 2234 bpf_prog_array_free_sleepable(old_array); 2235 2236 unlock: 2237 mutex_unlock(&bpf_event_mutex); 2238 return ret; 2239 } 2240 2241 void perf_event_detach_bpf_prog(struct perf_event *event) 2242 { 2243 struct bpf_prog_array *old_array; 2244 struct bpf_prog_array *new_array; 2245 int ret; 2246 2247 mutex_lock(&bpf_event_mutex); 2248 2249 if (!event->prog) 2250 goto unlock; 2251 2252 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); 2253 ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array); 2254 if (ret < 0) { 2255 bpf_prog_array_delete_safe(old_array, event->prog); 2256 } else { 2257 rcu_assign_pointer(event->tp_event->prog_array, new_array); 2258 bpf_prog_array_free_sleepable(old_array); 2259 } 2260 2261 bpf_prog_put(event->prog); 2262 event->prog = NULL; 2263 2264 unlock: 2265 mutex_unlock(&bpf_event_mutex); 2266 } 2267 2268 int perf_event_query_prog_array(struct perf_event *event, void __user *info) 2269 { 2270 struct perf_event_query_bpf __user *uquery = info; 2271 struct perf_event_query_bpf query = {}; 2272 struct bpf_prog_array *progs; 2273 u32 *ids, prog_cnt, ids_len; 2274 int ret; 2275 2276 if (!perfmon_capable()) 2277 return -EPERM; 2278 if (event->attr.type != PERF_TYPE_TRACEPOINT) 2279 return -EINVAL; 2280 if (copy_from_user(&query, uquery, sizeof(query))) 2281 return -EFAULT; 2282 2283 ids_len = query.ids_len; 2284 if (ids_len > BPF_TRACE_MAX_PROGS) 2285 return -E2BIG; 2286 ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN); 2287 if (!ids) 2288 return -ENOMEM; 2289 /* 2290 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which 2291 * is required when user only wants to check for uquery->prog_cnt. 2292 * There is no need to check for it since the case is handled 2293 * gracefully in bpf_prog_array_copy_info. 
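*
* A minimal user-space sketch of that two-step pattern (illustration
* only; error handling omitted, perf_fd is an open tracepoint event):
*
*	struct perf_event_query_bpf q = { .ids_len = 0 };
*	struct perf_event_query_bpf *full;
*
*	ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, &q);	/* fills q.prog_cnt */
*	full = malloc(sizeof(*full) + q.prog_cnt * sizeof(__u32));
*	full->ids_len = q.prog_cnt;
*	ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, full);	/* fills full->ids[] */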
2294 */ 2295 2296 mutex_lock(&bpf_event_mutex); 2297 progs = bpf_event_rcu_dereference(event->tp_event->prog_array); 2298 ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt); 2299 mutex_unlock(&bpf_event_mutex); 2300 2301 if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) || 2302 copy_to_user(uquery->ids, ids, ids_len * sizeof(u32))) 2303 ret = -EFAULT; 2304 2305 kfree(ids); 2306 return ret; 2307 } 2308 2309 extern struct bpf_raw_event_map __start__bpf_raw_tp[]; 2310 extern struct bpf_raw_event_map __stop__bpf_raw_tp[]; 2311 2312 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name) 2313 { 2314 struct bpf_raw_event_map *btp = __start__bpf_raw_tp; 2315 2316 for (; btp < __stop__bpf_raw_tp; btp++) { 2317 if (!strcmp(btp->tp->name, name)) 2318 return btp; 2319 } 2320 2321 return bpf_get_raw_tracepoint_module(name); 2322 } 2323 2324 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp) 2325 { 2326 struct module *mod; 2327 2328 preempt_disable(); 2329 mod = __module_address((unsigned long)btp); 2330 module_put(mod); 2331 preempt_enable(); 2332 } 2333 2334 static __always_inline 2335 void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args) 2336 { 2337 struct bpf_prog *prog = link->link.prog; 2338 struct bpf_run_ctx *old_run_ctx; 2339 struct bpf_trace_run_ctx run_ctx; 2340 2341 cant_sleep(); 2342 if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) { 2343 bpf_prog_inc_misses_counter(prog); 2344 goto out; 2345 } 2346 2347 run_ctx.bpf_cookie = link->cookie; 2348 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); 2349 2350 rcu_read_lock(); 2351 (void) bpf_prog_run(prog, args); 2352 rcu_read_unlock(); 2353 2354 bpf_reset_run_ctx(old_run_ctx); 2355 out: 2356 this_cpu_dec(*(prog->active)); 2357 } 2358 2359 #define UNPACK(...) __VA_ARGS__ 2360 #define REPEAT_1(FN, DL, X, ...) FN(X) 2361 #define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__) 2362 #define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__) 2363 #define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__) 2364 #define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__) 2365 #define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__) 2366 #define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__) 2367 #define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__) 2368 #define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__) 2369 #define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__) 2370 #define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__) 2371 #define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__) 2372 #define REPEAT(X, FN, DL, ...) 
REPEAT_##X(FN, DL, __VA_ARGS__) 2373 2374 #define SARG(X) u64 arg##X 2375 #define COPY(X) args[X] = arg##X 2376 2377 #define __DL_COM (,) 2378 #define __DL_SEM (;) 2379 2380 #define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 2381 2382 #define BPF_TRACE_DEFN_x(x) \ 2383 void bpf_trace_run##x(struct bpf_raw_tp_link *link, \ 2384 REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \ 2385 { \ 2386 u64 args[x]; \ 2387 REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \ 2388 __bpf_trace_run(link, args); \ 2389 } \ 2390 EXPORT_SYMBOL_GPL(bpf_trace_run##x) 2391 BPF_TRACE_DEFN_x(1); 2392 BPF_TRACE_DEFN_x(2); 2393 BPF_TRACE_DEFN_x(3); 2394 BPF_TRACE_DEFN_x(4); 2395 BPF_TRACE_DEFN_x(5); 2396 BPF_TRACE_DEFN_x(6); 2397 BPF_TRACE_DEFN_x(7); 2398 BPF_TRACE_DEFN_x(8); 2399 BPF_TRACE_DEFN_x(9); 2400 BPF_TRACE_DEFN_x(10); 2401 BPF_TRACE_DEFN_x(11); 2402 BPF_TRACE_DEFN_x(12); 2403 2404 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link) 2405 { 2406 struct tracepoint *tp = btp->tp; 2407 struct bpf_prog *prog = link->link.prog; 2408 2409 /* 2410 * check that program doesn't access arguments beyond what's 2411 * available in this tracepoint 2412 */ 2413 if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64)) 2414 return -EINVAL; 2415 2416 if (prog->aux->max_tp_access > btp->writable_size) 2417 return -EINVAL; 2418 2419 return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func, link); 2420 } 2421 2422 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link) 2423 { 2424 return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, link); 2425 } 2426 2427 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, 2428 u32 *fd_type, const char **buf, 2429 u64 *probe_offset, u64 *probe_addr, 2430 unsigned long *missed) 2431 { 2432 bool is_tracepoint, is_syscall_tp; 2433 struct bpf_prog *prog; 2434 int flags, err = 0; 2435 2436 prog = event->prog; 2437 if (!prog) 2438 return -ENOENT; 2439 2440 /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */ 2441 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) 2442 return -EOPNOTSUPP; 2443 2444 *prog_id = prog->aux->id; 2445 flags = event->tp_event->flags; 2446 is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT; 2447 is_syscall_tp = is_syscall_trace_event(event->tp_event); 2448 2449 if (is_tracepoint || is_syscall_tp) { 2450 *buf = is_tracepoint ? 
event->tp_event->tp->name 2451 : event->tp_event->name; 2452 /* We allow NULL pointer for tracepoint */ 2453 if (fd_type) 2454 *fd_type = BPF_FD_TYPE_TRACEPOINT; 2455 if (probe_offset) 2456 *probe_offset = 0x0; 2457 if (probe_addr) 2458 *probe_addr = 0x0; 2459 } else { 2460 /* kprobe/uprobe */ 2461 err = -EOPNOTSUPP; 2462 #ifdef CONFIG_KPROBE_EVENTS 2463 if (flags & TRACE_EVENT_FL_KPROBE) 2464 err = bpf_get_kprobe_info(event, fd_type, buf, 2465 probe_offset, probe_addr, missed, 2466 event->attr.type == PERF_TYPE_TRACEPOINT); 2467 #endif 2468 #ifdef CONFIG_UPROBE_EVENTS 2469 if (flags & TRACE_EVENT_FL_UPROBE) 2470 err = bpf_get_uprobe_info(event, fd_type, buf, 2471 probe_offset, probe_addr, 2472 event->attr.type == PERF_TYPE_TRACEPOINT); 2473 #endif 2474 } 2475 2476 return err; 2477 } 2478 2479 static int __init send_signal_irq_work_init(void) 2480 { 2481 int cpu; 2482 struct send_signal_irq_work *work; 2483 2484 for_each_possible_cpu(cpu) { 2485 work = per_cpu_ptr(&send_signal_work, cpu); 2486 init_irq_work(&work->irq_work, do_bpf_send_signal); 2487 } 2488 return 0; 2489 } 2490 2491 subsys_initcall(send_signal_irq_work_init); 2492 2493 #ifdef CONFIG_MODULES 2494 static int bpf_event_notify(struct notifier_block *nb, unsigned long op, 2495 void *module) 2496 { 2497 struct bpf_trace_module *btm, *tmp; 2498 struct module *mod = module; 2499 int ret = 0; 2500 2501 if (mod->num_bpf_raw_events == 0 || 2502 (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING)) 2503 goto out; 2504 2505 mutex_lock(&bpf_module_mutex); 2506 2507 switch (op) { 2508 case MODULE_STATE_COMING: 2509 btm = kzalloc(sizeof(*btm), GFP_KERNEL); 2510 if (btm) { 2511 btm->module = module; 2512 list_add(&btm->list, &bpf_trace_modules); 2513 } else { 2514 ret = -ENOMEM; 2515 } 2516 break; 2517 case MODULE_STATE_GOING: 2518 list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) { 2519 if (btm->module == module) { 2520 list_del(&btm->list); 2521 kfree(btm); 2522 break; 2523 } 2524 } 2525 break; 2526 } 2527 2528 mutex_unlock(&bpf_module_mutex); 2529 2530 out: 2531 return notifier_from_errno(ret); 2532 } 2533 2534 static struct notifier_block bpf_module_nb = { 2535 .notifier_call = bpf_event_notify, 2536 }; 2537 2538 static int __init bpf_event_init(void) 2539 { 2540 register_module_notifier(&bpf_module_nb); 2541 return 0; 2542 } 2543 2544 fs_initcall(bpf_event_init); 2545 #endif /* CONFIG_MODULES */ 2546 2547 struct bpf_session_run_ctx { 2548 struct bpf_run_ctx run_ctx; 2549 bool is_return; 2550 void *data; 2551 }; 2552 2553 #ifdef CONFIG_FPROBE 2554 struct bpf_kprobe_multi_link { 2555 struct bpf_link link; 2556 struct fprobe fp; 2557 unsigned long *addrs; 2558 u64 *cookies; 2559 u32 cnt; 2560 u32 mods_cnt; 2561 struct module **mods; 2562 u32 flags; 2563 }; 2564 2565 struct bpf_kprobe_multi_run_ctx { 2566 struct bpf_session_run_ctx session_ctx; 2567 struct bpf_kprobe_multi_link *link; 2568 unsigned long entry_ip; 2569 }; 2570 2571 struct user_syms { 2572 const char **syms; 2573 char *buf; 2574 }; 2575 2576 static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt) 2577 { 2578 unsigned long __user usymbol; 2579 const char **syms = NULL; 2580 char *buf = NULL, *p; 2581 int err = -ENOMEM; 2582 unsigned int i; 2583 2584 syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL); 2585 if (!syms) 2586 goto error; 2587 2588 buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL); 2589 if (!buf) 2590 goto error; 2591 2592 for (p = buf, i = 0; i < cnt; i++) { 2593 if (__get_user(usymbol, usyms + i)) { 2594 err = 
-EFAULT; 2595 goto error; 2596 } 2597 err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN); 2598 if (err == KSYM_NAME_LEN) 2599 err = -E2BIG; 2600 if (err < 0) 2601 goto error; 2602 syms[i] = p; 2603 p += err + 1; 2604 } 2605 2606 us->syms = syms; 2607 us->buf = buf; 2608 return 0; 2609 2610 error: 2611 if (err) { 2612 kvfree(syms); 2613 kvfree(buf); 2614 } 2615 return err; 2616 } 2617 2618 static void kprobe_multi_put_modules(struct module **mods, u32 cnt) 2619 { 2620 u32 i; 2621 2622 for (i = 0; i < cnt; i++) 2623 module_put(mods[i]); 2624 } 2625 2626 static void free_user_syms(struct user_syms *us) 2627 { 2628 kvfree(us->syms); 2629 kvfree(us->buf); 2630 } 2631 2632 static void bpf_kprobe_multi_link_release(struct bpf_link *link) 2633 { 2634 struct bpf_kprobe_multi_link *kmulti_link; 2635 2636 kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); 2637 unregister_fprobe(&kmulti_link->fp); 2638 kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt); 2639 } 2640 2641 static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link) 2642 { 2643 struct bpf_kprobe_multi_link *kmulti_link; 2644 2645 kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); 2646 kvfree(kmulti_link->addrs); 2647 kvfree(kmulti_link->cookies); 2648 kfree(kmulti_link->mods); 2649 kfree(kmulti_link); 2650 } 2651 2652 static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link, 2653 struct bpf_link_info *info) 2654 { 2655 u64 __user *ucookies = u64_to_user_ptr(info->kprobe_multi.cookies); 2656 u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs); 2657 struct bpf_kprobe_multi_link *kmulti_link; 2658 u32 ucount = info->kprobe_multi.count; 2659 int err = 0, i; 2660 2661 if (!uaddrs ^ !ucount) 2662 return -EINVAL; 2663 if (ucookies && !ucount) 2664 return -EINVAL; 2665 2666 kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); 2667 info->kprobe_multi.count = kmulti_link->cnt; 2668 info->kprobe_multi.flags = kmulti_link->flags; 2669 info->kprobe_multi.missed = kmulti_link->fp.nmissed; 2670 2671 if (!uaddrs) 2672 return 0; 2673 if (ucount < kmulti_link->cnt) 2674 err = -ENOSPC; 2675 else 2676 ucount = kmulti_link->cnt; 2677 2678 if (ucookies) { 2679 if (kmulti_link->cookies) { 2680 if (copy_to_user(ucookies, kmulti_link->cookies, ucount * sizeof(u64))) 2681 return -EFAULT; 2682 } else { 2683 for (i = 0; i < ucount; i++) { 2684 if (put_user(0, ucookies + i)) 2685 return -EFAULT; 2686 } 2687 } 2688 } 2689 2690 if (kallsyms_show_value(current_cred())) { 2691 if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64))) 2692 return -EFAULT; 2693 } else { 2694 for (i = 0; i < ucount; i++) { 2695 if (put_user(0, uaddrs + i)) 2696 return -EFAULT; 2697 } 2698 } 2699 return err; 2700 } 2701 2702 static const struct bpf_link_ops bpf_kprobe_multi_link_lops = { 2703 .release = bpf_kprobe_multi_link_release, 2704 .dealloc_deferred = bpf_kprobe_multi_link_dealloc, 2705 .fill_link_info = bpf_kprobe_multi_link_fill_link_info, 2706 }; 2707 2708 static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv) 2709 { 2710 const struct bpf_kprobe_multi_link *link = priv; 2711 unsigned long *addr_a = a, *addr_b = b; 2712 u64 *cookie_a, *cookie_b; 2713 2714 cookie_a = link->cookies + (addr_a - link->addrs); 2715 cookie_b = link->cookies + (addr_b - link->addrs); 2716 2717 /* swap addr_a/addr_b and cookie_a/cookie_b values */ 2718 swap(*addr_a, *addr_b); 2719 swap(*cookie_a, *cookie_b); 2720 } 2721 2722 static int 
bpf_kprobe_multi_addrs_cmp(const void *a, const void *b) 2723 { 2724 const unsigned long *addr_a = a, *addr_b = b; 2725 2726 if (*addr_a == *addr_b) 2727 return 0; 2728 return *addr_a < *addr_b ? -1 : 1; 2729 } 2730 2731 static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv) 2732 { 2733 return bpf_kprobe_multi_addrs_cmp(a, b); 2734 } 2735 2736 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx) 2737 { 2738 struct bpf_kprobe_multi_run_ctx *run_ctx; 2739 struct bpf_kprobe_multi_link *link; 2740 u64 *cookie, entry_ip; 2741 unsigned long *addr; 2742 2743 if (WARN_ON_ONCE(!ctx)) 2744 return 0; 2745 run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, 2746 session_ctx.run_ctx); 2747 link = run_ctx->link; 2748 if (!link->cookies) 2749 return 0; 2750 entry_ip = run_ctx->entry_ip; 2751 addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip), 2752 bpf_kprobe_multi_addrs_cmp); 2753 if (!addr) 2754 return 0; 2755 cookie = link->cookies + (addr - link->addrs); 2756 return *cookie; 2757 } 2758 2759 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx) 2760 { 2761 struct bpf_kprobe_multi_run_ctx *run_ctx; 2762 2763 run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, 2764 session_ctx.run_ctx); 2765 return run_ctx->entry_ip; 2766 } 2767 2768 static int 2769 kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link, 2770 unsigned long entry_ip, struct pt_regs *regs, 2771 bool is_return, void *data) 2772 { 2773 struct bpf_kprobe_multi_run_ctx run_ctx = { 2774 .session_ctx = { 2775 .is_return = is_return, 2776 .data = data, 2777 }, 2778 .link = link, 2779 .entry_ip = entry_ip, 2780 }; 2781 struct bpf_run_ctx *old_run_ctx; 2782 int err; 2783 2784 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { 2785 bpf_prog_inc_misses_counter(link->link.prog); 2786 err = 0; 2787 goto out; 2788 } 2789 2790 migrate_disable(); 2791 rcu_read_lock(); 2792 old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx); 2793 err = bpf_prog_run(link->link.prog, regs); 2794 bpf_reset_run_ctx(old_run_ctx); 2795 rcu_read_unlock(); 2796 migrate_enable(); 2797 2798 out: 2799 __this_cpu_dec(bpf_prog_active); 2800 return err; 2801 } 2802 2803 static int 2804 kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip, 2805 unsigned long ret_ip, struct pt_regs *regs, 2806 void *data) 2807 { 2808 struct bpf_kprobe_multi_link *link; 2809 int err; 2810 2811 link = container_of(fp, struct bpf_kprobe_multi_link, fp); 2812 err = kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs, false, data); 2813 return is_kprobe_session(link->link.prog) ? 
err : 0; 2814 } 2815 2816 static void 2817 kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip, 2818 unsigned long ret_ip, struct pt_regs *regs, 2819 void *data) 2820 { 2821 struct bpf_kprobe_multi_link *link; 2822 2823 link = container_of(fp, struct bpf_kprobe_multi_link, fp); 2824 kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs, true, data); 2825 } 2826 2827 static int symbols_cmp_r(const void *a, const void *b, const void *priv) 2828 { 2829 const char **str_a = (const char **) a; 2830 const char **str_b = (const char **) b; 2831 2832 return strcmp(*str_a, *str_b); 2833 } 2834 2835 struct multi_symbols_sort { 2836 const char **funcs; 2837 u64 *cookies; 2838 }; 2839 2840 static void symbols_swap_r(void *a, void *b, int size, const void *priv) 2841 { 2842 const struct multi_symbols_sort *data = priv; 2843 const char **name_a = a, **name_b = b; 2844 2845 swap(*name_a, *name_b); 2846 2847 /* If defined, swap also related cookies. */ 2848 if (data->cookies) { 2849 u64 *cookie_a, *cookie_b; 2850 2851 cookie_a = data->cookies + (name_a - data->funcs); 2852 cookie_b = data->cookies + (name_b - data->funcs); 2853 swap(*cookie_a, *cookie_b); 2854 } 2855 } 2856 2857 struct modules_array { 2858 struct module **mods; 2859 int mods_cnt; 2860 int mods_cap; 2861 }; 2862 2863 static int add_module(struct modules_array *arr, struct module *mod) 2864 { 2865 struct module **mods; 2866 2867 if (arr->mods_cnt == arr->mods_cap) { 2868 arr->mods_cap = max(16, arr->mods_cap * 3 / 2); 2869 mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL); 2870 if (!mods) 2871 return -ENOMEM; 2872 arr->mods = mods; 2873 } 2874 2875 arr->mods[arr->mods_cnt] = mod; 2876 arr->mods_cnt++; 2877 return 0; 2878 } 2879 2880 static bool has_module(struct modules_array *arr, struct module *mod) 2881 { 2882 int i; 2883 2884 for (i = arr->mods_cnt - 1; i >= 0; i--) { 2885 if (arr->mods[i] == mod) 2886 return true; 2887 } 2888 return false; 2889 } 2890 2891 static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt) 2892 { 2893 struct modules_array arr = {}; 2894 u32 i, err = 0; 2895 2896 for (i = 0; i < addrs_cnt; i++) { 2897 struct module *mod; 2898 2899 preempt_disable(); 2900 mod = __module_address(addrs[i]); 2901 /* Either no module or it's already stored */ 2902 if (!mod || has_module(&arr, mod)) { 2903 preempt_enable(); 2904 continue; 2905 } 2906 if (!try_module_get(mod)) 2907 err = -EINVAL; 2908 preempt_enable(); 2909 if (err) 2910 break; 2911 err = add_module(&arr, mod); 2912 if (err) { 2913 module_put(mod); 2914 break; 2915 } 2916 } 2917 2918 /* We return either err < 0 in case of error, ... */ 2919 if (err) { 2920 kprobe_multi_put_modules(arr.mods, arr.mods_cnt); 2921 kfree(arr.mods); 2922 return err; 2923 } 2924 2925 /* or number of modules found if everything is ok.
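* The caller stores that count in link->mods_cnt; the module references
* taken here are dropped again by kprobe_multi_put_modules() when the
* link is released.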
*/ 2926 *mods = arr.mods; 2927 return arr.mods_cnt; 2928 } 2929 2930 static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt) 2931 { 2932 u32 i; 2933 2934 for (i = 0; i < cnt; i++) { 2935 if (!within_error_injection_list(addrs[i])) 2936 return -EINVAL; 2937 } 2938 return 0; 2939 } 2940 2941 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 2942 { 2943 struct bpf_kprobe_multi_link *link = NULL; 2944 struct bpf_link_primer link_primer; 2945 void __user *ucookies; 2946 unsigned long *addrs; 2947 u32 flags, cnt, size; 2948 void __user *uaddrs; 2949 u64 *cookies = NULL; 2950 void __user *usyms; 2951 int err; 2952 2953 /* no support for 32bit archs yet */ 2954 if (sizeof(u64) != sizeof(void *)) 2955 return -EOPNOTSUPP; 2956 2957 if (!is_kprobe_multi(prog)) 2958 return -EINVAL; 2959 2960 flags = attr->link_create.kprobe_multi.flags; 2961 if (flags & ~BPF_F_KPROBE_MULTI_RETURN) 2962 return -EINVAL; 2963 2964 uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs); 2965 usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms); 2966 if (!!uaddrs == !!usyms) 2967 return -EINVAL; 2968 2969 cnt = attr->link_create.kprobe_multi.cnt; 2970 if (!cnt) 2971 return -EINVAL; 2972 if (cnt > MAX_KPROBE_MULTI_CNT) 2973 return -E2BIG; 2974 2975 size = cnt * sizeof(*addrs); 2976 addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL); 2977 if (!addrs) 2978 return -ENOMEM; 2979 2980 ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies); 2981 if (ucookies) { 2982 cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL); 2983 if (!cookies) { 2984 err = -ENOMEM; 2985 goto error; 2986 } 2987 if (copy_from_user(cookies, ucookies, size)) { 2988 err = -EFAULT; 2989 goto error; 2990 } 2991 } 2992 2993 if (uaddrs) { 2994 if (copy_from_user(addrs, uaddrs, size)) { 2995 err = -EFAULT; 2996 goto error; 2997 } 2998 } else { 2999 struct multi_symbols_sort data = { 3000 .cookies = cookies, 3001 }; 3002 struct user_syms us; 3003 3004 err = copy_user_syms(&us, usyms, cnt); 3005 if (err) 3006 goto error; 3007 3008 if (cookies) 3009 data.funcs = us.syms; 3010 3011 sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r, 3012 symbols_swap_r, &data); 3013 3014 err = ftrace_lookup_symbols(us.syms, cnt, addrs); 3015 free_user_syms(&us); 3016 if (err) 3017 goto error; 3018 } 3019 3020 if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) { 3021 err = -EINVAL; 3022 goto error; 3023 } 3024 3025 link = kzalloc(sizeof(*link), GFP_KERNEL); 3026 if (!link) { 3027 err = -ENOMEM; 3028 goto error; 3029 } 3030 3031 bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI, 3032 &bpf_kprobe_multi_link_lops, prog); 3033 3034 err = bpf_link_prime(&link->link, &link_primer); 3035 if (err) 3036 goto error; 3037 3038 if (!(flags & BPF_F_KPROBE_MULTI_RETURN)) 3039 link->fp.entry_handler = kprobe_multi_link_handler; 3040 if ((flags & BPF_F_KPROBE_MULTI_RETURN) || is_kprobe_session(prog)) 3041 link->fp.exit_handler = kprobe_multi_link_exit_handler; 3042 if (is_kprobe_session(prog)) 3043 link->fp.entry_data_size = sizeof(u64); 3044 3045 link->addrs = addrs; 3046 link->cookies = cookies; 3047 link->cnt = cnt; 3048 link->flags = flags; 3049 3050 if (cookies) { 3051 /* 3052 * Sorting addresses will trigger sorting cookies as well 3053 * (check bpf_kprobe_multi_cookie_swap). This way we can 3054 * find cookie based on the address in bpf_get_attach_cookie 3055 * helper. 
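* Both that lookup's bsearch() and the sort_r() below order by address
* via bpf_kprobe_multi_addrs_cmp(), so the two always agree.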
3056 */ 3057 sort_r(addrs, cnt, sizeof(*addrs), 3058 bpf_kprobe_multi_cookie_cmp, 3059 bpf_kprobe_multi_cookie_swap, 3060 link); 3061 } 3062 3063 err = get_modules_for_addrs(&link->mods, addrs, cnt); 3064 if (err < 0) { 3065 bpf_link_cleanup(&link_primer); 3066 return err; 3067 } 3068 link->mods_cnt = err; 3069 3070 err = register_fprobe_ips(&link->fp, addrs, cnt); 3071 if (err) { 3072 kprobe_multi_put_modules(link->mods, link->mods_cnt); 3073 bpf_link_cleanup(&link_primer); 3074 return err; 3075 } 3076 3077 return bpf_link_settle(&link_primer); 3078 3079 error: 3080 kfree(link); 3081 kvfree(addrs); 3082 kvfree(cookies); 3083 return err; 3084 } 3085 #else /* !CONFIG_FPROBE */ 3086 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3087 { 3088 return -EOPNOTSUPP; 3089 } 3090 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx) 3091 { 3092 return 0; 3093 } 3094 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx) 3095 { 3096 return 0; 3097 } 3098 #endif 3099 3100 #ifdef CONFIG_UPROBES 3101 struct bpf_uprobe_multi_link; 3102 3103 struct bpf_uprobe { 3104 struct bpf_uprobe_multi_link *link; 3105 loff_t offset; 3106 unsigned long ref_ctr_offset; 3107 u64 cookie; 3108 struct uprobe *uprobe; 3109 struct uprobe_consumer consumer; 3110 bool session; 3111 }; 3112 3113 struct bpf_uprobe_multi_link { 3114 struct path path; 3115 struct bpf_link link; 3116 u32 cnt; 3117 u32 flags; 3118 struct bpf_uprobe *uprobes; 3119 struct task_struct *task; 3120 }; 3121 3122 struct bpf_uprobe_multi_run_ctx { 3123 struct bpf_session_run_ctx session_ctx; 3124 unsigned long entry_ip; 3125 struct bpf_uprobe *uprobe; 3126 }; 3127 3128 static void bpf_uprobe_unregister(struct bpf_uprobe *uprobes, u32 cnt) 3129 { 3130 u32 i; 3131 3132 for (i = 0; i < cnt; i++) 3133 uprobe_unregister_nosync(uprobes[i].uprobe, &uprobes[i].consumer); 3134 3135 if (cnt) 3136 uprobe_unregister_sync(); 3137 } 3138 3139 static void bpf_uprobe_multi_link_release(struct bpf_link *link) 3140 { 3141 struct bpf_uprobe_multi_link *umulti_link; 3142 3143 umulti_link = container_of(link, struct bpf_uprobe_multi_link, link); 3144 bpf_uprobe_unregister(umulti_link->uprobes, umulti_link->cnt); 3145 if (umulti_link->task) 3146 put_task_struct(umulti_link->task); 3147 path_put(&umulti_link->path); 3148 } 3149 3150 static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link) 3151 { 3152 struct bpf_uprobe_multi_link *umulti_link; 3153 3154 umulti_link = container_of(link, struct bpf_uprobe_multi_link, link); 3155 kvfree(umulti_link->uprobes); 3156 kfree(umulti_link); 3157 } 3158 3159 static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link, 3160 struct bpf_link_info *info) 3161 { 3162 u64 __user *uref_ctr_offsets = u64_to_user_ptr(info->uprobe_multi.ref_ctr_offsets); 3163 u64 __user *ucookies = u64_to_user_ptr(info->uprobe_multi.cookies); 3164 u64 __user *uoffsets = u64_to_user_ptr(info->uprobe_multi.offsets); 3165 u64 __user *upath = u64_to_user_ptr(info->uprobe_multi.path); 3166 u32 upath_size = info->uprobe_multi.path_size; 3167 struct bpf_uprobe_multi_link *umulti_link; 3168 u32 ucount = info->uprobe_multi.count; 3169 int err = 0, i; 3170 char *p, *buf; 3171 long left = 0; 3172 3173 if (!upath ^ !upath_size) 3174 return -EINVAL; 3175 3176 if ((uoffsets || uref_ctr_offsets || ucookies) && !ucount) 3177 return -EINVAL; 3178 3179 umulti_link = container_of(link, struct bpf_uprobe_multi_link, link); 3180 info->uprobe_multi.count = umulti_link->cnt; 3181 info->uprobe_multi.flags = 
umulti_link->flags; 3182 info->uprobe_multi.pid = umulti_link->task ? 3183 task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0; 3184 3185 upath_size = upath_size ? min_t(u32, upath_size, PATH_MAX) : PATH_MAX; 3186 buf = kmalloc(upath_size, GFP_KERNEL); 3187 if (!buf) 3188 return -ENOMEM; 3189 p = d_path(&umulti_link->path, buf, upath_size); 3190 if (IS_ERR(p)) { 3191 kfree(buf); 3192 return PTR_ERR(p); 3193 } 3194 upath_size = buf + upath_size - p; 3195 3196 if (upath) 3197 left = copy_to_user(upath, p, upath_size); 3198 kfree(buf); 3199 if (left) 3200 return -EFAULT; 3201 info->uprobe_multi.path_size = upath_size; 3202 3203 if (!uoffsets && !ucookies && !uref_ctr_offsets) 3204 return 0; 3205 3206 if (ucount < umulti_link->cnt) 3207 err = -ENOSPC; 3208 else 3209 ucount = umulti_link->cnt; 3210 3211 for (i = 0; i < ucount; i++) { 3212 if (uoffsets && 3213 put_user(umulti_link->uprobes[i].offset, uoffsets + i)) 3214 return -EFAULT; 3215 if (uref_ctr_offsets && 3216 put_user(umulti_link->uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) 3217 return -EFAULT; 3218 if (ucookies && 3219 put_user(umulti_link->uprobes[i].cookie, ucookies + i)) 3220 return -EFAULT; 3221 } 3222 3223 return err; 3224 } 3225 3226 static const struct bpf_link_ops bpf_uprobe_multi_link_lops = { 3227 .release = bpf_uprobe_multi_link_release, 3228 .dealloc_deferred = bpf_uprobe_multi_link_dealloc, 3229 .fill_link_info = bpf_uprobe_multi_link_fill_link_info, 3230 }; 3231 3232 static int uprobe_prog_run(struct bpf_uprobe *uprobe, 3233 unsigned long entry_ip, 3234 struct pt_regs *regs, 3235 bool is_return, void *data) 3236 { 3237 struct bpf_uprobe_multi_link *link = uprobe->link; 3238 struct bpf_uprobe_multi_run_ctx run_ctx = { 3239 .session_ctx = { 3240 .is_return = is_return, 3241 .data = data, 3242 }, 3243 .entry_ip = entry_ip, 3244 .uprobe = uprobe, 3245 }; 3246 struct bpf_prog *prog = link->link.prog; 3247 bool sleepable = prog->sleepable; 3248 struct bpf_run_ctx *old_run_ctx; 3249 int err; 3250 3251 if (link->task && !same_thread_group(current, link->task)) 3252 return 0; 3253 3254 if (sleepable) 3255 rcu_read_lock_trace(); 3256 else 3257 rcu_read_lock(); 3258 3259 migrate_disable(); 3260 3261 old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx); 3262 err = bpf_prog_run(link->link.prog, regs); 3263 bpf_reset_run_ctx(old_run_ctx); 3264 3265 migrate_enable(); 3266 3267 if (sleepable) 3268 rcu_read_unlock_trace(); 3269 else 3270 rcu_read_unlock(); 3271 return err; 3272 } 3273 3274 static bool 3275 uprobe_multi_link_filter(struct uprobe_consumer *con, struct mm_struct *mm) 3276 { 3277 struct bpf_uprobe *uprobe; 3278 3279 uprobe = container_of(con, struct bpf_uprobe, consumer); 3280 return uprobe->link->task->mm == mm; 3281 } 3282 3283 static int 3284 uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs, 3285 __u64 *data) 3286 { 3287 struct bpf_uprobe *uprobe; 3288 int ret; 3289 3290 uprobe = container_of(con, struct bpf_uprobe, consumer); 3291 ret = uprobe_prog_run(uprobe, instruction_pointer(regs), regs, false, data); 3292 if (uprobe->session) 3293 return ret ? 
UPROBE_HANDLER_IGNORE : 0; 3294 return 0; 3295 } 3296 3297 static int 3298 uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs, 3299 __u64 *data) 3300 { 3301 struct bpf_uprobe *uprobe; 3302 3303 uprobe = container_of(con, struct bpf_uprobe, consumer); 3304 uprobe_prog_run(uprobe, func, regs, true, data); 3305 return 0; 3306 } 3307 3308 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx) 3309 { 3310 struct bpf_uprobe_multi_run_ctx *run_ctx; 3311 3312 run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, 3313 session_ctx.run_ctx); 3314 return run_ctx->entry_ip; 3315 } 3316 3317 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx) 3318 { 3319 struct bpf_uprobe_multi_run_ctx *run_ctx; 3320 3321 run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, 3322 session_ctx.run_ctx); 3323 return run_ctx->uprobe->cookie; 3324 } 3325 3326 int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3327 { 3328 struct bpf_uprobe_multi_link *link = NULL; 3329 unsigned long __user *uref_ctr_offsets; 3330 struct bpf_link_primer link_primer; 3331 struct bpf_uprobe *uprobes = NULL; 3332 struct task_struct *task = NULL; 3333 unsigned long __user *uoffsets; 3334 u64 __user *ucookies; 3335 void __user *upath; 3336 u32 flags, cnt, i; 3337 struct path path; 3338 char *name; 3339 pid_t pid; 3340 int err; 3341 3342 /* no support for 32bit archs yet */ 3343 if (sizeof(u64) != sizeof(void *)) 3344 return -EOPNOTSUPP; 3345 3346 if (!is_uprobe_multi(prog)) 3347 return -EINVAL; 3348 3349 flags = attr->link_create.uprobe_multi.flags; 3350 if (flags & ~BPF_F_UPROBE_MULTI_RETURN) 3351 return -EINVAL; 3352 3353 /* 3354 * path, offsets and cnt are mandatory, 3355 * ref_ctr_offsets and cookies are optional 3356 */ 3357 upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path); 3358 uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets); 3359 cnt = attr->link_create.uprobe_multi.cnt; 3360 pid = attr->link_create.uprobe_multi.pid; 3361 3362 if (!upath || !uoffsets || !cnt || pid < 0) 3363 return -EINVAL; 3364 if (cnt > MAX_UPROBE_MULTI_CNT) 3365 return -E2BIG; 3366 3367 uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets); 3368 ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies); 3369 3370 name = strndup_user(upath, PATH_MAX); 3371 if (IS_ERR(name)) { 3372 err = PTR_ERR(name); 3373 return err; 3374 } 3375 3376 err = kern_path(name, LOOKUP_FOLLOW, &path); 3377 kfree(name); 3378 if (err) 3379 return err; 3380 3381 if (!d_is_reg(path.dentry)) { 3382 err = -EBADF; 3383 goto error_path_put; 3384 } 3385 3386 if (pid) { 3387 task = get_pid_task(find_vpid(pid), PIDTYPE_TGID); 3388 if (!task) { 3389 err = -ESRCH; 3390 goto error_path_put; 3391 } 3392 } 3393 3394 err = -ENOMEM; 3395 3396 link = kzalloc(sizeof(*link), GFP_KERNEL); 3397 uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL); 3398 3399 if (!uprobes || !link) 3400 goto error_free; 3401 3402 for (i = 0; i < cnt; i++) { 3403 if (__get_user(uprobes[i].offset, uoffsets + i)) { 3404 err = -EFAULT; 3405 goto error_free; 3406 } 3407 if (uprobes[i].offset < 0) { 3408 err = -EINVAL; 3409 goto error_free; 3410 } 3411 if (uref_ctr_offsets && __get_user(uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) { 3412 err = -EFAULT; 3413 goto error_free; 3414 } 3415 if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) { 3416 err = -EFAULT; 3417 goto error_free; 3418 } 3419 3420 uprobes[i].link = link; 3421 
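/*
 * Wire up the consumer for this uprobe: an entry handler unless the
 * attach is return-only, a ret_handler for return and session
 * programs, and a filter only when a specific pid was requested.
 */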
3422 if (!(flags & BPF_F_UPROBE_MULTI_RETURN)) 3423 uprobes[i].consumer.handler = uprobe_multi_link_handler; 3424 if (flags & BPF_F_UPROBE_MULTI_RETURN || is_uprobe_session(prog)) 3425 uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler; 3426 if (is_uprobe_session(prog)) 3427 uprobes[i].session = true; 3428 if (pid) 3429 uprobes[i].consumer.filter = uprobe_multi_link_filter; 3430 } 3431 3432 link->cnt = cnt; 3433 link->uprobes = uprobes; 3434 link->path = path; 3435 link->task = task; 3436 link->flags = flags; 3437 3438 bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI, 3439 &bpf_uprobe_multi_link_lops, prog); 3440 3441 for (i = 0; i < cnt; i++) { 3442 uprobes[i].uprobe = uprobe_register(d_real_inode(link->path.dentry), 3443 uprobes[i].offset, 3444 uprobes[i].ref_ctr_offset, 3445 &uprobes[i].consumer); 3446 if (IS_ERR(uprobes[i].uprobe)) { 3447 err = PTR_ERR(uprobes[i].uprobe); 3448 link->cnt = i; 3449 goto error_unregister; 3450 } 3451 } 3452 3453 err = bpf_link_prime(&link->link, &link_primer); 3454 if (err) 3455 goto error_unregister; 3456 3457 return bpf_link_settle(&link_primer); 3458 3459 error_unregister: 3460 bpf_uprobe_unregister(uprobes, link->cnt); 3461 3462 error_free: 3463 kvfree(uprobes); 3464 kfree(link); 3465 if (task) 3466 put_task_struct(task); 3467 error_path_put: 3468 path_put(&path); 3469 return err; 3470 } 3471 #else /* !CONFIG_UPROBES */ 3472 int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3473 { 3474 return -EOPNOTSUPP; 3475 } 3476 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx) 3477 { 3478 return 0; 3479 } 3480 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx) 3481 { 3482 return 0; 3483 } 3484 #endif /* CONFIG_UPROBES */ 3485 3486 __bpf_kfunc_start_defs(); 3487 3488 __bpf_kfunc bool bpf_session_is_return(void) 3489 { 3490 struct bpf_session_run_ctx *session_ctx; 3491 3492 session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx); 3493 return session_ctx->is_return; 3494 } 3495 3496 __bpf_kfunc __u64 *bpf_session_cookie(void) 3497 { 3498 struct bpf_session_run_ctx *session_ctx; 3499 3500 session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx); 3501 return session_ctx->data; 3502 } 3503 3504 __bpf_kfunc_end_defs(); 3505 3506 BTF_KFUNCS_START(kprobe_multi_kfunc_set_ids) 3507 BTF_ID_FLAGS(func, bpf_session_is_return) 3508 BTF_ID_FLAGS(func, bpf_session_cookie) 3509 BTF_KFUNCS_END(kprobe_multi_kfunc_set_ids) 3510 3511 static int bpf_kprobe_multi_filter(const struct bpf_prog *prog, u32 kfunc_id) 3512 { 3513 if (!btf_id_set8_contains(&kprobe_multi_kfunc_set_ids, kfunc_id)) 3514 return 0; 3515 3516 if (!is_kprobe_session(prog) && !is_uprobe_session(prog)) 3517 return -EACCES; 3518 3519 return 0; 3520 } 3521 3522 static const struct btf_kfunc_id_set bpf_kprobe_multi_kfunc_set = { 3523 .owner = THIS_MODULE, 3524 .set = &kprobe_multi_kfunc_set_ids, 3525 .filter = bpf_kprobe_multi_filter, 3526 }; 3527 3528 static int __init bpf_kprobe_multi_kfuncs_init(void) 3529 { 3530 return register_btf_kfunc_id_set(BPF_PROG_TYPE_KPROBE, &bpf_kprobe_multi_kfunc_set); 3531 } 3532 3533 late_initcall(bpf_kprobe_multi_kfuncs_init); 3534 3535 __bpf_kfunc_start_defs(); 3536 3537 __bpf_kfunc int bpf_send_signal_task(struct task_struct *task, int sig, enum pid_type type, 3538 u64 value) 3539 { 3540 if (type != PIDTYPE_PID && type != PIDTYPE_TGID) 3541 return -EINVAL; 3542 3543 return bpf_send_signal_common(sig, type, task, value); 3544 } 3545 3546 __bpf_kfunc_end_defs(); 3547
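
/*
 * Example: how the session kfuncs above are typically consumed from the
 * BPF side. This is a sketch, not part of this file -- it assumes
 * libbpf's SEC()/BPF_PROG() macros and extern kfunc declarations for
 * bpf_session_is_return()/bpf_session_cookie(); the traced function
 * (do_sys_openat2) is just an illustrative choice.
 *
 *	SEC("kprobe.session/do_sys_openat2")
 *	int BPF_PROG(trace_openat2)
 *	{
 *		__u64 *cookie = bpf_session_cookie();
 *
 *		if (!bpf_session_is_return()) {
 *			*cookie = bpf_ktime_get_ns();	/* stash entry time */
 *			return 0;	/* 0 means: do run the return probe */
 *		}
 *		bpf_printk("openat2 latency: %llu ns",
 *			   bpf_ktime_get_ns() - *cookie);
 *		return 0;
 *	}
 */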