// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf_perf_event.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>
#include <linux/bpf_lsm.h>
#include <linux/fprobe.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/key.h>
#include <linux/verification.h>

#include <net/bpf_sk_storage.h>

#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p) \
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id);
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * Since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we checked bpf_prog_array_valid() there to see
	 * whether call->prog_array is empty or not, which is a heuristic
	 * to speed up execution.
	 *
	 * If the prog_array fetched by bpf_prog_array_valid() was non-NULL,
	 * we enter trace_call_bpf() and do the proper rcu_dereference()
	 * under the RCU lock; if it turns out that prog_array is NULL
	 * after all, we simply bail out.
	 * Conversely, if the pointer fetched by bpf_prog_array_valid() was
	 * NULL, the prog_array is skipped entirely, at the accepted risk of
	 * missing out on events when it was updated between that check and
	 * the rcu_dereference() here.
	 */
	rcu_read_lock();
	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
				 ctx, bpf_prog_run);
	rcu_read_unlock();

out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif

static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func		= bpf_probe_read_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;

	/*
	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
	 * terminator into `dst`.
	 *
	 * strncpy_from_user() does long-sized strides in the fast path. If the
	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
	 * then there could be junk after the NUL in `dst`. If the user takes
	 * `dst` and keys a hash map with it, then semantically identical
	 * strings can occupy multiple entries in the map.
204 */ 205 ret = strncpy_from_user_nofault(dst, unsafe_ptr, size); 206 if (unlikely(ret < 0)) 207 memset(dst, 0, size); 208 return ret; 209 } 210 211 BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size, 212 const void __user *, unsafe_ptr) 213 { 214 return bpf_probe_read_user_str_common(dst, size, unsafe_ptr); 215 } 216 217 const struct bpf_func_proto bpf_probe_read_user_str_proto = { 218 .func = bpf_probe_read_user_str, 219 .gpl_only = true, 220 .ret_type = RET_INTEGER, 221 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 222 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 223 .arg3_type = ARG_ANYTHING, 224 }; 225 226 static __always_inline int 227 bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr) 228 { 229 int ret; 230 231 ret = copy_from_kernel_nofault(dst, unsafe_ptr, size); 232 if (unlikely(ret < 0)) 233 memset(dst, 0, size); 234 return ret; 235 } 236 237 BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size, 238 const void *, unsafe_ptr) 239 { 240 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr); 241 } 242 243 const struct bpf_func_proto bpf_probe_read_kernel_proto = { 244 .func = bpf_probe_read_kernel, 245 .gpl_only = true, 246 .ret_type = RET_INTEGER, 247 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 248 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 249 .arg3_type = ARG_ANYTHING, 250 }; 251 252 static __always_inline int 253 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr) 254 { 255 int ret; 256 257 /* 258 * The strncpy_from_kernel_nofault() call will likely not fill the 259 * entire buffer, but that's okay in this circumstance as we're probing 260 * arbitrary memory anyway similar to bpf_probe_read_*() and might 261 * as well probe the stack. Thus, memory is explicitly cleared 262 * only in error case, so that improper users ignoring return 263 * code altogether don't copy garbage; otherwise length of string 264 * is returned that can be used for bpf_perf_event_output() et al. 
265 */ 266 ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size); 267 if (unlikely(ret < 0)) 268 memset(dst, 0, size); 269 return ret; 270 } 271 272 BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size, 273 const void *, unsafe_ptr) 274 { 275 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr); 276 } 277 278 const struct bpf_func_proto bpf_probe_read_kernel_str_proto = { 279 .func = bpf_probe_read_kernel_str, 280 .gpl_only = true, 281 .ret_type = RET_INTEGER, 282 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 283 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 284 .arg3_type = ARG_ANYTHING, 285 }; 286 287 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 288 BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size, 289 const void *, unsafe_ptr) 290 { 291 if ((unsigned long)unsafe_ptr < TASK_SIZE) { 292 return bpf_probe_read_user_common(dst, size, 293 (__force void __user *)unsafe_ptr); 294 } 295 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr); 296 } 297 298 static const struct bpf_func_proto bpf_probe_read_compat_proto = { 299 .func = bpf_probe_read_compat, 300 .gpl_only = true, 301 .ret_type = RET_INTEGER, 302 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 303 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 304 .arg3_type = ARG_ANYTHING, 305 }; 306 307 BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size, 308 const void *, unsafe_ptr) 309 { 310 if ((unsigned long)unsafe_ptr < TASK_SIZE) { 311 return bpf_probe_read_user_str_common(dst, size, 312 (__force void __user *)unsafe_ptr); 313 } 314 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr); 315 } 316 317 static const struct bpf_func_proto bpf_probe_read_compat_str_proto = { 318 .func = bpf_probe_read_compat_str, 319 .gpl_only = true, 320 .ret_type = RET_INTEGER, 321 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 322 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 323 .arg3_type = ARG_ANYTHING, 324 }; 325 #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */ 326 327 BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src, 328 u32, size) 329 { 330 /* 331 * Ensure we're in user context which is safe for the helper to 332 * run. This helper has no business in a kthread. 333 * 334 * access_ok() should prevent writing to non-user memory, but in 335 * some situations (nommu, temporary switch, etc) access_ok() does 336 * not provide enough validation, hence the check on KERNEL_DS. 337 * 338 * nmi_uaccess_okay() ensures the probe is not run in an interim 339 * state, when the task or mm are switched. This is specifically 340 * required to prevent the use of temporary mm. 
341 */ 342 343 if (unlikely(in_interrupt() || 344 current->flags & (PF_KTHREAD | PF_EXITING))) 345 return -EPERM; 346 if (unlikely(!nmi_uaccess_okay())) 347 return -EPERM; 348 349 return copy_to_user_nofault(unsafe_ptr, src, size); 350 } 351 352 static const struct bpf_func_proto bpf_probe_write_user_proto = { 353 .func = bpf_probe_write_user, 354 .gpl_only = true, 355 .ret_type = RET_INTEGER, 356 .arg1_type = ARG_ANYTHING, 357 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 358 .arg3_type = ARG_CONST_SIZE, 359 }; 360 361 static const struct bpf_func_proto *bpf_get_probe_write_proto(void) 362 { 363 if (!capable(CAP_SYS_ADMIN)) 364 return NULL; 365 366 pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!", 367 current->comm, task_pid_nr(current)); 368 369 return &bpf_probe_write_user_proto; 370 } 371 372 #define MAX_TRACE_PRINTK_VARARGS 3 373 #define BPF_TRACE_PRINTK_SIZE 1024 374 375 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, 376 u64, arg2, u64, arg3) 377 { 378 u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 }; 379 struct bpf_bprintf_data data = { 380 .get_bin_args = true, 381 .get_buf = true, 382 }; 383 int ret; 384 385 ret = bpf_bprintf_prepare(fmt, fmt_size, args, 386 MAX_TRACE_PRINTK_VARARGS, &data); 387 if (ret < 0) 388 return ret; 389 390 ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args); 391 392 trace_bpf_trace_printk(data.buf); 393 394 bpf_bprintf_cleanup(&data); 395 396 return ret; 397 } 398 399 static const struct bpf_func_proto bpf_trace_printk_proto = { 400 .func = bpf_trace_printk, 401 .gpl_only = true, 402 .ret_type = RET_INTEGER, 403 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, 404 .arg2_type = ARG_CONST_SIZE, 405 }; 406 407 static void __set_printk_clr_event(void) 408 { 409 /* 410 * This program might be calling bpf_trace_printk, 411 * so enable the associated bpf_trace/bpf_trace_printk event. 412 * Repeat this each time as it is possible a user has 413 * disabled bpf_trace_printk events. By loading a program 414 * calling bpf_trace_printk() however the user has expressed 415 * the intent to see such events. 
416 */ 417 if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1)) 418 pr_warn_ratelimited("could not enable bpf_trace_printk events"); 419 } 420 421 const struct bpf_func_proto *bpf_get_trace_printk_proto(void) 422 { 423 __set_printk_clr_event(); 424 return &bpf_trace_printk_proto; 425 } 426 427 BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args, 428 u32, data_len) 429 { 430 struct bpf_bprintf_data data = { 431 .get_bin_args = true, 432 .get_buf = true, 433 }; 434 int ret, num_args; 435 436 if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 || 437 (data_len && !args)) 438 return -EINVAL; 439 num_args = data_len / 8; 440 441 ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data); 442 if (ret < 0) 443 return ret; 444 445 ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args); 446 447 trace_bpf_trace_printk(data.buf); 448 449 bpf_bprintf_cleanup(&data); 450 451 return ret; 452 } 453 454 static const struct bpf_func_proto bpf_trace_vprintk_proto = { 455 .func = bpf_trace_vprintk, 456 .gpl_only = true, 457 .ret_type = RET_INTEGER, 458 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, 459 .arg2_type = ARG_CONST_SIZE, 460 .arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, 461 .arg4_type = ARG_CONST_SIZE_OR_ZERO, 462 }; 463 464 const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void) 465 { 466 __set_printk_clr_event(); 467 return &bpf_trace_vprintk_proto; 468 } 469 470 BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, 471 const void *, args, u32, data_len) 472 { 473 struct bpf_bprintf_data data = { 474 .get_bin_args = true, 475 }; 476 int err, num_args; 477 478 if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 || 479 (data_len && !args)) 480 return -EINVAL; 481 num_args = data_len / 8; 482 483 err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data); 484 if (err < 0) 485 return err; 486 487 seq_bprintf(m, fmt, data.bin_args); 488 489 bpf_bprintf_cleanup(&data); 490 491 return seq_has_overflowed(m) ? -EOVERFLOW : 0; 492 } 493 494 BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file) 495 496 static const struct bpf_func_proto bpf_seq_printf_proto = { 497 .func = bpf_seq_printf, 498 .gpl_only = true, 499 .ret_type = RET_INTEGER, 500 .arg1_type = ARG_PTR_TO_BTF_ID, 501 .arg1_btf_id = &btf_seq_file_ids[0], 502 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 503 .arg3_type = ARG_CONST_SIZE, 504 .arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, 505 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 506 }; 507 508 BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len) 509 { 510 return seq_write(m, data, len) ? 
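/*
 * Illustrative only, not part of this file: a minimal sketch of how a BPF
 * iterator program satisfies the bpf_seq_printf() contract enforced above,
 * i.e. the vararg data is an array of u64 whose byte size is a multiple of
 * 8 and at most MAX_BPRINTF_VARARGS * 8. The section name and iterator
 * context layout are the usual task-iterator ones and are assumptions of
 * the sketch (vmlinux.h/bpf_helpers.h boilerplate omitted).
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *		static const char fmt[] = "pid %d tgid %d\n";
 *		u64 args[2];
 *
 *		if (!task)
 *			return 0;
 *		args[0] = task->pid;
 *		args[1] = task->tgid;
 *		bpf_seq_printf(seq, fmt, sizeof(fmt), args, sizeof(args));
 *		return 0;
 *	}
 */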
-EOVERFLOW : 0; 511 } 512 513 static const struct bpf_func_proto bpf_seq_write_proto = { 514 .func = bpf_seq_write, 515 .gpl_only = true, 516 .ret_type = RET_INTEGER, 517 .arg1_type = ARG_PTR_TO_BTF_ID, 518 .arg1_btf_id = &btf_seq_file_ids[0], 519 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 520 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 521 }; 522 523 BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr, 524 u32, btf_ptr_size, u64, flags) 525 { 526 const struct btf *btf; 527 s32 btf_id; 528 int ret; 529 530 ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id); 531 if (ret) 532 return ret; 533 534 return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags); 535 } 536 537 static const struct bpf_func_proto bpf_seq_printf_btf_proto = { 538 .func = bpf_seq_printf_btf, 539 .gpl_only = true, 540 .ret_type = RET_INTEGER, 541 .arg1_type = ARG_PTR_TO_BTF_ID, 542 .arg1_btf_id = &btf_seq_file_ids[0], 543 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 544 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 545 .arg4_type = ARG_ANYTHING, 546 }; 547 548 static __always_inline int 549 get_map_perf_counter(struct bpf_map *map, u64 flags, 550 u64 *value, u64 *enabled, u64 *running) 551 { 552 struct bpf_array *array = container_of(map, struct bpf_array, map); 553 unsigned int cpu = smp_processor_id(); 554 u64 index = flags & BPF_F_INDEX_MASK; 555 struct bpf_event_entry *ee; 556 557 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) 558 return -EINVAL; 559 if (index == BPF_F_CURRENT_CPU) 560 index = cpu; 561 if (unlikely(index >= array->map.max_entries)) 562 return -E2BIG; 563 564 ee = READ_ONCE(array->ptrs[index]); 565 if (!ee) 566 return -ENOENT; 567 568 return perf_event_read_local(ee->event, value, enabled, running); 569 } 570 571 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags) 572 { 573 u64 value = 0; 574 int err; 575 576 err = get_map_perf_counter(map, flags, &value, NULL, NULL); 577 /* 578 * this api is ugly since we miss [-22..-2] range of valid 579 * counter values, but that's uapi 580 */ 581 if (err) 582 return err; 583 return value; 584 } 585 586 static const struct bpf_func_proto bpf_perf_event_read_proto = { 587 .func = bpf_perf_event_read, 588 .gpl_only = true, 589 .ret_type = RET_INTEGER, 590 .arg1_type = ARG_CONST_MAP_PTR, 591 .arg2_type = ARG_ANYTHING, 592 }; 593 594 BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags, 595 struct bpf_perf_event_value *, buf, u32, size) 596 { 597 int err = -EINVAL; 598 599 if (unlikely(size != sizeof(struct bpf_perf_event_value))) 600 goto clear; 601 err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled, 602 &buf->running); 603 if (unlikely(err)) 604 goto clear; 605 return 0; 606 clear: 607 memset(buf, 0, size); 608 return err; 609 } 610 611 static const struct bpf_func_proto bpf_perf_event_read_value_proto = { 612 .func = bpf_perf_event_read_value, 613 .gpl_only = true, 614 .ret_type = RET_INTEGER, 615 .arg1_type = ARG_CONST_MAP_PTR, 616 .arg2_type = ARG_ANYTHING, 617 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 618 .arg4_type = ARG_CONST_SIZE, 619 }; 620 621 static __always_inline u64 622 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, 623 u64 flags, struct perf_sample_data *sd) 624 { 625 struct bpf_array *array = container_of(map, struct bpf_array, map); 626 unsigned int cpu = smp_processor_id(); 627 u64 index = flags & BPF_F_INDEX_MASK; 628 struct bpf_event_entry *ee; 629 struct perf_event *event; 630 631 if (index == BPF_F_CURRENT_CPU) 632 index = cpu; 633 if (unlikely(index >= 
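/*
 * Illustrative only, not part of this file: a sketch, from the BPF program
 * side, of the flags/index contract that get_map_perf_counter() enforces
 * above. The map, section and program names are assumptions of the example.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(int));
 *		__uint(value_size, sizeof(u32));
 *		__uint(max_entries, 64);
 *	} counters SEC(".maps");
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int read_counter(struct pt_regs *ctx)
 *	{
 *		struct bpf_perf_event_value val = {};
 *
 *		// BPF_F_CURRENT_CPU selects the array slot for this CPU.
 *		if (!bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
 *					       &val, sizeof(val)))
 *			bpf_printk("counter %llu", val.counter);
 *		return 0;
 *	}
 */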
array->map.max_entries)) 634 return -E2BIG; 635 636 ee = READ_ONCE(array->ptrs[index]); 637 if (!ee) 638 return -ENOENT; 639 640 event = ee->event; 641 if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE || 642 event->attr.config != PERF_COUNT_SW_BPF_OUTPUT)) 643 return -EINVAL; 644 645 if (unlikely(event->oncpu != cpu)) 646 return -EOPNOTSUPP; 647 648 return perf_event_output(event, sd, regs); 649 } 650 651 /* 652 * Support executing tracepoints in normal, irq, and nmi context that each call 653 * bpf_perf_event_output 654 */ 655 struct bpf_trace_sample_data { 656 struct perf_sample_data sds[3]; 657 }; 658 659 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds); 660 static DEFINE_PER_CPU(int, bpf_trace_nest_level); 661 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, 662 u64, flags, void *, data, u64, size) 663 { 664 struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds); 665 int nest_level = this_cpu_inc_return(bpf_trace_nest_level); 666 struct perf_raw_record raw = { 667 .frag = { 668 .size = size, 669 .data = data, 670 }, 671 }; 672 struct perf_sample_data *sd; 673 int err; 674 675 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) { 676 err = -EBUSY; 677 goto out; 678 } 679 680 sd = &sds->sds[nest_level - 1]; 681 682 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) { 683 err = -EINVAL; 684 goto out; 685 } 686 687 perf_sample_data_init(sd, 0, 0); 688 sd->raw = &raw; 689 sd->sample_flags |= PERF_SAMPLE_RAW; 690 691 err = __bpf_perf_event_output(regs, map, flags, sd); 692 693 out: 694 this_cpu_dec(bpf_trace_nest_level); 695 return err; 696 } 697 698 static const struct bpf_func_proto bpf_perf_event_output_proto = { 699 .func = bpf_perf_event_output, 700 .gpl_only = true, 701 .ret_type = RET_INTEGER, 702 .arg1_type = ARG_PTR_TO_CTX, 703 .arg2_type = ARG_CONST_MAP_PTR, 704 .arg3_type = ARG_ANYTHING, 705 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 706 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 707 }; 708 709 static DEFINE_PER_CPU(int, bpf_event_output_nest_level); 710 struct bpf_nested_pt_regs { 711 struct pt_regs regs[3]; 712 }; 713 static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs); 714 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds); 715 716 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, 717 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) 718 { 719 int nest_level = this_cpu_inc_return(bpf_event_output_nest_level); 720 struct perf_raw_frag frag = { 721 .copy = ctx_copy, 722 .size = ctx_size, 723 .data = ctx, 724 }; 725 struct perf_raw_record raw = { 726 .frag = { 727 { 728 .next = ctx_size ? 
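/*
 * Illustrative only, not part of this file: typical BPF-program usage of
 * bpf_perf_event_output() against a BPF_MAP_TYPE_PERF_EVENT_ARRAY, i.e. the
 * helper implemented just above. Struct and section names are made up for
 * the sketch.
 *
 *	struct event {
 *		u32 pid;
 *		char comm[16];
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(int));
 *		__uint(value_size, sizeof(u32));
 *	} events SEC(".maps");
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int emit_event(struct pt_regs *ctx)
 *	{
 *		struct event e = {};
 *
 *		e.pid = bpf_get_current_pid_tgid() >> 32;
 *		bpf_get_current_comm(e.comm, sizeof(e.comm));
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      &e, sizeof(e));
 *		return 0;
 *	}
 */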
&frag : NULL, 729 }, 730 .size = meta_size, 731 .data = meta, 732 }, 733 }; 734 struct perf_sample_data *sd; 735 struct pt_regs *regs; 736 u64 ret; 737 738 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) { 739 ret = -EBUSY; 740 goto out; 741 } 742 sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]); 743 regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]); 744 745 perf_fetch_caller_regs(regs); 746 perf_sample_data_init(sd, 0, 0); 747 sd->raw = &raw; 748 sd->sample_flags |= PERF_SAMPLE_RAW; 749 750 ret = __bpf_perf_event_output(regs, map, flags, sd); 751 out: 752 this_cpu_dec(bpf_event_output_nest_level); 753 return ret; 754 } 755 756 BPF_CALL_0(bpf_get_current_task) 757 { 758 return (long) current; 759 } 760 761 const struct bpf_func_proto bpf_get_current_task_proto = { 762 .func = bpf_get_current_task, 763 .gpl_only = true, 764 .ret_type = RET_INTEGER, 765 }; 766 767 BPF_CALL_0(bpf_get_current_task_btf) 768 { 769 return (unsigned long) current; 770 } 771 772 const struct bpf_func_proto bpf_get_current_task_btf_proto = { 773 .func = bpf_get_current_task_btf, 774 .gpl_only = true, 775 .ret_type = RET_PTR_TO_BTF_ID_TRUSTED, 776 .ret_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], 777 }; 778 779 BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task) 780 { 781 return (unsigned long) task_pt_regs(task); 782 } 783 784 BTF_ID_LIST(bpf_task_pt_regs_ids) 785 BTF_ID(struct, pt_regs) 786 787 const struct bpf_func_proto bpf_task_pt_regs_proto = { 788 .func = bpf_task_pt_regs, 789 .gpl_only = true, 790 .arg1_type = ARG_PTR_TO_BTF_ID, 791 .arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], 792 .ret_type = RET_PTR_TO_BTF_ID, 793 .ret_btf_id = &bpf_task_pt_regs_ids[0], 794 }; 795 796 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx) 797 { 798 struct bpf_array *array = container_of(map, struct bpf_array, map); 799 struct cgroup *cgrp; 800 801 if (unlikely(idx >= array->map.max_entries)) 802 return -E2BIG; 803 804 cgrp = READ_ONCE(array->ptrs[idx]); 805 if (unlikely(!cgrp)) 806 return -EAGAIN; 807 808 return task_under_cgroup_hierarchy(current, cgrp); 809 } 810 811 static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = { 812 .func = bpf_current_task_under_cgroup, 813 .gpl_only = false, 814 .ret_type = RET_INTEGER, 815 .arg1_type = ARG_CONST_MAP_PTR, 816 .arg2_type = ARG_ANYTHING, 817 }; 818 819 struct send_signal_irq_work { 820 struct irq_work irq_work; 821 struct task_struct *task; 822 u32 sig; 823 enum pid_type type; 824 }; 825 826 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work); 827 828 static void do_bpf_send_signal(struct irq_work *entry) 829 { 830 struct send_signal_irq_work *work; 831 832 work = container_of(entry, struct send_signal_irq_work, irq_work); 833 group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type); 834 put_task_struct(work->task); 835 } 836 837 static int bpf_send_signal_common(u32 sig, enum pid_type type) 838 { 839 struct send_signal_irq_work *work = NULL; 840 841 /* Similar to bpf_probe_write_user, task needs to be 842 * in a sound condition and kernel memory access be 843 * permitted in order to send signal to the current 844 * task. 845 */ 846 if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING))) 847 return -EPERM; 848 if (unlikely(!nmi_uaccess_okay())) 849 return -EPERM; 850 /* Task should not be pid=1 to avoid kernel panic. */ 851 if (unlikely(is_global_init(current))) 852 return -EPERM; 853 854 if (irqs_disabled()) { 855 /* Do an early check on signal validity. 
Otherwise, 856 * the error is lost in deferred irq_work. 857 */ 858 if (unlikely(!valid_signal(sig))) 859 return -EINVAL; 860 861 work = this_cpu_ptr(&send_signal_work); 862 if (irq_work_is_busy(&work->irq_work)) 863 return -EBUSY; 864 865 /* Add the current task, which is the target of sending signal, 866 * to the irq_work. The current task may change when queued 867 * irq works get executed. 868 */ 869 work->task = get_task_struct(current); 870 work->sig = sig; 871 work->type = type; 872 irq_work_queue(&work->irq_work); 873 return 0; 874 } 875 876 return group_send_sig_info(sig, SEND_SIG_PRIV, current, type); 877 } 878 879 BPF_CALL_1(bpf_send_signal, u32, sig) 880 { 881 return bpf_send_signal_common(sig, PIDTYPE_TGID); 882 } 883 884 static const struct bpf_func_proto bpf_send_signal_proto = { 885 .func = bpf_send_signal, 886 .gpl_only = false, 887 .ret_type = RET_INTEGER, 888 .arg1_type = ARG_ANYTHING, 889 }; 890 891 BPF_CALL_1(bpf_send_signal_thread, u32, sig) 892 { 893 return bpf_send_signal_common(sig, PIDTYPE_PID); 894 } 895 896 static const struct bpf_func_proto bpf_send_signal_thread_proto = { 897 .func = bpf_send_signal_thread, 898 .gpl_only = false, 899 .ret_type = RET_INTEGER, 900 .arg1_type = ARG_ANYTHING, 901 }; 902 903 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz) 904 { 905 long len; 906 char *p; 907 908 if (!sz) 909 return 0; 910 911 p = d_path(path, buf, sz); 912 if (IS_ERR(p)) { 913 len = PTR_ERR(p); 914 } else { 915 len = buf + sz - p; 916 memmove(buf, p, len); 917 } 918 919 return len; 920 } 921 922 BTF_SET_START(btf_allowlist_d_path) 923 #ifdef CONFIG_SECURITY 924 BTF_ID(func, security_file_permission) 925 BTF_ID(func, security_inode_getattr) 926 BTF_ID(func, security_file_open) 927 #endif 928 #ifdef CONFIG_SECURITY_PATH 929 BTF_ID(func, security_path_truncate) 930 #endif 931 BTF_ID(func, vfs_truncate) 932 BTF_ID(func, vfs_fallocate) 933 BTF_ID(func, dentry_open) 934 BTF_ID(func, vfs_getattr) 935 BTF_ID(func, filp_close) 936 BTF_SET_END(btf_allowlist_d_path) 937 938 static bool bpf_d_path_allowed(const struct bpf_prog *prog) 939 { 940 if (prog->type == BPF_PROG_TYPE_TRACING && 941 prog->expected_attach_type == BPF_TRACE_ITER) 942 return true; 943 944 if (prog->type == BPF_PROG_TYPE_LSM) 945 return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id); 946 947 return btf_id_set_contains(&btf_allowlist_d_path, 948 prog->aux->attach_btf_id); 949 } 950 951 BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path) 952 953 static const struct bpf_func_proto bpf_d_path_proto = { 954 .func = bpf_d_path, 955 .gpl_only = false, 956 .ret_type = RET_INTEGER, 957 .arg1_type = ARG_PTR_TO_BTF_ID, 958 .arg1_btf_id = &bpf_d_path_btf_ids[0], 959 .arg2_type = ARG_PTR_TO_MEM, 960 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 961 .allowed = bpf_d_path_allowed, 962 }; 963 964 #define BTF_F_ALL (BTF_F_COMPACT | BTF_F_NONAME | \ 965 BTF_F_PTR_RAW | BTF_F_ZERO) 966 967 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size, 968 u64 flags, const struct btf **btf, 969 s32 *btf_id) 970 { 971 const struct btf_type *t; 972 973 if (unlikely(flags & ~(BTF_F_ALL))) 974 return -EINVAL; 975 976 if (btf_ptr_size != sizeof(struct btf_ptr)) 977 return -EINVAL; 978 979 *btf = bpf_get_btf_vmlinux(); 980 981 if (IS_ERR_OR_NULL(*btf)) 982 return IS_ERR(*btf) ? 
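/*
 * Illustrative only, not part of this file: a sketch of how a BPF program
 * might fill in struct btf_ptr for bpf_snprintf_btf(), whose argument
 * validation is handled by bpf_btf_printf_prepare() here.
 * bpf_core_type_id_kernel() is a libbpf CO-RE helper and is an assumption
 * of the example, not something this file provides.
 *
 *	char out[256];
 *
 *	SEC("iter/task")
 *	int dump_task_struct(struct bpf_iter__task *ctx)
 *	{
 *		struct task_struct *task = ctx->task;
 *		struct btf_ptr p = {};
 *
 *		if (!task)
 *			return 0;
 *		p.ptr = task;
 *		p.type_id = bpf_core_type_id_kernel(struct task_struct);
 *		bpf_snprintf_btf(out, sizeof(out), &p, sizeof(p), 0);
 *		return 0;
 *	}
 */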
PTR_ERR(*btf) : -EINVAL; 983 984 if (ptr->type_id > 0) 985 *btf_id = ptr->type_id; 986 else 987 return -EINVAL; 988 989 if (*btf_id > 0) 990 t = btf_type_by_id(*btf, *btf_id); 991 if (*btf_id <= 0 || !t) 992 return -ENOENT; 993 994 return 0; 995 } 996 997 BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr, 998 u32, btf_ptr_size, u64, flags) 999 { 1000 const struct btf *btf; 1001 s32 btf_id; 1002 int ret; 1003 1004 ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id); 1005 if (ret) 1006 return ret; 1007 1008 return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size, 1009 flags); 1010 } 1011 1012 const struct bpf_func_proto bpf_snprintf_btf_proto = { 1013 .func = bpf_snprintf_btf, 1014 .gpl_only = false, 1015 .ret_type = RET_INTEGER, 1016 .arg1_type = ARG_PTR_TO_MEM, 1017 .arg2_type = ARG_CONST_SIZE, 1018 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, 1019 .arg4_type = ARG_CONST_SIZE, 1020 .arg5_type = ARG_ANYTHING, 1021 }; 1022 1023 BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx) 1024 { 1025 /* This helper call is inlined by verifier. */ 1026 return ((u64 *)ctx)[-2]; 1027 } 1028 1029 static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = { 1030 .func = bpf_get_func_ip_tracing, 1031 .gpl_only = true, 1032 .ret_type = RET_INTEGER, 1033 .arg1_type = ARG_PTR_TO_CTX, 1034 }; 1035 1036 #ifdef CONFIG_X86_KERNEL_IBT 1037 static unsigned long get_entry_ip(unsigned long fentry_ip) 1038 { 1039 u32 instr; 1040 1041 /* Being extra safe in here in case entry ip is on the page-edge. */ 1042 if (get_kernel_nofault(instr, (u32 *) fentry_ip - 1)) 1043 return fentry_ip; 1044 if (is_endbr(instr)) 1045 fentry_ip -= ENDBR_INSN_SIZE; 1046 return fentry_ip; 1047 } 1048 #else 1049 #define get_entry_ip(fentry_ip) fentry_ip 1050 #endif 1051 1052 BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs) 1053 { 1054 struct kprobe *kp = kprobe_running(); 1055 1056 if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY)) 1057 return 0; 1058 1059 return get_entry_ip((uintptr_t)kp->addr); 1060 } 1061 1062 static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = { 1063 .func = bpf_get_func_ip_kprobe, 1064 .gpl_only = true, 1065 .ret_type = RET_INTEGER, 1066 .arg1_type = ARG_PTR_TO_CTX, 1067 }; 1068 1069 BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs) 1070 { 1071 return bpf_kprobe_multi_entry_ip(current->bpf_ctx); 1072 } 1073 1074 static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = { 1075 .func = bpf_get_func_ip_kprobe_multi, 1076 .gpl_only = false, 1077 .ret_type = RET_INTEGER, 1078 .arg1_type = ARG_PTR_TO_CTX, 1079 }; 1080 1081 BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs) 1082 { 1083 return bpf_kprobe_multi_cookie(current->bpf_ctx); 1084 } 1085 1086 static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = { 1087 .func = bpf_get_attach_cookie_kprobe_multi, 1088 .gpl_only = false, 1089 .ret_type = RET_INTEGER, 1090 .arg1_type = ARG_PTR_TO_CTX, 1091 }; 1092 1093 BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx) 1094 { 1095 struct bpf_trace_run_ctx *run_ctx; 1096 1097 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx); 1098 return run_ctx->bpf_cookie; 1099 } 1100 1101 static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = { 1102 .func = bpf_get_attach_cookie_trace, 1103 .gpl_only = false, 1104 .ret_type = RET_INTEGER, 1105 .arg1_type = ARG_PTR_TO_CTX, 1106 }; 1107 1108 BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, 
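/*
 * Illustrative only, not part of this file: how the attach cookie returned
 * by the helpers above typically gets set and consumed. The userspace half
 * uses libbpf; the probed function name and option values are assumptions
 * of the sketch.
 *
 *	// userspace (libbpf):
 *	LIBBPF_OPTS(bpf_kprobe_opts, opts, .bpf_cookie = 42);
 *	link = bpf_program__attach_kprobe_opts(prog, "do_sys_openat2", &opts);
 *
 *	// BPF program:
 *	SEC("kprobe/do_sys_openat2")
 *	int probe(struct pt_regs *ctx)
 *	{
 *		u64 cookie = bpf_get_attach_cookie(ctx);
 *		u64 ip = bpf_get_func_ip(ctx);
 *
 *		bpf_printk("cookie %llu ip %llx", cookie, ip);
 *		return 0;
 *	}
 */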
ctx) 1109 { 1110 return ctx->event->bpf_cookie; 1111 } 1112 1113 static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = { 1114 .func = bpf_get_attach_cookie_pe, 1115 .gpl_only = false, 1116 .ret_type = RET_INTEGER, 1117 .arg1_type = ARG_PTR_TO_CTX, 1118 }; 1119 1120 BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx) 1121 { 1122 struct bpf_trace_run_ctx *run_ctx; 1123 1124 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx); 1125 return run_ctx->bpf_cookie; 1126 } 1127 1128 static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = { 1129 .func = bpf_get_attach_cookie_tracing, 1130 .gpl_only = false, 1131 .ret_type = RET_INTEGER, 1132 .arg1_type = ARG_PTR_TO_CTX, 1133 }; 1134 1135 BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags) 1136 { 1137 #ifndef CONFIG_X86 1138 return -ENOENT; 1139 #else 1140 static const u32 br_entry_size = sizeof(struct perf_branch_entry); 1141 u32 entry_cnt = size / br_entry_size; 1142 1143 entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt); 1144 1145 if (unlikely(flags)) 1146 return -EINVAL; 1147 1148 if (!entry_cnt) 1149 return -ENOENT; 1150 1151 return entry_cnt * br_entry_size; 1152 #endif 1153 } 1154 1155 static const struct bpf_func_proto bpf_get_branch_snapshot_proto = { 1156 .func = bpf_get_branch_snapshot, 1157 .gpl_only = true, 1158 .ret_type = RET_INTEGER, 1159 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 1160 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 1161 }; 1162 1163 BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value) 1164 { 1165 /* This helper call is inlined by verifier. */ 1166 u64 nr_args = ((u64 *)ctx)[-1]; 1167 1168 if ((u64) n >= nr_args) 1169 return -EINVAL; 1170 *value = ((u64 *)ctx)[n]; 1171 return 0; 1172 } 1173 1174 static const struct bpf_func_proto bpf_get_func_arg_proto = { 1175 .func = get_func_arg, 1176 .ret_type = RET_INTEGER, 1177 .arg1_type = ARG_PTR_TO_CTX, 1178 .arg2_type = ARG_ANYTHING, 1179 .arg3_type = ARG_PTR_TO_LONG, 1180 }; 1181 1182 BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value) 1183 { 1184 /* This helper call is inlined by verifier. */ 1185 u64 nr_args = ((u64 *)ctx)[-1]; 1186 1187 *value = ((u64 *)ctx)[nr_args]; 1188 return 0; 1189 } 1190 1191 static const struct bpf_func_proto bpf_get_func_ret_proto = { 1192 .func = get_func_ret, 1193 .ret_type = RET_INTEGER, 1194 .arg1_type = ARG_PTR_TO_CTX, 1195 .arg2_type = ARG_PTR_TO_LONG, 1196 }; 1197 1198 BPF_CALL_1(get_func_arg_cnt, void *, ctx) 1199 { 1200 /* This helper call is inlined by verifier. */ 1201 return ((u64 *)ctx)[-1]; 1202 } 1203 1204 static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = { 1205 .func = get_func_arg_cnt, 1206 .ret_type = RET_INTEGER, 1207 .arg1_type = ARG_PTR_TO_CTX, 1208 }; 1209 1210 #ifdef CONFIG_KEYS 1211 __diag_push(); 1212 __diag_ignore_all("-Wmissing-prototypes", 1213 "kfuncs which will be used in BPF programs"); 1214 1215 /** 1216 * bpf_lookup_user_key - lookup a key by its serial 1217 * @serial: key handle serial number 1218 * @flags: lookup-specific flags 1219 * 1220 * Search a key with a given *serial* and the provided *flags*. 1221 * If found, increment the reference count of the key by one, and 1222 * return it in the bpf_key structure. 1223 * 1224 * The bpf_key structure must be passed to bpf_key_put() when done 1225 * with it, so that the key reference count is decremented and the 1226 * bpf_key structure is freed. 1227 * 1228 * Permission checks are deferred to the time the key is used by 1229 * one of the available key-specific kfuncs. 
 *
 * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
 * special keyring (e.g. session keyring), if it doesn't yet exist.
 * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
 * for the key construction, and to retrieve uninstantiated keys (keys
 * without data attached to them).
 *
 * Return: a bpf_key pointer with a valid key pointer if the key is found, a
 *         NULL pointer otherwise.
 */
struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
{
	key_ref_t key_ref;
	struct bpf_key *bkey;

	if (flags & ~KEY_LOOKUP_ALL)
		return NULL;

	/*
	 * Permission check is deferred until the key is used, as the
	 * intent of the caller is unknown here.
	 */
	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
	if (IS_ERR(key_ref))
		return NULL;

	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
	if (!bkey) {
		key_put(key_ref_to_ptr(key_ref));
		return NULL;
	}

	bkey->key = key_ref_to_ptr(key_ref);
	bkey->has_ref = true;

	return bkey;
}

/**
 * bpf_lookup_system_key - lookup a key by a system-defined ID
 * @id: key ID
 *
 * Obtain a bpf_key structure with a key pointer set to the passed key ID.
 * The key pointer is marked as invalid, to prevent bpf_key_put() from
 * attempting to decrement the key reference count on that pointer. The key
 * pointer set in such a way is currently understood only by
 * verify_pkcs7_signature().
 *
 * Set *id* to one of the values defined in include/linux/verification.h:
 * 0 for the primary keyring (immutable keyring of system keys);
 * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
 * (where keys can be added only if they are vouched for by existing keys
 * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
 * keyring (primarily used by the integrity subsystem to verify a kexec'ed
 * kernel image and, possibly, the initramfs signature).
 *
 * Return: a bpf_key pointer with an invalid key pointer set from the
 *         pre-determined ID on success, a NULL pointer otherwise
 */
struct bpf_key *bpf_lookup_system_key(u64 id)
{
	struct bpf_key *bkey;

	if (system_keyring_id_check(id) < 0)
		return NULL;

	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
	if (!bkey)
		return NULL;

	bkey->key = (struct key *)(unsigned long)id;
	bkey->has_ref = false;

	return bkey;
}

/**
 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
 * @bkey: bpf_key structure
 *
 * Decrement the reference count of the key inside *bkey*, if the pointer
 * is valid, and free *bkey*.
 */
void bpf_key_put(struct bpf_key *bkey)
{
	if (bkey->has_ref)
		key_put(bkey->key);

	kfree(bkey);
}

#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
/**
 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
 * @data_ptr: data to verify
 * @sig_ptr: signature of the data
 * @trusted_keyring: keyring with keys trusted for signature verification
 *
 * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr*
 * with keys in a keyring referenced by *trusted_keyring*.
 *
 * Return: 0 on success, a negative value on error.
1332 */ 1333 int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr, 1334 struct bpf_dynptr_kern *sig_ptr, 1335 struct bpf_key *trusted_keyring) 1336 { 1337 int ret; 1338 1339 if (trusted_keyring->has_ref) { 1340 /* 1341 * Do the permission check deferred in bpf_lookup_user_key(). 1342 * See bpf_lookup_user_key() for more details. 1343 * 1344 * A call to key_task_permission() here would be redundant, as 1345 * it is already done by keyring_search() called by 1346 * find_asymmetric_key(). 1347 */ 1348 ret = key_validate(trusted_keyring->key); 1349 if (ret < 0) 1350 return ret; 1351 } 1352 1353 return verify_pkcs7_signature(data_ptr->data, 1354 bpf_dynptr_get_size(data_ptr), 1355 sig_ptr->data, 1356 bpf_dynptr_get_size(sig_ptr), 1357 trusted_keyring->key, 1358 VERIFYING_UNSPECIFIED_SIGNATURE, NULL, 1359 NULL); 1360 } 1361 #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */ 1362 1363 __diag_pop(); 1364 1365 BTF_SET8_START(key_sig_kfunc_set) 1366 BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE) 1367 BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL) 1368 BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE) 1369 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION 1370 BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE) 1371 #endif 1372 BTF_SET8_END(key_sig_kfunc_set) 1373 1374 static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = { 1375 .owner = THIS_MODULE, 1376 .set = &key_sig_kfunc_set, 1377 }; 1378 1379 static int __init bpf_key_sig_kfuncs_init(void) 1380 { 1381 return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, 1382 &bpf_key_sig_kfunc_set); 1383 } 1384 1385 late_initcall(bpf_key_sig_kfuncs_init); 1386 #endif /* CONFIG_KEYS */ 1387 1388 static const struct bpf_func_proto * 1389 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1390 { 1391 switch (func_id) { 1392 case BPF_FUNC_map_lookup_elem: 1393 return &bpf_map_lookup_elem_proto; 1394 case BPF_FUNC_map_update_elem: 1395 return &bpf_map_update_elem_proto; 1396 case BPF_FUNC_map_delete_elem: 1397 return &bpf_map_delete_elem_proto; 1398 case BPF_FUNC_map_push_elem: 1399 return &bpf_map_push_elem_proto; 1400 case BPF_FUNC_map_pop_elem: 1401 return &bpf_map_pop_elem_proto; 1402 case BPF_FUNC_map_peek_elem: 1403 return &bpf_map_peek_elem_proto; 1404 case BPF_FUNC_map_lookup_percpu_elem: 1405 return &bpf_map_lookup_percpu_elem_proto; 1406 case BPF_FUNC_ktime_get_ns: 1407 return &bpf_ktime_get_ns_proto; 1408 case BPF_FUNC_ktime_get_boot_ns: 1409 return &bpf_ktime_get_boot_ns_proto; 1410 case BPF_FUNC_tail_call: 1411 return &bpf_tail_call_proto; 1412 case BPF_FUNC_get_current_pid_tgid: 1413 return &bpf_get_current_pid_tgid_proto; 1414 case BPF_FUNC_get_current_task: 1415 return &bpf_get_current_task_proto; 1416 case BPF_FUNC_get_current_task_btf: 1417 return &bpf_get_current_task_btf_proto; 1418 case BPF_FUNC_task_pt_regs: 1419 return &bpf_task_pt_regs_proto; 1420 case BPF_FUNC_get_current_uid_gid: 1421 return &bpf_get_current_uid_gid_proto; 1422 case BPF_FUNC_get_current_comm: 1423 return &bpf_get_current_comm_proto; 1424 case BPF_FUNC_trace_printk: 1425 return bpf_get_trace_printk_proto(); 1426 case BPF_FUNC_get_smp_processor_id: 1427 return &bpf_get_smp_processor_id_proto; 1428 case BPF_FUNC_get_numa_node_id: 1429 return &bpf_get_numa_node_id_proto; 1430 case BPF_FUNC_perf_event_read: 1431 return &bpf_perf_event_read_proto; 1432 case BPF_FUNC_current_task_under_cgroup: 1433 return &bpf_current_task_under_cgroup_proto; 1434 case BPF_FUNC_get_prandom_u32: 1435 return 
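/*
 * Illustrative only, not part of this file: a rough sketch of a sleepable
 * BPF LSM program using the key/signature kfuncs registered above. The
 * kfunc extern declarations, the setup of the two dynptrs (data and
 * signature) and the hook choice are elided or assumed for the sketch.
 *
 *	SEC("lsm.s/bpf")
 *	int BPF_PROG(check_attr, int cmd, union bpf_attr *attr, unsigned int size)
 *	{
 *		struct bpf_key *keyring;
 *		int ret;
 *
 *		keyring = bpf_lookup_system_key(VERIFY_USE_SECONDARY_KEYRING);
 *		if (!keyring)
 *			return 0;
 *		// data_ptr and sig_ptr are bpf_dynptrs prepared earlier.
 *		ret = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, keyring);
 *		bpf_key_put(keyring);
 *		return ret;
 *	}
 */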
&bpf_get_prandom_u32_proto; 1436 case BPF_FUNC_probe_write_user: 1437 return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ? 1438 NULL : bpf_get_probe_write_proto(); 1439 case BPF_FUNC_probe_read_user: 1440 return &bpf_probe_read_user_proto; 1441 case BPF_FUNC_probe_read_kernel: 1442 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 1443 NULL : &bpf_probe_read_kernel_proto; 1444 case BPF_FUNC_probe_read_user_str: 1445 return &bpf_probe_read_user_str_proto; 1446 case BPF_FUNC_probe_read_kernel_str: 1447 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 1448 NULL : &bpf_probe_read_kernel_str_proto; 1449 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 1450 case BPF_FUNC_probe_read: 1451 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 1452 NULL : &bpf_probe_read_compat_proto; 1453 case BPF_FUNC_probe_read_str: 1454 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 1455 NULL : &bpf_probe_read_compat_str_proto; 1456 #endif 1457 #ifdef CONFIG_CGROUPS 1458 case BPF_FUNC_get_current_cgroup_id: 1459 return &bpf_get_current_cgroup_id_proto; 1460 case BPF_FUNC_get_current_ancestor_cgroup_id: 1461 return &bpf_get_current_ancestor_cgroup_id_proto; 1462 case BPF_FUNC_cgrp_storage_get: 1463 return &bpf_cgrp_storage_get_proto; 1464 case BPF_FUNC_cgrp_storage_delete: 1465 return &bpf_cgrp_storage_delete_proto; 1466 #endif 1467 case BPF_FUNC_send_signal: 1468 return &bpf_send_signal_proto; 1469 case BPF_FUNC_send_signal_thread: 1470 return &bpf_send_signal_thread_proto; 1471 case BPF_FUNC_perf_event_read_value: 1472 return &bpf_perf_event_read_value_proto; 1473 case BPF_FUNC_get_ns_current_pid_tgid: 1474 return &bpf_get_ns_current_pid_tgid_proto; 1475 case BPF_FUNC_ringbuf_output: 1476 return &bpf_ringbuf_output_proto; 1477 case BPF_FUNC_ringbuf_reserve: 1478 return &bpf_ringbuf_reserve_proto; 1479 case BPF_FUNC_ringbuf_submit: 1480 return &bpf_ringbuf_submit_proto; 1481 case BPF_FUNC_ringbuf_discard: 1482 return &bpf_ringbuf_discard_proto; 1483 case BPF_FUNC_ringbuf_query: 1484 return &bpf_ringbuf_query_proto; 1485 case BPF_FUNC_jiffies64: 1486 return &bpf_jiffies64_proto; 1487 case BPF_FUNC_get_task_stack: 1488 return &bpf_get_task_stack_proto; 1489 case BPF_FUNC_copy_from_user: 1490 return &bpf_copy_from_user_proto; 1491 case BPF_FUNC_copy_from_user_task: 1492 return &bpf_copy_from_user_task_proto; 1493 case BPF_FUNC_snprintf_btf: 1494 return &bpf_snprintf_btf_proto; 1495 case BPF_FUNC_per_cpu_ptr: 1496 return &bpf_per_cpu_ptr_proto; 1497 case BPF_FUNC_this_cpu_ptr: 1498 return &bpf_this_cpu_ptr_proto; 1499 case BPF_FUNC_task_storage_get: 1500 if (bpf_prog_check_recur(prog)) 1501 return &bpf_task_storage_get_recur_proto; 1502 return &bpf_task_storage_get_proto; 1503 case BPF_FUNC_task_storage_delete: 1504 if (bpf_prog_check_recur(prog)) 1505 return &bpf_task_storage_delete_recur_proto; 1506 return &bpf_task_storage_delete_proto; 1507 case BPF_FUNC_for_each_map_elem: 1508 return &bpf_for_each_map_elem_proto; 1509 case BPF_FUNC_snprintf: 1510 return &bpf_snprintf_proto; 1511 case BPF_FUNC_get_func_ip: 1512 return &bpf_get_func_ip_proto_tracing; 1513 case BPF_FUNC_get_branch_snapshot: 1514 return &bpf_get_branch_snapshot_proto; 1515 case BPF_FUNC_find_vma: 1516 return &bpf_find_vma_proto; 1517 case BPF_FUNC_trace_vprintk: 1518 return bpf_get_trace_vprintk_proto(); 1519 default: 1520 return bpf_base_func_proto(func_id); 1521 } 1522 } 1523 1524 static const struct bpf_func_proto * 1525 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog 
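/*
 * Illustrative only, not part of this file: the BPF-program view of the
 * probe_read protos returned above. The helper name matches the UAPI; the
 * probed function and buffer size are just an example.
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int BPF_KPROBE(trace_open, int dfd, const char *filename)
 *	{
 *		char buf[64];
 *		long n;
 *
 *		// Reads a NUL-terminated string from user memory; on error
 *		// the destination is zeroed, as implemented earlier in this
 *		// file.
 *		n = bpf_probe_read_user_str(buf, sizeof(buf), filename);
 *		if (n > 0)
 *			bpf_printk("open: %s", buf);
 *		return 0;
 *	}
 */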
*prog) 1526 { 1527 switch (func_id) { 1528 case BPF_FUNC_perf_event_output: 1529 return &bpf_perf_event_output_proto; 1530 case BPF_FUNC_get_stackid: 1531 return &bpf_get_stackid_proto; 1532 case BPF_FUNC_get_stack: 1533 return &bpf_get_stack_proto; 1534 #ifdef CONFIG_BPF_KPROBE_OVERRIDE 1535 case BPF_FUNC_override_return: 1536 return &bpf_override_return_proto; 1537 #endif 1538 case BPF_FUNC_get_func_ip: 1539 return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ? 1540 &bpf_get_func_ip_proto_kprobe_multi : 1541 &bpf_get_func_ip_proto_kprobe; 1542 case BPF_FUNC_get_attach_cookie: 1543 return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ? 1544 &bpf_get_attach_cookie_proto_kmulti : 1545 &bpf_get_attach_cookie_proto_trace; 1546 default: 1547 return bpf_tracing_func_proto(func_id, prog); 1548 } 1549 } 1550 1551 /* bpf+kprobe programs can access fields of 'struct pt_regs' */ 1552 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type, 1553 const struct bpf_prog *prog, 1554 struct bpf_insn_access_aux *info) 1555 { 1556 if (off < 0 || off >= sizeof(struct pt_regs)) 1557 return false; 1558 if (type != BPF_READ) 1559 return false; 1560 if (off % size != 0) 1561 return false; 1562 /* 1563 * Assertion for 32 bit to make sure last 8 byte access 1564 * (BPF_DW) to the last 4 byte member is disallowed. 1565 */ 1566 if (off + size > sizeof(struct pt_regs)) 1567 return false; 1568 1569 return true; 1570 } 1571 1572 const struct bpf_verifier_ops kprobe_verifier_ops = { 1573 .get_func_proto = kprobe_prog_func_proto, 1574 .is_valid_access = kprobe_prog_is_valid_access, 1575 }; 1576 1577 const struct bpf_prog_ops kprobe_prog_ops = { 1578 }; 1579 1580 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map, 1581 u64, flags, void *, data, u64, size) 1582 { 1583 struct pt_regs *regs = *(struct pt_regs **)tp_buff; 1584 1585 /* 1586 * r1 points to perf tracepoint buffer where first 8 bytes are hidden 1587 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it 1588 * from there and call the same bpf_perf_event_output() helper inline. 1589 */ 1590 return ____bpf_perf_event_output(regs, map, flags, data, size); 1591 } 1592 1593 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = { 1594 .func = bpf_perf_event_output_tp, 1595 .gpl_only = true, 1596 .ret_type = RET_INTEGER, 1597 .arg1_type = ARG_PTR_TO_CTX, 1598 .arg2_type = ARG_CONST_MAP_PTR, 1599 .arg3_type = ARG_ANYTHING, 1600 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 1601 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 1602 }; 1603 1604 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map, 1605 u64, flags) 1606 { 1607 struct pt_regs *regs = *(struct pt_regs **)tp_buff; 1608 1609 /* 1610 * Same comment as in bpf_perf_event_output_tp(), only that this time 1611 * the other helper's function body cannot be inlined due to being 1612 * external, thus we need to call raw helper function. 
1613 */ 1614 return bpf_get_stackid((unsigned long) regs, (unsigned long) map, 1615 flags, 0, 0); 1616 } 1617 1618 static const struct bpf_func_proto bpf_get_stackid_proto_tp = { 1619 .func = bpf_get_stackid_tp, 1620 .gpl_only = true, 1621 .ret_type = RET_INTEGER, 1622 .arg1_type = ARG_PTR_TO_CTX, 1623 .arg2_type = ARG_CONST_MAP_PTR, 1624 .arg3_type = ARG_ANYTHING, 1625 }; 1626 1627 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size, 1628 u64, flags) 1629 { 1630 struct pt_regs *regs = *(struct pt_regs **)tp_buff; 1631 1632 return bpf_get_stack((unsigned long) regs, (unsigned long) buf, 1633 (unsigned long) size, flags, 0); 1634 } 1635 1636 static const struct bpf_func_proto bpf_get_stack_proto_tp = { 1637 .func = bpf_get_stack_tp, 1638 .gpl_only = true, 1639 .ret_type = RET_INTEGER, 1640 .arg1_type = ARG_PTR_TO_CTX, 1641 .arg2_type = ARG_PTR_TO_UNINIT_MEM, 1642 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1643 .arg4_type = ARG_ANYTHING, 1644 }; 1645 1646 static const struct bpf_func_proto * 1647 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1648 { 1649 switch (func_id) { 1650 case BPF_FUNC_perf_event_output: 1651 return &bpf_perf_event_output_proto_tp; 1652 case BPF_FUNC_get_stackid: 1653 return &bpf_get_stackid_proto_tp; 1654 case BPF_FUNC_get_stack: 1655 return &bpf_get_stack_proto_tp; 1656 case BPF_FUNC_get_attach_cookie: 1657 return &bpf_get_attach_cookie_proto_trace; 1658 default: 1659 return bpf_tracing_func_proto(func_id, prog); 1660 } 1661 } 1662 1663 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type, 1664 const struct bpf_prog *prog, 1665 struct bpf_insn_access_aux *info) 1666 { 1667 if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE) 1668 return false; 1669 if (type != BPF_READ) 1670 return false; 1671 if (off % size != 0) 1672 return false; 1673 1674 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64)); 1675 return true; 1676 } 1677 1678 const struct bpf_verifier_ops tracepoint_verifier_ops = { 1679 .get_func_proto = tp_prog_func_proto, 1680 .is_valid_access = tp_prog_is_valid_access, 1681 }; 1682 1683 const struct bpf_prog_ops tracepoint_prog_ops = { 1684 }; 1685 1686 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx, 1687 struct bpf_perf_event_value *, buf, u32, size) 1688 { 1689 int err = -EINVAL; 1690 1691 if (unlikely(size != sizeof(struct bpf_perf_event_value))) 1692 goto clear; 1693 err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled, 1694 &buf->running); 1695 if (unlikely(err)) 1696 goto clear; 1697 return 0; 1698 clear: 1699 memset(buf, 0, size); 1700 return err; 1701 } 1702 1703 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = { 1704 .func = bpf_perf_prog_read_value, 1705 .gpl_only = true, 1706 .ret_type = RET_INTEGER, 1707 .arg1_type = ARG_PTR_TO_CTX, 1708 .arg2_type = ARG_PTR_TO_UNINIT_MEM, 1709 .arg3_type = ARG_CONST_SIZE, 1710 }; 1711 1712 BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx, 1713 void *, buf, u32, size, u64, flags) 1714 { 1715 static const u32 br_entry_size = sizeof(struct perf_branch_entry); 1716 struct perf_branch_stack *br_stack = ctx->data->br_stack; 1717 u32 to_copy; 1718 1719 if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE)) 1720 return -EINVAL; 1721 1722 if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK))) 1723 return -ENOENT; 1724 1725 if (unlikely(!br_stack)) 1726 return -ENOENT; 1727 1728 if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE) 1729 return br_stack->nr * 
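/*
 * Illustrative only, not part of this file: the two-step pattern a
 * perf_event BPF program typically uses with bpf_read_branch_records()
 * above -- query the size with BPF_F_GET_BRANCH_RECORDS_SIZE, then copy
 * the entries. Buffer size and program name are assumptions of the sketch.
 *
 *	struct perf_branch_entry entries[64];
 *
 *	SEC("perf_event")
 *	int lbr_prog(struct bpf_perf_event_data *ctx)
 *	{
 *		long sz, copied;
 *
 *		sz = bpf_read_branch_records(ctx, NULL, 0,
 *					     BPF_F_GET_BRANCH_RECORDS_SIZE);
 *		if (sz <= 0)
 *			return 0;
 *		copied = bpf_read_branch_records(ctx, entries,
 *						 sizeof(entries), 0);
 *		return 0;
 *	}
 */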
br_entry_size; 1730 1731 if (!buf || (size % br_entry_size != 0)) 1732 return -EINVAL; 1733 1734 to_copy = min_t(u32, br_stack->nr * br_entry_size, size); 1735 memcpy(buf, br_stack->entries, to_copy); 1736 1737 return to_copy; 1738 } 1739 1740 static const struct bpf_func_proto bpf_read_branch_records_proto = { 1741 .func = bpf_read_branch_records, 1742 .gpl_only = true, 1743 .ret_type = RET_INTEGER, 1744 .arg1_type = ARG_PTR_TO_CTX, 1745 .arg2_type = ARG_PTR_TO_MEM_OR_NULL, 1746 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1747 .arg4_type = ARG_ANYTHING, 1748 }; 1749 1750 static const struct bpf_func_proto * 1751 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1752 { 1753 switch (func_id) { 1754 case BPF_FUNC_perf_event_output: 1755 return &bpf_perf_event_output_proto_tp; 1756 case BPF_FUNC_get_stackid: 1757 return &bpf_get_stackid_proto_pe; 1758 case BPF_FUNC_get_stack: 1759 return &bpf_get_stack_proto_pe; 1760 case BPF_FUNC_perf_prog_read_value: 1761 return &bpf_perf_prog_read_value_proto; 1762 case BPF_FUNC_read_branch_records: 1763 return &bpf_read_branch_records_proto; 1764 case BPF_FUNC_get_attach_cookie: 1765 return &bpf_get_attach_cookie_proto_pe; 1766 default: 1767 return bpf_tracing_func_proto(func_id, prog); 1768 } 1769 } 1770 1771 /* 1772 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp 1773 * to avoid potential recursive reuse issue when/if tracepoints are added 1774 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack. 1775 * 1776 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage 1777 * in normal, irq, and nmi context. 1778 */ 1779 struct bpf_raw_tp_regs { 1780 struct pt_regs regs[3]; 1781 }; 1782 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs); 1783 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level); 1784 static struct pt_regs *get_bpf_raw_tp_regs(void) 1785 { 1786 struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs); 1787 int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level); 1788 1789 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) { 1790 this_cpu_dec(bpf_raw_tp_nest_level); 1791 return ERR_PTR(-EBUSY); 1792 } 1793 1794 return &tp_regs->regs[nest_level - 1]; 1795 } 1796 1797 static void put_bpf_raw_tp_regs(void) 1798 { 1799 this_cpu_dec(bpf_raw_tp_nest_level); 1800 } 1801 1802 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args, 1803 struct bpf_map *, map, u64, flags, void *, data, u64, size) 1804 { 1805 struct pt_regs *regs = get_bpf_raw_tp_regs(); 1806 int ret; 1807 1808 if (IS_ERR(regs)) 1809 return PTR_ERR(regs); 1810 1811 perf_fetch_caller_regs(regs); 1812 ret = ____bpf_perf_event_output(regs, map, flags, data, size); 1813 1814 put_bpf_raw_tp_regs(); 1815 return ret; 1816 } 1817 1818 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = { 1819 .func = bpf_perf_event_output_raw_tp, 1820 .gpl_only = true, 1821 .ret_type = RET_INTEGER, 1822 .arg1_type = ARG_PTR_TO_CTX, 1823 .arg2_type = ARG_CONST_MAP_PTR, 1824 .arg3_type = ARG_ANYTHING, 1825 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 1826 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 1827 }; 1828 1829 extern const struct bpf_func_proto bpf_skb_output_proto; 1830 extern const struct bpf_func_proto bpf_xdp_output_proto; 1831 extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto; 1832 1833 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args, 1834 struct bpf_map *, map, u64, flags) 1835 { 1836 struct pt_regs *regs = 
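/*
 * Illustrative only, not part of this file: a raw tracepoint program
 * exercising the stack helpers wired up below. The tracepoint name and
 * the stack map sizing are assumptions of the sketch.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_STACK_TRACE);
 *		__uint(max_entries, 1024);
 *		__uint(key_size, sizeof(u32));
 *		__uint(value_size, 127 * sizeof(u64));
 *	} stacks SEC(".maps");
 *
 *	SEC("raw_tp/sched_switch")
 *	int on_switch(struct bpf_raw_tracepoint_args *ctx)
 *	{
 *		long id = bpf_get_stackid(ctx, &stacks, 0);
 *
 *		bpf_printk("stackid %ld", id);
 *		return 0;
 *	}
 */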
get_bpf_raw_tp_regs(); 1837 int ret; 1838 1839 if (IS_ERR(regs)) 1840 return PTR_ERR(regs); 1841 1842 perf_fetch_caller_regs(regs); 1843 /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */ 1844 ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map, 1845 flags, 0, 0); 1846 put_bpf_raw_tp_regs(); 1847 return ret; 1848 } 1849 1850 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = { 1851 .func = bpf_get_stackid_raw_tp, 1852 .gpl_only = true, 1853 .ret_type = RET_INTEGER, 1854 .arg1_type = ARG_PTR_TO_CTX, 1855 .arg2_type = ARG_CONST_MAP_PTR, 1856 .arg3_type = ARG_ANYTHING, 1857 }; 1858 1859 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args, 1860 void *, buf, u32, size, u64, flags) 1861 { 1862 struct pt_regs *regs = get_bpf_raw_tp_regs(); 1863 int ret; 1864 1865 if (IS_ERR(regs)) 1866 return PTR_ERR(regs); 1867 1868 perf_fetch_caller_regs(regs); 1869 ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf, 1870 (unsigned long) size, flags, 0); 1871 put_bpf_raw_tp_regs(); 1872 return ret; 1873 } 1874 1875 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = { 1876 .func = bpf_get_stack_raw_tp, 1877 .gpl_only = true, 1878 .ret_type = RET_INTEGER, 1879 .arg1_type = ARG_PTR_TO_CTX, 1880 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 1881 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1882 .arg4_type = ARG_ANYTHING, 1883 }; 1884 1885 static const struct bpf_func_proto * 1886 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1887 { 1888 switch (func_id) { 1889 case BPF_FUNC_perf_event_output: 1890 return &bpf_perf_event_output_proto_raw_tp; 1891 case BPF_FUNC_get_stackid: 1892 return &bpf_get_stackid_proto_raw_tp; 1893 case BPF_FUNC_get_stack: 1894 return &bpf_get_stack_proto_raw_tp; 1895 default: 1896 return bpf_tracing_func_proto(func_id, prog); 1897 } 1898 } 1899 1900 const struct bpf_func_proto * 1901 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1902 { 1903 const struct bpf_func_proto *fn; 1904 1905 switch (func_id) { 1906 #ifdef CONFIG_NET 1907 case BPF_FUNC_skb_output: 1908 return &bpf_skb_output_proto; 1909 case BPF_FUNC_xdp_output: 1910 return &bpf_xdp_output_proto; 1911 case BPF_FUNC_skc_to_tcp6_sock: 1912 return &bpf_skc_to_tcp6_sock_proto; 1913 case BPF_FUNC_skc_to_tcp_sock: 1914 return &bpf_skc_to_tcp_sock_proto; 1915 case BPF_FUNC_skc_to_tcp_timewait_sock: 1916 return &bpf_skc_to_tcp_timewait_sock_proto; 1917 case BPF_FUNC_skc_to_tcp_request_sock: 1918 return &bpf_skc_to_tcp_request_sock_proto; 1919 case BPF_FUNC_skc_to_udp6_sock: 1920 return &bpf_skc_to_udp6_sock_proto; 1921 case BPF_FUNC_skc_to_unix_sock: 1922 return &bpf_skc_to_unix_sock_proto; 1923 case BPF_FUNC_skc_to_mptcp_sock: 1924 return &bpf_skc_to_mptcp_sock_proto; 1925 case BPF_FUNC_sk_storage_get: 1926 return &bpf_sk_storage_get_tracing_proto; 1927 case BPF_FUNC_sk_storage_delete: 1928 return &bpf_sk_storage_delete_tracing_proto; 1929 case BPF_FUNC_sock_from_file: 1930 return &bpf_sock_from_file_proto; 1931 case BPF_FUNC_get_socket_cookie: 1932 return &bpf_get_socket_ptr_cookie_proto; 1933 case BPF_FUNC_xdp_get_buff_len: 1934 return &bpf_xdp_get_buff_len_trace_proto; 1935 #endif 1936 case BPF_FUNC_seq_printf: 1937 return prog->expected_attach_type == BPF_TRACE_ITER ? 1938 &bpf_seq_printf_proto : 1939 NULL; 1940 case BPF_FUNC_seq_write: 1941 return prog->expected_attach_type == BPF_TRACE_ITER ? 
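/*
 * Illustrative only, not part of this file: bpf_d_path(), as returned by
 * this proto table just below, is only usable from call sites allowed by
 * bpf_d_path_allowed() earlier, e.g. a program attached to one of the
 * allowlisted functions. The attach point and buffer size are assumptions
 * of the sketch.
 *
 *	SEC("fentry/vfs_truncate")
 *	int BPF_PROG(on_truncate, const struct path *path, loff_t length)
 *	{
 *		char buf[256];
 *		long n;
 *
 *		n = bpf_d_path((struct path *)path, buf, sizeof(buf));
 *		if (n > 0)
 *			bpf_printk("truncate: %s", buf);
 *		return 0;
 *	}
 */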

const struct bpf_func_proto *
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;

	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_skb_output:
		return &bpf_skb_output_proto;
	case BPF_FUNC_xdp_output:
		return &bpf_xdp_output_proto;
	case BPF_FUNC_skc_to_tcp6_sock:
		return &bpf_skc_to_tcp6_sock_proto;
	case BPF_FUNC_skc_to_tcp_sock:
		return &bpf_skc_to_tcp_sock_proto;
	case BPF_FUNC_skc_to_tcp_timewait_sock:
		return &bpf_skc_to_tcp_timewait_sock_proto;
	case BPF_FUNC_skc_to_tcp_request_sock:
		return &bpf_skc_to_tcp_request_sock_proto;
	case BPF_FUNC_skc_to_udp6_sock:
		return &bpf_skc_to_udp6_sock_proto;
	case BPF_FUNC_skc_to_unix_sock:
		return &bpf_skc_to_unix_sock_proto;
	case BPF_FUNC_skc_to_mptcp_sock:
		return &bpf_skc_to_mptcp_sock_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_tracing_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_tracing_proto;
	case BPF_FUNC_sock_from_file:
		return &bpf_sock_from_file_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_ptr_cookie_proto;
	case BPF_FUNC_xdp_get_buff_len:
		return &bpf_xdp_get_buff_len_trace_proto;
#endif
	case BPF_FUNC_seq_printf:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_printf_proto :
		       NULL;
	case BPF_FUNC_seq_write:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_write_proto :
		       NULL;
	case BPF_FUNC_seq_printf_btf:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_printf_btf_proto :
		       NULL;
	case BPF_FUNC_d_path:
		return &bpf_d_path_proto;
	case BPF_FUNC_get_func_arg:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
	case BPF_FUNC_get_func_ret:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
	case BPF_FUNC_get_func_arg_cnt:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
	case BPF_FUNC_get_attach_cookie:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
	default:
		fn = raw_tp_prog_func_proto(func_id, prog);
		if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
			fn = bpf_iter_get_func_proto(func_id, prog);
		return fn;
	}
}
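
/*
 * Illustrative sketch (not part of this file): an fentry program attached
 * through a trampoline, so the bpf_get_func_arg*() cases accepted by
 * tracing_prog_func_proto() above resolve to real protos.  Target and
 * function names are hypothetical.
 *
 *	SEC("fentry/do_unlinkat")
 *	int BPF_PROG(on_unlinkat)
 *	{
 *		__u64 nr_args = bpf_get_func_arg_cnt(ctx);
 *		__u64 arg0;
 *
 *		if (!bpf_get_func_arg(ctx, 0, &arg0))
 *			bpf_printk("do_unlinkat: %llu args, arg0=%llx",
 *				   nr_args, arg0);
 *		return 0;
 *	}
 */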

static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	return bpf_tracing_ctx_access(off, size, type);
}

static bool tracing_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_prog_test_run_raw_tp,
#endif
};

const struct bpf_verifier_ops tracing_verifier_ops = {
	.get_func_proto = tracing_prog_func_proto,
	.is_valid_access = tracing_prog_is_valid_access,
};

const struct bpf_prog_ops tracing_prog_ops = {
	.test_run = bpf_prog_test_run_tracing,
};

static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{
	if (off == 0) {
		if (size != sizeof(u64) || type != BPF_READ)
			return false;
		info->reg_type = PTR_TO_TP_BUFFER;
	}
	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}
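
/*
 * Illustrative sketch (not part of this file): what the conversion above
 * means for a program.  A perf_event BPF program simply dereferences the
 * context field; at verification time pe_prog_convert_ctx_access() turns
 * that single load into two loads, first ctx->data and then data->period.
 * The function name on_sample() is hypothetical.
 *
 *	SEC("perf_event")
 *	int on_sample(struct bpf_perf_event_data *ctx)
 *	{
 *		__u64 period = ctx->sample_period; // rewritten to
 *						   // ctx->data->period
 *
 *		bpf_printk("period %llu", period);
 *		return 0;
 *	}
 */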

const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto = pe_prog_func_proto,
	.is_valid_access = pe_prog_is_valid_access,
	.convert_ctx_access = pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};

static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog,
			       u64 bpf_cookie)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

	/*
	 * Kprobe override only works if the probe is on the function entry,
	 * and only if the probed function is on the error-injection opt-in
	 * list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
	if (ret < 0)
		goto unlock;

	/* set the new array to event->tp_event and set event->prog */
	event->prog = prog;
	event->bpf_cookie = bpf_cookie;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free_sleepable(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}

void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free_sleepable(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}
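
/*
 * Illustrative sketch (not part of this file): the user-space path that
 * ends up in perf_event_attach_bpf_prog() above.  Error handling is
 * omitted; prog_fd is assumed to be a loaded tracing program (e.g.
 * BPF_PROG_TYPE_TRACEPOINT or BPF_PROG_TYPE_KPROBE) and attr a matching
 * perf_event_attr.
 *
 *	int evt_fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *
 *	ioctl(evt_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
 *	ioctl(evt_fd, PERF_EVENT_IOC_ENABLE, 0);
 *	// Tearing the event down eventually reaches
 *	// perf_event_detach_bpf_prog().
 */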

int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!perfmon_capable())
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when user only wants to check for uquery->prog_cnt.
	 * There is no need to check for it since the case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}
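
/*
 * Illustrative sketch (not part of this file): querying the attached
 * programs from user space, which lands in perf_event_query_prog_array()
 * above.  Error handling is omitted; evt_fd is the tracepoint perf event
 * file descriptor from the previous sketch.
 *
 *	struct perf_event_query_bpf *query;
 *
 *	query = calloc(1, sizeof(*query) + 64 * sizeof(__u32));
 *	query->ids_len = 64;
 *	if (!ioctl(evt_fd, PERF_EVENT_IOC_QUERY_BPF, query) && query->prog_cnt)
 *		printf("%u programs attached, first id %u\n",
 *		       query->prog_cnt, query->ids[0]);
 */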

extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)btp);
	module_put(mod);
	preempt_enable();
}

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	cant_sleep();
	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
		bpf_prog_inc_misses_counter(prog);
		goto out;
	}
	rcu_read_lock();
	(void) bpf_prog_run(prog, args);
	rcu_read_unlock();
out:
	this_cpu_dec(*(prog->active));
}

#define UNPACK(...) __VA_ARGS__
#define REPEAT_1(FN, DL, X, ...) FN(X)
#define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X) u64 arg##X
#define COPY(X) args[X] = arg##X

#define __DL_COM (,)
#define __DL_SEM (;)

#define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x) \
	void bpf_trace_run##x(struct bpf_prog *prog, \
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \
	{ \
		u64 args[x]; \
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \
		__bpf_trace_run(prog, args); \
	} \
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);
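
/*
 * For reference, BPF_TRACE_DEFN_x(2) above expands (roughly) to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */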

static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * check that program doesn't access arguments beyond what's
	 * available in this tracepoint
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
						   prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}

int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}

static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);

#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;
	int ret = 0;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		goto out;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		} else {
			ret = -ENOMEM;
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

out:
	return notifier_from_errno(ret);
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */

#ifdef CONFIG_FPROBE
struct bpf_kprobe_multi_link {
	struct bpf_link link;
	struct fprobe fp;
	unsigned long *addrs;
	u64 *cookies;
	u32 cnt;
	u32 mods_cnt;
	struct module **mods;
};

struct bpf_kprobe_multi_run_ctx {
	struct bpf_run_ctx run_ctx;
	struct bpf_kprobe_multi_link *link;
	unsigned long entry_ip;
};

struct user_syms {
	const char **syms;
	char *buf;
};

static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
{
	unsigned long __user usymbol;
	const char **syms = NULL;
	char *buf = NULL, *p;
	int err = -ENOMEM;
	unsigned int i;

	syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
	if (!syms)
		goto error;

	buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
	if (!buf)
		goto error;

	for (p = buf, i = 0; i < cnt; i++) {
		if (__get_user(usymbol, usyms + i)) {
			err = -EFAULT;
			goto error;
		}
		err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
		if (err == KSYM_NAME_LEN)
			err = -E2BIG;
		if (err < 0)
			goto error;
		syms[i] = p;
		p += err + 1;
	}

	us->syms = syms;
	us->buf = buf;
	return 0;

error:
	if (err) {
		kvfree(syms);
		kvfree(buf);
	}
	return err;
}
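
/*
 * Illustrative note: on success copy_user_syms() leaves us->buf holding the
 * NUL-terminated strings back to back and us->syms[i] pointing into that
 * buffer, so a caller follows the pattern used later in this file:
 *
 *	struct user_syms us;
 *	int err;
 *
 *	err = copy_user_syms(&us, usyms, cnt);
 *	if (err)
 *		return err;
 *	// ... sort us.syms and resolve the names to addresses ...
 *	free_user_syms(&us);
 */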

static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
{
	u32 i;

	for (i = 0; i < cnt; i++)
		module_put(mods[i]);
}

static void free_user_syms(struct user_syms *us)
{
	kvfree(us->syms);
	kvfree(us->buf);
}

static void bpf_kprobe_multi_link_release(struct bpf_link *link)
{
	struct bpf_kprobe_multi_link *kmulti_link;

	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
	unregister_fprobe(&kmulti_link->fp);
	kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
}

static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
{
	struct bpf_kprobe_multi_link *kmulti_link;

	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
	kvfree(kmulti_link->addrs);
	kvfree(kmulti_link->cookies);
	kfree(kmulti_link->mods);
	kfree(kmulti_link);
}

static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
	.release = bpf_kprobe_multi_link_release,
	.dealloc = bpf_kprobe_multi_link_dealloc,
};

static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
{
	const struct bpf_kprobe_multi_link *link = priv;
	unsigned long *addr_a = a, *addr_b = b;
	u64 *cookie_a, *cookie_b;

	cookie_a = link->cookies + (addr_a - link->addrs);
	cookie_b = link->cookies + (addr_b - link->addrs);

	/* swap addr_a/addr_b and cookie_a/cookie_b values */
	swap(*addr_a, *addr_b);
	swap(*cookie_a, *cookie_b);
}

static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
{
	const unsigned long *addr_a = a, *addr_b = b;

	if (*addr_a == *addr_b)
		return 0;
	return *addr_a < *addr_b ? -1 : 1;
}

static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
{
	return bpf_kprobe_multi_addrs_cmp(a, b);
}
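
/*
 * Illustrative example of the swap/cmp pair above: sorting the addrs array
 * with sort_r() keeps each cookie glued to its address, because
 * bpf_kprobe_multi_cookie_swap() swaps both arrays at the same index.
 * With hypothetical values:
 *
 *	addrs   = { 0xffffffffc0002000, 0xffffffffc0001000 }
 *	cookies = { 2, 1 }
 *
 * a call such as
 *
 *	sort_r(addrs, 2, sizeof(*addrs), bpf_kprobe_multi_cookie_cmp,
 *	       bpf_kprobe_multi_cookie_swap, link);
 *
 * leaves
 *
 *	addrs   = { 0xffffffffc0001000, 0xffffffffc0002000 }
 *	cookies = { 1, 2 }
 */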

static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
	struct bpf_kprobe_multi_run_ctx *run_ctx;
	struct bpf_kprobe_multi_link *link;
	u64 *cookie, entry_ip;
	unsigned long *addr;

	if (WARN_ON_ONCE(!ctx))
		return 0;
	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
	link = run_ctx->link;
	if (!link->cookies)
		return 0;
	entry_ip = run_ctx->entry_ip;
	addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
		       bpf_kprobe_multi_addrs_cmp);
	if (!addr)
		return 0;
	cookie = link->cookies + (addr - link->addrs);
	return *cookie;
}

static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
	struct bpf_kprobe_multi_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
	return run_ctx->entry_ip;
}
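
/*
 * Illustrative sketch (not part of this file): a kprobe.multi program for
 * which bpf_kprobe_multi_cookie() and bpf_kprobe_multi_entry_ip() above
 * service bpf_get_attach_cookie() and bpf_get_func_ip().  The glob and
 * function name are hypothetical; assumes a libbpf-style build.
 *
 *	SEC("kprobe.multi/vfs_*")
 *	int on_vfs(struct pt_regs *ctx)
 *	{
 *		__u64 ip = bpf_get_func_ip(ctx);	// entry_ip of the hit symbol
 *		__u64 cookie = bpf_get_attach_cookie(ctx);
 *
 *		bpf_printk("hit %llx cookie %llu", ip, cookie);
 *		return 0;
 *	}
 */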

static int
kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
			   unsigned long entry_ip, struct pt_regs *regs)
{
	struct bpf_kprobe_multi_run_ctx run_ctx = {
		.link = link,
		.entry_ip = entry_ip,
	};
	struct bpf_run_ctx *old_run_ctx;
	int err;

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		err = 0;
		goto out;
	}

	migrate_disable();
	rcu_read_lock();
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	err = bpf_prog_run(link->link.prog, regs);
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();

out:
	__this_cpu_dec(bpf_prog_active);
	return err;
}

static void
kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
			  struct pt_regs *regs)
{
	struct bpf_kprobe_multi_link *link;

	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
}

static int symbols_cmp_r(const void *a, const void *b, const void *priv)
{
	const char **str_a = (const char **) a;
	const char **str_b = (const char **) b;

	return strcmp(*str_a, *str_b);
}

struct multi_symbols_sort {
	const char **funcs;
	u64 *cookies;
};

static void symbols_swap_r(void *a, void *b, int size, const void *priv)
{
	const struct multi_symbols_sort *data = priv;
	const char **name_a = a, **name_b = b;

	swap(*name_a, *name_b);

	/* If defined, swap the related cookies as well. */
	if (data->cookies) {
		u64 *cookie_a, *cookie_b;

		cookie_a = data->cookies + (name_a - data->funcs);
		cookie_b = data->cookies + (name_b - data->funcs);
		swap(*cookie_a, *cookie_b);
	}
}

struct modules_array {
	struct module **mods;
	int mods_cnt;
	int mods_cap;
};

static int add_module(struct modules_array *arr, struct module *mod)
{
	struct module **mods;

	if (arr->mods_cnt == arr->mods_cap) {
		arr->mods_cap = max(16, arr->mods_cap * 3 / 2);
		mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL);
		if (!mods)
			return -ENOMEM;
		arr->mods = mods;
	}

	arr->mods[arr->mods_cnt] = mod;
	arr->mods_cnt++;
	return 0;
}

static bool has_module(struct modules_array *arr, struct module *mod)
{
	int i;

	for (i = arr->mods_cnt - 1; i >= 0; i--) {
		if (arr->mods[i] == mod)
			return true;
	}
	return false;
}

static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
{
	struct modules_array arr = {};
	u32 i, err = 0;

	for (i = 0; i < addrs_cnt; i++) {
		struct module *mod;

		preempt_disable();
		mod = __module_address(addrs[i]);
		/* Either no module or it's already stored */
		if (!mod || has_module(&arr, mod)) {
			preempt_enable();
			continue;
		}
		if (!try_module_get(mod))
			err = -EINVAL;
		preempt_enable();
		if (err)
			break;
		err = add_module(&arr, mod);
		if (err) {
			module_put(mod);
			break;
		}
	}

	/* We return either err < 0 in case of error, ... */
	if (err) {
		kprobe_multi_put_modules(arr.mods, arr.mods_cnt);
		kfree(arr.mods);
		return err;
	}

	/* or number of modules found if everything is ok. */
	*mods = arr.mods;
	return arr.mods_cnt;
}

int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_kprobe_multi_link *link = NULL;
	struct bpf_link_primer link_primer;
	void __user *ucookies;
	unsigned long *addrs;
	u32 flags, cnt, size;
	void __user *uaddrs;
	u64 *cookies = NULL;
	void __user *usyms;
	int err;

	/* no support for 32bit archs yet */
	if (sizeof(u64) != sizeof(void *))
		return -EOPNOTSUPP;

	if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI)
		return -EINVAL;

	flags = attr->link_create.kprobe_multi.flags;
	if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
		return -EINVAL;

	uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
	usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
	if (!!uaddrs == !!usyms)
		return -EINVAL;

	cnt = attr->link_create.kprobe_multi.cnt;
	if (!cnt)
		return -EINVAL;

	size = cnt * sizeof(*addrs);
	addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
	if (!addrs)
		return -ENOMEM;

	ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
	if (ucookies) {
		cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
		if (!cookies) {
			err = -ENOMEM;
			goto error;
		}
		if (copy_from_user(cookies, ucookies, size)) {
			err = -EFAULT;
			goto error;
		}
	}

	if (uaddrs) {
		if (copy_from_user(addrs, uaddrs, size)) {
			err = -EFAULT;
			goto error;
		}
	} else {
		struct multi_symbols_sort data = {
			.cookies = cookies,
		};
		struct user_syms us;

		err = copy_user_syms(&us, usyms, cnt);
		if (err)
			goto error;

		if (cookies)
			data.funcs = us.syms;

		sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
		       symbols_swap_r, &data);

		err = ftrace_lookup_symbols(us.syms, cnt, addrs);
		free_user_syms(&us);
		if (err)
			goto error;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		err = -ENOMEM;
		goto error;
	}

	bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
		      &bpf_kprobe_multi_link_lops, prog);

	err = bpf_link_prime(&link->link, &link_primer);
	if (err)
		goto error;

	if (flags & BPF_F_KPROBE_MULTI_RETURN)
		link->fp.exit_handler = kprobe_multi_link_handler;
	else
		link->fp.entry_handler = kprobe_multi_link_handler;

	link->addrs = addrs;
	link->cookies = cookies;
	link->cnt = cnt;

	if (cookies) {
		/*
		 * Sorting addresses will trigger sorting cookies as well
		 * (check bpf_kprobe_multi_cookie_swap). This way we can
		 * find cookie based on the address in bpf_get_attach_cookie
		 * helper.
		 */
		sort_r(addrs, cnt, sizeof(*addrs),
		       bpf_kprobe_multi_cookie_cmp,
		       bpf_kprobe_multi_cookie_swap,
		       link);
	}

	err = get_modules_for_addrs(&link->mods, addrs, cnt);
	if (err < 0) {
		bpf_link_cleanup(&link_primer);
		return err;
	}
	link->mods_cnt = err;

	err = register_fprobe_ips(&link->fp, addrs, cnt);
	if (err) {
		kprobe_multi_put_modules(link->mods, link->mods_cnt);
		bpf_link_cleanup(&link_primer);
		return err;
	}

	return bpf_link_settle(&link_primer);

error:
	kfree(link);
	kvfree(addrs);
	kvfree(cookies);
	return err;
}
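
/*
 * Illustrative sketch (not part of this file): attaching from user space
 * with libbpf, which builds the union bpf_attr consumed by
 * bpf_kprobe_multi_link_attach() above.  Symbol names, cookie values and
 * the skel->progs.on_vfs handle are hypothetical; assumes a libbpf version
 * with kprobe.multi support.
 *
 *	const char *syms[] = { "vfs_read", "vfs_write" };
 *	__u64 cookies[] = { 1, 2 };
 *	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
 *		.syms = syms,
 *		.cookies = cookies,
 *		.cnt = 2,
 *	);
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe_multi_opts(skel->progs.on_vfs,
 *						     NULL, &opts);
 */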
#else /* !CONFIG_FPROBE */
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
	return 0;
}
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
	return 0;
}
#endif