// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p) \
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * Since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into the ring-buffer,
		 * so return zero here.
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we do a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If the prog_array fetched by bpf_prog_array_valid() was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under the RCU lock.
	 * If it turns out that prog_array is NULL, we bail out.
	 * For the opposite case, if the pointer fetched by
	 * bpf_prog_array_valid() was NULL, we skip the prog_array with the
	 * risk of missing events that were added between that check and
	 * the rcu_dereference(), which is an accepted risk.
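	 *
	 * For illustration of the return convention documented in the
	 * kernel-doc above, a BPF program attached to a kprobe reports 0
	 * to drop the event and 1 to keep it. A minimal sketch of such a
	 * program (illustrative only, not part of this file; assumes a
	 * libbpf-style build with bpf_helpers.h and a hypothetical probe
	 * target):
	 *
	 *	SEC("kprobe/example_func")
	 *	int filter_event(struct pt_regs *ctx)
	 *	{
	 *		// Keep the kprobe event only for tgid 1234;
	 *		// returning 0 tells the kprobe handler to filter
	 *		// it out.
	 *		return (bpf_get_current_pid_tgid() >> 32) == 1234;
	 *	}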
118 */ 119 ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN); 120 121 out: 122 __this_cpu_dec(bpf_prog_active); 123 124 return ret; 125 } 126 127 #ifdef CONFIG_BPF_KPROBE_OVERRIDE 128 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc) 129 { 130 regs_set_return_value(regs, rc); 131 override_function_with_return(regs); 132 return 0; 133 } 134 135 static const struct bpf_func_proto bpf_override_return_proto = { 136 .func = bpf_override_return, 137 .gpl_only = true, 138 .ret_type = RET_INTEGER, 139 .arg1_type = ARG_PTR_TO_CTX, 140 .arg2_type = ARG_ANYTHING, 141 }; 142 #endif 143 144 static __always_inline int 145 bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr) 146 { 147 int ret; 148 149 ret = copy_from_user_nofault(dst, unsafe_ptr, size); 150 if (unlikely(ret < 0)) 151 memset(dst, 0, size); 152 return ret; 153 } 154 155 BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size, 156 const void __user *, unsafe_ptr) 157 { 158 return bpf_probe_read_user_common(dst, size, unsafe_ptr); 159 } 160 161 const struct bpf_func_proto bpf_probe_read_user_proto = { 162 .func = bpf_probe_read_user, 163 .gpl_only = true, 164 .ret_type = RET_INTEGER, 165 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 166 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 167 .arg3_type = ARG_ANYTHING, 168 }; 169 170 static __always_inline int 171 bpf_probe_read_user_str_common(void *dst, u32 size, 172 const void __user *unsafe_ptr) 173 { 174 int ret; 175 176 ret = strncpy_from_user_nofault(dst, unsafe_ptr, size); 177 if (unlikely(ret < 0)) 178 memset(dst, 0, size); 179 return ret; 180 } 181 182 BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size, 183 const void __user *, unsafe_ptr) 184 { 185 return bpf_probe_read_user_str_common(dst, size, unsafe_ptr); 186 } 187 188 const struct bpf_func_proto bpf_probe_read_user_str_proto = { 189 .func = bpf_probe_read_user_str, 190 .gpl_only = true, 191 .ret_type = RET_INTEGER, 192 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 193 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 194 .arg3_type = ARG_ANYTHING, 195 }; 196 197 static __always_inline int 198 bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr) 199 { 200 int ret = security_locked_down(LOCKDOWN_BPF_READ); 201 202 if (unlikely(ret < 0)) 203 goto fail; 204 ret = copy_from_kernel_nofault(dst, unsafe_ptr, size); 205 if (unlikely(ret < 0)) 206 goto fail; 207 return ret; 208 fail: 209 memset(dst, 0, size); 210 return ret; 211 } 212 213 BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size, 214 const void *, unsafe_ptr) 215 { 216 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr); 217 } 218 219 const struct bpf_func_proto bpf_probe_read_kernel_proto = { 220 .func = bpf_probe_read_kernel, 221 .gpl_only = true, 222 .ret_type = RET_INTEGER, 223 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 224 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 225 .arg3_type = ARG_ANYTHING, 226 }; 227 228 static __always_inline int 229 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr) 230 { 231 int ret = security_locked_down(LOCKDOWN_BPF_READ); 232 233 if (unlikely(ret < 0)) 234 goto fail; 235 236 /* 237 * The strncpy_from_kernel_nofault() call will likely not fill the 238 * entire buffer, but that's okay in this circumstance as we're probing 239 * arbitrary memory anyway similar to bpf_probe_read_*() and might 240 * as well probe the stack. 
Thus, memory is explicitly cleared 241 * only in error case, so that improper users ignoring return 242 * code altogether don't copy garbage; otherwise length of string 243 * is returned that can be used for bpf_perf_event_output() et al. 244 */ 245 ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size); 246 if (unlikely(ret < 0)) 247 goto fail; 248 249 return ret; 250 fail: 251 memset(dst, 0, size); 252 return ret; 253 } 254 255 BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size, 256 const void *, unsafe_ptr) 257 { 258 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr); 259 } 260 261 const struct bpf_func_proto bpf_probe_read_kernel_str_proto = { 262 .func = bpf_probe_read_kernel_str, 263 .gpl_only = true, 264 .ret_type = RET_INTEGER, 265 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 266 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 267 .arg3_type = ARG_ANYTHING, 268 }; 269 270 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 271 BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size, 272 const void *, unsafe_ptr) 273 { 274 if ((unsigned long)unsafe_ptr < TASK_SIZE) { 275 return bpf_probe_read_user_common(dst, size, 276 (__force void __user *)unsafe_ptr); 277 } 278 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr); 279 } 280 281 static const struct bpf_func_proto bpf_probe_read_compat_proto = { 282 .func = bpf_probe_read_compat, 283 .gpl_only = true, 284 .ret_type = RET_INTEGER, 285 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 286 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 287 .arg3_type = ARG_ANYTHING, 288 }; 289 290 BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size, 291 const void *, unsafe_ptr) 292 { 293 if ((unsigned long)unsafe_ptr < TASK_SIZE) { 294 return bpf_probe_read_user_str_common(dst, size, 295 (__force void __user *)unsafe_ptr); 296 } 297 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr); 298 } 299 300 static const struct bpf_func_proto bpf_probe_read_compat_str_proto = { 301 .func = bpf_probe_read_compat_str, 302 .gpl_only = true, 303 .ret_type = RET_INTEGER, 304 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 305 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 306 .arg3_type = ARG_ANYTHING, 307 }; 308 #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */ 309 310 BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src, 311 u32, size) 312 { 313 /* 314 * Ensure we're in user context which is safe for the helper to 315 * run. This helper has no business in a kthread. 316 * 317 * access_ok() should prevent writing to non-user memory, but in 318 * some situations (nommu, temporary switch, etc) access_ok() does 319 * not provide enough validation, hence the check on KERNEL_DS. 320 * 321 * nmi_uaccess_okay() ensures the probe is not run in an interim 322 * state, when the task or mm are switched. This is specifically 323 * required to prevent the use of temporary mm. 
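	 *
	 * For context, the BPF-program side of this helper looks roughly
	 * like the sketch below (illustrative only, not part of this file;
	 * assumes a libbpf-style build with bpf_helpers.h/bpf_tracing.h
	 * and a hypothetical probe target whose first argument is a user
	 * pointer). Loading such a program also triggers the rate-limited
	 * warning in bpf_get_probe_write_proto() below:
	 *
	 *	SEC("kprobe/example_func")
	 *	int poke_user_buf(struct pt_regs *ctx)
	 *	{
	 *		char msg[] = "bpf";
	 *		void *ubuf = (void *)PT_REGS_PARM1(ctx);
	 *
	 *		// Overwrites the start of the user-supplied buffer.
	 *		bpf_probe_write_user(ubuf, msg, sizeof(msg));
	 *		return 0;
	 *	}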
324 */ 325 326 if (unlikely(in_interrupt() || 327 current->flags & (PF_KTHREAD | PF_EXITING))) 328 return -EPERM; 329 if (unlikely(uaccess_kernel())) 330 return -EPERM; 331 if (unlikely(!nmi_uaccess_okay())) 332 return -EPERM; 333 334 return copy_to_user_nofault(unsafe_ptr, src, size); 335 } 336 337 static const struct bpf_func_proto bpf_probe_write_user_proto = { 338 .func = bpf_probe_write_user, 339 .gpl_only = true, 340 .ret_type = RET_INTEGER, 341 .arg1_type = ARG_ANYTHING, 342 .arg2_type = ARG_PTR_TO_MEM, 343 .arg3_type = ARG_CONST_SIZE, 344 }; 345 346 static const struct bpf_func_proto *bpf_get_probe_write_proto(void) 347 { 348 if (!capable(CAP_SYS_ADMIN)) 349 return NULL; 350 351 pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!", 352 current->comm, task_pid_nr(current)); 353 354 return &bpf_probe_write_user_proto; 355 } 356 357 static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype, 358 size_t bufsz) 359 { 360 void __user *user_ptr = (__force void __user *)unsafe_ptr; 361 362 buf[0] = 0; 363 364 switch (fmt_ptype) { 365 case 's': 366 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 367 if ((unsigned long)unsafe_ptr < TASK_SIZE) { 368 strncpy_from_user_nofault(buf, user_ptr, bufsz); 369 break; 370 } 371 fallthrough; 372 #endif 373 case 'k': 374 strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz); 375 break; 376 case 'u': 377 strncpy_from_user_nofault(buf, user_ptr, bufsz); 378 break; 379 } 380 } 381 382 static DEFINE_RAW_SPINLOCK(trace_printk_lock); 383 384 #define BPF_TRACE_PRINTK_SIZE 1024 385 386 static __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...) 387 { 388 static char buf[BPF_TRACE_PRINTK_SIZE]; 389 unsigned long flags; 390 va_list ap; 391 int ret; 392 393 raw_spin_lock_irqsave(&trace_printk_lock, flags); 394 va_start(ap, fmt); 395 ret = vsnprintf(buf, sizeof(buf), fmt, ap); 396 va_end(ap); 397 /* vsnprintf() will not append null for zero-length strings */ 398 if (ret == 0) 399 buf[0] = '\0'; 400 trace_bpf_trace_printk(buf); 401 raw_spin_unlock_irqrestore(&trace_printk_lock, flags); 402 403 return ret; 404 } 405 406 /* 407 * Only limited trace_printk() conversion specifiers allowed: 408 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s 409 */ 410 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, 411 u64, arg2, u64, arg3) 412 { 413 int i, mod[3] = {}, fmt_cnt = 0; 414 char buf[64], fmt_ptype; 415 void *unsafe_ptr = NULL; 416 bool str_seen = false; 417 418 /* 419 * bpf_check()->check_func_arg()->check_stack_boundary() 420 * guarantees that fmt points to bpf program stack, 421 * fmt_size bytes of it were initialized and fmt_size > 0 422 */ 423 if (fmt[--fmt_size] != 0) 424 return -EINVAL; 425 426 /* check format string for allowed specifiers */ 427 for (i = 0; i < fmt_size; i++) { 428 if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) 429 return -EINVAL; 430 431 if (fmt[i] != '%') 432 continue; 433 434 if (fmt_cnt >= 3) 435 return -EINVAL; 436 437 /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */ 438 i++; 439 if (fmt[i] == 'l') { 440 mod[fmt_cnt]++; 441 i++; 442 } else if (fmt[i] == 'p') { 443 mod[fmt_cnt]++; 444 if ((fmt[i + 1] == 'k' || 445 fmt[i + 1] == 'u') && 446 fmt[i + 2] == 's') { 447 fmt_ptype = fmt[i + 1]; 448 i += 2; 449 goto fmt_str; 450 } 451 452 if (fmt[i + 1] == 'B') { 453 i++; 454 goto fmt_next; 455 } 456 457 /* disallow any further format extensions */ 458 if (fmt[i + 1] != 0 && 459 
!isspace(fmt[i + 1]) && 460 !ispunct(fmt[i + 1])) 461 return -EINVAL; 462 463 goto fmt_next; 464 } else if (fmt[i] == 's') { 465 mod[fmt_cnt]++; 466 fmt_ptype = fmt[i]; 467 fmt_str: 468 if (str_seen) 469 /* allow only one '%s' per fmt string */ 470 return -EINVAL; 471 str_seen = true; 472 473 if (fmt[i + 1] != 0 && 474 !isspace(fmt[i + 1]) && 475 !ispunct(fmt[i + 1])) 476 return -EINVAL; 477 478 switch (fmt_cnt) { 479 case 0: 480 unsafe_ptr = (void *)(long)arg1; 481 arg1 = (long)buf; 482 break; 483 case 1: 484 unsafe_ptr = (void *)(long)arg2; 485 arg2 = (long)buf; 486 break; 487 case 2: 488 unsafe_ptr = (void *)(long)arg3; 489 arg3 = (long)buf; 490 break; 491 } 492 493 bpf_trace_copy_string(buf, unsafe_ptr, fmt_ptype, 494 sizeof(buf)); 495 goto fmt_next; 496 } 497 498 if (fmt[i] == 'l') { 499 mod[fmt_cnt]++; 500 i++; 501 } 502 503 if (fmt[i] != 'i' && fmt[i] != 'd' && 504 fmt[i] != 'u' && fmt[i] != 'x') 505 return -EINVAL; 506 fmt_next: 507 fmt_cnt++; 508 } 509 510 /* Horrid workaround for getting va_list handling working with different 511 * argument type combinations generically for 32 and 64 bit archs. 512 */ 513 #define __BPF_TP_EMIT() __BPF_ARG3_TP() 514 #define __BPF_TP(...) \ 515 bpf_do_trace_printk(fmt, ##__VA_ARGS__) 516 517 #define __BPF_ARG1_TP(...) \ 518 ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \ 519 ? __BPF_TP(arg1, ##__VA_ARGS__) \ 520 : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32)) \ 521 ? __BPF_TP((long)arg1, ##__VA_ARGS__) \ 522 : __BPF_TP((u32)arg1, ##__VA_ARGS__))) 523 524 #define __BPF_ARG2_TP(...) \ 525 ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64)) \ 526 ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__) \ 527 : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32)) \ 528 ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__) \ 529 : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__))) 530 531 #define __BPF_ARG3_TP(...) \ 532 ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64)) \ 533 ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__) \ 534 : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32)) \ 535 ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__) \ 536 : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__))) 537 538 return __BPF_TP_EMIT(); 539 } 540 541 static const struct bpf_func_proto bpf_trace_printk_proto = { 542 .func = bpf_trace_printk, 543 .gpl_only = true, 544 .ret_type = RET_INTEGER, 545 .arg1_type = ARG_PTR_TO_MEM, 546 .arg2_type = ARG_CONST_SIZE, 547 }; 548 549 const struct bpf_func_proto *bpf_get_trace_printk_proto(void) 550 { 551 /* 552 * This program might be calling bpf_trace_printk, 553 * so enable the associated bpf_trace/bpf_trace_printk event. 554 * Repeat this each time as it is possible a user has 555 * disabled bpf_trace_printk events. By loading a program 556 * calling bpf_trace_printk() however the user has expressed 557 * the intent to see such events. 
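	 *
	 * A typical BPF-side call is limited to the specifiers listed
	 * above bpf_trace_printk() and at most three arguments; the output
	 * lands in the bpf_trace/bpf_trace_printk event enabled below.
	 * A minimal sketch (illustrative only; libbpf-style build and
	 * hypothetical probe target assumed):
	 *
	 *	SEC("kprobe/example_func")
	 *	int log_call(struct pt_regs *ctx)
	 *	{
	 *		char fmt[] = "example_func called by pid %d\n";
	 *
	 *		bpf_trace_printk(fmt, sizeof(fmt),
	 *				 bpf_get_current_pid_tgid() >> 32);
	 *		return 0;
	 *	}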
558 */ 559 if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1)) 560 pr_warn_ratelimited("could not enable bpf_trace_printk events"); 561 562 return &bpf_trace_printk_proto; 563 } 564 565 #define MAX_SEQ_PRINTF_VARARGS 12 566 #define MAX_SEQ_PRINTF_MAX_MEMCPY 6 567 #define MAX_SEQ_PRINTF_STR_LEN 128 568 569 struct bpf_seq_printf_buf { 570 char buf[MAX_SEQ_PRINTF_MAX_MEMCPY][MAX_SEQ_PRINTF_STR_LEN]; 571 }; 572 static DEFINE_PER_CPU(struct bpf_seq_printf_buf, bpf_seq_printf_buf); 573 static DEFINE_PER_CPU(int, bpf_seq_printf_buf_used); 574 575 BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, 576 const void *, data, u32, data_len) 577 { 578 int err = -EINVAL, fmt_cnt = 0, memcpy_cnt = 0; 579 int i, buf_used, copy_size, num_args; 580 u64 params[MAX_SEQ_PRINTF_VARARGS]; 581 struct bpf_seq_printf_buf *bufs; 582 const u64 *args = data; 583 584 buf_used = this_cpu_inc_return(bpf_seq_printf_buf_used); 585 if (WARN_ON_ONCE(buf_used > 1)) { 586 err = -EBUSY; 587 goto out; 588 } 589 590 bufs = this_cpu_ptr(&bpf_seq_printf_buf); 591 592 /* 593 * bpf_check()->check_func_arg()->check_stack_boundary() 594 * guarantees that fmt points to bpf program stack, 595 * fmt_size bytes of it were initialized and fmt_size > 0 596 */ 597 if (fmt[--fmt_size] != 0) 598 goto out; 599 600 if (data_len & 7) 601 goto out; 602 603 for (i = 0; i < fmt_size; i++) { 604 if (fmt[i] == '%') { 605 if (fmt[i + 1] == '%') 606 i++; 607 else if (!data || !data_len) 608 goto out; 609 } 610 } 611 612 num_args = data_len / 8; 613 614 /* check format string for allowed specifiers */ 615 for (i = 0; i < fmt_size; i++) { 616 /* only printable ascii for now. */ 617 if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) { 618 err = -EINVAL; 619 goto out; 620 } 621 622 if (fmt[i] != '%') 623 continue; 624 625 if (fmt[i + 1] == '%') { 626 i++; 627 continue; 628 } 629 630 if (fmt_cnt >= MAX_SEQ_PRINTF_VARARGS) { 631 err = -E2BIG; 632 goto out; 633 } 634 635 if (fmt_cnt >= num_args) { 636 err = -EINVAL; 637 goto out; 638 } 639 640 /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */ 641 i++; 642 643 /* skip optional "[0 +-][num]" width formating field */ 644 while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' || 645 fmt[i] == ' ') 646 i++; 647 if (fmt[i] >= '1' && fmt[i] <= '9') { 648 i++; 649 while (fmt[i] >= '0' && fmt[i] <= '9') 650 i++; 651 } 652 653 if (fmt[i] == 's') { 654 void *unsafe_ptr; 655 656 /* try our best to copy */ 657 if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) { 658 err = -E2BIG; 659 goto out; 660 } 661 662 unsafe_ptr = (void *)(long)args[fmt_cnt]; 663 err = strncpy_from_kernel_nofault(bufs->buf[memcpy_cnt], 664 unsafe_ptr, MAX_SEQ_PRINTF_STR_LEN); 665 if (err < 0) 666 bufs->buf[memcpy_cnt][0] = '\0'; 667 params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt]; 668 669 fmt_cnt++; 670 memcpy_cnt++; 671 continue; 672 } 673 674 if (fmt[i] == 'p') { 675 if (fmt[i + 1] == 0 || 676 fmt[i + 1] == 'K' || 677 fmt[i + 1] == 'x' || 678 fmt[i + 1] == 'B') { 679 /* just kernel pointers */ 680 params[fmt_cnt] = args[fmt_cnt]; 681 fmt_cnt++; 682 continue; 683 } 684 685 /* only support "%pI4", "%pi4", "%pI6" and "%pi6". */ 686 if (fmt[i + 1] != 'i' && fmt[i + 1] != 'I') { 687 err = -EINVAL; 688 goto out; 689 } 690 if (fmt[i + 2] != '4' && fmt[i + 2] != '6') { 691 err = -EINVAL; 692 goto out; 693 } 694 695 if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) { 696 err = -E2BIG; 697 goto out; 698 } 699 700 701 copy_size = (fmt[i + 2] == '4') ? 
4 : 16; 702 703 err = copy_from_kernel_nofault(bufs->buf[memcpy_cnt], 704 (void *) (long) args[fmt_cnt], 705 copy_size); 706 if (err < 0) 707 memset(bufs->buf[memcpy_cnt], 0, copy_size); 708 params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt]; 709 710 i += 2; 711 fmt_cnt++; 712 memcpy_cnt++; 713 continue; 714 } 715 716 if (fmt[i] == 'l') { 717 i++; 718 if (fmt[i] == 'l') 719 i++; 720 } 721 722 if (fmt[i] != 'i' && fmt[i] != 'd' && 723 fmt[i] != 'u' && fmt[i] != 'x' && 724 fmt[i] != 'X') { 725 err = -EINVAL; 726 goto out; 727 } 728 729 params[fmt_cnt] = args[fmt_cnt]; 730 fmt_cnt++; 731 } 732 733 /* Maximumly we can have MAX_SEQ_PRINTF_VARARGS parameter, just give 734 * all of them to seq_printf(). 735 */ 736 seq_printf(m, fmt, params[0], params[1], params[2], params[3], 737 params[4], params[5], params[6], params[7], params[8], 738 params[9], params[10], params[11]); 739 740 err = seq_has_overflowed(m) ? -EOVERFLOW : 0; 741 out: 742 this_cpu_dec(bpf_seq_printf_buf_used); 743 return err; 744 } 745 746 BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file) 747 748 static const struct bpf_func_proto bpf_seq_printf_proto = { 749 .func = bpf_seq_printf, 750 .gpl_only = true, 751 .ret_type = RET_INTEGER, 752 .arg1_type = ARG_PTR_TO_BTF_ID, 753 .arg1_btf_id = &btf_seq_file_ids[0], 754 .arg2_type = ARG_PTR_TO_MEM, 755 .arg3_type = ARG_CONST_SIZE, 756 .arg4_type = ARG_PTR_TO_MEM_OR_NULL, 757 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 758 }; 759 760 BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len) 761 { 762 return seq_write(m, data, len) ? -EOVERFLOW : 0; 763 } 764 765 static const struct bpf_func_proto bpf_seq_write_proto = { 766 .func = bpf_seq_write, 767 .gpl_only = true, 768 .ret_type = RET_INTEGER, 769 .arg1_type = ARG_PTR_TO_BTF_ID, 770 .arg1_btf_id = &btf_seq_file_ids[0], 771 .arg2_type = ARG_PTR_TO_MEM, 772 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 773 }; 774 775 static __always_inline int 776 get_map_perf_counter(struct bpf_map *map, u64 flags, 777 u64 *value, u64 *enabled, u64 *running) 778 { 779 struct bpf_array *array = container_of(map, struct bpf_array, map); 780 unsigned int cpu = smp_processor_id(); 781 u64 index = flags & BPF_F_INDEX_MASK; 782 struct bpf_event_entry *ee; 783 784 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) 785 return -EINVAL; 786 if (index == BPF_F_CURRENT_CPU) 787 index = cpu; 788 if (unlikely(index >= array->map.max_entries)) 789 return -E2BIG; 790 791 ee = READ_ONCE(array->ptrs[index]); 792 if (!ee) 793 return -ENOENT; 794 795 return perf_event_read_local(ee->event, value, enabled, running); 796 } 797 798 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags) 799 { 800 u64 value = 0; 801 int err; 802 803 err = get_map_perf_counter(map, flags, &value, NULL, NULL); 804 /* 805 * this api is ugly since we miss [-22..-2] range of valid 806 * counter values, but that's uapi 807 */ 808 if (err) 809 return err; 810 return value; 811 } 812 813 static const struct bpf_func_proto bpf_perf_event_read_proto = { 814 .func = bpf_perf_event_read, 815 .gpl_only = true, 816 .ret_type = RET_INTEGER, 817 .arg1_type = ARG_CONST_MAP_PTR, 818 .arg2_type = ARG_ANYTHING, 819 }; 820 821 BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags, 822 struct bpf_perf_event_value *, buf, u32, size) 823 { 824 int err = -EINVAL; 825 826 if (unlikely(size != sizeof(struct bpf_perf_event_value))) 827 goto clear; 828 err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled, 829 &buf->running); 830 if (unlikely(err)) 831 goto clear; 832 return 0; 
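	/*
	 * BPF-side usage sketch (illustrative only, not part of this
	 * file): read the counter for the current CPU from a
	 * BPF_MAP_TYPE_PERF_EVENT_ARRAY that user space has populated
	 * with perf event FDs. Map, section and probe names are
	 * hypothetical; a libbpf-style build is assumed.
	 *
	 *	struct {
	 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	 *		__uint(key_size, sizeof(int));
	 *		__uint(value_size, sizeof(int));
	 *	} counters SEC(".maps");
	 *
	 *	SEC("kprobe/example_func")
	 *	int read_counter(struct pt_regs *ctx)
	 *	{
	 *		struct bpf_perf_event_value v = {};
	 *
	 *		if (!bpf_perf_event_read_value(&counters,
	 *					       BPF_F_CURRENT_CPU,
	 *					       &v, sizeof(v)))
	 *			bpf_printk("counter %llu", v.counter);
	 *		return 0;
	 *	}
	 */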
833 clear: 834 memset(buf, 0, size); 835 return err; 836 } 837 838 static const struct bpf_func_proto bpf_perf_event_read_value_proto = { 839 .func = bpf_perf_event_read_value, 840 .gpl_only = true, 841 .ret_type = RET_INTEGER, 842 .arg1_type = ARG_CONST_MAP_PTR, 843 .arg2_type = ARG_ANYTHING, 844 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 845 .arg4_type = ARG_CONST_SIZE, 846 }; 847 848 static __always_inline u64 849 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, 850 u64 flags, struct perf_sample_data *sd) 851 { 852 struct bpf_array *array = container_of(map, struct bpf_array, map); 853 unsigned int cpu = smp_processor_id(); 854 u64 index = flags & BPF_F_INDEX_MASK; 855 struct bpf_event_entry *ee; 856 struct perf_event *event; 857 858 if (index == BPF_F_CURRENT_CPU) 859 index = cpu; 860 if (unlikely(index >= array->map.max_entries)) 861 return -E2BIG; 862 863 ee = READ_ONCE(array->ptrs[index]); 864 if (!ee) 865 return -ENOENT; 866 867 event = ee->event; 868 if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE || 869 event->attr.config != PERF_COUNT_SW_BPF_OUTPUT)) 870 return -EINVAL; 871 872 if (unlikely(event->oncpu != cpu)) 873 return -EOPNOTSUPP; 874 875 return perf_event_output(event, sd, regs); 876 } 877 878 /* 879 * Support executing tracepoints in normal, irq, and nmi context that each call 880 * bpf_perf_event_output 881 */ 882 struct bpf_trace_sample_data { 883 struct perf_sample_data sds[3]; 884 }; 885 886 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds); 887 static DEFINE_PER_CPU(int, bpf_trace_nest_level); 888 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, 889 u64, flags, void *, data, u64, size) 890 { 891 struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds); 892 int nest_level = this_cpu_inc_return(bpf_trace_nest_level); 893 struct perf_raw_record raw = { 894 .frag = { 895 .size = size, 896 .data = data, 897 }, 898 }; 899 struct perf_sample_data *sd; 900 int err; 901 902 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) { 903 err = -EBUSY; 904 goto out; 905 } 906 907 sd = &sds->sds[nest_level - 1]; 908 909 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) { 910 err = -EINVAL; 911 goto out; 912 } 913 914 perf_sample_data_init(sd, 0, 0); 915 sd->raw = &raw; 916 917 err = __bpf_perf_event_output(regs, map, flags, sd); 918 919 out: 920 this_cpu_dec(bpf_trace_nest_level); 921 return err; 922 } 923 924 static const struct bpf_func_proto bpf_perf_event_output_proto = { 925 .func = bpf_perf_event_output, 926 .gpl_only = true, 927 .ret_type = RET_INTEGER, 928 .arg1_type = ARG_PTR_TO_CTX, 929 .arg2_type = ARG_CONST_MAP_PTR, 930 .arg3_type = ARG_ANYTHING, 931 .arg4_type = ARG_PTR_TO_MEM, 932 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 933 }; 934 935 static DEFINE_PER_CPU(int, bpf_event_output_nest_level); 936 struct bpf_nested_pt_regs { 937 struct pt_regs regs[3]; 938 }; 939 static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs); 940 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds); 941 942 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, 943 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) 944 { 945 int nest_level = this_cpu_inc_return(bpf_event_output_nest_level); 946 struct perf_raw_frag frag = { 947 .copy = ctx_copy, 948 .size = ctx_size, 949 .data = ctx, 950 }; 951 struct perf_raw_record raw = { 952 .frag = { 953 { 954 .next = ctx_size ? 
&frag : NULL, 955 }, 956 .size = meta_size, 957 .data = meta, 958 }, 959 }; 960 struct perf_sample_data *sd; 961 struct pt_regs *regs; 962 u64 ret; 963 964 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) { 965 ret = -EBUSY; 966 goto out; 967 } 968 sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]); 969 regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]); 970 971 perf_fetch_caller_regs(regs); 972 perf_sample_data_init(sd, 0, 0); 973 sd->raw = &raw; 974 975 ret = __bpf_perf_event_output(regs, map, flags, sd); 976 out: 977 this_cpu_dec(bpf_event_output_nest_level); 978 return ret; 979 } 980 981 BPF_CALL_0(bpf_get_current_task) 982 { 983 return (long) current; 984 } 985 986 const struct bpf_func_proto bpf_get_current_task_proto = { 987 .func = bpf_get_current_task, 988 .gpl_only = true, 989 .ret_type = RET_INTEGER, 990 }; 991 992 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx) 993 { 994 struct bpf_array *array = container_of(map, struct bpf_array, map); 995 struct cgroup *cgrp; 996 997 if (unlikely(idx >= array->map.max_entries)) 998 return -E2BIG; 999 1000 cgrp = READ_ONCE(array->ptrs[idx]); 1001 if (unlikely(!cgrp)) 1002 return -EAGAIN; 1003 1004 return task_under_cgroup_hierarchy(current, cgrp); 1005 } 1006 1007 static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = { 1008 .func = bpf_current_task_under_cgroup, 1009 .gpl_only = false, 1010 .ret_type = RET_INTEGER, 1011 .arg1_type = ARG_CONST_MAP_PTR, 1012 .arg2_type = ARG_ANYTHING, 1013 }; 1014 1015 struct send_signal_irq_work { 1016 struct irq_work irq_work; 1017 struct task_struct *task; 1018 u32 sig; 1019 enum pid_type type; 1020 }; 1021 1022 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work); 1023 1024 static void do_bpf_send_signal(struct irq_work *entry) 1025 { 1026 struct send_signal_irq_work *work; 1027 1028 work = container_of(entry, struct send_signal_irq_work, irq_work); 1029 group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type); 1030 } 1031 1032 static int bpf_send_signal_common(u32 sig, enum pid_type type) 1033 { 1034 struct send_signal_irq_work *work = NULL; 1035 1036 /* Similar to bpf_probe_write_user, task needs to be 1037 * in a sound condition and kernel memory access be 1038 * permitted in order to send signal to the current 1039 * task. 1040 */ 1041 if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING))) 1042 return -EPERM; 1043 if (unlikely(uaccess_kernel())) 1044 return -EPERM; 1045 if (unlikely(!nmi_uaccess_okay())) 1046 return -EPERM; 1047 1048 if (irqs_disabled()) { 1049 /* Do an early check on signal validity. Otherwise, 1050 * the error is lost in deferred irq_work. 1051 */ 1052 if (unlikely(!valid_signal(sig))) 1053 return -EINVAL; 1054 1055 work = this_cpu_ptr(&send_signal_work); 1056 if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY) 1057 return -EBUSY; 1058 1059 /* Add the current task, which is the target of sending signal, 1060 * to the irq_work. The current task may change when queued 1061 * irq works get executed. 
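	 *
	 * From the BPF side, the two wrappers below reduce to a single
	 * helper call. A minimal sketch (illustrative only; libbpf-style
	 * build and hypothetical probe target assumed) that sends SIGUSR1
	 * to the current task's thread group:
	 *
	 *	SEC("kprobe/example_func")
	 *	int nudge_task(struct pt_regs *ctx)
	 *	{
	 *		// 10 == SIGUSR1 on most architectures;
	 *		// bpf_send_signal_thread() would target only the
	 *		// current thread instead.
	 *		bpf_send_signal(10);
	 *		return 0;
	 *	}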
1062 */ 1063 work->task = current; 1064 work->sig = sig; 1065 work->type = type; 1066 irq_work_queue(&work->irq_work); 1067 return 0; 1068 } 1069 1070 return group_send_sig_info(sig, SEND_SIG_PRIV, current, type); 1071 } 1072 1073 BPF_CALL_1(bpf_send_signal, u32, sig) 1074 { 1075 return bpf_send_signal_common(sig, PIDTYPE_TGID); 1076 } 1077 1078 static const struct bpf_func_proto bpf_send_signal_proto = { 1079 .func = bpf_send_signal, 1080 .gpl_only = false, 1081 .ret_type = RET_INTEGER, 1082 .arg1_type = ARG_ANYTHING, 1083 }; 1084 1085 BPF_CALL_1(bpf_send_signal_thread, u32, sig) 1086 { 1087 return bpf_send_signal_common(sig, PIDTYPE_PID); 1088 } 1089 1090 static const struct bpf_func_proto bpf_send_signal_thread_proto = { 1091 .func = bpf_send_signal_thread, 1092 .gpl_only = false, 1093 .ret_type = RET_INTEGER, 1094 .arg1_type = ARG_ANYTHING, 1095 }; 1096 1097 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz) 1098 { 1099 long len; 1100 char *p; 1101 1102 if (!sz) 1103 return 0; 1104 1105 p = d_path(path, buf, sz); 1106 if (IS_ERR(p)) { 1107 len = PTR_ERR(p); 1108 } else { 1109 len = buf + sz - p; 1110 memmove(buf, p, len); 1111 } 1112 1113 return len; 1114 } 1115 1116 BTF_SET_START(btf_allowlist_d_path) 1117 #ifdef CONFIG_SECURITY 1118 BTF_ID(func, security_file_permission) 1119 BTF_ID(func, security_inode_getattr) 1120 BTF_ID(func, security_file_open) 1121 #endif 1122 #ifdef CONFIG_SECURITY_PATH 1123 BTF_ID(func, security_path_truncate) 1124 #endif 1125 BTF_ID(func, vfs_truncate) 1126 BTF_ID(func, vfs_fallocate) 1127 BTF_ID(func, dentry_open) 1128 BTF_ID(func, vfs_getattr) 1129 BTF_ID(func, filp_close) 1130 BTF_SET_END(btf_allowlist_d_path) 1131 1132 static bool bpf_d_path_allowed(const struct bpf_prog *prog) 1133 { 1134 return btf_id_set_contains(&btf_allowlist_d_path, prog->aux->attach_btf_id); 1135 } 1136 1137 BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path) 1138 1139 static const struct bpf_func_proto bpf_d_path_proto = { 1140 .func = bpf_d_path, 1141 .gpl_only = false, 1142 .ret_type = RET_INTEGER, 1143 .arg1_type = ARG_PTR_TO_BTF_ID, 1144 .arg1_btf_id = &bpf_d_path_btf_ids[0], 1145 .arg2_type = ARG_PTR_TO_MEM, 1146 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1147 .allowed = bpf_d_path_allowed, 1148 }; 1149 1150 const struct bpf_func_proto * 1151 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1152 { 1153 switch (func_id) { 1154 case BPF_FUNC_map_lookup_elem: 1155 return &bpf_map_lookup_elem_proto; 1156 case BPF_FUNC_map_update_elem: 1157 return &bpf_map_update_elem_proto; 1158 case BPF_FUNC_map_delete_elem: 1159 return &bpf_map_delete_elem_proto; 1160 case BPF_FUNC_map_push_elem: 1161 return &bpf_map_push_elem_proto; 1162 case BPF_FUNC_map_pop_elem: 1163 return &bpf_map_pop_elem_proto; 1164 case BPF_FUNC_map_peek_elem: 1165 return &bpf_map_peek_elem_proto; 1166 case BPF_FUNC_ktime_get_ns: 1167 return &bpf_ktime_get_ns_proto; 1168 case BPF_FUNC_ktime_get_boot_ns: 1169 return &bpf_ktime_get_boot_ns_proto; 1170 case BPF_FUNC_tail_call: 1171 return &bpf_tail_call_proto; 1172 case BPF_FUNC_get_current_pid_tgid: 1173 return &bpf_get_current_pid_tgid_proto; 1174 case BPF_FUNC_get_current_task: 1175 return &bpf_get_current_task_proto; 1176 case BPF_FUNC_get_current_uid_gid: 1177 return &bpf_get_current_uid_gid_proto; 1178 case BPF_FUNC_get_current_comm: 1179 return &bpf_get_current_comm_proto; 1180 case BPF_FUNC_trace_printk: 1181 return bpf_get_trace_printk_proto(); 1182 case BPF_FUNC_get_smp_processor_id: 1183 return 
&bpf_get_smp_processor_id_proto; 1184 case BPF_FUNC_get_numa_node_id: 1185 return &bpf_get_numa_node_id_proto; 1186 case BPF_FUNC_perf_event_read: 1187 return &bpf_perf_event_read_proto; 1188 case BPF_FUNC_probe_write_user: 1189 return bpf_get_probe_write_proto(); 1190 case BPF_FUNC_current_task_under_cgroup: 1191 return &bpf_current_task_under_cgroup_proto; 1192 case BPF_FUNC_get_prandom_u32: 1193 return &bpf_get_prandom_u32_proto; 1194 case BPF_FUNC_probe_read_user: 1195 return &bpf_probe_read_user_proto; 1196 case BPF_FUNC_probe_read_kernel: 1197 return &bpf_probe_read_kernel_proto; 1198 case BPF_FUNC_probe_read_user_str: 1199 return &bpf_probe_read_user_str_proto; 1200 case BPF_FUNC_probe_read_kernel_str: 1201 return &bpf_probe_read_kernel_str_proto; 1202 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 1203 case BPF_FUNC_probe_read: 1204 return &bpf_probe_read_compat_proto; 1205 case BPF_FUNC_probe_read_str: 1206 return &bpf_probe_read_compat_str_proto; 1207 #endif 1208 #ifdef CONFIG_CGROUPS 1209 case BPF_FUNC_get_current_cgroup_id: 1210 return &bpf_get_current_cgroup_id_proto; 1211 #endif 1212 case BPF_FUNC_send_signal: 1213 return &bpf_send_signal_proto; 1214 case BPF_FUNC_send_signal_thread: 1215 return &bpf_send_signal_thread_proto; 1216 case BPF_FUNC_perf_event_read_value: 1217 return &bpf_perf_event_read_value_proto; 1218 case BPF_FUNC_get_ns_current_pid_tgid: 1219 return &bpf_get_ns_current_pid_tgid_proto; 1220 case BPF_FUNC_ringbuf_output: 1221 return &bpf_ringbuf_output_proto; 1222 case BPF_FUNC_ringbuf_reserve: 1223 return &bpf_ringbuf_reserve_proto; 1224 case BPF_FUNC_ringbuf_submit: 1225 return &bpf_ringbuf_submit_proto; 1226 case BPF_FUNC_ringbuf_discard: 1227 return &bpf_ringbuf_discard_proto; 1228 case BPF_FUNC_ringbuf_query: 1229 return &bpf_ringbuf_query_proto; 1230 case BPF_FUNC_jiffies64: 1231 return &bpf_jiffies64_proto; 1232 case BPF_FUNC_get_task_stack: 1233 return &bpf_get_task_stack_proto; 1234 case BPF_FUNC_copy_from_user: 1235 return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL; 1236 default: 1237 return NULL; 1238 } 1239 } 1240 1241 static const struct bpf_func_proto * 1242 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1243 { 1244 switch (func_id) { 1245 case BPF_FUNC_perf_event_output: 1246 return &bpf_perf_event_output_proto; 1247 case BPF_FUNC_get_stackid: 1248 return &bpf_get_stackid_proto; 1249 case BPF_FUNC_get_stack: 1250 return &bpf_get_stack_proto; 1251 #ifdef CONFIG_BPF_KPROBE_OVERRIDE 1252 case BPF_FUNC_override_return: 1253 return &bpf_override_return_proto; 1254 #endif 1255 default: 1256 return bpf_tracing_func_proto(func_id, prog); 1257 } 1258 } 1259 1260 /* bpf+kprobe programs can access fields of 'struct pt_regs' */ 1261 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type, 1262 const struct bpf_prog *prog, 1263 struct bpf_insn_access_aux *info) 1264 { 1265 if (off < 0 || off >= sizeof(struct pt_regs)) 1266 return false; 1267 if (type != BPF_READ) 1268 return false; 1269 if (off % size != 0) 1270 return false; 1271 /* 1272 * Assertion for 32 bit to make sure last 8 byte access 1273 * (BPF_DW) to the last 4 byte member is disallowed. 
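	 *
	 * The accesses validated here come from programs that dereference
	 * the pt_regs context directly, e.g. (sketch only, not part of
	 * this file; libbpf's bpf_tracing.h and a hypothetical probe
	 * target assumed):
	 *
	 *	SEC("kprobe/example_func")
	 *	int dump_arg(struct pt_regs *ctx)
	 *	{
	 *		// Compiles to a BPF_LDX from the corresponding
	 *		// pt_regs offset, which is what this callback checks.
	 *		unsigned long arg1 = PT_REGS_PARM1(ctx);
	 *
	 *		bpf_printk("arg1 %lx", arg1);
	 *		return 0;
	 *	}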
1274 */ 1275 if (off + size > sizeof(struct pt_regs)) 1276 return false; 1277 1278 return true; 1279 } 1280 1281 const struct bpf_verifier_ops kprobe_verifier_ops = { 1282 .get_func_proto = kprobe_prog_func_proto, 1283 .is_valid_access = kprobe_prog_is_valid_access, 1284 }; 1285 1286 const struct bpf_prog_ops kprobe_prog_ops = { 1287 }; 1288 1289 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map, 1290 u64, flags, void *, data, u64, size) 1291 { 1292 struct pt_regs *regs = *(struct pt_regs **)tp_buff; 1293 1294 /* 1295 * r1 points to perf tracepoint buffer where first 8 bytes are hidden 1296 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it 1297 * from there and call the same bpf_perf_event_output() helper inline. 1298 */ 1299 return ____bpf_perf_event_output(regs, map, flags, data, size); 1300 } 1301 1302 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = { 1303 .func = bpf_perf_event_output_tp, 1304 .gpl_only = true, 1305 .ret_type = RET_INTEGER, 1306 .arg1_type = ARG_PTR_TO_CTX, 1307 .arg2_type = ARG_CONST_MAP_PTR, 1308 .arg3_type = ARG_ANYTHING, 1309 .arg4_type = ARG_PTR_TO_MEM, 1310 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 1311 }; 1312 1313 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map, 1314 u64, flags) 1315 { 1316 struct pt_regs *regs = *(struct pt_regs **)tp_buff; 1317 1318 /* 1319 * Same comment as in bpf_perf_event_output_tp(), only that this time 1320 * the other helper's function body cannot be inlined due to being 1321 * external, thus we need to call raw helper function. 1322 */ 1323 return bpf_get_stackid((unsigned long) regs, (unsigned long) map, 1324 flags, 0, 0); 1325 } 1326 1327 static const struct bpf_func_proto bpf_get_stackid_proto_tp = { 1328 .func = bpf_get_stackid_tp, 1329 .gpl_only = true, 1330 .ret_type = RET_INTEGER, 1331 .arg1_type = ARG_PTR_TO_CTX, 1332 .arg2_type = ARG_CONST_MAP_PTR, 1333 .arg3_type = ARG_ANYTHING, 1334 }; 1335 1336 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size, 1337 u64, flags) 1338 { 1339 struct pt_regs *regs = *(struct pt_regs **)tp_buff; 1340 1341 return bpf_get_stack((unsigned long) regs, (unsigned long) buf, 1342 (unsigned long) size, flags, 0); 1343 } 1344 1345 static const struct bpf_func_proto bpf_get_stack_proto_tp = { 1346 .func = bpf_get_stack_tp, 1347 .gpl_only = true, 1348 .ret_type = RET_INTEGER, 1349 .arg1_type = ARG_PTR_TO_CTX, 1350 .arg2_type = ARG_PTR_TO_UNINIT_MEM, 1351 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1352 .arg4_type = ARG_ANYTHING, 1353 }; 1354 1355 static const struct bpf_func_proto * 1356 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1357 { 1358 switch (func_id) { 1359 case BPF_FUNC_perf_event_output: 1360 return &bpf_perf_event_output_proto_tp; 1361 case BPF_FUNC_get_stackid: 1362 return &bpf_get_stackid_proto_tp; 1363 case BPF_FUNC_get_stack: 1364 return &bpf_get_stack_proto_tp; 1365 default: 1366 return bpf_tracing_func_proto(func_id, prog); 1367 } 1368 } 1369 1370 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type, 1371 const struct bpf_prog *prog, 1372 struct bpf_insn_access_aux *info) 1373 { 1374 if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE) 1375 return false; 1376 if (type != BPF_READ) 1377 return false; 1378 if (off % size != 0) 1379 return false; 1380 1381 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64)); 1382 return true; 1383 } 1384 1385 const struct bpf_verifier_ops tracepoint_verifier_ops = { 1386 .get_func_proto = 
tp_prog_func_proto, 1387 .is_valid_access = tp_prog_is_valid_access, 1388 }; 1389 1390 const struct bpf_prog_ops tracepoint_prog_ops = { 1391 }; 1392 1393 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx, 1394 struct bpf_perf_event_value *, buf, u32, size) 1395 { 1396 int err = -EINVAL; 1397 1398 if (unlikely(size != sizeof(struct bpf_perf_event_value))) 1399 goto clear; 1400 err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled, 1401 &buf->running); 1402 if (unlikely(err)) 1403 goto clear; 1404 return 0; 1405 clear: 1406 memset(buf, 0, size); 1407 return err; 1408 } 1409 1410 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = { 1411 .func = bpf_perf_prog_read_value, 1412 .gpl_only = true, 1413 .ret_type = RET_INTEGER, 1414 .arg1_type = ARG_PTR_TO_CTX, 1415 .arg2_type = ARG_PTR_TO_UNINIT_MEM, 1416 .arg3_type = ARG_CONST_SIZE, 1417 }; 1418 1419 BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx, 1420 void *, buf, u32, size, u64, flags) 1421 { 1422 #ifndef CONFIG_X86 1423 return -ENOENT; 1424 #else 1425 static const u32 br_entry_size = sizeof(struct perf_branch_entry); 1426 struct perf_branch_stack *br_stack = ctx->data->br_stack; 1427 u32 to_copy; 1428 1429 if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE)) 1430 return -EINVAL; 1431 1432 if (unlikely(!br_stack)) 1433 return -EINVAL; 1434 1435 if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE) 1436 return br_stack->nr * br_entry_size; 1437 1438 if (!buf || (size % br_entry_size != 0)) 1439 return -EINVAL; 1440 1441 to_copy = min_t(u32, br_stack->nr * br_entry_size, size); 1442 memcpy(buf, br_stack->entries, to_copy); 1443 1444 return to_copy; 1445 #endif 1446 } 1447 1448 static const struct bpf_func_proto bpf_read_branch_records_proto = { 1449 .func = bpf_read_branch_records, 1450 .gpl_only = true, 1451 .ret_type = RET_INTEGER, 1452 .arg1_type = ARG_PTR_TO_CTX, 1453 .arg2_type = ARG_PTR_TO_MEM_OR_NULL, 1454 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1455 .arg4_type = ARG_ANYTHING, 1456 }; 1457 1458 static const struct bpf_func_proto * 1459 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1460 { 1461 switch (func_id) { 1462 case BPF_FUNC_perf_event_output: 1463 return &bpf_perf_event_output_proto_tp; 1464 case BPF_FUNC_get_stackid: 1465 return &bpf_get_stackid_proto_pe; 1466 case BPF_FUNC_get_stack: 1467 return &bpf_get_stack_proto_pe; 1468 case BPF_FUNC_perf_prog_read_value: 1469 return &bpf_perf_prog_read_value_proto; 1470 case BPF_FUNC_read_branch_records: 1471 return &bpf_read_branch_records_proto; 1472 default: 1473 return bpf_tracing_func_proto(func_id, prog); 1474 } 1475 } 1476 1477 /* 1478 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp 1479 * to avoid potential recursive reuse issue when/if tracepoints are added 1480 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack. 1481 * 1482 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage 1483 * in normal, irq, and nmi context. 
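 *
 * The helpers guarded by these scratch regs are what raw tracepoint
 * programs typically call; a minimal sketch (illustrative only, not
 * part of this file; libbpf-style build assumed, sched_switch used as
 * an example tracepoint):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_STACK_TRACE);
 *		__uint(max_entries, 1024);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, 127 * sizeof(__u64));
 *	} stacks SEC(".maps");
 *
 *	SEC("raw_tracepoint/sched_switch")
 *	int on_switch(struct bpf_raw_tracepoint_args *ctx)
 *	{
 *		// Records a kernel stack using the per-cpu pt_regs
 *		// scratch slot picked by get_bpf_raw_tp_regs() below.
 *		bpf_get_stackid(ctx, &stacks, 0);
 *		return 0;
 *	}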
1484 */ 1485 struct bpf_raw_tp_regs { 1486 struct pt_regs regs[3]; 1487 }; 1488 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs); 1489 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level); 1490 static struct pt_regs *get_bpf_raw_tp_regs(void) 1491 { 1492 struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs); 1493 int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level); 1494 1495 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) { 1496 this_cpu_dec(bpf_raw_tp_nest_level); 1497 return ERR_PTR(-EBUSY); 1498 } 1499 1500 return &tp_regs->regs[nest_level - 1]; 1501 } 1502 1503 static void put_bpf_raw_tp_regs(void) 1504 { 1505 this_cpu_dec(bpf_raw_tp_nest_level); 1506 } 1507 1508 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args, 1509 struct bpf_map *, map, u64, flags, void *, data, u64, size) 1510 { 1511 struct pt_regs *regs = get_bpf_raw_tp_regs(); 1512 int ret; 1513 1514 if (IS_ERR(regs)) 1515 return PTR_ERR(regs); 1516 1517 perf_fetch_caller_regs(regs); 1518 ret = ____bpf_perf_event_output(regs, map, flags, data, size); 1519 1520 put_bpf_raw_tp_regs(); 1521 return ret; 1522 } 1523 1524 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = { 1525 .func = bpf_perf_event_output_raw_tp, 1526 .gpl_only = true, 1527 .ret_type = RET_INTEGER, 1528 .arg1_type = ARG_PTR_TO_CTX, 1529 .arg2_type = ARG_CONST_MAP_PTR, 1530 .arg3_type = ARG_ANYTHING, 1531 .arg4_type = ARG_PTR_TO_MEM, 1532 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 1533 }; 1534 1535 extern const struct bpf_func_proto bpf_skb_output_proto; 1536 extern const struct bpf_func_proto bpf_xdp_output_proto; 1537 1538 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args, 1539 struct bpf_map *, map, u64, flags) 1540 { 1541 struct pt_regs *regs = get_bpf_raw_tp_regs(); 1542 int ret; 1543 1544 if (IS_ERR(regs)) 1545 return PTR_ERR(regs); 1546 1547 perf_fetch_caller_regs(regs); 1548 /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */ 1549 ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map, 1550 flags, 0, 0); 1551 put_bpf_raw_tp_regs(); 1552 return ret; 1553 } 1554 1555 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = { 1556 .func = bpf_get_stackid_raw_tp, 1557 .gpl_only = true, 1558 .ret_type = RET_INTEGER, 1559 .arg1_type = ARG_PTR_TO_CTX, 1560 .arg2_type = ARG_CONST_MAP_PTR, 1561 .arg3_type = ARG_ANYTHING, 1562 }; 1563 1564 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args, 1565 void *, buf, u32, size, u64, flags) 1566 { 1567 struct pt_regs *regs = get_bpf_raw_tp_regs(); 1568 int ret; 1569 1570 if (IS_ERR(regs)) 1571 return PTR_ERR(regs); 1572 1573 perf_fetch_caller_regs(regs); 1574 ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf, 1575 (unsigned long) size, flags, 0); 1576 put_bpf_raw_tp_regs(); 1577 return ret; 1578 } 1579 1580 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = { 1581 .func = bpf_get_stack_raw_tp, 1582 .gpl_only = true, 1583 .ret_type = RET_INTEGER, 1584 .arg1_type = ARG_PTR_TO_CTX, 1585 .arg2_type = ARG_PTR_TO_MEM, 1586 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1587 .arg4_type = ARG_ANYTHING, 1588 }; 1589 1590 static const struct bpf_func_proto * 1591 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1592 { 1593 switch (func_id) { 1594 case BPF_FUNC_perf_event_output: 1595 return &bpf_perf_event_output_proto_raw_tp; 1596 case BPF_FUNC_get_stackid: 1597 return &bpf_get_stackid_proto_raw_tp; 1598 case 
BPF_FUNC_get_stack: 1599 return &bpf_get_stack_proto_raw_tp; 1600 default: 1601 return bpf_tracing_func_proto(func_id, prog); 1602 } 1603 } 1604 1605 const struct bpf_func_proto * 1606 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1607 { 1608 switch (func_id) { 1609 #ifdef CONFIG_NET 1610 case BPF_FUNC_skb_output: 1611 return &bpf_skb_output_proto; 1612 case BPF_FUNC_xdp_output: 1613 return &bpf_xdp_output_proto; 1614 case BPF_FUNC_skc_to_tcp6_sock: 1615 return &bpf_skc_to_tcp6_sock_proto; 1616 case BPF_FUNC_skc_to_tcp_sock: 1617 return &bpf_skc_to_tcp_sock_proto; 1618 case BPF_FUNC_skc_to_tcp_timewait_sock: 1619 return &bpf_skc_to_tcp_timewait_sock_proto; 1620 case BPF_FUNC_skc_to_tcp_request_sock: 1621 return &bpf_skc_to_tcp_request_sock_proto; 1622 case BPF_FUNC_skc_to_udp6_sock: 1623 return &bpf_skc_to_udp6_sock_proto; 1624 #endif 1625 case BPF_FUNC_seq_printf: 1626 return prog->expected_attach_type == BPF_TRACE_ITER ? 1627 &bpf_seq_printf_proto : 1628 NULL; 1629 case BPF_FUNC_seq_write: 1630 return prog->expected_attach_type == BPF_TRACE_ITER ? 1631 &bpf_seq_write_proto : 1632 NULL; 1633 case BPF_FUNC_d_path: 1634 return &bpf_d_path_proto; 1635 default: 1636 return raw_tp_prog_func_proto(func_id, prog); 1637 } 1638 } 1639 1640 static bool raw_tp_prog_is_valid_access(int off, int size, 1641 enum bpf_access_type type, 1642 const struct bpf_prog *prog, 1643 struct bpf_insn_access_aux *info) 1644 { 1645 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) 1646 return false; 1647 if (type != BPF_READ) 1648 return false; 1649 if (off % size != 0) 1650 return false; 1651 return true; 1652 } 1653 1654 static bool tracing_prog_is_valid_access(int off, int size, 1655 enum bpf_access_type type, 1656 const struct bpf_prog *prog, 1657 struct bpf_insn_access_aux *info) 1658 { 1659 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) 1660 return false; 1661 if (type != BPF_READ) 1662 return false; 1663 if (off % size != 0) 1664 return false; 1665 return btf_ctx_access(off, size, type, prog, info); 1666 } 1667 1668 int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog, 1669 const union bpf_attr *kattr, 1670 union bpf_attr __user *uattr) 1671 { 1672 return -ENOTSUPP; 1673 } 1674 1675 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = { 1676 .get_func_proto = raw_tp_prog_func_proto, 1677 .is_valid_access = raw_tp_prog_is_valid_access, 1678 }; 1679 1680 const struct bpf_prog_ops raw_tracepoint_prog_ops = { 1681 }; 1682 1683 const struct bpf_verifier_ops tracing_verifier_ops = { 1684 .get_func_proto = tracing_prog_func_proto, 1685 .is_valid_access = tracing_prog_is_valid_access, 1686 }; 1687 1688 const struct bpf_prog_ops tracing_prog_ops = { 1689 .test_run = bpf_prog_test_run_tracing, 1690 }; 1691 1692 static bool raw_tp_writable_prog_is_valid_access(int off, int size, 1693 enum bpf_access_type type, 1694 const struct bpf_prog *prog, 1695 struct bpf_insn_access_aux *info) 1696 { 1697 if (off == 0) { 1698 if (size != sizeof(u64) || type != BPF_READ) 1699 return false; 1700 info->reg_type = PTR_TO_TP_BUFFER; 1701 } 1702 return raw_tp_prog_is_valid_access(off, size, type, prog, info); 1703 } 1704 1705 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = { 1706 .get_func_proto = raw_tp_prog_func_proto, 1707 .is_valid_access = raw_tp_writable_prog_is_valid_access, 1708 }; 1709 1710 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = { 1711 }; 1712 1713 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type 
type, 1714 const struct bpf_prog *prog, 1715 struct bpf_insn_access_aux *info) 1716 { 1717 const int size_u64 = sizeof(u64); 1718 1719 if (off < 0 || off >= sizeof(struct bpf_perf_event_data)) 1720 return false; 1721 if (type != BPF_READ) 1722 return false; 1723 if (off % size != 0) { 1724 if (sizeof(unsigned long) != 4) 1725 return false; 1726 if (size != 8) 1727 return false; 1728 if (off % size != 4) 1729 return false; 1730 } 1731 1732 switch (off) { 1733 case bpf_ctx_range(struct bpf_perf_event_data, sample_period): 1734 bpf_ctx_record_field_size(info, size_u64); 1735 if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) 1736 return false; 1737 break; 1738 case bpf_ctx_range(struct bpf_perf_event_data, addr): 1739 bpf_ctx_record_field_size(info, size_u64); 1740 if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) 1741 return false; 1742 break; 1743 default: 1744 if (size != sizeof(long)) 1745 return false; 1746 } 1747 1748 return true; 1749 } 1750 1751 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, 1752 const struct bpf_insn *si, 1753 struct bpf_insn *insn_buf, 1754 struct bpf_prog *prog, u32 *target_size) 1755 { 1756 struct bpf_insn *insn = insn_buf; 1757 1758 switch (si->off) { 1759 case offsetof(struct bpf_perf_event_data, sample_period): 1760 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 1761 data), si->dst_reg, si->src_reg, 1762 offsetof(struct bpf_perf_event_data_kern, data)); 1763 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, 1764 bpf_target_off(struct perf_sample_data, period, 8, 1765 target_size)); 1766 break; 1767 case offsetof(struct bpf_perf_event_data, addr): 1768 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 1769 data), si->dst_reg, si->src_reg, 1770 offsetof(struct bpf_perf_event_data_kern, data)); 1771 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, 1772 bpf_target_off(struct perf_sample_data, addr, 8, 1773 target_size)); 1774 break; 1775 default: 1776 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 1777 regs), si->dst_reg, si->src_reg, 1778 offsetof(struct bpf_perf_event_data_kern, regs)); 1779 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg, 1780 si->off); 1781 break; 1782 } 1783 1784 return insn - insn_buf; 1785 } 1786 1787 const struct bpf_verifier_ops perf_event_verifier_ops = { 1788 .get_func_proto = pe_prog_func_proto, 1789 .is_valid_access = pe_prog_is_valid_access, 1790 .convert_ctx_access = pe_prog_convert_ctx_access, 1791 }; 1792 1793 const struct bpf_prog_ops perf_event_prog_ops = { 1794 }; 1795 1796 static DEFINE_MUTEX(bpf_event_mutex); 1797 1798 #define BPF_TRACE_MAX_PROGS 64 1799 1800 int perf_event_attach_bpf_prog(struct perf_event *event, 1801 struct bpf_prog *prog) 1802 { 1803 struct bpf_prog_array *old_array; 1804 struct bpf_prog_array *new_array; 1805 int ret = -EEXIST; 1806 1807 /* 1808 * Kprobe override only works if they are on the function entry, 1809 * and only if they are on the opt-in list. 
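	 *
	 * In other words, with CONFIG_BPF_KPROBE_OVERRIDE the target
	 * function must be tagged with ALLOW_ERROR_INJECTION() and the
	 * kprobe must be placed at its entry. A sketch of the two sides
	 * (illustrative only; the function name is hypothetical):
	 *
	 *	// kernel side, in the file defining the function:
	 *	ALLOW_ERROR_INJECTION(example_func, ERRNO);
	 *
	 *	// BPF side:
	 *	SEC("kprobe/example_func")
	 *	int force_enomem(struct pt_regs *ctx)
	 *	{
	 *		bpf_override_return(ctx, -ENOMEM);
	 *		return 0;
	 *	}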
1810 */ 1811 if (prog->kprobe_override && 1812 (!trace_kprobe_on_func_entry(event->tp_event) || 1813 !trace_kprobe_error_injectable(event->tp_event))) 1814 return -EINVAL; 1815 1816 mutex_lock(&bpf_event_mutex); 1817 1818 if (event->prog) 1819 goto unlock; 1820 1821 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); 1822 if (old_array && 1823 bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) { 1824 ret = -E2BIG; 1825 goto unlock; 1826 } 1827 1828 ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array); 1829 if (ret < 0) 1830 goto unlock; 1831 1832 /* set the new array to event->tp_event and set event->prog */ 1833 event->prog = prog; 1834 rcu_assign_pointer(event->tp_event->prog_array, new_array); 1835 bpf_prog_array_free(old_array); 1836 1837 unlock: 1838 mutex_unlock(&bpf_event_mutex); 1839 return ret; 1840 } 1841 1842 void perf_event_detach_bpf_prog(struct perf_event *event) 1843 { 1844 struct bpf_prog_array *old_array; 1845 struct bpf_prog_array *new_array; 1846 int ret; 1847 1848 mutex_lock(&bpf_event_mutex); 1849 1850 if (!event->prog) 1851 goto unlock; 1852 1853 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); 1854 ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array); 1855 if (ret == -ENOENT) 1856 goto unlock; 1857 if (ret < 0) { 1858 bpf_prog_array_delete_safe(old_array, event->prog); 1859 } else { 1860 rcu_assign_pointer(event->tp_event->prog_array, new_array); 1861 bpf_prog_array_free(old_array); 1862 } 1863 1864 bpf_prog_put(event->prog); 1865 event->prog = NULL; 1866 1867 unlock: 1868 mutex_unlock(&bpf_event_mutex); 1869 } 1870 1871 int perf_event_query_prog_array(struct perf_event *event, void __user *info) 1872 { 1873 struct perf_event_query_bpf __user *uquery = info; 1874 struct perf_event_query_bpf query = {}; 1875 struct bpf_prog_array *progs; 1876 u32 *ids, prog_cnt, ids_len; 1877 int ret; 1878 1879 if (!perfmon_capable()) 1880 return -EPERM; 1881 if (event->attr.type != PERF_TYPE_TRACEPOINT) 1882 return -EINVAL; 1883 if (copy_from_user(&query, uquery, sizeof(query))) 1884 return -EFAULT; 1885 1886 ids_len = query.ids_len; 1887 if (ids_len > BPF_TRACE_MAX_PROGS) 1888 return -E2BIG; 1889 ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN); 1890 if (!ids) 1891 return -ENOMEM; 1892 /* 1893 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which 1894 * is required when user only wants to check for uquery->prog_cnt. 1895 * There is no need to check for it since the case is handled 1896 * gracefully in bpf_prog_array_copy_info. 
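	 *
	 * User space reaches this function via the PERF_EVENT_IOC_QUERY_BPF
	 * ioctl; a rough usage sketch (error handling omitted, perf_fd
	 * from an earlier perf_event_open() call on a tracepoint/kprobe
	 * event):
	 *
	 *	struct perf_event_query_bpf *q;
	 *
	 *	q = calloc(1, sizeof(*q) + 64 * sizeof(__u32));
	 *	q->ids_len = 64;	// up to BPF_TRACE_MAX_PROGS below
	 *	if (!ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, q))
	 *		printf("%u programs attached\n", q->prog_cnt);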
1897 */ 1898 1899 mutex_lock(&bpf_event_mutex); 1900 progs = bpf_event_rcu_dereference(event->tp_event->prog_array); 1901 ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt); 1902 mutex_unlock(&bpf_event_mutex); 1903 1904 if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) || 1905 copy_to_user(uquery->ids, ids, ids_len * sizeof(u32))) 1906 ret = -EFAULT; 1907 1908 kfree(ids); 1909 return ret; 1910 } 1911 1912 extern struct bpf_raw_event_map __start__bpf_raw_tp[]; 1913 extern struct bpf_raw_event_map __stop__bpf_raw_tp[]; 1914 1915 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name) 1916 { 1917 struct bpf_raw_event_map *btp = __start__bpf_raw_tp; 1918 1919 for (; btp < __stop__bpf_raw_tp; btp++) { 1920 if (!strcmp(btp->tp->name, name)) 1921 return btp; 1922 } 1923 1924 return bpf_get_raw_tracepoint_module(name); 1925 } 1926 1927 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp) 1928 { 1929 struct module *mod = __module_address((unsigned long)btp); 1930 1931 if (mod) 1932 module_put(mod); 1933 } 1934 1935 static __always_inline 1936 void __bpf_trace_run(struct bpf_prog *prog, u64 *args) 1937 { 1938 cant_sleep(); 1939 rcu_read_lock(); 1940 (void) BPF_PROG_RUN(prog, args); 1941 rcu_read_unlock(); 1942 } 1943 1944 #define UNPACK(...) __VA_ARGS__ 1945 #define REPEAT_1(FN, DL, X, ...) FN(X) 1946 #define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__) 1947 #define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__) 1948 #define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__) 1949 #define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__) 1950 #define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__) 1951 #define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__) 1952 #define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__) 1953 #define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__) 1954 #define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__) 1955 #define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__) 1956 #define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__) 1957 #define REPEAT(X, FN, DL, ...) 
REPEAT_##X(FN, DL, __VA_ARGS__) 1958 1959 #define SARG(X) u64 arg##X 1960 #define COPY(X) args[X] = arg##X 1961 1962 #define __DL_COM (,) 1963 #define __DL_SEM (;) 1964 1965 #define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 1966 1967 #define BPF_TRACE_DEFN_x(x) \ 1968 void bpf_trace_run##x(struct bpf_prog *prog, \ 1969 REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \ 1970 { \ 1971 u64 args[x]; \ 1972 REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \ 1973 __bpf_trace_run(prog, args); \ 1974 } \ 1975 EXPORT_SYMBOL_GPL(bpf_trace_run##x) 1976 BPF_TRACE_DEFN_x(1); 1977 BPF_TRACE_DEFN_x(2); 1978 BPF_TRACE_DEFN_x(3); 1979 BPF_TRACE_DEFN_x(4); 1980 BPF_TRACE_DEFN_x(5); 1981 BPF_TRACE_DEFN_x(6); 1982 BPF_TRACE_DEFN_x(7); 1983 BPF_TRACE_DEFN_x(8); 1984 BPF_TRACE_DEFN_x(9); 1985 BPF_TRACE_DEFN_x(10); 1986 BPF_TRACE_DEFN_x(11); 1987 BPF_TRACE_DEFN_x(12); 1988 1989 static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) 1990 { 1991 struct tracepoint *tp = btp->tp; 1992 1993 /* 1994 * check that program doesn't access arguments beyond what's 1995 * available in this tracepoint 1996 */ 1997 if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64)) 1998 return -EINVAL; 1999 2000 if (prog->aux->max_tp_access > btp->writable_size) 2001 return -EINVAL; 2002 2003 return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog); 2004 } 2005 2006 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) 2007 { 2008 return __bpf_probe_register(btp, prog); 2009 } 2010 2011 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog) 2012 { 2013 return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog); 2014 } 2015 2016 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, 2017 u32 *fd_type, const char **buf, 2018 u64 *probe_offset, u64 *probe_addr) 2019 { 2020 bool is_tracepoint, is_syscall_tp; 2021 struct bpf_prog *prog; 2022 int flags, err = 0; 2023 2024 prog = event->prog; 2025 if (!prog) 2026 return -ENOENT; 2027 2028 /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */ 2029 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) 2030 return -EOPNOTSUPP; 2031 2032 *prog_id = prog->aux->id; 2033 flags = event->tp_event->flags; 2034 is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT; 2035 is_syscall_tp = is_syscall_trace_event(event->tp_event); 2036 2037 if (is_tracepoint || is_syscall_tp) { 2038 *buf = is_tracepoint ? 
event->tp_event->tp->name 2039 : event->tp_event->name; 2040 *fd_type = BPF_FD_TYPE_TRACEPOINT; 2041 *probe_offset = 0x0; 2042 *probe_addr = 0x0; 2043 } else { 2044 /* kprobe/uprobe */ 2045 err = -EOPNOTSUPP; 2046 #ifdef CONFIG_KPROBE_EVENTS 2047 if (flags & TRACE_EVENT_FL_KPROBE) 2048 err = bpf_get_kprobe_info(event, fd_type, buf, 2049 probe_offset, probe_addr, 2050 event->attr.type == PERF_TYPE_TRACEPOINT); 2051 #endif 2052 #ifdef CONFIG_UPROBE_EVENTS 2053 if (flags & TRACE_EVENT_FL_UPROBE) 2054 err = bpf_get_uprobe_info(event, fd_type, buf, 2055 probe_offset, 2056 event->attr.type == PERF_TYPE_TRACEPOINT); 2057 #endif 2058 } 2059 2060 return err; 2061 } 2062 2063 static int __init send_signal_irq_work_init(void) 2064 { 2065 int cpu; 2066 struct send_signal_irq_work *work; 2067 2068 for_each_possible_cpu(cpu) { 2069 work = per_cpu_ptr(&send_signal_work, cpu); 2070 init_irq_work(&work->irq_work, do_bpf_send_signal); 2071 } 2072 return 0; 2073 } 2074 2075 subsys_initcall(send_signal_irq_work_init); 2076 2077 #ifdef CONFIG_MODULES 2078 static int bpf_event_notify(struct notifier_block *nb, unsigned long op, 2079 void *module) 2080 { 2081 struct bpf_trace_module *btm, *tmp; 2082 struct module *mod = module; 2083 2084 if (mod->num_bpf_raw_events == 0 || 2085 (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING)) 2086 return 0; 2087 2088 mutex_lock(&bpf_module_mutex); 2089 2090 switch (op) { 2091 case MODULE_STATE_COMING: 2092 btm = kzalloc(sizeof(*btm), GFP_KERNEL); 2093 if (btm) { 2094 btm->module = module; 2095 list_add(&btm->list, &bpf_trace_modules); 2096 } 2097 break; 2098 case MODULE_STATE_GOING: 2099 list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) { 2100 if (btm->module == module) { 2101 list_del(&btm->list); 2102 kfree(btm); 2103 break; 2104 } 2105 } 2106 break; 2107 } 2108 2109 mutex_unlock(&bpf_module_mutex); 2110 2111 return 0; 2112 } 2113 2114 static struct notifier_block bpf_module_nb = { 2115 .notifier_call = bpf_event_notify, 2116 }; 2117 2118 static int __init bpf_event_init(void) 2119 { 2120 register_module_notifier(&bpf_module_nb); 2121 return 0; 2122 } 2123 2124 fs_initcall(bpf_event_init); 2125 #endif /* CONFIG_MODULES */ 2126
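
/*
 * Usage note (illustrative sketch, not part of this file): the raw
 * tracepoint lookup above, including the module case tracked by
 * bpf_event_notify(), is what backs the BPF_RAW_TRACEPOINT_OPEN
 * command. From user space with libbpf it looks roughly like:
 *
 *	int tp_fd;
 *
 *	// prog_fd is a loaded BPF_PROG_TYPE_RAW_TRACEPOINT program.
 *	// "sched_switch" is resolved by bpf_get_raw_tracepoint(); a
 *	// tracepoint exported by a module would be found through
 *	// bpf_get_raw_tracepoint_module() instead.
 *	tp_fd = bpf_raw_tracepoint_open("sched_switch", prog_fd);
 */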