1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com 3 * Copyright (c) 2016 Facebook 4 */ 5 #include <linux/kernel.h> 6 #include <linux/types.h> 7 #include <linux/slab.h> 8 #include <linux/bpf.h> 9 #include <linux/bpf_verifier.h> 10 #include <linux/bpf_perf_event.h> 11 #include <linux/btf.h> 12 #include <linux/filter.h> 13 #include <linux/uaccess.h> 14 #include <linux/ctype.h> 15 #include <linux/kprobes.h> 16 #include <linux/spinlock.h> 17 #include <linux/syscalls.h> 18 #include <linux/error-injection.h> 19 #include <linux/btf_ids.h> 20 #include <linux/bpf_lsm.h> 21 #include <linux/fprobe.h> 22 #include <linux/bsearch.h> 23 #include <linux/sort.h> 24 #include <linux/key.h> 25 #include <linux/namei.h> 26 27 #include <net/bpf_sk_storage.h> 28 29 #include <uapi/linux/bpf.h> 30 #include <uapi/linux/btf.h> 31 32 #include <asm/tlb.h> 33 34 #include "trace_probe.h" 35 #include "trace.h" 36 37 #define CREATE_TRACE_POINTS 38 #include "bpf_trace.h" 39 40 #define bpf_event_rcu_dereference(p) \ 41 rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex)) 42 43 #define MAX_UPROBE_MULTI_CNT (1U << 20) 44 #define MAX_KPROBE_MULTI_CNT (1U << 20) 45 46 #ifdef CONFIG_MODULES 47 struct bpf_trace_module { 48 struct module *module; 49 struct list_head list; 50 }; 51 52 static LIST_HEAD(bpf_trace_modules); 53 static DEFINE_MUTEX(bpf_module_mutex); 54 55 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name) 56 { 57 struct bpf_raw_event_map *btp, *ret = NULL; 58 struct bpf_trace_module *btm; 59 unsigned int i; 60 61 mutex_lock(&bpf_module_mutex); 62 list_for_each_entry(btm, &bpf_trace_modules, list) { 63 for (i = 0; i < btm->module->num_bpf_raw_events; ++i) { 64 btp = &btm->module->bpf_raw_events[i]; 65 if (!strcmp(btp->tp->name, name)) { 66 if (try_module_get(btm->module)) 67 ret = btp; 68 goto out; 69 } 70 } 71 } 72 out: 73 mutex_unlock(&bpf_module_mutex); 74 return ret; 75 } 76 #else 77 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name) 78 { 79 return NULL; 80 } 81 #endif /* CONFIG_MODULES */ 82 83 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 84 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 85 86 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size, 87 u64 flags, const struct btf **btf, 88 s32 *btf_id); 89 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx); 90 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx); 91 92 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx); 93 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx); 94 95 /** 96 * trace_call_bpf - invoke BPF program 97 * @call: tracepoint event 98 * @ctx: opaque context pointer 99 * 100 * kprobe handlers execute BPF programs via this helper. 101 * Can be used from static tracepoints in the future. 
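 * The caller must not be in a context that can sleep; cant_sleep() in the
 * body below asserts this.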
102 * 103 * Return: BPF programs always return an integer which is interpreted by 104 * kprobe handler as: 105 * 0 - return from kprobe (event is filtered out) 106 * 1 - store kprobe event into ring buffer 107 * Other values are reserved and currently alias to 1 108 */ 109 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) 110 { 111 unsigned int ret; 112 113 cant_sleep(); 114 115 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { 116 /* 117 * Since some BPF program is already running on this CPU, 118 * don't call into another BPF program (same or different) 119 * and don't send a kprobe event into the ring buffer; 120 * just return zero here. 121 */ 122 rcu_read_lock(); 123 bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array)); 124 rcu_read_unlock(); 125 ret = 0; 126 goto out; 127 } 128 129 /* 130 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock 131 * to all call sites, we do a bpf_prog_array_valid() check there 132 * to see whether call->prog_array is empty or not, which is 133 * a heuristic to speed up execution. 134 * 135 * If the prog_array fetched by bpf_prog_array_valid() was 136 * non-NULL, we enter trace_call_bpf() and do the actual 137 * rcu_dereference() under the RCU read lock. 138 * If it turns out that prog_array is NULL, we bail out. 139 * Conversely, if the pointer fetched by bpf_prog_array_valid() 140 * was NULL, the call is skipped, with the accepted risk of missing 141 * events if the array was updated between that check and the 142 * rcu_dereference() here. 143 */ 144 rcu_read_lock(); 145 ret = bpf_prog_run_array(rcu_dereference(call->prog_array), 146 ctx, bpf_prog_run); 147 rcu_read_unlock(); 148 149 out: 150 __this_cpu_dec(bpf_prog_active); 151 152 return ret; 153 } 154 155 #ifdef CONFIG_BPF_KPROBE_OVERRIDE 156 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc) 157 { 158 regs_set_return_value(regs, rc); 159 override_function_with_return(regs); 160 return 0; 161 } 162 163 static const struct bpf_func_proto bpf_override_return_proto = { 164 .func = bpf_override_return, 165 .gpl_only = true, 166 .ret_type = RET_INTEGER, 167 .arg1_type = ARG_PTR_TO_CTX, 168 .arg2_type = ARG_ANYTHING, 169 }; 170 #endif 171 172 static __always_inline int 173 bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr) 174 { 175 int ret; 176 177 ret = copy_from_user_nofault(dst, unsafe_ptr, size); 178 if (unlikely(ret < 0)) 179 memset(dst, 0, size); 180 return ret; 181 } 182 183 BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size, 184 const void __user *, unsafe_ptr) 185 { 186 return bpf_probe_read_user_common(dst, size, unsafe_ptr); 187 } 188 189 const struct bpf_func_proto bpf_probe_read_user_proto = { 190 .func = bpf_probe_read_user, 191 .gpl_only = true, 192 .ret_type = RET_INTEGER, 193 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 194 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 195 .arg3_type = ARG_ANYTHING, 196 }; 197 198 static __always_inline int 199 bpf_probe_read_user_str_common(void *dst, u32 size, 200 const void __user *unsafe_ptr) 201 { 202 int ret; 203 204 /* 205 * NB: We rely on strncpy_from_user() not copying junk past the NUL 206 * terminator into `dst`. 207 * 208 * strncpy_from_user() does long-sized strides in the fast path. If the 209 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`, 210 * then there could be junk after the NUL in `dst`.
If user takes `dst` 211 * and keys a hash map with it, then semantically identical strings can 212 * occupy multiple entries in the map. 213 */ 214 ret = strncpy_from_user_nofault(dst, unsafe_ptr, size); 215 if (unlikely(ret < 0)) 216 memset(dst, 0, size); 217 return ret; 218 } 219 220 BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size, 221 const void __user *, unsafe_ptr) 222 { 223 return bpf_probe_read_user_str_common(dst, size, unsafe_ptr); 224 } 225 226 const struct bpf_func_proto bpf_probe_read_user_str_proto = { 227 .func = bpf_probe_read_user_str, 228 .gpl_only = true, 229 .ret_type = RET_INTEGER, 230 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 231 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 232 .arg3_type = ARG_ANYTHING, 233 }; 234 235 BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size, 236 const void *, unsafe_ptr) 237 { 238 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr); 239 } 240 241 const struct bpf_func_proto bpf_probe_read_kernel_proto = { 242 .func = bpf_probe_read_kernel, 243 .gpl_only = true, 244 .ret_type = RET_INTEGER, 245 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 246 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 247 .arg3_type = ARG_ANYTHING, 248 }; 249 250 static __always_inline int 251 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr) 252 { 253 int ret; 254 255 /* 256 * The strncpy_from_kernel_nofault() call will likely not fill the 257 * entire buffer, but that's okay in this circumstance as we're probing 258 * arbitrary memory anyway similar to bpf_probe_read_*() and might 259 * as well probe the stack. Thus, memory is explicitly cleared 260 * only in error case, so that improper users ignoring return 261 * code altogether don't copy garbage; otherwise length of string 262 * is returned that can be used for bpf_perf_event_output() et al. 
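 *
 * A minimal sketch of the resulting contract for callers of this wrapper
 * (src, handle_error() and use_string() are hypothetical):
 *
 *   char buf[16];
 *   int n = bpf_probe_read_kernel_str_common(buf, sizeof(buf), src);
 *   if (n < 0)              // error: buf has been zeroed
 *           handle_error(n);
 *   else                    // success: n is the string length incl. NUL
 *           use_string(buf, n);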
263 */ 264 ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size); 265 if (unlikely(ret < 0)) 266 memset(dst, 0, size); 267 return ret; 268 } 269 270 BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size, 271 const void *, unsafe_ptr) 272 { 273 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr); 274 } 275 276 const struct bpf_func_proto bpf_probe_read_kernel_str_proto = { 277 .func = bpf_probe_read_kernel_str, 278 .gpl_only = true, 279 .ret_type = RET_INTEGER, 280 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 281 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 282 .arg3_type = ARG_ANYTHING, 283 }; 284 285 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 286 BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size, 287 const void *, unsafe_ptr) 288 { 289 if ((unsigned long)unsafe_ptr < TASK_SIZE) { 290 return bpf_probe_read_user_common(dst, size, 291 (__force void __user *)unsafe_ptr); 292 } 293 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr); 294 } 295 296 static const struct bpf_func_proto bpf_probe_read_compat_proto = { 297 .func = bpf_probe_read_compat, 298 .gpl_only = true, 299 .ret_type = RET_INTEGER, 300 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 301 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 302 .arg3_type = ARG_ANYTHING, 303 }; 304 305 BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size, 306 const void *, unsafe_ptr) 307 { 308 if ((unsigned long)unsafe_ptr < TASK_SIZE) { 309 return bpf_probe_read_user_str_common(dst, size, 310 (__force void __user *)unsafe_ptr); 311 } 312 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr); 313 } 314 315 static const struct bpf_func_proto bpf_probe_read_compat_str_proto = { 316 .func = bpf_probe_read_compat_str, 317 .gpl_only = true, 318 .ret_type = RET_INTEGER, 319 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 320 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 321 .arg3_type = ARG_ANYTHING, 322 }; 323 #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */ 324 325 BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src, 326 u32, size) 327 { 328 /* 329 * Ensure we're in user context which is safe for the helper to 330 * run. This helper has no business in a kthread. 331 * 332 * access_ok() should prevent writing to non-user memory, but in 333 * some situations (nommu, temporary switch, etc) access_ok() does 334 * not provide enough validation, hence the check on KERNEL_DS. 335 * 336 * nmi_uaccess_okay() ensures the probe is not run in an interim 337 * state, when the task or mm are switched. This is specifically 338 * required to prevent the use of temporary mm. 
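 *
 * Taken together, the checks below reject interrupt context, kthreads and
 * exiting tasks, and any interim state in which nmi_uaccess_okay() reports
 * that the current mm cannot be safely accessed; only then is
 * copy_to_user_nofault() attempted.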
339 */ 340 341 if (unlikely(in_interrupt() || 342 current->flags & (PF_KTHREAD | PF_EXITING))) 343 return -EPERM; 344 if (unlikely(!nmi_uaccess_okay())) 345 return -EPERM; 346 347 return copy_to_user_nofault(unsafe_ptr, src, size); 348 } 349 350 static const struct bpf_func_proto bpf_probe_write_user_proto = { 351 .func = bpf_probe_write_user, 352 .gpl_only = true, 353 .ret_type = RET_INTEGER, 354 .arg1_type = ARG_ANYTHING, 355 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 356 .arg3_type = ARG_CONST_SIZE, 357 }; 358 359 #define MAX_TRACE_PRINTK_VARARGS 3 360 #define BPF_TRACE_PRINTK_SIZE 1024 361 362 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, 363 u64, arg2, u64, arg3) 364 { 365 u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 }; 366 struct bpf_bprintf_data data = { 367 .get_bin_args = true, 368 .get_buf = true, 369 }; 370 int ret; 371 372 ret = bpf_bprintf_prepare(fmt, fmt_size, args, 373 MAX_TRACE_PRINTK_VARARGS, &data); 374 if (ret < 0) 375 return ret; 376 377 ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args); 378 379 trace_bpf_trace_printk(data.buf); 380 381 bpf_bprintf_cleanup(&data); 382 383 return ret; 384 } 385 386 static const struct bpf_func_proto bpf_trace_printk_proto = { 387 .func = bpf_trace_printk, 388 .gpl_only = true, 389 .ret_type = RET_INTEGER, 390 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, 391 .arg2_type = ARG_CONST_SIZE, 392 }; 393 394 static void __set_printk_clr_event(struct work_struct *work) 395 { 396 /* 397 * This program might be calling bpf_trace_printk, 398 * so enable the associated bpf_trace/bpf_trace_printk event. 399 * Repeat this each time as it is possible a user has 400 * disabled bpf_trace_printk events. By loading a program 401 * calling bpf_trace_printk() however the user has expressed 402 * the intent to see such events. 
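 *
 * Note that this function runs from the set_printk_work work item declared
 * below; bpf_get_trace_printk_proto() only schedules that work instead of
 * calling trace_set_clr_event() directly.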
403 */ 404 if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1)) 405 pr_warn_ratelimited("could not enable bpf_trace_printk events"); 406 } 407 static DECLARE_WORK(set_printk_work, __set_printk_clr_event); 408 409 const struct bpf_func_proto *bpf_get_trace_printk_proto(void) 410 { 411 schedule_work(&set_printk_work); 412 return &bpf_trace_printk_proto; 413 } 414 415 BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args, 416 u32, data_len) 417 { 418 struct bpf_bprintf_data data = { 419 .get_bin_args = true, 420 .get_buf = true, 421 }; 422 int ret, num_args; 423 424 if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 || 425 (data_len && !args)) 426 return -EINVAL; 427 num_args = data_len / 8; 428 429 ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data); 430 if (ret < 0) 431 return ret; 432 433 ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args); 434 435 trace_bpf_trace_printk(data.buf); 436 437 bpf_bprintf_cleanup(&data); 438 439 return ret; 440 } 441 442 static const struct bpf_func_proto bpf_trace_vprintk_proto = { 443 .func = bpf_trace_vprintk, 444 .gpl_only = true, 445 .ret_type = RET_INTEGER, 446 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, 447 .arg2_type = ARG_CONST_SIZE, 448 .arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, 449 .arg4_type = ARG_CONST_SIZE_OR_ZERO, 450 }; 451 452 const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void) 453 { 454 schedule_work(&set_printk_work); 455 return &bpf_trace_vprintk_proto; 456 } 457 458 BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, 459 const void *, args, u32, data_len) 460 { 461 struct bpf_bprintf_data data = { 462 .get_bin_args = true, 463 }; 464 int err, num_args; 465 466 if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 || 467 (data_len && !args)) 468 return -EINVAL; 469 num_args = data_len / 8; 470 471 err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data); 472 if (err < 0) 473 return err; 474 475 seq_bprintf(m, fmt, data.bin_args); 476 477 bpf_bprintf_cleanup(&data); 478 479 return seq_has_overflowed(m) ? -EOVERFLOW : 0; 480 } 481 482 BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file) 483 484 static const struct bpf_func_proto bpf_seq_printf_proto = { 485 .func = bpf_seq_printf, 486 .gpl_only = true, 487 .ret_type = RET_INTEGER, 488 .arg1_type = ARG_PTR_TO_BTF_ID, 489 .arg1_btf_id = &btf_seq_file_ids[0], 490 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 491 .arg3_type = ARG_CONST_SIZE, 492 .arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, 493 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 494 }; 495 496 BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len) 497 { 498 return seq_write(m, data, len) ? 
-EOVERFLOW : 0; 499 } 500 501 static const struct bpf_func_proto bpf_seq_write_proto = { 502 .func = bpf_seq_write, 503 .gpl_only = true, 504 .ret_type = RET_INTEGER, 505 .arg1_type = ARG_PTR_TO_BTF_ID, 506 .arg1_btf_id = &btf_seq_file_ids[0], 507 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 508 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 509 }; 510 511 BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr, 512 u32, btf_ptr_size, u64, flags) 513 { 514 const struct btf *btf; 515 s32 btf_id; 516 int ret; 517 518 ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id); 519 if (ret) 520 return ret; 521 522 return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags); 523 } 524 525 static const struct bpf_func_proto bpf_seq_printf_btf_proto = { 526 .func = bpf_seq_printf_btf, 527 .gpl_only = true, 528 .ret_type = RET_INTEGER, 529 .arg1_type = ARG_PTR_TO_BTF_ID, 530 .arg1_btf_id = &btf_seq_file_ids[0], 531 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 532 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 533 .arg4_type = ARG_ANYTHING, 534 }; 535 536 static __always_inline int 537 get_map_perf_counter(struct bpf_map *map, u64 flags, 538 u64 *value, u64 *enabled, u64 *running) 539 { 540 struct bpf_array *array = container_of(map, struct bpf_array, map); 541 unsigned int cpu = smp_processor_id(); 542 u64 index = flags & BPF_F_INDEX_MASK; 543 struct bpf_event_entry *ee; 544 545 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) 546 return -EINVAL; 547 if (index == BPF_F_CURRENT_CPU) 548 index = cpu; 549 if (unlikely(index >= array->map.max_entries)) 550 return -E2BIG; 551 552 ee = READ_ONCE(array->ptrs[index]); 553 if (!ee) 554 return -ENOENT; 555 556 return perf_event_read_local(ee->event, value, enabled, running); 557 } 558 559 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags) 560 { 561 u64 value = 0; 562 int err; 563 564 err = get_map_perf_counter(map, flags, &value, NULL, NULL); 565 /* 566 * this api is ugly since we miss [-22..-2] range of valid 567 * counter values, but that's uapi 568 */ 569 if (err) 570 return err; 571 return value; 572 } 573 574 const struct bpf_func_proto bpf_perf_event_read_proto = { 575 .func = bpf_perf_event_read, 576 .gpl_only = true, 577 .ret_type = RET_INTEGER, 578 .arg1_type = ARG_CONST_MAP_PTR, 579 .arg2_type = ARG_ANYTHING, 580 }; 581 582 BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags, 583 struct bpf_perf_event_value *, buf, u32, size) 584 { 585 int err = -EINVAL; 586 587 if (unlikely(size != sizeof(struct bpf_perf_event_value))) 588 goto clear; 589 err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled, 590 &buf->running); 591 if (unlikely(err)) 592 goto clear; 593 return 0; 594 clear: 595 memset(buf, 0, size); 596 return err; 597 } 598 599 static const struct bpf_func_proto bpf_perf_event_read_value_proto = { 600 .func = bpf_perf_event_read_value, 601 .gpl_only = true, 602 .ret_type = RET_INTEGER, 603 .arg1_type = ARG_CONST_MAP_PTR, 604 .arg2_type = ARG_ANYTHING, 605 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 606 .arg4_type = ARG_CONST_SIZE, 607 }; 608 609 const struct bpf_func_proto *bpf_get_perf_event_read_value_proto(void) 610 { 611 return &bpf_perf_event_read_value_proto; 612 } 613 614 static __always_inline u64 615 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, 616 u64 flags, struct perf_raw_record *raw, 617 struct perf_sample_data *sd) 618 { 619 struct bpf_array *array = container_of(map, struct bpf_array, map); 620 unsigned int cpu = smp_processor_id(); 621 u64 index = flags & 
BPF_F_INDEX_MASK; 622 struct bpf_event_entry *ee; 623 struct perf_event *event; 624 625 if (index == BPF_F_CURRENT_CPU) 626 index = cpu; 627 if (unlikely(index >= array->map.max_entries)) 628 return -E2BIG; 629 630 ee = READ_ONCE(array->ptrs[index]); 631 if (!ee) 632 return -ENOENT; 633 634 event = ee->event; 635 if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE || 636 event->attr.config != PERF_COUNT_SW_BPF_OUTPUT)) 637 return -EINVAL; 638 639 if (unlikely(event->oncpu != cpu)) 640 return -EOPNOTSUPP; 641 642 perf_sample_save_raw_data(sd, event, raw); 643 644 return perf_event_output(event, sd, regs); 645 } 646 647 /* 648 * Support executing tracepoints in normal, irq, and nmi context that each call 649 * bpf_perf_event_output 650 */ 651 struct bpf_trace_sample_data { 652 struct perf_sample_data sds[3]; 653 }; 654 655 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds); 656 static DEFINE_PER_CPU(int, bpf_trace_nest_level); 657 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, 658 u64, flags, void *, data, u64, size) 659 { 660 struct bpf_trace_sample_data *sds; 661 struct perf_raw_record raw = { 662 .frag = { 663 .size = size, 664 .data = data, 665 }, 666 }; 667 struct perf_sample_data *sd; 668 int nest_level, err; 669 670 preempt_disable(); 671 sds = this_cpu_ptr(&bpf_trace_sds); 672 nest_level = this_cpu_inc_return(bpf_trace_nest_level); 673 674 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) { 675 err = -EBUSY; 676 goto out; 677 } 678 679 sd = &sds->sds[nest_level - 1]; 680 681 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) { 682 err = -EINVAL; 683 goto out; 684 } 685 686 perf_sample_data_init(sd, 0, 0); 687 688 err = __bpf_perf_event_output(regs, map, flags, &raw, sd); 689 out: 690 this_cpu_dec(bpf_trace_nest_level); 691 preempt_enable(); 692 return err; 693 } 694 695 static const struct bpf_func_proto bpf_perf_event_output_proto = { 696 .func = bpf_perf_event_output, 697 .gpl_only = true, 698 .ret_type = RET_INTEGER, 699 .arg1_type = ARG_PTR_TO_CTX, 700 .arg2_type = ARG_CONST_MAP_PTR, 701 .arg3_type = ARG_ANYTHING, 702 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 703 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 704 }; 705 706 static DEFINE_PER_CPU(int, bpf_event_output_nest_level); 707 struct bpf_nested_pt_regs { 708 struct pt_regs regs[3]; 709 }; 710 static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs); 711 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds); 712 713 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, 714 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) 715 { 716 struct perf_raw_frag frag = { 717 .copy = ctx_copy, 718 .size = ctx_size, 719 .data = ctx, 720 }; 721 struct perf_raw_record raw = { 722 .frag = { 723 { 724 .next = ctx_size ? 
&frag : NULL, 725 }, 726 .size = meta_size, 727 .data = meta, 728 }, 729 }; 730 struct perf_sample_data *sd; 731 struct pt_regs *regs; 732 int nest_level; 733 u64 ret; 734 735 preempt_disable(); 736 nest_level = this_cpu_inc_return(bpf_event_output_nest_level); 737 738 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) { 739 ret = -EBUSY; 740 goto out; 741 } 742 sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]); 743 regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]); 744 745 perf_fetch_caller_regs(regs); 746 perf_sample_data_init(sd, 0, 0); 747 748 ret = __bpf_perf_event_output(regs, map, flags, &raw, sd); 749 out: 750 this_cpu_dec(bpf_event_output_nest_level); 751 preempt_enable(); 752 return ret; 753 } 754 755 BPF_CALL_0(bpf_get_current_task) 756 { 757 return (long) current; 758 } 759 760 const struct bpf_func_proto bpf_get_current_task_proto = { 761 .func = bpf_get_current_task, 762 .gpl_only = true, 763 .ret_type = RET_INTEGER, 764 }; 765 766 BPF_CALL_0(bpf_get_current_task_btf) 767 { 768 return (unsigned long) current; 769 } 770 771 const struct bpf_func_proto bpf_get_current_task_btf_proto = { 772 .func = bpf_get_current_task_btf, 773 .gpl_only = true, 774 .ret_type = RET_PTR_TO_BTF_ID_TRUSTED, 775 .ret_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], 776 }; 777 778 BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task) 779 { 780 return (unsigned long) task_pt_regs(task); 781 } 782 783 BTF_ID_LIST_SINGLE(bpf_task_pt_regs_ids, struct, pt_regs) 784 785 const struct bpf_func_proto bpf_task_pt_regs_proto = { 786 .func = bpf_task_pt_regs, 787 .gpl_only = true, 788 .arg1_type = ARG_PTR_TO_BTF_ID, 789 .arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], 790 .ret_type = RET_PTR_TO_BTF_ID, 791 .ret_btf_id = &bpf_task_pt_regs_ids[0], 792 }; 793 794 struct send_signal_irq_work { 795 struct irq_work irq_work; 796 struct task_struct *task; 797 u32 sig; 798 enum pid_type type; 799 bool has_siginfo; 800 struct kernel_siginfo info; 801 }; 802 803 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work); 804 805 static void do_bpf_send_signal(struct irq_work *entry) 806 { 807 struct send_signal_irq_work *work; 808 struct kernel_siginfo *siginfo; 809 810 work = container_of(entry, struct send_signal_irq_work, irq_work); 811 siginfo = work->has_siginfo ? &work->info : SEND_SIG_PRIV; 812 813 group_send_sig_info(work->sig, siginfo, work->task, work->type); 814 put_task_struct(work->task); 815 } 816 817 static int bpf_send_signal_common(u32 sig, enum pid_type type, struct task_struct *task, u64 value) 818 { 819 struct send_signal_irq_work *work = NULL; 820 struct kernel_siginfo info; 821 struct kernel_siginfo *siginfo; 822 823 if (!task) { 824 task = current; 825 siginfo = SEND_SIG_PRIV; 826 } else { 827 clear_siginfo(&info); 828 info.si_signo = sig; 829 info.si_errno = 0; 830 info.si_code = SI_KERNEL; 831 info.si_pid = 0; 832 info.si_uid = 0; 833 info.si_value.sival_ptr = (void __user __force *)(unsigned long)value; 834 siginfo = &info; 835 } 836 837 /* Similar to bpf_probe_write_user, task needs to be 838 * in a sound condition and kernel memory access be 839 * permitted in order to send signal to the current 840 * task. 841 */ 842 if (unlikely(task->flags & (PF_KTHREAD | PF_EXITING))) 843 return -EPERM; 844 if (unlikely(!nmi_uaccess_okay())) 845 return -EPERM; 846 /* Task should not be pid=1 to avoid kernel panic. 
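 * Sending a signal to init (checked via is_global_init() below) is
 * therefore refused with -EPERM.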
*/ 847 if (unlikely(is_global_init(task))) 848 return -EPERM; 849 850 if (preempt_count() != 0 || irqs_disabled()) { 851 /* Do an early check on signal validity. Otherwise, 852 * the error is lost in deferred irq_work. 853 */ 854 if (unlikely(!valid_signal(sig))) 855 return -EINVAL; 856 857 work = this_cpu_ptr(&send_signal_work); 858 if (irq_work_is_busy(&work->irq_work)) 859 return -EBUSY; 860 861 /* Add the current task, which is the target of sending signal, 862 * to the irq_work. The current task may change when queued 863 * irq works get executed. 864 */ 865 work->task = get_task_struct(task); 866 work->has_siginfo = siginfo == &info; 867 if (work->has_siginfo) 868 copy_siginfo(&work->info, &info); 869 work->sig = sig; 870 work->type = type; 871 irq_work_queue(&work->irq_work); 872 return 0; 873 } 874 875 return group_send_sig_info(sig, siginfo, task, type); 876 } 877 878 BPF_CALL_1(bpf_send_signal, u32, sig) 879 { 880 return bpf_send_signal_common(sig, PIDTYPE_TGID, NULL, 0); 881 } 882 883 const struct bpf_func_proto bpf_send_signal_proto = { 884 .func = bpf_send_signal, 885 .gpl_only = false, 886 .ret_type = RET_INTEGER, 887 .arg1_type = ARG_ANYTHING, 888 }; 889 890 BPF_CALL_1(bpf_send_signal_thread, u32, sig) 891 { 892 return bpf_send_signal_common(sig, PIDTYPE_PID, NULL, 0); 893 } 894 895 const struct bpf_func_proto bpf_send_signal_thread_proto = { 896 .func = bpf_send_signal_thread, 897 .gpl_only = false, 898 .ret_type = RET_INTEGER, 899 .arg1_type = ARG_ANYTHING, 900 }; 901 902 BPF_CALL_3(bpf_d_path, const struct path *, path, char *, buf, u32, sz) 903 { 904 struct path copy; 905 long len; 906 char *p; 907 908 if (!sz) 909 return 0; 910 911 /* 912 * The path pointer is verified as trusted and safe to use, 913 * but let's double check it's valid anyway to workaround 914 * potentially broken verifier. 
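 * copy_from_kernel_nofault() below takes a local copy of *path, so a bogus
 * pointer results in an error return here rather than a fault inside
 * d_path().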
915 */ 916 len = copy_from_kernel_nofault(&copy, path, sizeof(*path)); 917 if (len < 0) 918 return len; 919 920 p = d_path(&copy, buf, sz); 921 if (IS_ERR(p)) { 922 len = PTR_ERR(p); 923 } else { 924 len = buf + sz - p; 925 memmove(buf, p, len); 926 } 927 928 return len; 929 } 930 931 BTF_SET_START(btf_allowlist_d_path) 932 #ifdef CONFIG_SECURITY 933 BTF_ID(func, security_file_permission) 934 BTF_ID(func, security_inode_getattr) 935 BTF_ID(func, security_file_open) 936 #endif 937 #ifdef CONFIG_SECURITY_PATH 938 BTF_ID(func, security_path_truncate) 939 #endif 940 BTF_ID(func, vfs_truncate) 941 BTF_ID(func, vfs_fallocate) 942 BTF_ID(func, dentry_open) 943 BTF_ID(func, vfs_getattr) 944 BTF_ID(func, filp_close) 945 BTF_SET_END(btf_allowlist_d_path) 946 947 static bool bpf_d_path_allowed(const struct bpf_prog *prog) 948 { 949 if (prog->type == BPF_PROG_TYPE_TRACING && 950 prog->expected_attach_type == BPF_TRACE_ITER) 951 return true; 952 953 if (prog->type == BPF_PROG_TYPE_LSM) 954 return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id); 955 956 return btf_id_set_contains(&btf_allowlist_d_path, 957 prog->aux->attach_btf_id); 958 } 959 960 BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path) 961 962 static const struct bpf_func_proto bpf_d_path_proto = { 963 .func = bpf_d_path, 964 .gpl_only = false, 965 .ret_type = RET_INTEGER, 966 .arg1_type = ARG_PTR_TO_BTF_ID, 967 .arg1_btf_id = &bpf_d_path_btf_ids[0], 968 .arg2_type = ARG_PTR_TO_MEM | MEM_WRITE, 969 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 970 .allowed = bpf_d_path_allowed, 971 }; 972 973 #define BTF_F_ALL (BTF_F_COMPACT | BTF_F_NONAME | \ 974 BTF_F_PTR_RAW | BTF_F_ZERO) 975 976 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size, 977 u64 flags, const struct btf **btf, 978 s32 *btf_id) 979 { 980 const struct btf_type *t; 981 982 if (unlikely(flags & ~(BTF_F_ALL))) 983 return -EINVAL; 984 985 if (btf_ptr_size != sizeof(struct btf_ptr)) 986 return -EINVAL; 987 988 *btf = bpf_get_btf_vmlinux(); 989 990 if (IS_ERR_OR_NULL(*btf)) 991 return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL; 992 993 if (ptr->type_id > 0) 994 *btf_id = ptr->type_id; 995 else 996 return -EINVAL; 997 998 if (*btf_id > 0) 999 t = btf_type_by_id(*btf, *btf_id); 1000 if (*btf_id <= 0 || !t) 1001 return -ENOENT; 1002 1003 return 0; 1004 } 1005 1006 BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr, 1007 u32, btf_ptr_size, u64, flags) 1008 { 1009 const struct btf *btf; 1010 s32 btf_id; 1011 int ret; 1012 1013 ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id); 1014 if (ret) 1015 return ret; 1016 1017 return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size, 1018 flags); 1019 } 1020 1021 const struct bpf_func_proto bpf_snprintf_btf_proto = { 1022 .func = bpf_snprintf_btf, 1023 .gpl_only = false, 1024 .ret_type = RET_INTEGER, 1025 .arg1_type = ARG_PTR_TO_MEM | MEM_WRITE, 1026 .arg2_type = ARG_CONST_SIZE, 1027 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, 1028 .arg4_type = ARG_CONST_SIZE, 1029 .arg5_type = ARG_ANYTHING, 1030 }; 1031 1032 BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx) 1033 { 1034 /* This helper call is inlined by verifier.
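 * The trampoline is expected to stash the traced function's IP just below
 * the argument array that ctx points to, hence the ctx[-2] load below.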
*/ 1035 return ((u64 *)ctx)[-2]; 1036 } 1037 1038 static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = { 1039 .func = bpf_get_func_ip_tracing, 1040 .gpl_only = true, 1041 .ret_type = RET_INTEGER, 1042 .arg1_type = ARG_PTR_TO_CTX, 1043 }; 1044 1045 static inline unsigned long get_entry_ip(unsigned long fentry_ip) 1046 { 1047 #ifdef CONFIG_X86_KERNEL_IBT 1048 if (is_endbr((void *)(fentry_ip - ENDBR_INSN_SIZE))) 1049 fentry_ip -= ENDBR_INSN_SIZE; 1050 #endif 1051 return fentry_ip; 1052 } 1053 1054 BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs) 1055 { 1056 struct bpf_trace_run_ctx *run_ctx __maybe_unused; 1057 struct kprobe *kp; 1058 1059 #ifdef CONFIG_UPROBES 1060 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx); 1061 if (run_ctx->is_uprobe) 1062 return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr; 1063 #endif 1064 1065 kp = kprobe_running(); 1066 1067 if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY)) 1068 return 0; 1069 1070 return get_entry_ip((uintptr_t)kp->addr); 1071 } 1072 1073 static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = { 1074 .func = bpf_get_func_ip_kprobe, 1075 .gpl_only = true, 1076 .ret_type = RET_INTEGER, 1077 .arg1_type = ARG_PTR_TO_CTX, 1078 }; 1079 1080 BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs) 1081 { 1082 return bpf_kprobe_multi_entry_ip(current->bpf_ctx); 1083 } 1084 1085 static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = { 1086 .func = bpf_get_func_ip_kprobe_multi, 1087 .gpl_only = false, 1088 .ret_type = RET_INTEGER, 1089 .arg1_type = ARG_PTR_TO_CTX, 1090 }; 1091 1092 BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs) 1093 { 1094 return bpf_kprobe_multi_cookie(current->bpf_ctx); 1095 } 1096 1097 static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = { 1098 .func = bpf_get_attach_cookie_kprobe_multi, 1099 .gpl_only = false, 1100 .ret_type = RET_INTEGER, 1101 .arg1_type = ARG_PTR_TO_CTX, 1102 }; 1103 1104 BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs) 1105 { 1106 return bpf_uprobe_multi_entry_ip(current->bpf_ctx); 1107 } 1108 1109 static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi = { 1110 .func = bpf_get_func_ip_uprobe_multi, 1111 .gpl_only = false, 1112 .ret_type = RET_INTEGER, 1113 .arg1_type = ARG_PTR_TO_CTX, 1114 }; 1115 1116 BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs) 1117 { 1118 return bpf_uprobe_multi_cookie(current->bpf_ctx); 1119 } 1120 1121 static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti = { 1122 .func = bpf_get_attach_cookie_uprobe_multi, 1123 .gpl_only = false, 1124 .ret_type = RET_INTEGER, 1125 .arg1_type = ARG_PTR_TO_CTX, 1126 }; 1127 1128 BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx) 1129 { 1130 struct bpf_trace_run_ctx *run_ctx; 1131 1132 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx); 1133 return run_ctx->bpf_cookie; 1134 } 1135 1136 static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = { 1137 .func = bpf_get_attach_cookie_trace, 1138 .gpl_only = false, 1139 .ret_type = RET_INTEGER, 1140 .arg1_type = ARG_PTR_TO_CTX, 1141 }; 1142 1143 BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx) 1144 { 1145 return ctx->event->bpf_cookie; 1146 } 1147 1148 static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = { 1149 .func = bpf_get_attach_cookie_pe, 1150 .gpl_only = false, 1151 .ret_type = RET_INTEGER, 1152 .arg1_type = 
ARG_PTR_TO_CTX, 1153 }; 1154 1155 BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx) 1156 { 1157 struct bpf_trace_run_ctx *run_ctx; 1158 1159 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx); 1160 return run_ctx->bpf_cookie; 1161 } 1162 1163 static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = { 1164 .func = bpf_get_attach_cookie_tracing, 1165 .gpl_only = false, 1166 .ret_type = RET_INTEGER, 1167 .arg1_type = ARG_PTR_TO_CTX, 1168 }; 1169 1170 BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags) 1171 { 1172 static const u32 br_entry_size = sizeof(struct perf_branch_entry); 1173 u32 entry_cnt = size / br_entry_size; 1174 1175 entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt); 1176 1177 if (unlikely(flags)) 1178 return -EINVAL; 1179 1180 if (!entry_cnt) 1181 return -ENOENT; 1182 1183 return entry_cnt * br_entry_size; 1184 } 1185 1186 const struct bpf_func_proto bpf_get_branch_snapshot_proto = { 1187 .func = bpf_get_branch_snapshot, 1188 .gpl_only = true, 1189 .ret_type = RET_INTEGER, 1190 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 1191 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 1192 }; 1193 1194 BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value) 1195 { 1196 /* This helper call is inlined by verifier. */ 1197 u64 nr_args = ((u64 *)ctx)[-1] & 0xFF; 1198 1199 if ((u64) n >= nr_args) 1200 return -EINVAL; 1201 *value = ((u64 *)ctx)[n]; 1202 return 0; 1203 } 1204 1205 static const struct bpf_func_proto bpf_get_func_arg_proto = { 1206 .func = get_func_arg, 1207 .ret_type = RET_INTEGER, 1208 .arg1_type = ARG_PTR_TO_CTX, 1209 .arg2_type = ARG_ANYTHING, 1210 .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED, 1211 .arg3_size = sizeof(u64), 1212 }; 1213 1214 BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value) 1215 { 1216 /* This helper call is inlined by verifier. */ 1217 u64 nr_args = ((u64 *)ctx)[-1] & 0xFF; 1218 1219 *value = ((u64 *)ctx)[nr_args]; 1220 return 0; 1221 } 1222 1223 static const struct bpf_func_proto bpf_get_func_ret_proto = { 1224 .func = get_func_ret, 1225 .ret_type = RET_INTEGER, 1226 .arg1_type = ARG_PTR_TO_CTX, 1227 .arg2_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED, 1228 .arg2_size = sizeof(u64), 1229 }; 1230 1231 BPF_CALL_1(get_func_arg_cnt, void *, ctx) 1232 { 1233 /* This helper call is inlined by verifier. */ 1234 return ((u64 *)ctx)[-1] & 0xFF; 1235 } 1236 1237 static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = { 1238 .func = get_func_arg_cnt, 1239 .ret_type = RET_INTEGER, 1240 .arg1_type = ARG_PTR_TO_CTX, 1241 }; 1242 1243 static const struct bpf_func_proto * 1244 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1245 { 1246 const struct bpf_func_proto *func_proto; 1247 1248 switch (func_id) { 1249 case BPF_FUNC_get_smp_processor_id: 1250 return &bpf_get_smp_processor_id_proto; 1251 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 1252 case BPF_FUNC_probe_read: 1253 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 1254 NULL : &bpf_probe_read_compat_proto; 1255 case BPF_FUNC_probe_read_str: 1256 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 
1257 NULL : &bpf_probe_read_compat_str_proto; 1258 #endif 1259 case BPF_FUNC_get_func_ip: 1260 return &bpf_get_func_ip_proto_tracing; 1261 default: 1262 break; 1263 } 1264 1265 func_proto = bpf_base_func_proto(func_id, prog); 1266 if (func_proto) 1267 return func_proto; 1268 1269 if (!bpf_token_capable(prog->aux->token, CAP_SYS_ADMIN)) 1270 return NULL; 1271 1272 switch (func_id) { 1273 case BPF_FUNC_probe_write_user: 1274 return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ? 1275 NULL : &bpf_probe_write_user_proto; 1276 default: 1277 return NULL; 1278 } 1279 } 1280 1281 static bool is_kprobe_multi(const struct bpf_prog *prog) 1282 { 1283 return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI || 1284 prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION; 1285 } 1286 1287 static inline bool is_kprobe_session(const struct bpf_prog *prog) 1288 { 1289 return prog->type == BPF_PROG_TYPE_KPROBE && 1290 prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION; 1291 } 1292 1293 static inline bool is_uprobe_multi(const struct bpf_prog *prog) 1294 { 1295 return prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI || 1296 prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION; 1297 } 1298 1299 static inline bool is_uprobe_session(const struct bpf_prog *prog) 1300 { 1301 return prog->type == BPF_PROG_TYPE_KPROBE && 1302 prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION; 1303 } 1304 1305 static inline bool is_trace_fsession(const struct bpf_prog *prog) 1306 { 1307 return prog->type == BPF_PROG_TYPE_TRACING && 1308 prog->expected_attach_type == BPF_TRACE_FSESSION; 1309 } 1310 1311 static const struct bpf_func_proto * 1312 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1313 { 1314 switch (func_id) { 1315 case BPF_FUNC_perf_event_output: 1316 return &bpf_perf_event_output_proto; 1317 case BPF_FUNC_get_stackid: 1318 return &bpf_get_stackid_proto; 1319 case BPF_FUNC_get_stack: 1320 return prog->sleepable ? &bpf_get_stack_sleepable_proto : &bpf_get_stack_proto; 1321 #ifdef CONFIG_BPF_KPROBE_OVERRIDE 1322 case BPF_FUNC_override_return: 1323 return &bpf_override_return_proto; 1324 #endif 1325 case BPF_FUNC_get_func_ip: 1326 if (is_kprobe_multi(prog)) 1327 return &bpf_get_func_ip_proto_kprobe_multi; 1328 if (is_uprobe_multi(prog)) 1329 return &bpf_get_func_ip_proto_uprobe_multi; 1330 return &bpf_get_func_ip_proto_kprobe; 1331 case BPF_FUNC_get_attach_cookie: 1332 if (is_kprobe_multi(prog)) 1333 return &bpf_get_attach_cookie_proto_kmulti; 1334 if (is_uprobe_multi(prog)) 1335 return &bpf_get_attach_cookie_proto_umulti; 1336 return &bpf_get_attach_cookie_proto_trace; 1337 default: 1338 return bpf_tracing_func_proto(func_id, prog); 1339 } 1340 } 1341 1342 /* bpf+kprobe programs can access fields of 'struct pt_regs' */ 1343 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type, 1344 const struct bpf_prog *prog, 1345 struct bpf_insn_access_aux *info) 1346 { 1347 if (off < 0 || off >= sizeof(struct pt_regs)) 1348 return false; 1349 if (off % size != 0) 1350 return false; 1351 /* 1352 * Assertion for 32 bit to make sure last 8 byte access 1353 * (BPF_DW) to the last 4 byte member is disallowed. 
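 * In other words, an access must fit entirely within struct pt_regs; the
 * off + size bounds check below enforces this.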
1354 */ 1355 if (off + size > sizeof(struct pt_regs)) 1356 return false; 1357 1358 if (type == BPF_WRITE) 1359 prog->aux->kprobe_write_ctx = true; 1360 1361 return true; 1362 } 1363 1364 const struct bpf_verifier_ops kprobe_verifier_ops = { 1365 .get_func_proto = kprobe_prog_func_proto, 1366 .is_valid_access = kprobe_prog_is_valid_access, 1367 }; 1368 1369 const struct bpf_prog_ops kprobe_prog_ops = { 1370 }; 1371 1372 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map, 1373 u64, flags, void *, data, u64, size) 1374 { 1375 struct pt_regs *regs = *(struct pt_regs **)tp_buff; 1376 1377 /* 1378 * r1 points to perf tracepoint buffer where first 8 bytes are hidden 1379 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it 1380 * from there and call the same bpf_perf_event_output() helper inline. 1381 */ 1382 return ____bpf_perf_event_output(regs, map, flags, data, size); 1383 } 1384 1385 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = { 1386 .func = bpf_perf_event_output_tp, 1387 .gpl_only = true, 1388 .ret_type = RET_INTEGER, 1389 .arg1_type = ARG_PTR_TO_CTX, 1390 .arg2_type = ARG_CONST_MAP_PTR, 1391 .arg3_type = ARG_ANYTHING, 1392 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 1393 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 1394 }; 1395 1396 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map, 1397 u64, flags) 1398 { 1399 struct pt_regs *regs = *(struct pt_regs **)tp_buff; 1400 1401 /* 1402 * Same comment as in bpf_perf_event_output_tp(), only that this time 1403 * the other helper's function body cannot be inlined due to being 1404 * external, thus we need to call raw helper function. 1405 */ 1406 return bpf_get_stackid((unsigned long) regs, (unsigned long) map, 1407 flags, 0, 0); 1408 } 1409 1410 static const struct bpf_func_proto bpf_get_stackid_proto_tp = { 1411 .func = bpf_get_stackid_tp, 1412 .gpl_only = true, 1413 .ret_type = RET_INTEGER, 1414 .arg1_type = ARG_PTR_TO_CTX, 1415 .arg2_type = ARG_CONST_MAP_PTR, 1416 .arg3_type = ARG_ANYTHING, 1417 }; 1418 1419 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size, 1420 u64, flags) 1421 { 1422 struct pt_regs *regs = *(struct pt_regs **)tp_buff; 1423 1424 return bpf_get_stack((unsigned long) regs, (unsigned long) buf, 1425 (unsigned long) size, flags, 0); 1426 } 1427 1428 static const struct bpf_func_proto bpf_get_stack_proto_tp = { 1429 .func = bpf_get_stack_tp, 1430 .gpl_only = true, 1431 .ret_type = RET_INTEGER, 1432 .arg1_type = ARG_PTR_TO_CTX, 1433 .arg2_type = ARG_PTR_TO_UNINIT_MEM, 1434 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1435 .arg4_type = ARG_ANYTHING, 1436 }; 1437 1438 static const struct bpf_func_proto * 1439 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1440 { 1441 switch (func_id) { 1442 case BPF_FUNC_perf_event_output: 1443 return &bpf_perf_event_output_proto_tp; 1444 case BPF_FUNC_get_stackid: 1445 return &bpf_get_stackid_proto_tp; 1446 case BPF_FUNC_get_stack: 1447 return &bpf_get_stack_proto_tp; 1448 case BPF_FUNC_get_attach_cookie: 1449 return &bpf_get_attach_cookie_proto_trace; 1450 default: 1451 return bpf_tracing_func_proto(func_id, prog); 1452 } 1453 } 1454 1455 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type, 1456 const struct bpf_prog *prog, 1457 struct bpf_insn_access_aux *info) 1458 { 1459 if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE) 1460 return false; 1461 if (type != BPF_READ) 1462 return false; 1463 if (off % size != 0) 1464 return false; 1465 1466 
BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64)); 1467 return true; 1468 } 1469 1470 const struct bpf_verifier_ops tracepoint_verifier_ops = { 1471 .get_func_proto = tp_prog_func_proto, 1472 .is_valid_access = tp_prog_is_valid_access, 1473 }; 1474 1475 const struct bpf_prog_ops tracepoint_prog_ops = { 1476 }; 1477 1478 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx, 1479 struct bpf_perf_event_value *, buf, u32, size) 1480 { 1481 int err = -EINVAL; 1482 1483 if (unlikely(size != sizeof(struct bpf_perf_event_value))) 1484 goto clear; 1485 err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled, 1486 &buf->running); 1487 if (unlikely(err)) 1488 goto clear; 1489 return 0; 1490 clear: 1491 memset(buf, 0, size); 1492 return err; 1493 } 1494 1495 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = { 1496 .func = bpf_perf_prog_read_value, 1497 .gpl_only = true, 1498 .ret_type = RET_INTEGER, 1499 .arg1_type = ARG_PTR_TO_CTX, 1500 .arg2_type = ARG_PTR_TO_UNINIT_MEM, 1501 .arg3_type = ARG_CONST_SIZE, 1502 }; 1503 1504 BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx, 1505 void *, buf, u32, size, u64, flags) 1506 { 1507 static const u32 br_entry_size = sizeof(struct perf_branch_entry); 1508 struct perf_branch_stack *br_stack = ctx->data->br_stack; 1509 u32 to_copy; 1510 1511 if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE)) 1512 return -EINVAL; 1513 1514 if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK))) 1515 return -ENOENT; 1516 1517 if (unlikely(!br_stack)) 1518 return -ENOENT; 1519 1520 if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE) 1521 return br_stack->nr * br_entry_size; 1522 1523 if (!buf || (size % br_entry_size != 0)) 1524 return -EINVAL; 1525 1526 to_copy = min_t(u32, br_stack->nr * br_entry_size, size); 1527 memcpy(buf, br_stack->entries, to_copy); 1528 1529 return to_copy; 1530 } 1531 1532 static const struct bpf_func_proto bpf_read_branch_records_proto = { 1533 .func = bpf_read_branch_records, 1534 .gpl_only = true, 1535 .ret_type = RET_INTEGER, 1536 .arg1_type = ARG_PTR_TO_CTX, 1537 .arg2_type = ARG_PTR_TO_MEM_OR_NULL | MEM_WRITE, 1538 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1539 .arg4_type = ARG_ANYTHING, 1540 }; 1541 1542 static const struct bpf_func_proto * 1543 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1544 { 1545 switch (func_id) { 1546 case BPF_FUNC_perf_event_output: 1547 return &bpf_perf_event_output_proto_tp; 1548 case BPF_FUNC_get_stackid: 1549 return &bpf_get_stackid_proto_pe; 1550 case BPF_FUNC_get_stack: 1551 return &bpf_get_stack_proto_pe; 1552 case BPF_FUNC_perf_prog_read_value: 1553 return &bpf_perf_prog_read_value_proto; 1554 case BPF_FUNC_read_branch_records: 1555 return &bpf_read_branch_records_proto; 1556 case BPF_FUNC_get_attach_cookie: 1557 return &bpf_get_attach_cookie_proto_pe; 1558 default: 1559 return bpf_tracing_func_proto(func_id, prog); 1560 } 1561 } 1562 1563 /* 1564 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp 1565 * to avoid potential recursive reuse issue when/if tracepoints are added 1566 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack. 1567 * 1568 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage 1569 * in normal, irq, and nmi context. 
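 *
 * Hence three per-CPU pt_regs slots (normal, irq, nmi) are provided below,
 * mirroring the bpf_trace_sds nesting scheme used for
 * bpf_perf_event_output() above.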
1570 */ 1571 struct bpf_raw_tp_regs { 1572 struct pt_regs regs[3]; 1573 }; 1574 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs); 1575 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level); 1576 static struct pt_regs *get_bpf_raw_tp_regs(void) 1577 { 1578 struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs); 1579 int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level); 1580 1581 if (nest_level > ARRAY_SIZE(tp_regs->regs)) { 1582 this_cpu_dec(bpf_raw_tp_nest_level); 1583 return ERR_PTR(-EBUSY); 1584 } 1585 1586 return &tp_regs->regs[nest_level - 1]; 1587 } 1588 1589 static void put_bpf_raw_tp_regs(void) 1590 { 1591 this_cpu_dec(bpf_raw_tp_nest_level); 1592 } 1593 1594 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args, 1595 struct bpf_map *, map, u64, flags, void *, data, u64, size) 1596 { 1597 struct pt_regs *regs = get_bpf_raw_tp_regs(); 1598 int ret; 1599 1600 if (IS_ERR(regs)) 1601 return PTR_ERR(regs); 1602 1603 perf_fetch_caller_regs(regs); 1604 ret = ____bpf_perf_event_output(regs, map, flags, data, size); 1605 1606 put_bpf_raw_tp_regs(); 1607 return ret; 1608 } 1609 1610 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = { 1611 .func = bpf_perf_event_output_raw_tp, 1612 .gpl_only = true, 1613 .ret_type = RET_INTEGER, 1614 .arg1_type = ARG_PTR_TO_CTX, 1615 .arg2_type = ARG_CONST_MAP_PTR, 1616 .arg3_type = ARG_ANYTHING, 1617 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 1618 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 1619 }; 1620 1621 extern const struct bpf_func_proto bpf_skb_output_proto; 1622 extern const struct bpf_func_proto bpf_xdp_output_proto; 1623 extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto; 1624 1625 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args, 1626 struct bpf_map *, map, u64, flags) 1627 { 1628 struct pt_regs *regs = get_bpf_raw_tp_regs(); 1629 int ret; 1630 1631 if (IS_ERR(regs)) 1632 return PTR_ERR(regs); 1633 1634 perf_fetch_caller_regs(regs); 1635 /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */ 1636 ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map, 1637 flags, 0, 0); 1638 put_bpf_raw_tp_regs(); 1639 return ret; 1640 } 1641 1642 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = { 1643 .func = bpf_get_stackid_raw_tp, 1644 .gpl_only = true, 1645 .ret_type = RET_INTEGER, 1646 .arg1_type = ARG_PTR_TO_CTX, 1647 .arg2_type = ARG_CONST_MAP_PTR, 1648 .arg3_type = ARG_ANYTHING, 1649 }; 1650 1651 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args, 1652 void *, buf, u32, size, u64, flags) 1653 { 1654 struct pt_regs *regs = get_bpf_raw_tp_regs(); 1655 int ret; 1656 1657 if (IS_ERR(regs)) 1658 return PTR_ERR(regs); 1659 1660 perf_fetch_caller_regs(regs); 1661 ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf, 1662 (unsigned long) size, flags, 0); 1663 put_bpf_raw_tp_regs(); 1664 return ret; 1665 } 1666 1667 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = { 1668 .func = bpf_get_stack_raw_tp, 1669 .gpl_only = true, 1670 .ret_type = RET_INTEGER, 1671 .arg1_type = ARG_PTR_TO_CTX, 1672 .arg2_type = ARG_PTR_TO_UNINIT_MEM, 1673 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1674 .arg4_type = ARG_ANYTHING, 1675 }; 1676 1677 static const struct bpf_func_proto * 1678 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1679 { 1680 switch (func_id) { 1681 case BPF_FUNC_perf_event_output: 1682 return &bpf_perf_event_output_proto_raw_tp; 1683 case 
BPF_FUNC_get_stackid: 1684 return &bpf_get_stackid_proto_raw_tp; 1685 case BPF_FUNC_get_stack: 1686 return &bpf_get_stack_proto_raw_tp; 1687 case BPF_FUNC_get_attach_cookie: 1688 return &bpf_get_attach_cookie_proto_tracing; 1689 default: 1690 return bpf_tracing_func_proto(func_id, prog); 1691 } 1692 } 1693 1694 const struct bpf_func_proto * 1695 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1696 { 1697 const struct bpf_func_proto *fn; 1698 1699 switch (func_id) { 1700 #ifdef CONFIG_NET 1701 case BPF_FUNC_skb_output: 1702 return &bpf_skb_output_proto; 1703 case BPF_FUNC_xdp_output: 1704 return &bpf_xdp_output_proto; 1705 case BPF_FUNC_skc_to_tcp6_sock: 1706 return &bpf_skc_to_tcp6_sock_proto; 1707 case BPF_FUNC_skc_to_tcp_sock: 1708 return &bpf_skc_to_tcp_sock_proto; 1709 case BPF_FUNC_skc_to_tcp_timewait_sock: 1710 return &bpf_skc_to_tcp_timewait_sock_proto; 1711 case BPF_FUNC_skc_to_tcp_request_sock: 1712 return &bpf_skc_to_tcp_request_sock_proto; 1713 case BPF_FUNC_skc_to_udp6_sock: 1714 return &bpf_skc_to_udp6_sock_proto; 1715 case BPF_FUNC_skc_to_unix_sock: 1716 return &bpf_skc_to_unix_sock_proto; 1717 case BPF_FUNC_skc_to_mptcp_sock: 1718 return &bpf_skc_to_mptcp_sock_proto; 1719 case BPF_FUNC_sk_storage_get: 1720 return &bpf_sk_storage_get_tracing_proto; 1721 case BPF_FUNC_sk_storage_delete: 1722 return &bpf_sk_storage_delete_tracing_proto; 1723 case BPF_FUNC_sock_from_file: 1724 return &bpf_sock_from_file_proto; 1725 case BPF_FUNC_get_socket_cookie: 1726 return &bpf_get_socket_ptr_cookie_proto; 1727 case BPF_FUNC_xdp_get_buff_len: 1728 return &bpf_xdp_get_buff_len_trace_proto; 1729 #endif 1730 case BPF_FUNC_seq_printf: 1731 return prog->expected_attach_type == BPF_TRACE_ITER ? 1732 &bpf_seq_printf_proto : 1733 NULL; 1734 case BPF_FUNC_seq_write: 1735 return prog->expected_attach_type == BPF_TRACE_ITER ? 1736 &bpf_seq_write_proto : 1737 NULL; 1738 case BPF_FUNC_seq_printf_btf: 1739 return prog->expected_attach_type == BPF_TRACE_ITER ? 1740 &bpf_seq_printf_btf_proto : 1741 NULL; 1742 case BPF_FUNC_d_path: 1743 return &bpf_d_path_proto; 1744 case BPF_FUNC_get_func_arg: 1745 if (bpf_prog_has_trampoline(prog) || 1746 prog->expected_attach_type == BPF_TRACE_RAW_TP) 1747 return &bpf_get_func_arg_proto; 1748 return NULL; 1749 case BPF_FUNC_get_func_ret: 1750 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL; 1751 case BPF_FUNC_get_func_arg_cnt: 1752 if (bpf_prog_has_trampoline(prog) || 1753 prog->expected_attach_type == BPF_TRACE_RAW_TP) 1754 return &bpf_get_func_arg_cnt_proto; 1755 return NULL; 1756 case BPF_FUNC_get_attach_cookie: 1757 if (prog->type == BPF_PROG_TYPE_TRACING && 1758 prog->expected_attach_type == BPF_TRACE_RAW_TP) 1759 return &bpf_get_attach_cookie_proto_tracing; 1760 return bpf_prog_has_trampoline(prog) ? 
&bpf_get_attach_cookie_proto_tracing : NULL; 1761 default: 1762 fn = raw_tp_prog_func_proto(func_id, prog); 1763 if (!fn && prog->expected_attach_type == BPF_TRACE_ITER) 1764 fn = bpf_iter_get_func_proto(func_id, prog); 1765 return fn; 1766 } 1767 } 1768 1769 static bool raw_tp_prog_is_valid_access(int off, int size, 1770 enum bpf_access_type type, 1771 const struct bpf_prog *prog, 1772 struct bpf_insn_access_aux *info) 1773 { 1774 return bpf_tracing_ctx_access(off, size, type); 1775 } 1776 1777 static bool tracing_prog_is_valid_access(int off, int size, 1778 enum bpf_access_type type, 1779 const struct bpf_prog *prog, 1780 struct bpf_insn_access_aux *info) 1781 { 1782 return bpf_tracing_btf_ctx_access(off, size, type, prog, info); 1783 } 1784 1785 int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog, 1786 const union bpf_attr *kattr, 1787 union bpf_attr __user *uattr) 1788 { 1789 return -ENOTSUPP; 1790 } 1791 1792 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = { 1793 .get_func_proto = raw_tp_prog_func_proto, 1794 .is_valid_access = raw_tp_prog_is_valid_access, 1795 }; 1796 1797 const struct bpf_prog_ops raw_tracepoint_prog_ops = { 1798 #ifdef CONFIG_NET 1799 .test_run = bpf_prog_test_run_raw_tp, 1800 #endif 1801 }; 1802 1803 const struct bpf_verifier_ops tracing_verifier_ops = { 1804 .get_func_proto = tracing_prog_func_proto, 1805 .is_valid_access = tracing_prog_is_valid_access, 1806 }; 1807 1808 const struct bpf_prog_ops tracing_prog_ops = { 1809 .test_run = bpf_prog_test_run_tracing, 1810 }; 1811 1812 static bool raw_tp_writable_prog_is_valid_access(int off, int size, 1813 enum bpf_access_type type, 1814 const struct bpf_prog *prog, 1815 struct bpf_insn_access_aux *info) 1816 { 1817 if (off == 0) { 1818 if (size != sizeof(u64) || type != BPF_READ) 1819 return false; 1820 info->reg_type = PTR_TO_TP_BUFFER; 1821 } 1822 return raw_tp_prog_is_valid_access(off, size, type, prog, info); 1823 } 1824 1825 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = { 1826 .get_func_proto = raw_tp_prog_func_proto, 1827 .is_valid_access = raw_tp_writable_prog_is_valid_access, 1828 }; 1829 1830 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = { 1831 }; 1832 1833 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type, 1834 const struct bpf_prog *prog, 1835 struct bpf_insn_access_aux *info) 1836 { 1837 const int size_u64 = sizeof(u64); 1838 1839 if (off < 0 || off >= sizeof(struct bpf_perf_event_data)) 1840 return false; 1841 if (type != BPF_READ) 1842 return false; 1843 if (off % size != 0) { 1844 if (sizeof(unsigned long) != 4) 1845 return false; 1846 if (size != 8) 1847 return false; 1848 if (off % size != 4) 1849 return false; 1850 } 1851 1852 switch (off) { 1853 case bpf_ctx_range(struct bpf_perf_event_data, sample_period): 1854 bpf_ctx_record_field_size(info, size_u64); 1855 if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) 1856 return false; 1857 break; 1858 case bpf_ctx_range(struct bpf_perf_event_data, addr): 1859 bpf_ctx_record_field_size(info, size_u64); 1860 if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) 1861 return false; 1862 break; 1863 default: 1864 if (size != sizeof(long)) 1865 return false; 1866 } 1867 1868 return true; 1869 } 1870 1871 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, 1872 const struct bpf_insn *si, 1873 struct bpf_insn *insn_buf, 1874 struct bpf_prog *prog, u32 *target_size) 1875 { 1876 struct bpf_insn *insn = insn_buf; 1877 1878 switch (si->off) { 1879 case offsetof(struct 
bpf_perf_event_data, sample_period): 1880 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 1881 data), si->dst_reg, si->src_reg, 1882 offsetof(struct bpf_perf_event_data_kern, data)); 1883 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, 1884 bpf_target_off(struct perf_sample_data, period, 8, 1885 target_size)); 1886 break; 1887 case offsetof(struct bpf_perf_event_data, addr): 1888 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 1889 data), si->dst_reg, si->src_reg, 1890 offsetof(struct bpf_perf_event_data_kern, data)); 1891 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, 1892 bpf_target_off(struct perf_sample_data, addr, 8, 1893 target_size)); 1894 break; 1895 default: 1896 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 1897 regs), si->dst_reg, si->src_reg, 1898 offsetof(struct bpf_perf_event_data_kern, regs)); 1899 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg, 1900 si->off); 1901 break; 1902 } 1903 1904 return insn - insn_buf; 1905 } 1906 1907 const struct bpf_verifier_ops perf_event_verifier_ops = { 1908 .get_func_proto = pe_prog_func_proto, 1909 .is_valid_access = pe_prog_is_valid_access, 1910 .convert_ctx_access = pe_prog_convert_ctx_access, 1911 }; 1912 1913 const struct bpf_prog_ops perf_event_prog_ops = { 1914 }; 1915 1916 static DEFINE_MUTEX(bpf_event_mutex); 1917 1918 #define BPF_TRACE_MAX_PROGS 64 1919 1920 int perf_event_attach_bpf_prog(struct perf_event *event, 1921 struct bpf_prog *prog, 1922 u64 bpf_cookie) 1923 { 1924 struct bpf_prog_array *old_array; 1925 struct bpf_prog_array *new_array; 1926 int ret = -EEXIST; 1927 1928 /* 1929 * Kprobe override only works if they are on the function entry, 1930 * and only if they are on the opt-in list. 
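 * Both conditions are checked below via trace_kprobe_on_func_entry() and
 * trace_kprobe_error_injectable() before the program is attached.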
1931 */ 1932 if (prog->kprobe_override && 1933 (!trace_kprobe_on_func_entry(event->tp_event) || 1934 !trace_kprobe_error_injectable(event->tp_event))) 1935 return -EINVAL; 1936 1937 mutex_lock(&bpf_event_mutex); 1938 1939 if (event->prog) 1940 goto unlock; 1941 1942 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); 1943 if (old_array && 1944 bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) { 1945 ret = -E2BIG; 1946 goto unlock; 1947 } 1948 1949 ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array); 1950 if (ret < 0) 1951 goto unlock; 1952 1953 /* set the new array to event->tp_event and set event->prog */ 1954 event->prog = prog; 1955 event->bpf_cookie = bpf_cookie; 1956 rcu_assign_pointer(event->tp_event->prog_array, new_array); 1957 bpf_prog_array_free_sleepable(old_array); 1958 1959 unlock: 1960 mutex_unlock(&bpf_event_mutex); 1961 return ret; 1962 } 1963 1964 void perf_event_detach_bpf_prog(struct perf_event *event) 1965 { 1966 struct bpf_prog_array *old_array; 1967 struct bpf_prog_array *new_array; 1968 struct bpf_prog *prog = NULL; 1969 int ret; 1970 1971 mutex_lock(&bpf_event_mutex); 1972 1973 if (!event->prog) 1974 goto unlock; 1975 1976 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); 1977 if (!old_array) 1978 goto put; 1979 1980 ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array); 1981 if (ret < 0) { 1982 bpf_prog_array_delete_safe(old_array, event->prog); 1983 } else { 1984 rcu_assign_pointer(event->tp_event->prog_array, new_array); 1985 bpf_prog_array_free_sleepable(old_array); 1986 } 1987 1988 put: 1989 prog = event->prog; 1990 event->prog = NULL; 1991 1992 unlock: 1993 mutex_unlock(&bpf_event_mutex); 1994 1995 if (prog) { 1996 /* 1997 * It could be that the bpf_prog is not sleepable (and will be freed 1998 * via normal RCU), but is called from a point that supports sleepable 1999 * programs and uses tasks-trace-RCU. 2000 */ 2001 synchronize_rcu_tasks_trace(); 2002 2003 bpf_prog_put(prog); 2004 } 2005 } 2006 2007 int perf_event_query_prog_array(struct perf_event *event, void __user *info) 2008 { 2009 struct perf_event_query_bpf __user *uquery = info; 2010 struct perf_event_query_bpf query = {}; 2011 struct bpf_prog_array *progs; 2012 u32 *ids, prog_cnt, ids_len; 2013 int ret; 2014 2015 if (!perfmon_capable()) 2016 return -EPERM; 2017 if (event->attr.type != PERF_TYPE_TRACEPOINT) 2018 return -EINVAL; 2019 if (copy_from_user(&query, uquery, sizeof(query))) 2020 return -EFAULT; 2021 2022 ids_len = query.ids_len; 2023 if (ids_len > BPF_TRACE_MAX_PROGS) 2024 return -E2BIG; 2025 ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN); 2026 if (!ids) 2027 return -ENOMEM; 2028 /* 2029 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which 2030 * is required when user only wants to check for uquery->prog_cnt. 2031 * There is no need to check for it since the case is handled 2032 * gracefully in bpf_prog_array_copy_info. 
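	 *
	 * A minimal sketch of the intended two-step flow from user space
	 * (hypothetical perf_fd, via the PERF_EVENT_IOC_QUERY_BPF ioctl):
	 *
	 *	struct perf_event_query_bpf q = { .ids_len = 0 };
	 *
	 *	ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, &q);
	 *	// q.prog_cnt now holds the number of attached programs;
	 *	// the caller can retry with a buffer sized for that many ids.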
2033 */ 2034 2035 mutex_lock(&bpf_event_mutex); 2036 progs = bpf_event_rcu_dereference(event->tp_event->prog_array); 2037 ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt); 2038 mutex_unlock(&bpf_event_mutex); 2039 2040 if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) || 2041 copy_to_user(uquery->ids, ids, ids_len * sizeof(u32))) 2042 ret = -EFAULT; 2043 2044 kfree(ids); 2045 return ret; 2046 } 2047 2048 extern struct bpf_raw_event_map __start__bpf_raw_tp[]; 2049 extern struct bpf_raw_event_map __stop__bpf_raw_tp[]; 2050 2051 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name) 2052 { 2053 struct bpf_raw_event_map *btp = __start__bpf_raw_tp; 2054 2055 for (; btp < __stop__bpf_raw_tp; btp++) { 2056 if (!strcmp(btp->tp->name, name)) 2057 return btp; 2058 } 2059 2060 return bpf_get_raw_tracepoint_module(name); 2061 } 2062 2063 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp) 2064 { 2065 struct module *mod; 2066 2067 guard(rcu)(); 2068 mod = __module_address((unsigned long)btp); 2069 module_put(mod); 2070 } 2071 2072 static __always_inline 2073 void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args) 2074 { 2075 struct bpf_prog *prog = link->link.prog; 2076 struct bpf_run_ctx *old_run_ctx; 2077 struct bpf_trace_run_ctx run_ctx; 2078 2079 cant_sleep(); 2080 if (unlikely(!bpf_prog_get_recursion_context(prog))) { 2081 bpf_prog_inc_misses_counter(prog); 2082 goto out; 2083 } 2084 2085 run_ctx.bpf_cookie = link->cookie; 2086 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); 2087 2088 rcu_read_lock(); 2089 (void) bpf_prog_run(prog, args); 2090 rcu_read_unlock(); 2091 2092 bpf_reset_run_ctx(old_run_ctx); 2093 out: 2094 bpf_prog_put_recursion_context(prog); 2095 } 2096 2097 #define UNPACK(...) __VA_ARGS__ 2098 #define REPEAT_1(FN, DL, X, ...) FN(X) 2099 #define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__) 2100 #define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__) 2101 #define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__) 2102 #define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__) 2103 #define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__) 2104 #define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__) 2105 #define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__) 2106 #define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__) 2107 #define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__) 2108 #define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__) 2109 #define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__) 2110 #define REPEAT(X, FN, DL, ...) 
REPEAT_##X(FN, DL, __VA_ARGS__) 2111 2112 #define SARG(X) u64 arg##X 2113 #define COPY(X) args[X] = arg##X 2114 2115 #define __DL_COM (,) 2116 #define __DL_SEM (;) 2117 2118 #define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 2119 2120 #define BPF_TRACE_DEFN_x(x) \ 2121 void bpf_trace_run##x(struct bpf_raw_tp_link *link, \ 2122 REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \ 2123 { \ 2124 u64 args[x]; \ 2125 REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \ 2126 __bpf_trace_run(link, args); \ 2127 } \ 2128 EXPORT_SYMBOL_GPL(bpf_trace_run##x) 2129 BPF_TRACE_DEFN_x(1); 2130 BPF_TRACE_DEFN_x(2); 2131 BPF_TRACE_DEFN_x(3); 2132 BPF_TRACE_DEFN_x(4); 2133 BPF_TRACE_DEFN_x(5); 2134 BPF_TRACE_DEFN_x(6); 2135 BPF_TRACE_DEFN_x(7); 2136 BPF_TRACE_DEFN_x(8); 2137 BPF_TRACE_DEFN_x(9); 2138 BPF_TRACE_DEFN_x(10); 2139 BPF_TRACE_DEFN_x(11); 2140 BPF_TRACE_DEFN_x(12); 2141 2142 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link) 2143 { 2144 struct tracepoint *tp = btp->tp; 2145 struct bpf_prog *prog = link->link.prog; 2146 2147 /* 2148 * check that program doesn't access arguments beyond what's 2149 * available in this tracepoint 2150 */ 2151 if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64)) 2152 return -EINVAL; 2153 2154 if (prog->aux->max_tp_access > btp->writable_size) 2155 return -EINVAL; 2156 2157 return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func, link); 2158 } 2159 2160 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link) 2161 { 2162 return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, link); 2163 } 2164 2165 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, 2166 u32 *fd_type, const char **buf, 2167 u64 *probe_offset, u64 *probe_addr, 2168 unsigned long *missed) 2169 { 2170 bool is_tracepoint, is_syscall_tp; 2171 struct bpf_prog *prog; 2172 int flags, err = 0; 2173 2174 prog = event->prog; 2175 if (!prog) 2176 return -ENOENT; 2177 2178 /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */ 2179 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) 2180 return -EOPNOTSUPP; 2181 2182 *prog_id = prog->aux->id; 2183 flags = event->tp_event->flags; 2184 is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT; 2185 is_syscall_tp = is_syscall_trace_event(event->tp_event); 2186 2187 if (is_tracepoint || is_syscall_tp) { 2188 *buf = is_tracepoint ? 
event->tp_event->tp->name 2189 : event->tp_event->name; 2190 /* We allow NULL pointer for tracepoint */ 2191 if (fd_type) 2192 *fd_type = BPF_FD_TYPE_TRACEPOINT; 2193 if (probe_offset) 2194 *probe_offset = 0x0; 2195 if (probe_addr) 2196 *probe_addr = 0x0; 2197 } else { 2198 /* kprobe/uprobe */ 2199 err = -EOPNOTSUPP; 2200 #ifdef CONFIG_KPROBE_EVENTS 2201 if (flags & TRACE_EVENT_FL_KPROBE) 2202 err = bpf_get_kprobe_info(event, fd_type, buf, 2203 probe_offset, probe_addr, missed, 2204 event->attr.type == PERF_TYPE_TRACEPOINT); 2205 #endif 2206 #ifdef CONFIG_UPROBE_EVENTS 2207 if (flags & TRACE_EVENT_FL_UPROBE) 2208 err = bpf_get_uprobe_info(event, fd_type, buf, 2209 probe_offset, probe_addr, 2210 event->attr.type == PERF_TYPE_TRACEPOINT); 2211 #endif 2212 } 2213 2214 return err; 2215 } 2216 2217 static int __init send_signal_irq_work_init(void) 2218 { 2219 int cpu; 2220 struct send_signal_irq_work *work; 2221 2222 for_each_possible_cpu(cpu) { 2223 work = per_cpu_ptr(&send_signal_work, cpu); 2224 init_irq_work(&work->irq_work, do_bpf_send_signal); 2225 } 2226 return 0; 2227 } 2228 2229 subsys_initcall(send_signal_irq_work_init); 2230 2231 #ifdef CONFIG_MODULES 2232 static int bpf_event_notify(struct notifier_block *nb, unsigned long op, 2233 void *module) 2234 { 2235 struct bpf_trace_module *btm, *tmp; 2236 struct module *mod = module; 2237 int ret = 0; 2238 2239 if (mod->num_bpf_raw_events == 0 || 2240 (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING)) 2241 goto out; 2242 2243 mutex_lock(&bpf_module_mutex); 2244 2245 switch (op) { 2246 case MODULE_STATE_COMING: 2247 btm = kzalloc(sizeof(*btm), GFP_KERNEL); 2248 if (btm) { 2249 btm->module = module; 2250 list_add(&btm->list, &bpf_trace_modules); 2251 } else { 2252 ret = -ENOMEM; 2253 } 2254 break; 2255 case MODULE_STATE_GOING: 2256 list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) { 2257 if (btm->module == module) { 2258 list_del(&btm->list); 2259 kfree(btm); 2260 break; 2261 } 2262 } 2263 break; 2264 } 2265 2266 mutex_unlock(&bpf_module_mutex); 2267 2268 out: 2269 return notifier_from_errno(ret); 2270 } 2271 2272 static struct notifier_block bpf_module_nb = { 2273 .notifier_call = bpf_event_notify, 2274 }; 2275 2276 static int __init bpf_event_init(void) 2277 { 2278 register_module_notifier(&bpf_module_nb); 2279 return 0; 2280 } 2281 2282 fs_initcall(bpf_event_init); 2283 #endif /* CONFIG_MODULES */ 2284 2285 struct bpf_session_run_ctx { 2286 struct bpf_run_ctx run_ctx; 2287 bool is_return; 2288 void *data; 2289 }; 2290 2291 #ifdef CONFIG_FPROBE 2292 struct bpf_kprobe_multi_link { 2293 struct bpf_link link; 2294 struct fprobe fp; 2295 unsigned long *addrs; 2296 u64 *cookies; 2297 u32 cnt; 2298 u32 mods_cnt; 2299 struct module **mods; 2300 }; 2301 2302 struct bpf_kprobe_multi_run_ctx { 2303 struct bpf_session_run_ctx session_ctx; 2304 struct bpf_kprobe_multi_link *link; 2305 unsigned long entry_ip; 2306 }; 2307 2308 struct user_syms { 2309 const char **syms; 2310 char *buf; 2311 }; 2312 2313 #ifndef CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS 2314 static DEFINE_PER_CPU(struct pt_regs, bpf_kprobe_multi_pt_regs); 2315 #define bpf_kprobe_multi_pt_regs_ptr() this_cpu_ptr(&bpf_kprobe_multi_pt_regs) 2316 #else 2317 #define bpf_kprobe_multi_pt_regs_ptr() (NULL) 2318 #endif 2319 2320 static unsigned long ftrace_get_entry_ip(unsigned long fentry_ip) 2321 { 2322 unsigned long ip = ftrace_get_symaddr(fentry_ip); 2323 2324 return ip ? 
: fentry_ip; 2325 } 2326 2327 static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt) 2328 { 2329 unsigned long __user usymbol; 2330 const char **syms = NULL; 2331 char *buf = NULL, *p; 2332 int err = -ENOMEM; 2333 unsigned int i; 2334 2335 syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL); 2336 if (!syms) 2337 goto error; 2338 2339 buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL); 2340 if (!buf) 2341 goto error; 2342 2343 for (p = buf, i = 0; i < cnt; i++) { 2344 if (__get_user(usymbol, usyms + i)) { 2345 err = -EFAULT; 2346 goto error; 2347 } 2348 err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN); 2349 if (err == KSYM_NAME_LEN) 2350 err = -E2BIG; 2351 if (err < 0) 2352 goto error; 2353 syms[i] = p; 2354 p += err + 1; 2355 } 2356 2357 us->syms = syms; 2358 us->buf = buf; 2359 return 0; 2360 2361 error: 2362 if (err) { 2363 kvfree(syms); 2364 kvfree(buf); 2365 } 2366 return err; 2367 } 2368 2369 static void kprobe_multi_put_modules(struct module **mods, u32 cnt) 2370 { 2371 u32 i; 2372 2373 for (i = 0; i < cnt; i++) 2374 module_put(mods[i]); 2375 } 2376 2377 static void free_user_syms(struct user_syms *us) 2378 { 2379 kvfree(us->syms); 2380 kvfree(us->buf); 2381 } 2382 2383 static void bpf_kprobe_multi_link_release(struct bpf_link *link) 2384 { 2385 struct bpf_kprobe_multi_link *kmulti_link; 2386 2387 kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); 2388 unregister_fprobe(&kmulti_link->fp); 2389 kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt); 2390 } 2391 2392 static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link) 2393 { 2394 struct bpf_kprobe_multi_link *kmulti_link; 2395 2396 kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); 2397 kvfree(kmulti_link->addrs); 2398 kvfree(kmulti_link->cookies); 2399 kfree(kmulti_link->mods); 2400 kfree(kmulti_link); 2401 } 2402 2403 static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link, 2404 struct bpf_link_info *info) 2405 { 2406 u64 __user *ucookies = u64_to_user_ptr(info->kprobe_multi.cookies); 2407 u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs); 2408 struct bpf_kprobe_multi_link *kmulti_link; 2409 u32 ucount = info->kprobe_multi.count; 2410 int err = 0, i; 2411 2412 if (!uaddrs ^ !ucount) 2413 return -EINVAL; 2414 if (ucookies && !ucount) 2415 return -EINVAL; 2416 2417 kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); 2418 info->kprobe_multi.count = kmulti_link->cnt; 2419 info->kprobe_multi.flags = kmulti_link->link.flags; 2420 info->kprobe_multi.missed = kmulti_link->fp.nmissed; 2421 2422 if (!uaddrs) 2423 return 0; 2424 if (ucount < kmulti_link->cnt) 2425 err = -ENOSPC; 2426 else 2427 ucount = kmulti_link->cnt; 2428 2429 if (ucookies) { 2430 if (kmulti_link->cookies) { 2431 if (copy_to_user(ucookies, kmulti_link->cookies, ucount * sizeof(u64))) 2432 return -EFAULT; 2433 } else { 2434 for (i = 0; i < ucount; i++) { 2435 if (put_user(0, ucookies + i)) 2436 return -EFAULT; 2437 } 2438 } 2439 } 2440 2441 if (kallsyms_show_value(current_cred())) { 2442 if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64))) 2443 return -EFAULT; 2444 } else { 2445 for (i = 0; i < ucount; i++) { 2446 if (put_user(0, uaddrs + i)) 2447 return -EFAULT; 2448 } 2449 } 2450 return err; 2451 } 2452 2453 #ifdef CONFIG_PROC_FS 2454 static void bpf_kprobe_multi_show_fdinfo(const struct bpf_link *link, 2455 struct seq_file *seq) 2456 { 2457 struct bpf_kprobe_multi_link *kmulti_link; 2458 
2459 kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); 2460 2461 seq_printf(seq, 2462 "kprobe_cnt:\t%u\n" 2463 "missed:\t%lu\n", 2464 kmulti_link->cnt, 2465 kmulti_link->fp.nmissed); 2466 2467 seq_printf(seq, "%s\t %s\n", "cookie", "func"); 2468 for (int i = 0; i < kmulti_link->cnt; i++) { 2469 seq_printf(seq, 2470 "%llu\t %pS\n", 2471 kmulti_link->cookies[i], 2472 (void *)kmulti_link->addrs[i]); 2473 } 2474 } 2475 #endif 2476 2477 static const struct bpf_link_ops bpf_kprobe_multi_link_lops = { 2478 .release = bpf_kprobe_multi_link_release, 2479 .dealloc_deferred = bpf_kprobe_multi_link_dealloc, 2480 .fill_link_info = bpf_kprobe_multi_link_fill_link_info, 2481 #ifdef CONFIG_PROC_FS 2482 .show_fdinfo = bpf_kprobe_multi_show_fdinfo, 2483 #endif 2484 }; 2485 2486 static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv) 2487 { 2488 const struct bpf_kprobe_multi_link *link = priv; 2489 unsigned long *addr_a = a, *addr_b = b; 2490 u64 *cookie_a, *cookie_b; 2491 2492 cookie_a = link->cookies + (addr_a - link->addrs); 2493 cookie_b = link->cookies + (addr_b - link->addrs); 2494 2495 /* swap addr_a/addr_b and cookie_a/cookie_b values */ 2496 swap(*addr_a, *addr_b); 2497 swap(*cookie_a, *cookie_b); 2498 } 2499 2500 static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b) 2501 { 2502 const unsigned long *addr_a = a, *addr_b = b; 2503 2504 if (*addr_a == *addr_b) 2505 return 0; 2506 return *addr_a < *addr_b ? -1 : 1; 2507 } 2508 2509 static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv) 2510 { 2511 return bpf_kprobe_multi_addrs_cmp(a, b); 2512 } 2513 2514 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx) 2515 { 2516 struct bpf_kprobe_multi_run_ctx *run_ctx; 2517 struct bpf_kprobe_multi_link *link; 2518 u64 *cookie, entry_ip; 2519 unsigned long *addr; 2520 2521 if (WARN_ON_ONCE(!ctx)) 2522 return 0; 2523 run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, 2524 session_ctx.run_ctx); 2525 link = run_ctx->link; 2526 if (!link->cookies) 2527 return 0; 2528 entry_ip = run_ctx->entry_ip; 2529 addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip), 2530 bpf_kprobe_multi_addrs_cmp); 2531 if (!addr) 2532 return 0; 2533 cookie = link->cookies + (addr - link->addrs); 2534 return *cookie; 2535 } 2536 2537 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx) 2538 { 2539 struct bpf_kprobe_multi_run_ctx *run_ctx; 2540 2541 run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, 2542 session_ctx.run_ctx); 2543 return run_ctx->entry_ip; 2544 } 2545 2546 static __always_inline int 2547 kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link, 2548 unsigned long entry_ip, struct ftrace_regs *fregs, 2549 bool is_return, void *data) 2550 { 2551 struct bpf_kprobe_multi_run_ctx run_ctx = { 2552 .session_ctx = { 2553 .is_return = is_return, 2554 .data = data, 2555 }, 2556 .link = link, 2557 .entry_ip = entry_ip, 2558 }; 2559 struct bpf_run_ctx *old_run_ctx; 2560 struct pt_regs *regs; 2561 int err; 2562 2563 /* 2564 * graph tracer framework ensures we won't migrate, so there is no need 2565 * to use migrate_disable for bpf_prog_run again. The check here just for 2566 * __this_cpu_inc_return. 
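	 * bpf_prog_active is the same per-CPU recursion counter used by
	 * trace_call_bpf(); with migration ruled out, the plain
	 * __this_cpu_inc_return()/__this_cpu_dec() pair below is enough to
	 * detect and skip nested probe handlers on this CPU.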
2567 */ 2568 cant_sleep(); 2569 2570 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { 2571 bpf_prog_inc_misses_counter(link->link.prog); 2572 err = 1; 2573 goto out; 2574 } 2575 2576 rcu_read_lock(); 2577 regs = ftrace_partial_regs(fregs, bpf_kprobe_multi_pt_regs_ptr()); 2578 old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx); 2579 err = bpf_prog_run(link->link.prog, regs); 2580 bpf_reset_run_ctx(old_run_ctx); 2581 ftrace_partial_regs_update(fregs, bpf_kprobe_multi_pt_regs_ptr()); 2582 rcu_read_unlock(); 2583 2584 out: 2585 __this_cpu_dec(bpf_prog_active); 2586 return err; 2587 } 2588 2589 static int 2590 kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip, 2591 unsigned long ret_ip, struct ftrace_regs *fregs, 2592 void *data) 2593 { 2594 struct bpf_kprobe_multi_link *link; 2595 int err; 2596 2597 link = container_of(fp, struct bpf_kprobe_multi_link, fp); 2598 err = kprobe_multi_link_prog_run(link, ftrace_get_entry_ip(fentry_ip), 2599 fregs, false, data); 2600 return is_kprobe_session(link->link.prog) ? err : 0; 2601 } 2602 2603 static void 2604 kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip, 2605 unsigned long ret_ip, struct ftrace_regs *fregs, 2606 void *data) 2607 { 2608 struct bpf_kprobe_multi_link *link; 2609 2610 link = container_of(fp, struct bpf_kprobe_multi_link, fp); 2611 kprobe_multi_link_prog_run(link, ftrace_get_entry_ip(fentry_ip), 2612 fregs, true, data); 2613 } 2614 2615 static int symbols_cmp_r(const void *a, const void *b, const void *priv) 2616 { 2617 const char **str_a = (const char **) a; 2618 const char **str_b = (const char **) b; 2619 2620 return strcmp(*str_a, *str_b); 2621 } 2622 2623 struct multi_symbols_sort { 2624 const char **funcs; 2625 u64 *cookies; 2626 }; 2627 2628 static void symbols_swap_r(void *a, void *b, int size, const void *priv) 2629 { 2630 const struct multi_symbols_sort *data = priv; 2631 const char **name_a = a, **name_b = b; 2632 2633 swap(*name_a, *name_b); 2634 2635 /* If defined, swap also related cookies. 
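	 * cookies[i] belongs to funcs[i], so whatever permutation sort_r()
	 * applies to the symbol names must also be applied to the cookie
	 * array to keep the two arrays parallel.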
*/ 2636 if (data->cookies) { 2637 u64 *cookie_a, *cookie_b; 2638 2639 cookie_a = data->cookies + (name_a - data->funcs); 2640 cookie_b = data->cookies + (name_b - data->funcs); 2641 swap(*cookie_a, *cookie_b); 2642 } 2643 } 2644 2645 struct modules_array { 2646 struct module **mods; 2647 int mods_cnt; 2648 int mods_cap; 2649 }; 2650 2651 static int add_module(struct modules_array *arr, struct module *mod) 2652 { 2653 struct module **mods; 2654 2655 if (arr->mods_cnt == arr->mods_cap) { 2656 arr->mods_cap = max(16, arr->mods_cap * 3 / 2); 2657 mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL); 2658 if (!mods) 2659 return -ENOMEM; 2660 arr->mods = mods; 2661 } 2662 2663 arr->mods[arr->mods_cnt] = mod; 2664 arr->mods_cnt++; 2665 return 0; 2666 } 2667 2668 static bool has_module(struct modules_array *arr, struct module *mod) 2669 { 2670 int i; 2671 2672 for (i = arr->mods_cnt - 1; i >= 0; i--) { 2673 if (arr->mods[i] == mod) 2674 return true; 2675 } 2676 return false; 2677 } 2678 2679 static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt) 2680 { 2681 struct modules_array arr = {}; 2682 u32 i, err = 0; 2683 2684 for (i = 0; i < addrs_cnt; i++) { 2685 bool skip_add = false; 2686 struct module *mod; 2687 2688 scoped_guard(rcu) { 2689 mod = __module_address(addrs[i]); 2690 /* Either no module or it's already stored */ 2691 if (!mod || has_module(&arr, mod)) { 2692 skip_add = true; 2693 break; /* scoped_guard */ 2694 } 2695 if (!try_module_get(mod)) 2696 err = -EINVAL; 2697 } 2698 if (skip_add) 2699 continue; 2700 if (err) 2701 break; 2702 err = add_module(&arr, mod); 2703 if (err) { 2704 module_put(mod); 2705 break; 2706 } 2707 } 2708 2709 /* We return either err < 0 in case of error, ... */ 2710 if (err) { 2711 kprobe_multi_put_modules(arr.mods, arr.mods_cnt); 2712 kfree(arr.mods); 2713 return err; 2714 } 2715 2716 /* or number of modules found if everything is ok. */ 2717 *mods = arr.mods; 2718 return arr.mods_cnt; 2719 } 2720 2721 static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt) 2722 { 2723 u32 i; 2724 2725 for (i = 0; i < cnt; i++) { 2726 if (!within_error_injection_list(addrs[i])) 2727 return -EINVAL; 2728 } 2729 return 0; 2730 } 2731 2732 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 2733 { 2734 struct bpf_kprobe_multi_link *link = NULL; 2735 struct bpf_link_primer link_primer; 2736 void __user *ucookies; 2737 unsigned long *addrs; 2738 u32 flags, cnt, size; 2739 void __user *uaddrs; 2740 u64 *cookies = NULL; 2741 void __user *usyms; 2742 int err; 2743 2744 /* no support for 32bit archs yet */ 2745 if (sizeof(u64) != sizeof(void *)) 2746 return -EOPNOTSUPP; 2747 2748 if (attr->link_create.flags) 2749 return -EINVAL; 2750 2751 if (!is_kprobe_multi(prog)) 2752 return -EINVAL; 2753 2754 /* Writing to context is not allowed for kprobes. 
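	 * kprobe_multi/session programs run via fprobe on ftrace_regs, so a
	 * program that writes to its pt_regs context (tracked by
	 * prog->aux->kprobe_write_ctx) cannot be attached through this link.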
*/ 2755 if (prog->aux->kprobe_write_ctx) 2756 return -EINVAL; 2757 2758 flags = attr->link_create.kprobe_multi.flags; 2759 if (flags & ~BPF_F_KPROBE_MULTI_RETURN) 2760 return -EINVAL; 2761 2762 uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs); 2763 usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms); 2764 if (!!uaddrs == !!usyms) 2765 return -EINVAL; 2766 2767 cnt = attr->link_create.kprobe_multi.cnt; 2768 if (!cnt) 2769 return -EINVAL; 2770 if (cnt > MAX_KPROBE_MULTI_CNT) 2771 return -E2BIG; 2772 2773 size = cnt * sizeof(*addrs); 2774 addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL); 2775 if (!addrs) 2776 return -ENOMEM; 2777 2778 ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies); 2779 if (ucookies) { 2780 cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL); 2781 if (!cookies) { 2782 err = -ENOMEM; 2783 goto error; 2784 } 2785 if (copy_from_user(cookies, ucookies, size)) { 2786 err = -EFAULT; 2787 goto error; 2788 } 2789 } 2790 2791 if (uaddrs) { 2792 if (copy_from_user(addrs, uaddrs, size)) { 2793 err = -EFAULT; 2794 goto error; 2795 } 2796 } else { 2797 struct multi_symbols_sort data = { 2798 .cookies = cookies, 2799 }; 2800 struct user_syms us; 2801 2802 err = copy_user_syms(&us, usyms, cnt); 2803 if (err) 2804 goto error; 2805 2806 if (cookies) 2807 data.funcs = us.syms; 2808 2809 sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r, 2810 symbols_swap_r, &data); 2811 2812 err = ftrace_lookup_symbols(us.syms, cnt, addrs); 2813 free_user_syms(&us); 2814 if (err) 2815 goto error; 2816 } 2817 2818 if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) { 2819 err = -EINVAL; 2820 goto error; 2821 } 2822 2823 link = kzalloc(sizeof(*link), GFP_KERNEL); 2824 if (!link) { 2825 err = -ENOMEM; 2826 goto error; 2827 } 2828 2829 bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI, 2830 &bpf_kprobe_multi_link_lops, prog, attr->link_create.attach_type); 2831 2832 err = bpf_link_prime(&link->link, &link_primer); 2833 if (err) 2834 goto error; 2835 2836 if (!(flags & BPF_F_KPROBE_MULTI_RETURN)) 2837 link->fp.entry_handler = kprobe_multi_link_handler; 2838 if ((flags & BPF_F_KPROBE_MULTI_RETURN) || is_kprobe_session(prog)) 2839 link->fp.exit_handler = kprobe_multi_link_exit_handler; 2840 if (is_kprobe_session(prog)) 2841 link->fp.entry_data_size = sizeof(u64); 2842 2843 link->addrs = addrs; 2844 link->cookies = cookies; 2845 link->cnt = cnt; 2846 link->link.flags = flags; 2847 2848 if (cookies) { 2849 /* 2850 * Sorting addresses will trigger sorting cookies as well 2851 * (check bpf_kprobe_multi_cookie_swap). This way we can 2852 * find cookie based on the address in bpf_get_attach_cookie 2853 * helper. 
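		 *
		 * Illustrative consumer side (hypothetical symbol pattern,
		 * user-space attach code not shown):
		 *
		 *	SEC("kprobe.multi/tcp_*")
		 *	int handler(struct pt_regs *ctx)
		 *	{
		 *		__u64 cookie = bpf_get_attach_cookie(ctx);
		 *
		 *		// cookie is the value registered for the
		 *		// symbol that actually fired
		 *		return 0;
		 *	}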
2854 */ 2855 sort_r(addrs, cnt, sizeof(*addrs), 2856 bpf_kprobe_multi_cookie_cmp, 2857 bpf_kprobe_multi_cookie_swap, 2858 link); 2859 } 2860 2861 err = get_modules_for_addrs(&link->mods, addrs, cnt); 2862 if (err < 0) { 2863 bpf_link_cleanup(&link_primer); 2864 return err; 2865 } 2866 link->mods_cnt = err; 2867 2868 err = register_fprobe_ips(&link->fp, addrs, cnt); 2869 if (err) { 2870 kprobe_multi_put_modules(link->mods, link->mods_cnt); 2871 bpf_link_cleanup(&link_primer); 2872 return err; 2873 } 2874 2875 return bpf_link_settle(&link_primer); 2876 2877 error: 2878 kfree(link); 2879 kvfree(addrs); 2880 kvfree(cookies); 2881 return err; 2882 } 2883 #else /* !CONFIG_FPROBE */ 2884 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 2885 { 2886 return -EOPNOTSUPP; 2887 } 2888 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx) 2889 { 2890 return 0; 2891 } 2892 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx) 2893 { 2894 return 0; 2895 } 2896 #endif 2897 2898 #ifdef CONFIG_UPROBES 2899 struct bpf_uprobe_multi_link; 2900 2901 struct bpf_uprobe { 2902 struct bpf_uprobe_multi_link *link; 2903 loff_t offset; 2904 unsigned long ref_ctr_offset; 2905 u64 cookie; 2906 struct uprobe *uprobe; 2907 struct uprobe_consumer consumer; 2908 bool session; 2909 }; 2910 2911 struct bpf_uprobe_multi_link { 2912 struct path path; 2913 struct bpf_link link; 2914 u32 cnt; 2915 struct bpf_uprobe *uprobes; 2916 struct task_struct *task; 2917 }; 2918 2919 struct bpf_uprobe_multi_run_ctx { 2920 struct bpf_session_run_ctx session_ctx; 2921 unsigned long entry_ip; 2922 struct bpf_uprobe *uprobe; 2923 }; 2924 2925 static void bpf_uprobe_unregister(struct bpf_uprobe *uprobes, u32 cnt) 2926 { 2927 u32 i; 2928 2929 for (i = 0; i < cnt; i++) 2930 uprobe_unregister_nosync(uprobes[i].uprobe, &uprobes[i].consumer); 2931 2932 if (cnt) 2933 uprobe_unregister_sync(); 2934 } 2935 2936 static void bpf_uprobe_multi_link_release(struct bpf_link *link) 2937 { 2938 struct bpf_uprobe_multi_link *umulti_link; 2939 2940 umulti_link = container_of(link, struct bpf_uprobe_multi_link, link); 2941 bpf_uprobe_unregister(umulti_link->uprobes, umulti_link->cnt); 2942 if (umulti_link->task) 2943 put_task_struct(umulti_link->task); 2944 path_put(&umulti_link->path); 2945 } 2946 2947 static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link) 2948 { 2949 struct bpf_uprobe_multi_link *umulti_link; 2950 2951 umulti_link = container_of(link, struct bpf_uprobe_multi_link, link); 2952 kvfree(umulti_link->uprobes); 2953 kfree(umulti_link); 2954 } 2955 2956 static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link, 2957 struct bpf_link_info *info) 2958 { 2959 u64 __user *uref_ctr_offsets = u64_to_user_ptr(info->uprobe_multi.ref_ctr_offsets); 2960 u64 __user *ucookies = u64_to_user_ptr(info->uprobe_multi.cookies); 2961 u64 __user *uoffsets = u64_to_user_ptr(info->uprobe_multi.offsets); 2962 u64 __user *upath = u64_to_user_ptr(info->uprobe_multi.path); 2963 u32 upath_size = info->uprobe_multi.path_size; 2964 struct bpf_uprobe_multi_link *umulti_link; 2965 u32 ucount = info->uprobe_multi.count; 2966 int err = 0, i; 2967 char *p, *buf; 2968 long left = 0; 2969 2970 if (!upath ^ !upath_size) 2971 return -EINVAL; 2972 2973 if ((uoffsets || uref_ctr_offsets || ucookies) && !ucount) 2974 return -EINVAL; 2975 2976 umulti_link = container_of(link, struct bpf_uprobe_multi_link, link); 2977 info->uprobe_multi.count = umulti_link->cnt; 2978 info->uprobe_multi.flags = umulti_link->link.flags; 
2979 info->uprobe_multi.pid = umulti_link->task ? 2980 task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0; 2981 2982 upath_size = upath_size ? min_t(u32, upath_size, PATH_MAX) : PATH_MAX; 2983 buf = kmalloc(upath_size, GFP_KERNEL); 2984 if (!buf) 2985 return -ENOMEM; 2986 p = d_path(&umulti_link->path, buf, upath_size); 2987 if (IS_ERR(p)) { 2988 kfree(buf); 2989 return PTR_ERR(p); 2990 } 2991 upath_size = buf + upath_size - p; 2992 2993 if (upath) 2994 left = copy_to_user(upath, p, upath_size); 2995 kfree(buf); 2996 if (left) 2997 return -EFAULT; 2998 info->uprobe_multi.path_size = upath_size; 2999 3000 if (!uoffsets && !ucookies && !uref_ctr_offsets) 3001 return 0; 3002 3003 if (ucount < umulti_link->cnt) 3004 err = -ENOSPC; 3005 else 3006 ucount = umulti_link->cnt; 3007 3008 for (i = 0; i < ucount; i++) { 3009 if (uoffsets && 3010 put_user(umulti_link->uprobes[i].offset, uoffsets + i)) 3011 return -EFAULT; 3012 if (uref_ctr_offsets && 3013 put_user(umulti_link->uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) 3014 return -EFAULT; 3015 if (ucookies && 3016 put_user(umulti_link->uprobes[i].cookie, ucookies + i)) 3017 return -EFAULT; 3018 } 3019 3020 return err; 3021 } 3022 3023 #ifdef CONFIG_PROC_FS 3024 static void bpf_uprobe_multi_show_fdinfo(const struct bpf_link *link, 3025 struct seq_file *seq) 3026 { 3027 struct bpf_uprobe_multi_link *umulti_link; 3028 char *p, *buf; 3029 pid_t pid; 3030 3031 umulti_link = container_of(link, struct bpf_uprobe_multi_link, link); 3032 3033 buf = kmalloc(PATH_MAX, GFP_KERNEL); 3034 if (!buf) 3035 return; 3036 3037 p = d_path(&umulti_link->path, buf, PATH_MAX); 3038 if (IS_ERR(p)) { 3039 kfree(buf); 3040 return; 3041 } 3042 3043 pid = umulti_link->task ? 3044 task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0; 3045 seq_printf(seq, 3046 "uprobe_cnt:\t%u\n" 3047 "pid:\t%u\n" 3048 "path:\t%s\n", 3049 umulti_link->cnt, pid, p); 3050 3051 seq_printf(seq, "%s\t %s\t %s\n", "cookie", "offset", "ref_ctr_offset"); 3052 for (int i = 0; i < umulti_link->cnt; i++) { 3053 seq_printf(seq, 3054 "%llu\t %#llx\t %#lx\n", 3055 umulti_link->uprobes[i].cookie, 3056 umulti_link->uprobes[i].offset, 3057 umulti_link->uprobes[i].ref_ctr_offset); 3058 } 3059 3060 kfree(buf); 3061 } 3062 #endif 3063 3064 static const struct bpf_link_ops bpf_uprobe_multi_link_lops = { 3065 .release = bpf_uprobe_multi_link_release, 3066 .dealloc_deferred = bpf_uprobe_multi_link_dealloc, 3067 .fill_link_info = bpf_uprobe_multi_link_fill_link_info, 3068 #ifdef CONFIG_PROC_FS 3069 .show_fdinfo = bpf_uprobe_multi_show_fdinfo, 3070 #endif 3071 }; 3072 3073 static int uprobe_prog_run(struct bpf_uprobe *uprobe, 3074 unsigned long entry_ip, 3075 struct pt_regs *regs, 3076 bool is_return, void *data) 3077 { 3078 struct bpf_uprobe_multi_link *link = uprobe->link; 3079 struct bpf_uprobe_multi_run_ctx run_ctx = { 3080 .session_ctx = { 3081 .is_return = is_return, 3082 .data = data, 3083 }, 3084 .entry_ip = entry_ip, 3085 .uprobe = uprobe, 3086 }; 3087 struct bpf_prog *prog = link->link.prog; 3088 bool sleepable = prog->sleepable; 3089 struct bpf_run_ctx *old_run_ctx; 3090 int err; 3091 3092 if (link->task && !same_thread_group(current, link->task)) 3093 return 0; 3094 3095 if (sleepable) 3096 rcu_read_lock_trace(); 3097 else 3098 rcu_read_lock(); 3099 3100 migrate_disable(); 3101 3102 old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx); 3103 err = bpf_prog_run(link->link.prog, regs); 3104 bpf_reset_run_ctx(old_run_ctx); 3105 3106 migrate_enable(); 3107 3108 if 
(sleepable) 3109 rcu_read_unlock_trace(); 3110 else 3111 rcu_read_unlock(); 3112 return err; 3113 } 3114 3115 static bool 3116 uprobe_multi_link_filter(struct uprobe_consumer *con, struct mm_struct *mm) 3117 { 3118 struct bpf_uprobe *uprobe; 3119 3120 uprobe = container_of(con, struct bpf_uprobe, consumer); 3121 return uprobe->link->task->mm == mm; 3122 } 3123 3124 static int 3125 uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs, 3126 __u64 *data) 3127 { 3128 struct bpf_uprobe *uprobe; 3129 int ret; 3130 3131 uprobe = container_of(con, struct bpf_uprobe, consumer); 3132 ret = uprobe_prog_run(uprobe, instruction_pointer(regs), regs, false, data); 3133 if (uprobe->session) 3134 return ret ? UPROBE_HANDLER_IGNORE : 0; 3135 return 0; 3136 } 3137 3138 static int 3139 uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs, 3140 __u64 *data) 3141 { 3142 struct bpf_uprobe *uprobe; 3143 3144 uprobe = container_of(con, struct bpf_uprobe, consumer); 3145 uprobe_prog_run(uprobe, func, regs, true, data); 3146 return 0; 3147 } 3148 3149 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx) 3150 { 3151 struct bpf_uprobe_multi_run_ctx *run_ctx; 3152 3153 run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, 3154 session_ctx.run_ctx); 3155 return run_ctx->entry_ip; 3156 } 3157 3158 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx) 3159 { 3160 struct bpf_uprobe_multi_run_ctx *run_ctx; 3161 3162 run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, 3163 session_ctx.run_ctx); 3164 return run_ctx->uprobe->cookie; 3165 } 3166 3167 int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3168 { 3169 struct bpf_uprobe_multi_link *link = NULL; 3170 unsigned long __user *uref_ctr_offsets; 3171 struct bpf_link_primer link_primer; 3172 struct bpf_uprobe *uprobes = NULL; 3173 struct task_struct *task = NULL; 3174 unsigned long __user *uoffsets; 3175 u64 __user *ucookies; 3176 void __user *upath; 3177 u32 flags, cnt, i; 3178 struct path path; 3179 char *name; 3180 pid_t pid; 3181 int err; 3182 3183 /* no support for 32bit archs yet */ 3184 if (sizeof(u64) != sizeof(void *)) 3185 return -EOPNOTSUPP; 3186 3187 if (attr->link_create.flags) 3188 return -EINVAL; 3189 3190 if (!is_uprobe_multi(prog)) 3191 return -EINVAL; 3192 3193 flags = attr->link_create.uprobe_multi.flags; 3194 if (flags & ~BPF_F_UPROBE_MULTI_RETURN) 3195 return -EINVAL; 3196 3197 /* 3198 * path, offsets and cnt are mandatory, 3199 * ref_ctr_offsets and cookies are optional 3200 */ 3201 upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path); 3202 uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets); 3203 cnt = attr->link_create.uprobe_multi.cnt; 3204 pid = attr->link_create.uprobe_multi.pid; 3205 3206 if (!upath || !uoffsets || !cnt || pid < 0) 3207 return -EINVAL; 3208 if (cnt > MAX_UPROBE_MULTI_CNT) 3209 return -E2BIG; 3210 3211 uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets); 3212 ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies); 3213 3214 name = strndup_user(upath, PATH_MAX); 3215 if (IS_ERR(name)) { 3216 err = PTR_ERR(name); 3217 return err; 3218 } 3219 3220 err = kern_path(name, LOOKUP_FOLLOW, &path); 3221 kfree(name); 3222 if (err) 3223 return err; 3224 3225 if (!d_is_reg(path.dentry)) { 3226 err = -EBADF; 3227 goto error_path_put; 3228 } 3229 3230 if (pid) { 3231 rcu_read_lock(); 3232 task = get_pid_task(find_vpid(pid), 
PIDTYPE_TGID); 3233 rcu_read_unlock(); 3234 if (!task) { 3235 err = -ESRCH; 3236 goto error_path_put; 3237 } 3238 } 3239 3240 err = -ENOMEM; 3241 3242 link = kzalloc(sizeof(*link), GFP_KERNEL); 3243 uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL); 3244 3245 if (!uprobes || !link) 3246 goto error_free; 3247 3248 for (i = 0; i < cnt; i++) { 3249 if (__get_user(uprobes[i].offset, uoffsets + i)) { 3250 err = -EFAULT; 3251 goto error_free; 3252 } 3253 if (uprobes[i].offset < 0) { 3254 err = -EINVAL; 3255 goto error_free; 3256 } 3257 if (uref_ctr_offsets && __get_user(uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) { 3258 err = -EFAULT; 3259 goto error_free; 3260 } 3261 if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) { 3262 err = -EFAULT; 3263 goto error_free; 3264 } 3265 3266 uprobes[i].link = link; 3267 3268 if (!(flags & BPF_F_UPROBE_MULTI_RETURN)) 3269 uprobes[i].consumer.handler = uprobe_multi_link_handler; 3270 if (flags & BPF_F_UPROBE_MULTI_RETURN || is_uprobe_session(prog)) 3271 uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler; 3272 if (is_uprobe_session(prog)) 3273 uprobes[i].session = true; 3274 if (pid) 3275 uprobes[i].consumer.filter = uprobe_multi_link_filter; 3276 } 3277 3278 link->cnt = cnt; 3279 link->uprobes = uprobes; 3280 link->path = path; 3281 link->task = task; 3282 link->link.flags = flags; 3283 3284 bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI, 3285 &bpf_uprobe_multi_link_lops, prog, attr->link_create.attach_type); 3286 3287 for (i = 0; i < cnt; i++) { 3288 uprobes[i].uprobe = uprobe_register(d_real_inode(link->path.dentry), 3289 uprobes[i].offset, 3290 uprobes[i].ref_ctr_offset, 3291 &uprobes[i].consumer); 3292 if (IS_ERR(uprobes[i].uprobe)) { 3293 err = PTR_ERR(uprobes[i].uprobe); 3294 link->cnt = i; 3295 goto error_unregister; 3296 } 3297 } 3298 3299 err = bpf_link_prime(&link->link, &link_primer); 3300 if (err) 3301 goto error_unregister; 3302 3303 return bpf_link_settle(&link_primer); 3304 3305 error_unregister: 3306 bpf_uprobe_unregister(uprobes, link->cnt); 3307 3308 error_free: 3309 kvfree(uprobes); 3310 kfree(link); 3311 if (task) 3312 put_task_struct(task); 3313 error_path_put: 3314 path_put(&path); 3315 return err; 3316 } 3317 #else /* !CONFIG_UPROBES */ 3318 int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3319 { 3320 return -EOPNOTSUPP; 3321 } 3322 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx) 3323 { 3324 return 0; 3325 } 3326 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx) 3327 { 3328 return 0; 3329 } 3330 #endif /* CONFIG_UPROBES */ 3331 3332 __bpf_kfunc_start_defs(); 3333 3334 __bpf_kfunc bool bpf_session_is_return(void *ctx) 3335 { 3336 struct bpf_session_run_ctx *session_ctx; 3337 3338 session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx); 3339 return session_ctx->is_return; 3340 } 3341 3342 __bpf_kfunc __u64 *bpf_session_cookie(void *ctx) 3343 { 3344 struct bpf_session_run_ctx *session_ctx; 3345 3346 session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx); 3347 return session_ctx->data; 3348 } 3349 3350 __bpf_kfunc_end_defs(); 3351 3352 BTF_KFUNCS_START(session_kfunc_set_ids) 3353 BTF_ID_FLAGS(func, bpf_session_is_return) 3354 BTF_ID_FLAGS(func, bpf_session_cookie) 3355 BTF_KFUNCS_END(session_kfunc_set_ids) 3356 3357 static int bpf_session_filter(const struct bpf_prog *prog, u32 kfunc_id) 3358 { 3359 if (!btf_id_set8_contains(&session_kfunc_set_ids, kfunc_id)) 3360 return 0; 3361 3362 if 
(!is_kprobe_session(prog) && !is_uprobe_session(prog) && !is_trace_fsession(prog))
		return -EACCES;

	return 0;
}

static const struct btf_kfunc_id_set bpf_session_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &session_kfunc_set_ids,
	.filter = bpf_session_filter,
};

static int __init bpf_trace_kfuncs_init(void)
{
	int err = 0;

	err = err ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_KPROBE, &bpf_session_kfunc_set);
	err = err ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_session_kfunc_set);

	return err;
}

late_initcall(bpf_trace_kfuncs_init);

typedef int (*copy_fn_t)(void *dst, const void *src, u32 size, struct task_struct *tsk);

/*
 * The __always_inline is to make sure the compiler doesn't generate
 * indirect calls into callbacks, which is expensive on some kernel
 * configurations. This allows the compiler to emit direct calls to all
 * the specific callback implementations (copy_user_data_sleepable,
 * copy_user_data_nofault, and so on).
 */
static __always_inline int __bpf_dynptr_copy_str(struct bpf_dynptr *dptr, u64 doff, u64 size,
						 const void *unsafe_src,
						 copy_fn_t str_copy_fn,
						 struct task_struct *tsk)
{
	struct bpf_dynptr_kern *dst;
	u64 chunk_sz, off;
	void *dst_slice;
	int cnt, err;
	char buf[256];

	dst_slice = bpf_dynptr_slice_rdwr(dptr, doff, NULL, size);
	if (likely(dst_slice))
		return str_copy_fn(dst_slice, unsafe_src, size, tsk);

	dst = (struct bpf_dynptr_kern *)dptr;
	if (bpf_dynptr_check_off_len(dst, doff, size))
		return -E2BIG;

	for (off = 0; off < size; off += chunk_sz - 1) {
		chunk_sz = min_t(u64, sizeof(buf), size - off);
		/* str_copy_fn is expected to return the count of copied bytes,
		 * including the zero terminator; the next iteration advances
		 * off by chunk_sz - 1 so that the NUL is overwritten.
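		 *
		 * Worked example, assuming a 300-character source string and
		 * enough dynptr space: pass one copies 255 characters plus a
		 * NUL (cnt == chunk_sz == 256) and advances off by 255; pass
		 * two restarts at source offset 255, overwrites that NUL in
		 * the dynptr and copies the remaining 45 characters plus the
		 * final NUL (cnt == 46 < chunk_sz), returning 255 + 46 == 301.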
3419 */ 3420 cnt = str_copy_fn(buf, unsafe_src + off, chunk_sz, tsk); 3421 if (cnt < 0) 3422 return cnt; 3423 err = __bpf_dynptr_write(dst, doff + off, buf, cnt, 0); 3424 if (err) 3425 return err; 3426 if (cnt < chunk_sz || chunk_sz == 1) /* we are done */ 3427 return off + cnt; 3428 } 3429 return off; 3430 } 3431 3432 static __always_inline int __bpf_dynptr_copy(const struct bpf_dynptr *dptr, u64 doff, 3433 u64 size, const void *unsafe_src, 3434 copy_fn_t copy_fn, struct task_struct *tsk) 3435 { 3436 struct bpf_dynptr_kern *dst; 3437 void *dst_slice; 3438 char buf[256]; 3439 u64 off, chunk_sz; 3440 int err; 3441 3442 dst_slice = bpf_dynptr_slice_rdwr(dptr, doff, NULL, size); 3443 if (likely(dst_slice)) 3444 return copy_fn(dst_slice, unsafe_src, size, tsk); 3445 3446 dst = (struct bpf_dynptr_kern *)dptr; 3447 if (bpf_dynptr_check_off_len(dst, doff, size)) 3448 return -E2BIG; 3449 3450 for (off = 0; off < size; off += chunk_sz) { 3451 chunk_sz = min_t(u64, sizeof(buf), size - off); 3452 err = copy_fn(buf, unsafe_src + off, chunk_sz, tsk); 3453 if (err) 3454 return err; 3455 err = __bpf_dynptr_write(dst, doff + off, buf, chunk_sz, 0); 3456 if (err) 3457 return err; 3458 } 3459 return 0; 3460 } 3461 3462 static __always_inline int copy_user_data_nofault(void *dst, const void *unsafe_src, 3463 u32 size, struct task_struct *tsk) 3464 { 3465 return copy_from_user_nofault(dst, (const void __user *)unsafe_src, size); 3466 } 3467 3468 static __always_inline int copy_user_data_sleepable(void *dst, const void *unsafe_src, 3469 u32 size, struct task_struct *tsk) 3470 { 3471 int ret; 3472 3473 if (!tsk) { /* Read from the current task */ 3474 ret = copy_from_user(dst, (const void __user *)unsafe_src, size); 3475 if (ret) 3476 return -EFAULT; 3477 return 0; 3478 } 3479 3480 ret = access_process_vm(tsk, (unsigned long)unsafe_src, dst, size, 0); 3481 if (ret != size) 3482 return -EFAULT; 3483 return 0; 3484 } 3485 3486 static __always_inline int copy_kernel_data_nofault(void *dst, const void *unsafe_src, 3487 u32 size, struct task_struct *tsk) 3488 { 3489 return copy_from_kernel_nofault(dst, unsafe_src, size); 3490 } 3491 3492 static __always_inline int copy_user_str_nofault(void *dst, const void *unsafe_src, 3493 u32 size, struct task_struct *tsk) 3494 { 3495 return strncpy_from_user_nofault(dst, (const void __user *)unsafe_src, size); 3496 } 3497 3498 static __always_inline int copy_user_str_sleepable(void *dst, const void *unsafe_src, 3499 u32 size, struct task_struct *tsk) 3500 { 3501 int ret; 3502 3503 if (unlikely(size == 0)) 3504 return 0; 3505 3506 if (tsk) { 3507 ret = copy_remote_vm_str(tsk, (unsigned long)unsafe_src, dst, size, 0); 3508 } else { 3509 ret = strncpy_from_user(dst, (const void __user *)unsafe_src, size - 1); 3510 /* strncpy_from_user does not guarantee NUL termination */ 3511 if (ret >= 0) 3512 ((char *)dst)[ret] = '\0'; 3513 } 3514 3515 if (ret < 0) 3516 return ret; 3517 return ret + 1; 3518 } 3519 3520 static __always_inline int copy_kernel_str_nofault(void *dst, const void *unsafe_src, 3521 u32 size, struct task_struct *tsk) 3522 { 3523 return strncpy_from_kernel_nofault(dst, unsafe_src, size); 3524 } 3525 3526 __bpf_kfunc_start_defs(); 3527 3528 __bpf_kfunc int bpf_send_signal_task(struct task_struct *task, int sig, enum pid_type type, 3529 u64 value) 3530 { 3531 if (type != PIDTYPE_PID && type != PIDTYPE_TGID) 3532 return -EINVAL; 3533 3534 return bpf_send_signal_common(sig, type, task, value); 3535 } 3536 3537 __bpf_kfunc int bpf_probe_read_user_dynptr(struct bpf_dynptr 
*dptr, u64 off, 3538 u64 size, const void __user *unsafe_ptr__ign) 3539 { 3540 return __bpf_dynptr_copy(dptr, off, size, (const void __force *)unsafe_ptr__ign, 3541 copy_user_data_nofault, NULL); 3542 } 3543 3544 __bpf_kfunc int bpf_probe_read_kernel_dynptr(struct bpf_dynptr *dptr, u64 off, 3545 u64 size, const void *unsafe_ptr__ign) 3546 { 3547 return __bpf_dynptr_copy(dptr, off, size, unsafe_ptr__ign, 3548 copy_kernel_data_nofault, NULL); 3549 } 3550 3551 __bpf_kfunc int bpf_probe_read_user_str_dynptr(struct bpf_dynptr *dptr, u64 off, 3552 u64 size, const void __user *unsafe_ptr__ign) 3553 { 3554 return __bpf_dynptr_copy_str(dptr, off, size, (const void __force *)unsafe_ptr__ign, 3555 copy_user_str_nofault, NULL); 3556 } 3557 3558 __bpf_kfunc int bpf_probe_read_kernel_str_dynptr(struct bpf_dynptr *dptr, u64 off, 3559 u64 size, const void *unsafe_ptr__ign) 3560 { 3561 return __bpf_dynptr_copy_str(dptr, off, size, unsafe_ptr__ign, 3562 copy_kernel_str_nofault, NULL); 3563 } 3564 3565 __bpf_kfunc int bpf_copy_from_user_dynptr(struct bpf_dynptr *dptr, u64 off, 3566 u64 size, const void __user *unsafe_ptr__ign) 3567 { 3568 return __bpf_dynptr_copy(dptr, off, size, (const void __force *)unsafe_ptr__ign, 3569 copy_user_data_sleepable, NULL); 3570 } 3571 3572 __bpf_kfunc int bpf_copy_from_user_str_dynptr(struct bpf_dynptr *dptr, u64 off, 3573 u64 size, const void __user *unsafe_ptr__ign) 3574 { 3575 return __bpf_dynptr_copy_str(dptr, off, size, (const void __force *)unsafe_ptr__ign, 3576 copy_user_str_sleepable, NULL); 3577 } 3578 3579 __bpf_kfunc int bpf_copy_from_user_task_dynptr(struct bpf_dynptr *dptr, u64 off, 3580 u64 size, const void __user *unsafe_ptr__ign, 3581 struct task_struct *tsk) 3582 { 3583 return __bpf_dynptr_copy(dptr, off, size, (const void __force *)unsafe_ptr__ign, 3584 copy_user_data_sleepable, tsk); 3585 } 3586 3587 __bpf_kfunc int bpf_copy_from_user_task_str_dynptr(struct bpf_dynptr *dptr, u64 off, 3588 u64 size, const void __user *unsafe_ptr__ign, 3589 struct task_struct *tsk) 3590 { 3591 return __bpf_dynptr_copy_str(dptr, off, size, (const void __force *)unsafe_ptr__ign, 3592 copy_user_str_sleepable, tsk); 3593 } 3594 3595 __bpf_kfunc_end_defs(); 3596
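
/*
 * Purely illustrative sketch of how a BPF program might consume the dynptr
 * copy kfuncs defined above. It assumes a BPF-side build where the kfunc
 * prototypes are available and an "rb" BPF_MAP_TYPE_RINGBUF map exists;
 * everything except the kfunc/helper names is hypothetical:
 *
 *	SEC("uprobe.s//usr/bin/app:handle_cmd")
 *	int BPF_UPROBE(dump_cmd, const char *user_str)
 *	{
 *		struct bpf_dynptr dptr;
 *
 *		if (bpf_ringbuf_reserve_dynptr(&rb, 512, 0, &dptr)) {
 *			bpf_ringbuf_discard_dynptr(&dptr, 0);
 *			return 0;
 *		}
 *		// sleepable uprobe, so the faultable string copy can be used
 *		bpf_copy_from_user_str_dynptr(&dptr, 0, 512, user_str);
 *		bpf_ringbuf_submit_dynptr(&dptr, 0);
 *		return 0;
 *	}
 */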