// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf_perf_event.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>
#include <linux/bpf_lsm.h>
#include <linux/fprobe.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/key.h>
#include <linux/verification.h>
#include <linux/namei.h>
#include <linux/fileattr.h>

#include <net/bpf_sk_storage.h>

#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#define MAX_UPROBE_MULTI_CNT (1U << 20)
#define MAX_KPROBE_MULTI_CNT (1U << 20)

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id);
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);

static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx);
static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * the kprobe handler as:
 *	0 - return from kprobe (event is filtered out)
 *	1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * Since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different),
		 * and don't send the kprobe event into the ring buffer,
		 * so return zero here.
		 */
		rcu_read_lock();
		bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array));
		rcu_read_unlock();
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If the prog_array fetched by bpf_prog_array_valid() was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under the RCU lock.
	 * If it turns out that the prog_array is NULL, we bail out.
	 * Conversely, if the fetched pointer was NULL, the prog_array is
	 * skipped, with the risk of missing events if it was updated
	 * between that check and the rcu_dereference(); this is an
	 * accepted risk.
	 */
	rcu_read_lock();
	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
				 ctx, bpf_prog_run);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif

static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func		= bpf_probe_read_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
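/*
 * Illustrative sketch (not part of this file): how a BPF program would call
 * the bpf_probe_read_user() helper above. Assumes a libbpf-style program
 * built against <bpf/bpf_helpers.h> and <bpf/bpf_tracing.h>; the program
 * name trace_enter_read() is hypothetical. Note that on failure the helper
 * zeroes the destination, so the buffer is always defined afterwards.
 *
 *	SEC("kprobe/ksys_read")
 *	int trace_enter_read(struct pt_regs *ctx)
 *	{
 *		const void *ubuf = (void *)PT_REGS_PARM2(ctx);
 *		char first_byte;
 *
 *		if (bpf_probe_read_user(&first_byte, sizeof(first_byte), ubuf))
 *			return 0;	// fault: first_byte was zeroed
 *		// ... use first_byte ...
 *		return 0;
 *	}
 */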
static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;

	/*
	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
	 * terminator into `dst`.
	 *
	 * strncpy_from_user() does long-sized strides in the fast path. If the
	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
	 * then there could be junk after the NUL in `dst`. If the user takes
	 * `dst` and keys a hash map with it, then semantically identical
	 * strings can occupy multiple entries in the map.
	 */
	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func		= bpf_probe_read_user_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func		= bpf_probe_read_kernel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
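/*
 * Illustrative sketch (not part of this file): the *_str variants below
 * return the length of the copied string *including* the trailing NUL on
 * success, which lets a BPF program size follow-up operations exactly
 * instead of shipping the whole buffer. Hypothetical libbpf-side fragment;
 * `events` is assumed to be a BPF_MAP_TYPE_PERF_EVENT_ARRAY:
 *
 *	char name[64];
 *	long n;
 *
 *	n = bpf_probe_read_kernel_str(name, sizeof(name), name_ptr);
 *	if (n > 0)	// n bytes are valid, rest of `name` untouched
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      name, n);
 */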
static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_kernel_nofault() call will likely not fill the
	 * entire buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in the error case, so that improper users ignoring the return
	 * code altogether don't copy garbage; otherwise the length of the
	 * string is returned, which can be used for bpf_perf_event_output()
	 * et al.
	 */
	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func		= bpf_probe_read_kernel_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func		= bpf_probe_read_compat,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_str_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func		= bpf_probe_read_compat_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

#define MAX_TRACE_PRINTK_VARARGS	3
#define BPF_TRACE_PRINTK_SIZE		1024

BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
	struct bpf_bprintf_data data = {
		.get_bin_args	= true,
		.get_buf	= true,
	};
	int ret;

	ret = bpf_bprintf_prepare(fmt, fmt_size, args,
				  MAX_TRACE_PRINTK_VARARGS, &data);
	if (ret < 0)
		return ret;

	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);

	trace_bpf_trace_printk(data.buf);

	bpf_bprintf_cleanup(&data);

	return ret;
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
};
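/*
 * Illustrative sketch (not part of this file): bpf_trace_printk() is usually
 * reached through libbpf's bpf_printk() convenience macro. Output shows up in
 * /sys/kernel/tracing/trace_pipe once the program is loaded; loading such a
 * program also enables the bpf_trace/bpf_trace_printk event, see
 * __set_printk_clr_event() below.
 *
 *	SEC("tracepoint/syscalls/sys_enter_execve")
 *	int trace_execve(void *ctx)
 *	{
 *		bpf_printk("execve by pid %d",
 *			   (int)(bpf_get_current_pid_tgid() >> 32));
 *		return 0;
 *	}
 */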
static void __set_printk_clr_event(void)
{
	/*
	 * This program might be calling bpf_trace_printk,
	 * so enable the associated bpf_trace/bpf_trace_printk event.
	 * Repeat this each time, as it is possible a user has
	 * disabled bpf_trace_printk events. By loading a program that
	 * calls bpf_trace_printk(), however, the user has expressed
	 * the intent to see such events.
	 */
	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
		pr_warn_ratelimited("could not enable bpf_trace_printk events");
}

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_printk_proto;
}

BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
	   u32, data_len)
{
	struct bpf_bprintf_data data = {
		.get_bin_args	= true,
		.get_buf	= true,
	};
	int ret, num_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !args))
		return -EINVAL;
	num_args = data_len / 8;

	ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
	if (ret < 0)
		return ret;

	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);

	trace_bpf_trace_printk(data.buf);

	bpf_bprintf_cleanup(&data);

	return ret;
}

static const struct bpf_func_proto bpf_trace_vprintk_proto = {
	.func		= bpf_trace_vprintk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
};

const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_vprintk_proto;
}

BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, args, u32, data_len)
{
	struct bpf_bprintf_data data = {
		.get_bin_args	= true,
	};
	int err, num_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !args))
		return -EINVAL;
	num_args = data_len / 8;

	err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
	if (err < 0)
		return err;

	seq_bprintf(m, fmt, data.bin_args);

	bpf_bprintf_cleanup(&data);

	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
}

BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
	.func		= bpf_seq_printf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
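/*
 * Illustrative sketch (not part of this file): bpf_seq_printf() is meant for
 * BPF iterator programs, where the seq_file comes from the iterator context.
 * Assumes vmlinux.h types and the BPF_SEQ_PRINTF convenience macro from
 * <bpf/bpf_tracing.h>, which packs the varargs into the args/data_len pair
 * the helper expects.
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct task_struct *task = ctx->task;
 *
 *		if (task)
 *			BPF_SEQ_PRINTF(ctx->meta->seq, "%d %s\n",
 *				       task->pid, task->comm);
 *		return 0;
 *	}
 */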
BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
	return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

static const struct bpf_func_proto bpf_seq_write_proto = {
	.func		= bpf_seq_write,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
}

static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
	.func		= bpf_seq_printf_btf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};
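/*
 * Illustrative sketch (not part of this file): reading a counter opened by
 * user space and stashed in a BPF_MAP_TYPE_PERF_EVENT_ARRAY. Unlike
 * bpf_perf_event_read() (whose return value conflates errors with counter
 * values, as noted above), the _value variant keeps the full error range and
 * also reports enabled/running times for multiplexing corrections. The map
 * name `counters` is hypothetical.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(u32));
 *		__uint(value_size, sizeof(u32));
 *	} counters SEC(".maps");
 *
 *	struct bpf_perf_event_value val;
 *	long err = bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
 *					     &val, sizeof(val));
 *	if (!err)
 *		;	// val.counter, val.enabled, val.running are valid
 */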
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}

/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds;
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int nest_level, err;

	preempt_disable();
	sds = this_cpu_ptr(&bpf_trace_sds);
	nest_level = this_cpu_inc_return(bpf_trace_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	perf_sample_save_raw_data(sd, &raw);

	err = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_trace_nest_level);
	preempt_enable();
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
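/*
 * Illustrative sketch (not part of this file): emitting an event from a
 * kprobe program to a perf ring buffer via the helper above, targeting the
 * current CPU's entry of a BPF_MAP_TYPE_PERF_EVENT_ARRAY. The map `events`
 * and struct `event` are hypothetical.
 *
 *	struct event { u32 pid; char comm[16]; };
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int on_open(struct pt_regs *ctx)
 *	{
 *		struct event e = {};
 *
 *		e.pid = bpf_get_current_pid_tgid() >> 32;
 *		bpf_get_current_comm(e.comm, sizeof(e.comm));
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      &e, sizeof(e));
 *		return 0;
 *	}
 */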
static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	int nest_level;
	u64 ret;

	preempt_disable();
	nest_level = this_cpu_inc_return(bpf_event_output_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	perf_sample_save_raw_data(sd, &raw);

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	preempt_enable();
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_task_btf)
{
	return (unsigned long) current;
}

const struct bpf_func_proto bpf_get_current_task_btf_proto = {
	.func		= bpf_get_current_task_btf,
	.gpl_only	= true,
	.ret_type	= RET_PTR_TO_BTF_ID_TRUSTED,
	.ret_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};

BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
{
	return (unsigned long) task_pt_regs(task);
}

BTF_ID_LIST(bpf_task_pt_regs_ids)
BTF_ID(struct, pt_regs)

const struct bpf_func_proto bpf_task_pt_regs_proto = {
	.func		= bpf_task_pt_regs,
	.gpl_only	= true,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.ret_type	= RET_PTR_TO_BTF_ID,
	.ret_btf_id	= &bpf_task_pt_regs_ids[0],
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
	put_task_struct(work->task);
}
static int bpf_send_signal_common(u32 sig, enum pid_type type)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, the task needs to be
	 * in a sound condition and kernel memory access must be
	 * permitted in order to send a signal to the current
	 * task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;
	/* Task should not be pid=1 to avoid kernel panic. */
	if (unlikely(is_global_init(current)))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (irq_work_is_busy(&work->irq_work))
			return -EBUSY;

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = get_task_struct(current);
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func		= bpf_send_signal,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func		= bpf_send_signal_thread,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
{
	struct path copy;
	long len;
	char *p;

	if (!sz)
		return 0;

	/*
	 * The path pointer is verified as trusted and safe to use,
	 * but let's double check it's valid anyway to work around
	 * a potentially broken verifier.
	 */
	len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
	if (len < 0)
		return len;

	p = d_path(&copy, buf, sz);
	if (IS_ERR(p)) {
		len = PTR_ERR(p);
	} else {
		len = buf + sz - p;
		memmove(buf, p, len);
	}

	return len;
}

BTF_SET_START(btf_allowlist_d_path)
#ifdef CONFIG_SECURITY
BTF_ID(func, security_file_permission)
BTF_ID(func, security_inode_getattr)
BTF_ID(func, security_file_open)
#endif
#ifdef CONFIG_SECURITY_PATH
BTF_ID(func, security_path_truncate)
#endif
BTF_ID(func, vfs_truncate)
BTF_ID(func, vfs_fallocate)
BTF_ID(func, dentry_open)
BTF_ID(func, vfs_getattr)
BTF_ID(func, filp_close)
BTF_SET_END(btf_allowlist_d_path)

static bool bpf_d_path_allowed(const struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_TRACING &&
	    prog->expected_attach_type == BPF_TRACE_ITER)
		return true;

	if (prog->type == BPF_PROG_TYPE_LSM)
		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);

	return btf_id_set_contains(&btf_allowlist_d_path,
				   prog->aux->attach_btf_id);
}

BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)

static const struct bpf_func_proto bpf_d_path_proto = {
	.func		= bpf_d_path,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.allowed	= bpf_d_path_allowed,
};
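/*
 * Illustrative sketch (not part of this file): using bpf_d_path() from one
 * of the allowlisted attach points above (modeled on the BPF selftests;
 * filp_close is in btf_allowlist_d_path). Assumes vmlinux.h and the
 * BPF_PROG() macro from <bpf/bpf_tracing.h>.
 *
 *	SEC("fentry/filp_close")
 *	int BPF_PROG(on_close, struct file *file)
 *	{
 *		char buf[256];
 *		long n = bpf_d_path(&file->f_path, buf, sizeof(buf));
 *
 *		if (n > 0)
 *			bpf_printk("closing %s", buf);
 *		return 0;
 *	}
 */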
#define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
			 BTF_F_PTR_RAW | BTF_F_ZERO)

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id)
{
	const struct btf_type *t;

	if (unlikely(flags & ~(BTF_F_ALL)))
		return -EINVAL;

	if (btf_ptr_size != sizeof(struct btf_ptr))
		return -EINVAL;

	*btf = bpf_get_btf_vmlinux();

	if (IS_ERR_OR_NULL(*btf))
		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;

	if (ptr->type_id > 0)
		*btf_id = ptr->type_id;
	else
		return -EINVAL;

	if (*btf_id > 0)
		t = btf_type_by_id(*btf, *btf_id);
	if (*btf_id <= 0 || !t)
		return -ENOENT;

	return 0;
}

BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
				      flags);
}

const struct bpf_func_proto bpf_snprintf_btf_proto = {
	.func		= bpf_snprintf_btf,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-2];
}

static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
	.func		= bpf_get_func_ip_tracing,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};
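/*
 * Illustrative sketch (not part of this file): pretty-printing a kernel
 * object with bpf_snprintf_btf(). The BTF type ID is resolved at load time
 * with bpf_core_type_id_kernel() from <bpf/bpf_core_read.h>; `task` is
 * assumed to be a trusted task_struct pointer already held by the program.
 *
 *	char out[512];
 *	struct btf_ptr p = {
 *		.ptr     = task,
 *		.type_id = bpf_core_type_id_kernel(struct task_struct),
 *	};
 *
 *	// flags: 0, or a combination of BTF_F_* accepted by
 *	// bpf_btf_printf_prepare() above
 *	bpf_snprintf_btf(out, sizeof(out), &p, sizeof(p), 0);
 */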
#ifdef CONFIG_X86_KERNEL_IBT
static unsigned long get_entry_ip(unsigned long fentry_ip)
{
	u32 instr;

	/* We want to be extra safe in case entry ip is on the page edge,
	 * but otherwise we need to avoid get_kernel_nofault()'s overhead.
	 */
	if ((fentry_ip & ~PAGE_MASK) < ENDBR_INSN_SIZE) {
		if (get_kernel_nofault(instr, (u32 *)(fentry_ip - ENDBR_INSN_SIZE)))
			return fentry_ip;
	} else {
		instr = *(u32 *)(fentry_ip - ENDBR_INSN_SIZE);
	}
	if (is_endbr(instr))
		fentry_ip -= ENDBR_INSN_SIZE;
	return fentry_ip;
}
#else
#define get_entry_ip(fentry_ip) fentry_ip
#endif

BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
{
	struct bpf_trace_run_ctx *run_ctx __maybe_unused;
	struct kprobe *kp;

#ifdef CONFIG_UPROBES
	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	if (run_ctx->is_uprobe)
		return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr;
#endif

	kp = kprobe_running();

	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
		return 0;

	return get_entry_ip((uintptr_t)kp->addr);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
	.func		= bpf_get_func_ip_kprobe,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
	.func		= bpf_get_func_ip_kprobe_multi,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_cookie(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
	.func		= bpf_get_attach_cookie_kprobe_multi,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs)
{
	return bpf_uprobe_multi_entry_ip(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi = {
	.func		= bpf_get_func_ip_uprobe_multi,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs)
{
	return bpf_uprobe_multi_cookie(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti = {
	.func		= bpf_get_attach_cookie_uprobe_multi,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
	.func		= bpf_get_attach_cookie_trace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
{
	return ctx->event->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
	.func		= bpf_get_attach_cookie_pe,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};
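/*
 * Illustrative sketch (not part of this file): with a kprobe.multi link,
 * each attached symbol can carry its own cookie (supplied at attach time,
 * e.g. via libbpf's bpf_program__attach_kprobe_multi_opts()), and the
 * program reads it back with bpf_get_attach_cookie(); bpf_get_func_ip()
 * reports which of the attached functions fired. The wildcard pattern is
 * hypothetical.
 *
 *	SEC("kprobe.multi/vfs_*")
 *	int on_vfs(struct pt_regs *ctx)
 *	{
 *		u64 cookie = bpf_get_attach_cookie(ctx);
 *		u64 ip = bpf_get_func_ip(ctx);
 *
 *		bpf_printk("func %lx cookie %llu", ip, cookie);
 *		return 0;
 *	}
 */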
BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
	.func		= bpf_get_attach_cookie_tracing,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
{
#ifndef CONFIG_X86
	return -ENOENT;
#else
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	u32 entry_cnt = size / br_entry_size;

	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);

	if (unlikely(flags))
		return -EINVAL;

	if (!entry_cnt)
		return -ENOENT;

	return entry_cnt * br_entry_size;
#endif
}

static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
	.func		= bpf_get_branch_snapshot,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	if ((u64) n >= nr_args)
		return -EINVAL;
	*value = ((u64 *)ctx)[n];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_arg_proto = {
	.func		= get_func_arg,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	*value = ((u64 *)ctx)[nr_args];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_ret_proto = {
	.func		= get_func_ret,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_1(get_func_arg_cnt, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-1];
}

static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
	.func		= get_func_arg_cnt,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};
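/*
 * Illustrative sketch (not part of this file): the three helpers above are
 * available to trampoline-based programs (fentry/fexit), where the context
 * is the array of u64 arguments the trampoline saved, with the argument
 * count at ctx[-1] and the return value stored after the last argument.
 * An fexit program can walk the traced function generically:
 *
 *	SEC("fexit/kernel_clone")
 *	int BPF_PROG(after_clone)
 *	{
 *		u64 nr_args = bpf_get_func_arg_cnt(ctx);
 *		u64 arg0 = 0, retval = 0;
 *
 *		if (nr_args > 0)
 *			bpf_get_func_arg(ctx, 0, &arg0);
 *		bpf_get_func_ret(ctx, &retval);
 *		return 0;
 *	}
 */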
#ifdef CONFIG_KEYS
__bpf_kfunc_start_defs();

/**
 * bpf_lookup_user_key - lookup a key by its serial
 * @serial: key handle serial number
 * @flags: lookup-specific flags
 *
 * Search a key with a given *serial* and the provided *flags*.
 * If found, increment the reference count of the key by one, and
 * return it in the bpf_key structure.
 *
 * The bpf_key structure must be passed to bpf_key_put() when done
 * with it, so that the key reference count is decremented and the
 * bpf_key structure is freed.
 *
 * Permission checks are deferred to the time the key is used by
 * one of the available key-specific kfuncs.
 *
 * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
 * special keyring (e.g. session keyring), if it doesn't yet exist.
 * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
 * for the key construction, and to retrieve uninstantiated keys (keys
 * without data attached to them).
 *
 * Return: a bpf_key pointer with a valid key pointer if the key is found, a
 *         NULL pointer otherwise.
 */
__bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
{
	key_ref_t key_ref;
	struct bpf_key *bkey;

	if (flags & ~KEY_LOOKUP_ALL)
		return NULL;

	/*
	 * Permission check is deferred until the key is used, as the
	 * intent of the caller is unknown here.
	 */
	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
	if (IS_ERR(key_ref))
		return NULL;

	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
	if (!bkey) {
		key_put(key_ref_to_ptr(key_ref));
		return NULL;
	}

	bkey->key = key_ref_to_ptr(key_ref);
	bkey->has_ref = true;

	return bkey;
}

/**
 * bpf_lookup_system_key - lookup a key by a system-defined ID
 * @id: key ID
 *
 * Obtain a bpf_key structure with a key pointer set to the passed key ID.
 * The key pointer is marked as invalid, to prevent bpf_key_put() from
 * attempting to decrement the key reference count on that pointer. A key
 * pointer set in such a way is currently understood only by
 * verify_pkcs7_signature().
 *
 * Set *id* to one of the values defined in include/linux/verification.h:
 * 0 for the primary keyring (immutable keyring of system keys);
 * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
 * (where keys can be added only if they are vouched for by existing keys
 * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
 * keyring (primarily used by the integrity subsystem to verify a kexec'ed
 * kernel image and, possibly, the initramfs signature).
 *
 * Return: a bpf_key pointer with an invalid key pointer set from the
 *         pre-determined ID on success, a NULL pointer otherwise
 */
__bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
{
	struct bpf_key *bkey;

	if (system_keyring_id_check(id) < 0)
		return NULL;

	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
	if (!bkey)
		return NULL;

	bkey->key = (struct key *)(unsigned long)id;
	bkey->has_ref = false;

	return bkey;
}

/**
 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
 * @bkey: bpf_key structure
 *
 * Decrement the reference count of the key inside *bkey*, if the pointer
 * is valid, and free *bkey*.
 */
__bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
{
	if (bkey->has_ref)
		key_put(bkey->key);

	kfree(bkey);
}
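/*
 * Illustrative sketch (not part of this file): the acquire/release pairing
 * the kfuncs above expect, from a sleepable BPF LSM program.
 * bpf_lookup_user_key() may sleep (KF_SLEEPABLE), so the program must be
 * loaded as sleepable ("lsm.s/"); kfuncs are declared with __ksym. The
 * serial value MY_KEY_SERIAL is a hypothetical placeholder.
 *
 *	extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
 *	extern void bpf_key_put(struct bpf_key *bkey) __ksym;
 *
 *	SEC("lsm.s/bpf")
 *	int BPF_PROG(check, int cmd, union bpf_attr *attr, unsigned int size)
 *	{
 *		struct bpf_key *bkey = bpf_lookup_user_key(MY_KEY_SERIAL, 0);
 *
 *		if (!bkey)	// KF_RET_NULL: must check before use
 *			return 0;
 *		// ... use the key with a key-specific kfunc ...
 *		bpf_key_put(bkey);	// KF_RELEASE: mandatory
 *		return 0;
 *	}
 */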
#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
/**
 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
 * @data_ptr: data to verify
 * @sig_ptr: signature of the data
 * @trusted_keyring: keyring with keys trusted for signature verification
 *
 * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr*
 * with keys in a keyring referenced by *trusted_keyring*.
 *
 * Return: 0 on success, a negative value on error.
 */
__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
					   struct bpf_dynptr_kern *sig_ptr,
					   struct bpf_key *trusted_keyring)
{
	const void *data, *sig;
	u32 data_len, sig_len;
	int ret;

	if (trusted_keyring->has_ref) {
		/*
		 * Do the permission check deferred in bpf_lookup_user_key().
		 * See bpf_lookup_user_key() for more details.
		 *
		 * A call to key_task_permission() here would be redundant, as
		 * it is already done by keyring_search() called by
		 * find_asymmetric_key().
		 */
		ret = key_validate(trusted_keyring->key);
		if (ret < 0)
			return ret;
	}

	data_len = __bpf_dynptr_size(data_ptr);
	data = __bpf_dynptr_data(data_ptr, data_len);
	sig_len = __bpf_dynptr_size(sig_ptr);
	sig = __bpf_dynptr_data(sig_ptr, sig_len);

	return verify_pkcs7_signature(data, data_len, sig, sig_len,
				      trusted_keyring->key,
				      VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
				      NULL);
}
#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(key_sig_kfunc_set)
BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
#endif
BTF_KFUNCS_END(key_sig_kfunc_set)

static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &key_sig_kfunc_set,
};

static int __init bpf_key_sig_kfuncs_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
					 &bpf_key_sig_kfunc_set);
}

late_initcall(bpf_key_sig_kfuncs_init);
#endif /* CONFIG_KEYS */

/* filesystem kfuncs */
__bpf_kfunc_start_defs();

/**
 * bpf_get_file_xattr - get xattr of a file
 * @file: file to get xattr from
 * @name__str: name of the xattr
 * @value_ptr: output buffer of the xattr value
 *
 * Get xattr *name__str* of *file* and store the output in *value_ptr*.
 *
 * For security reasons, only *name__str* with prefix "user." is allowed.
 *
 * Return: 0 on success, a negative value on error.
 */
__bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
				   struct bpf_dynptr_kern *value_ptr)
{
	struct dentry *dentry;
	u32 value_len;
	void *value;
	int ret;

	if (strncmp(name__str, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
		return -EPERM;

	value_len = __bpf_dynptr_size(value_ptr);
	value = __bpf_dynptr_data_rw(value_ptr, value_len);
	if (!value)
		return -EINVAL;

	dentry = file_dentry(file);
	ret = inode_permission(&nop_mnt_idmap, dentry->d_inode, MAY_READ);
	if (ret)
		return ret;
	return __vfs_getxattr(dentry, dentry->d_inode, name__str, value, value_len);
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(fs_kfunc_set_ids)
BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
BTF_KFUNCS_END(fs_kfunc_set_ids)
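/*
 * Illustrative sketch (not part of this file): calling the
 * bpf_get_file_xattr() kfunc above from a BPF LSM program (the filter below
 * rejects other program types). The output buffer is handed over as a
 * dynptr built with bpf_dynptr_from_mem(); the xattr name "user.policy" is
 * hypothetical.
 *
 *	extern int bpf_get_file_xattr(struct file *file, const char *name__str,
 *				      struct bpf_dynptr *value_ptr) __ksym;
 *
 *	SEC("lsm.s/file_open")
 *	int BPF_PROG(check_open, struct file *file)
 *	{
 *		char value[64];
 *		struct bpf_dynptr ptr;
 *
 *		bpf_dynptr_from_mem(value, sizeof(value), 0, &ptr);
 *		if (bpf_get_file_xattr(file, "user.policy", &ptr) < 0)
 *			return 0;	// no such xattr, or not readable
 *		// ... inspect `value` ...
 *		return 0;
 *	}
 */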
static int bpf_get_file_xattr_filter(const struct bpf_prog *prog, u32 kfunc_id)
{
	if (!btf_id_set8_contains(&fs_kfunc_set_ids, kfunc_id))
		return 0;

	/* Only allow to attach from LSM hooks, to avoid recursion */
	return prog->type != BPF_PROG_TYPE_LSM ? -EACCES : 0;
}

static const struct btf_kfunc_id_set bpf_fs_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &fs_kfunc_set_ids,
	.filter = bpf_get_file_xattr_filter,
};

static int __init bpf_fs_kfuncs_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fs_kfunc_set);
}

late_initcall(bpf_fs_kfuncs_init);

static const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_map_lookup_percpu_elem:
		return &bpf_map_lookup_percpu_elem_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_task_btf:
		return &bpf_get_current_task_btf_proto;
	case BPF_FUNC_task_pt_regs:
		return &bpf_task_pt_regs_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_write_user:
		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
		       NULL : bpf_get_probe_write_proto();
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	case BPF_FUNC_probe_read:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_compat_proto;
	case BPF_FUNC_probe_read_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_compat_str_proto;
#endif
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_cgrp_storage_get:
		return &bpf_cgrp_storage_get_proto;
	case BPF_FUNC_cgrp_storage_delete:
		return &bpf_cgrp_storage_delete_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	case BPF_FUNC_send_signal_thread:
		return &bpf_send_signal_thread_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_get_task_stack:
		return &bpf_get_task_stack_proto;
	case BPF_FUNC_copy_from_user:
		return &bpf_copy_from_user_proto;
	case BPF_FUNC_copy_from_user_task:
		return &bpf_copy_from_user_task_proto;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	case BPF_FUNC_task_storage_get:
		if (bpf_prog_check_recur(prog))
			return &bpf_task_storage_get_recur_proto;
		return &bpf_task_storage_get_proto;
	case BPF_FUNC_task_storage_delete:
		if (bpf_prog_check_recur(prog))
			return &bpf_task_storage_delete_recur_proto;
		return &bpf_task_storage_delete_proto;
	case BPF_FUNC_for_each_map_elem:
		return &bpf_for_each_map_elem_proto;
	case BPF_FUNC_snprintf:
		return &bpf_snprintf_proto;
	case BPF_FUNC_get_func_ip:
		return &bpf_get_func_ip_proto_tracing;
	case BPF_FUNC_get_branch_snapshot:
		return &bpf_get_branch_snapshot_proto;
	case BPF_FUNC_find_vma:
		return &bpf_find_vma_proto;
	case BPF_FUNC_trace_vprintk:
		return bpf_get_trace_vprintk_proto();
	default:
		return bpf_base_func_proto(func_id, prog);
	}
}

static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	case BPF_FUNC_get_func_ip:
		if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI)
			return &bpf_get_func_ip_proto_kprobe_multi;
		if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
			return &bpf_get_func_ip_proto_uprobe_multi;
		return &bpf_get_func_ip_proto_kprobe;
	case BPF_FUNC_get_attach_cookie:
		if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI)
			return &bpf_get_attach_cookie_proto_kmulti;
		if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
			return &bpf_get_attach_cookie_proto_umulti;
		return &bpf_get_attach_cookie_proto_trace;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto  = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call the raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func		= bpf_get_stack_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	case BPF_FUNC_get_attach_cookie:
		return &bpf_get_attach_cookie_proto_trace;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto  = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func		= bpf_perf_prog_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};
BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	struct perf_branch_stack *br_stack = ctx->data->br_stack;
	u32 to_copy;

	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
		return -EINVAL;

	if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
		return -ENOENT;

	if (unlikely(!br_stack))
		return -ENOENT;

	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
		return br_stack->nr * br_entry_size;

	if (!buf || (size % br_entry_size != 0))
		return -EINVAL;

	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
	memcpy(buf, br_stack->entries, to_copy);

	return to_copy;
}

static const struct bpf_func_proto bpf_read_branch_records_proto = {
	.func		= bpf_read_branch_records,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
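/*
 * Illustrative sketch (not part of this file): the two-call pattern
 * bpf_read_branch_records() supports from a perf_event program: query the
 * available size first with BPF_F_GET_BRANCH_RECORDS_SIZE (buf may be NULL),
 * then copy the entries. The copy size must be a multiple of
 * sizeof(struct perf_branch_entry), which a fixed-size array guarantees.
 *
 *	SEC("perf_event")
 *	int on_sample(struct bpf_perf_event_data *ctx)
 *	{
 *		struct perf_branch_entry entries[16];
 *		long sz = bpf_read_branch_records(ctx, NULL, 0,
 *						  BPF_F_GET_BRANCH_RECORDS_SIZE);
 *
 *		if (sz <= 0)
 *			return 0;	// no branch stack in this sample
 *		bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
 *		return 0;
 *	}
 */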
get_bpf_raw_tp_regs(); 1955 int ret; 1956 1957 if (IS_ERR(regs)) 1958 return PTR_ERR(regs); 1959 1960 perf_fetch_caller_regs(regs); 1961 /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */ 1962 ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map, 1963 flags, 0, 0); 1964 put_bpf_raw_tp_regs(); 1965 return ret; 1966 } 1967 1968 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = { 1969 .func = bpf_get_stackid_raw_tp, 1970 .gpl_only = true, 1971 .ret_type = RET_INTEGER, 1972 .arg1_type = ARG_PTR_TO_CTX, 1973 .arg2_type = ARG_CONST_MAP_PTR, 1974 .arg3_type = ARG_ANYTHING, 1975 }; 1976 1977 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args, 1978 void *, buf, u32, size, u64, flags) 1979 { 1980 struct pt_regs *regs = get_bpf_raw_tp_regs(); 1981 int ret; 1982 1983 if (IS_ERR(regs)) 1984 return PTR_ERR(regs); 1985 1986 perf_fetch_caller_regs(regs); 1987 ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf, 1988 (unsigned long) size, flags, 0); 1989 put_bpf_raw_tp_regs(); 1990 return ret; 1991 } 1992 1993 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = { 1994 .func = bpf_get_stack_raw_tp, 1995 .gpl_only = true, 1996 .ret_type = RET_INTEGER, 1997 .arg1_type = ARG_PTR_TO_CTX, 1998 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 1999 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 2000 .arg4_type = ARG_ANYTHING, 2001 }; 2002 2003 static const struct bpf_func_proto * 2004 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 2005 { 2006 switch (func_id) { 2007 case BPF_FUNC_perf_event_output: 2008 return &bpf_perf_event_output_proto_raw_tp; 2009 case BPF_FUNC_get_stackid: 2010 return &bpf_get_stackid_proto_raw_tp; 2011 case BPF_FUNC_get_stack: 2012 return &bpf_get_stack_proto_raw_tp; 2013 case BPF_FUNC_get_attach_cookie: 2014 return &bpf_get_attach_cookie_proto_tracing; 2015 default: 2016 return bpf_tracing_func_proto(func_id, prog); 2017 } 2018 } 2019 2020 const struct bpf_func_proto * 2021 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 2022 { 2023 const struct bpf_func_proto *fn; 2024 2025 switch (func_id) { 2026 #ifdef CONFIG_NET 2027 case BPF_FUNC_skb_output: 2028 return &bpf_skb_output_proto; 2029 case BPF_FUNC_xdp_output: 2030 return &bpf_xdp_output_proto; 2031 case BPF_FUNC_skc_to_tcp6_sock: 2032 return &bpf_skc_to_tcp6_sock_proto; 2033 case BPF_FUNC_skc_to_tcp_sock: 2034 return &bpf_skc_to_tcp_sock_proto; 2035 case BPF_FUNC_skc_to_tcp_timewait_sock: 2036 return &bpf_skc_to_tcp_timewait_sock_proto; 2037 case BPF_FUNC_skc_to_tcp_request_sock: 2038 return &bpf_skc_to_tcp_request_sock_proto; 2039 case BPF_FUNC_skc_to_udp6_sock: 2040 return &bpf_skc_to_udp6_sock_proto; 2041 case BPF_FUNC_skc_to_unix_sock: 2042 return &bpf_skc_to_unix_sock_proto; 2043 case BPF_FUNC_skc_to_mptcp_sock: 2044 return &bpf_skc_to_mptcp_sock_proto; 2045 case BPF_FUNC_sk_storage_get: 2046 return &bpf_sk_storage_get_tracing_proto; 2047 case BPF_FUNC_sk_storage_delete: 2048 return &bpf_sk_storage_delete_tracing_proto; 2049 case BPF_FUNC_sock_from_file: 2050 return &bpf_sock_from_file_proto; 2051 case BPF_FUNC_get_socket_cookie: 2052 return &bpf_get_socket_ptr_cookie_proto; 2053 case BPF_FUNC_xdp_get_buff_len: 2054 return &bpf_xdp_get_buff_len_trace_proto; 2055 #endif 2056 case BPF_FUNC_seq_printf: 2057 return prog->expected_attach_type == BPF_TRACE_ITER ? 2058 &bpf_seq_printf_proto : 2059 NULL; 2060 case BPF_FUNC_seq_write: 2061 return prog->expected_attach_type == BPF_TRACE_ITER ? 
2062 &bpf_seq_write_proto : 2063 NULL; 2064 case BPF_FUNC_seq_printf_btf: 2065 return prog->expected_attach_type == BPF_TRACE_ITER ? 2066 &bpf_seq_printf_btf_proto : 2067 NULL; 2068 case BPF_FUNC_d_path: 2069 return &bpf_d_path_proto; 2070 case BPF_FUNC_get_func_arg: 2071 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL; 2072 case BPF_FUNC_get_func_ret: 2073 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL; 2074 case BPF_FUNC_get_func_arg_cnt: 2075 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL; 2076 case BPF_FUNC_get_attach_cookie: 2077 if (prog->type == BPF_PROG_TYPE_TRACING && 2078 prog->expected_attach_type == BPF_TRACE_RAW_TP) 2079 return &bpf_get_attach_cookie_proto_tracing; 2080 return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL; 2081 default: 2082 fn = raw_tp_prog_func_proto(func_id, prog); 2083 if (!fn && prog->expected_attach_type == BPF_TRACE_ITER) 2084 fn = bpf_iter_get_func_proto(func_id, prog); 2085 return fn; 2086 } 2087 } 2088 2089 static bool raw_tp_prog_is_valid_access(int off, int size, 2090 enum bpf_access_type type, 2091 const struct bpf_prog *prog, 2092 struct bpf_insn_access_aux *info) 2093 { 2094 return bpf_tracing_ctx_access(off, size, type); 2095 } 2096 2097 static bool tracing_prog_is_valid_access(int off, int size, 2098 enum bpf_access_type type, 2099 const struct bpf_prog *prog, 2100 struct bpf_insn_access_aux *info) 2101 { 2102 return bpf_tracing_btf_ctx_access(off, size, type, prog, info); 2103 } 2104 2105 int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog, 2106 const union bpf_attr *kattr, 2107 union bpf_attr __user *uattr) 2108 { 2109 return -ENOTSUPP; 2110 } 2111 2112 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = { 2113 .get_func_proto = raw_tp_prog_func_proto, 2114 .is_valid_access = raw_tp_prog_is_valid_access, 2115 }; 2116 2117 const struct bpf_prog_ops raw_tracepoint_prog_ops = { 2118 #ifdef CONFIG_NET 2119 .test_run = bpf_prog_test_run_raw_tp, 2120 #endif 2121 }; 2122 2123 const struct bpf_verifier_ops tracing_verifier_ops = { 2124 .get_func_proto = tracing_prog_func_proto, 2125 .is_valid_access = tracing_prog_is_valid_access, 2126 }; 2127 2128 const struct bpf_prog_ops tracing_prog_ops = { 2129 .test_run = bpf_prog_test_run_tracing, 2130 }; 2131 2132 static bool raw_tp_writable_prog_is_valid_access(int off, int size, 2133 enum bpf_access_type type, 2134 const struct bpf_prog *prog, 2135 struct bpf_insn_access_aux *info) 2136 { 2137 if (off == 0) { 2138 if (size != sizeof(u64) || type != BPF_READ) 2139 return false; 2140 info->reg_type = PTR_TO_TP_BUFFER; 2141 } 2142 return raw_tp_prog_is_valid_access(off, size, type, prog, info); 2143 } 2144 2145 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = { 2146 .get_func_proto = raw_tp_prog_func_proto, 2147 .is_valid_access = raw_tp_writable_prog_is_valid_access, 2148 }; 2149 2150 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = { 2151 }; 2152 2153 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type, 2154 const struct bpf_prog *prog, 2155 struct bpf_insn_access_aux *info) 2156 { 2157 const int size_u64 = sizeof(u64); 2158 2159 if (off < 0 || off >= sizeof(struct bpf_perf_event_data)) 2160 return false; 2161 if (type != BPF_READ) 2162 return false; 2163 if (off % size != 0) { 2164 if (sizeof(unsigned long) != 4) 2165 return false; 2166 if (size != 8) 2167 return false; 2168 if (off % size != 4) 2169 return false; 2170 } 2171 2172 
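/* Narrow (smaller than 8-byte) reads of sample_period and addr are allowed because pe_prog_convert_ctx_access() below remaps them to perf_sample_data; any other offset must be a long-sized read from the pt_regs area. */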
switch (off) { 2173 case bpf_ctx_range(struct bpf_perf_event_data, sample_period): 2174 bpf_ctx_record_field_size(info, size_u64); 2175 if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) 2176 return false; 2177 break; 2178 case bpf_ctx_range(struct bpf_perf_event_data, addr): 2179 bpf_ctx_record_field_size(info, size_u64); 2180 if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) 2181 return false; 2182 break; 2183 default: 2184 if (size != sizeof(long)) 2185 return false; 2186 } 2187 2188 return true; 2189 } 2190 2191 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, 2192 const struct bpf_insn *si, 2193 struct bpf_insn *insn_buf, 2194 struct bpf_prog *prog, u32 *target_size) 2195 { 2196 struct bpf_insn *insn = insn_buf; 2197 2198 switch (si->off) { 2199 case offsetof(struct bpf_perf_event_data, sample_period): 2200 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 2201 data), si->dst_reg, si->src_reg, 2202 offsetof(struct bpf_perf_event_data_kern, data)); 2203 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, 2204 bpf_target_off(struct perf_sample_data, period, 8, 2205 target_size)); 2206 break; 2207 case offsetof(struct bpf_perf_event_data, addr): 2208 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 2209 data), si->dst_reg, si->src_reg, 2210 offsetof(struct bpf_perf_event_data_kern, data)); 2211 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, 2212 bpf_target_off(struct perf_sample_data, addr, 8, 2213 target_size)); 2214 break; 2215 default: 2216 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 2217 regs), si->dst_reg, si->src_reg, 2218 offsetof(struct bpf_perf_event_data_kern, regs)); 2219 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg, 2220 si->off); 2221 break; 2222 } 2223 2224 return insn - insn_buf; 2225 } 2226 2227 const struct bpf_verifier_ops perf_event_verifier_ops = { 2228 .get_func_proto = pe_prog_func_proto, 2229 .is_valid_access = pe_prog_is_valid_access, 2230 .convert_ctx_access = pe_prog_convert_ctx_access, 2231 }; 2232 2233 const struct bpf_prog_ops perf_event_prog_ops = { 2234 }; 2235 2236 static DEFINE_MUTEX(bpf_event_mutex); 2237 2238 #define BPF_TRACE_MAX_PROGS 64 2239 2240 int perf_event_attach_bpf_prog(struct perf_event *event, 2241 struct bpf_prog *prog, 2242 u64 bpf_cookie) 2243 { 2244 struct bpf_prog_array *old_array; 2245 struct bpf_prog_array *new_array; 2246 int ret = -EEXIST; 2247 2248 /* 2249 * A kprobe override only works if the probe is on the function entry, 2250 * and only if the target function is on the error-injection opt-in list.
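 * (Functions opt in via the ALLOW_ERROR_INJECTION() annotation; that is the list trace_kprobe_error_injectable() checks.)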
2251 */ 2252 if (prog->kprobe_override && 2253 (!trace_kprobe_on_func_entry(event->tp_event) || 2254 !trace_kprobe_error_injectable(event->tp_event))) 2255 return -EINVAL; 2256 2257 mutex_lock(&bpf_event_mutex); 2258 2259 if (event->prog) 2260 goto unlock; 2261 2262 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); 2263 if (old_array && 2264 bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) { 2265 ret = -E2BIG; 2266 goto unlock; 2267 } 2268 2269 ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array); 2270 if (ret < 0) 2271 goto unlock; 2272 2273 /* set the new array to event->tp_event and set event->prog */ 2274 event->prog = prog; 2275 event->bpf_cookie = bpf_cookie; 2276 rcu_assign_pointer(event->tp_event->prog_array, new_array); 2277 bpf_prog_array_free_sleepable(old_array); 2278 2279 unlock: 2280 mutex_unlock(&bpf_event_mutex); 2281 return ret; 2282 } 2283 2284 void perf_event_detach_bpf_prog(struct perf_event *event) 2285 { 2286 struct bpf_prog_array *old_array; 2287 struct bpf_prog_array *new_array; 2288 int ret; 2289 2290 mutex_lock(&bpf_event_mutex); 2291 2292 if (!event->prog) 2293 goto unlock; 2294 2295 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); 2296 ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array); 2297 if (ret == -ENOENT) 2298 goto unlock; 2299 if (ret < 0) { 2300 bpf_prog_array_delete_safe(old_array, event->prog); 2301 } else { 2302 rcu_assign_pointer(event->tp_event->prog_array, new_array); 2303 bpf_prog_array_free_sleepable(old_array); 2304 } 2305 2306 bpf_prog_put(event->prog); 2307 event->prog = NULL; 2308 2309 unlock: 2310 mutex_unlock(&bpf_event_mutex); 2311 } 2312 2313 int perf_event_query_prog_array(struct perf_event *event, void __user *info) 2314 { 2315 struct perf_event_query_bpf __user *uquery = info; 2316 struct perf_event_query_bpf query = {}; 2317 struct bpf_prog_array *progs; 2318 u32 *ids, prog_cnt, ids_len; 2319 int ret; 2320 2321 if (!perfmon_capable()) 2322 return -EPERM; 2323 if (event->attr.type != PERF_TYPE_TRACEPOINT) 2324 return -EINVAL; 2325 if (copy_from_user(&query, uquery, sizeof(query))) 2326 return -EFAULT; 2327 2328 ids_len = query.ids_len; 2329 if (ids_len > BPF_TRACE_MAX_PROGS) 2330 return -E2BIG; 2331 ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN); 2332 if (!ids) 2333 return -ENOMEM; 2334 /* 2335 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which 2336 * is required when user only wants to check for uquery->prog_cnt. 2337 * There is no need to check for it since the case is handled 2338 * gracefully in bpf_prog_array_copy_info. 
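 * (kcalloc(0, ...) hands back ZERO_SIZE_PTR rather than NULL, so the !ids check above only triggers on a real allocation failure.)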
*/ 2340 2341 mutex_lock(&bpf_event_mutex); 2342 progs = bpf_event_rcu_dereference(event->tp_event->prog_array); 2343 ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt); 2344 mutex_unlock(&bpf_event_mutex); 2345 2346 if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) || 2347 copy_to_user(uquery->ids, ids, ids_len * sizeof(u32))) 2348 ret = -EFAULT; 2349 2350 kfree(ids); 2351 return ret; 2352 } 2353 2354 extern struct bpf_raw_event_map __start__bpf_raw_tp[]; 2355 extern struct bpf_raw_event_map __stop__bpf_raw_tp[]; 2356 2357 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name) 2358 { 2359 struct bpf_raw_event_map *btp = __start__bpf_raw_tp; 2360 2361 for (; btp < __stop__bpf_raw_tp; btp++) { 2362 if (!strcmp(btp->tp->name, name)) 2363 return btp; 2364 } 2365 2366 return bpf_get_raw_tracepoint_module(name); 2367 } 2368 2369 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp) 2370 { 2371 struct module *mod; 2372 2373 preempt_disable(); 2374 mod = __module_address((unsigned long)btp); 2375 module_put(mod); 2376 preempt_enable(); 2377 } 2378 2379 static __always_inline 2380 void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args) 2381 { 2382 struct bpf_prog *prog = link->link.prog; 2383 struct bpf_run_ctx *old_run_ctx; 2384 struct bpf_trace_run_ctx run_ctx; 2385 2386 cant_sleep(); 2387 if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) { 2388 bpf_prog_inc_misses_counter(prog); 2389 goto out; 2390 } 2391 2392 run_ctx.bpf_cookie = link->cookie; 2393 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); 2394 2395 rcu_read_lock(); 2396 (void) bpf_prog_run(prog, args); 2397 rcu_read_unlock(); 2398 2399 bpf_reset_run_ctx(old_run_ctx); 2400 out: 2401 this_cpu_dec(*(prog->active)); 2402 } 2403 2404 #define UNPACK(...) __VA_ARGS__ 2405 #define REPEAT_1(FN, DL, X, ...) FN(X) 2406 #define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__) 2407 #define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__) 2408 #define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__) 2409 #define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__) 2410 #define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__) 2411 #define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__) 2412 #define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__) 2413 #define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__) 2414 #define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__) 2415 #define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__) 2416 #define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__) 2417 #define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__)
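/*
 * The REPEAT()/UNPACK() machinery above stamps out one fragment per
 * tracepoint argument. As a rough sketch of what the preprocessor
 * produces, BPF_TRACE_DEFN_x(2) below expands to something like:
 *
 *	void bpf_trace_run2(struct bpf_raw_tp_link *link, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(link, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */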
2418 2419 #define SARG(X) u64 arg##X 2420 #define COPY(X) args[X] = arg##X 2421 2422 #define __DL_COM (,) 2423 #define __DL_SEM (;) 2424 2425 #define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 2426 2427 #define BPF_TRACE_DEFN_x(x) \ 2428 void bpf_trace_run##x(struct bpf_raw_tp_link *link, \ 2429 REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \ 2430 { \ 2431 u64 args[x]; \ 2432 REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \ 2433 __bpf_trace_run(link, args); \ 2434 } \ 2435 EXPORT_SYMBOL_GPL(bpf_trace_run##x) 2436 BPF_TRACE_DEFN_x(1); 2437 BPF_TRACE_DEFN_x(2); 2438 BPF_TRACE_DEFN_x(3); 2439 BPF_TRACE_DEFN_x(4); 2440 BPF_TRACE_DEFN_x(5); 2441 BPF_TRACE_DEFN_x(6); 2442 BPF_TRACE_DEFN_x(7); 2443 BPF_TRACE_DEFN_x(8); 2444 BPF_TRACE_DEFN_x(9); 2445 BPF_TRACE_DEFN_x(10); 2446 BPF_TRACE_DEFN_x(11); 2447 BPF_TRACE_DEFN_x(12); 2448 2449 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link) 2450 { 2451 struct tracepoint *tp = btp->tp; 2452 struct bpf_prog *prog = link->link.prog; 2453 2454 /* 2455 * check that the program doesn't access arguments beyond what's 2456 * available in this tracepoint 2457 */ 2458 if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64)) 2459 return -EINVAL; 2460 2461 if (prog->aux->max_tp_access > btp->writable_size) 2462 return -EINVAL; 2463 2464 return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func, link); 2465 } 2466 2467 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link) 2468 { 2469 return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, link); 2470 } 2471 2472 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, 2473 u32 *fd_type, const char **buf, 2474 u64 *probe_offset, u64 *probe_addr, 2475 unsigned long *missed) 2476 { 2477 bool is_tracepoint, is_syscall_tp; 2478 struct bpf_prog *prog; 2479 int flags, err = 0; 2480 2481 prog = event->prog; 2482 if (!prog) 2483 return -ENOENT; 2484 2485 /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */ 2486 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) 2487 return -EOPNOTSUPP; 2488 2489 *prog_id = prog->aux->id; 2490 flags = event->tp_event->flags; 2491 is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT; 2492 is_syscall_tp = is_syscall_trace_event(event->tp_event); 2493 2494 if (is_tracepoint || is_syscall_tp) { 2495 *buf = is_tracepoint ?
event->tp_event->tp->name 2496 : event->tp_event->name; 2497 /* We allow NULL pointer for tracepoint */ 2498 if (fd_type) 2499 *fd_type = BPF_FD_TYPE_TRACEPOINT; 2500 if (probe_offset) 2501 *probe_offset = 0x0; 2502 if (probe_addr) 2503 *probe_addr = 0x0; 2504 } else { 2505 /* kprobe/uprobe */ 2506 err = -EOPNOTSUPP; 2507 #ifdef CONFIG_KPROBE_EVENTS 2508 if (flags & TRACE_EVENT_FL_KPROBE) 2509 err = bpf_get_kprobe_info(event, fd_type, buf, 2510 probe_offset, probe_addr, missed, 2511 event->attr.type == PERF_TYPE_TRACEPOINT); 2512 #endif 2513 #ifdef CONFIG_UPROBE_EVENTS 2514 if (flags & TRACE_EVENT_FL_UPROBE) 2515 err = bpf_get_uprobe_info(event, fd_type, buf, 2516 probe_offset, probe_addr, 2517 event->attr.type == PERF_TYPE_TRACEPOINT); 2518 #endif 2519 } 2520 2521 return err; 2522 } 2523 2524 static int __init send_signal_irq_work_init(void) 2525 { 2526 int cpu; 2527 struct send_signal_irq_work *work; 2528 2529 for_each_possible_cpu(cpu) { 2530 work = per_cpu_ptr(&send_signal_work, cpu); 2531 init_irq_work(&work->irq_work, do_bpf_send_signal); 2532 } 2533 return 0; 2534 } 2535 2536 subsys_initcall(send_signal_irq_work_init); 2537 2538 #ifdef CONFIG_MODULES 2539 static int bpf_event_notify(struct notifier_block *nb, unsigned long op, 2540 void *module) 2541 { 2542 struct bpf_trace_module *btm, *tmp; 2543 struct module *mod = module; 2544 int ret = 0; 2545 2546 if (mod->num_bpf_raw_events == 0 || 2547 (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING)) 2548 goto out; 2549 2550 mutex_lock(&bpf_module_mutex); 2551 2552 switch (op) { 2553 case MODULE_STATE_COMING: 2554 btm = kzalloc(sizeof(*btm), GFP_KERNEL); 2555 if (btm) { 2556 btm->module = module; 2557 list_add(&btm->list, &bpf_trace_modules); 2558 } else { 2559 ret = -ENOMEM; 2560 } 2561 break; 2562 case MODULE_STATE_GOING: 2563 list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) { 2564 if (btm->module == module) { 2565 list_del(&btm->list); 2566 kfree(btm); 2567 break; 2568 } 2569 } 2570 break; 2571 } 2572 2573 mutex_unlock(&bpf_module_mutex); 2574 2575 out: 2576 return notifier_from_errno(ret); 2577 } 2578 2579 static struct notifier_block bpf_module_nb = { 2580 .notifier_call = bpf_event_notify, 2581 }; 2582 2583 static int __init bpf_event_init(void) 2584 { 2585 register_module_notifier(&bpf_module_nb); 2586 return 0; 2587 } 2588 2589 fs_initcall(bpf_event_init); 2590 #endif /* CONFIG_MODULES */ 2591 2592 #ifdef CONFIG_FPROBE 2593 struct bpf_kprobe_multi_link { 2594 struct bpf_link link; 2595 struct fprobe fp; 2596 unsigned long *addrs; 2597 u64 *cookies; 2598 u32 cnt; 2599 u32 mods_cnt; 2600 struct module **mods; 2601 u32 flags; 2602 }; 2603 2604 struct bpf_kprobe_multi_run_ctx { 2605 struct bpf_run_ctx run_ctx; 2606 struct bpf_kprobe_multi_link *link; 2607 unsigned long entry_ip; 2608 }; 2609 2610 struct user_syms { 2611 const char **syms; 2612 char *buf; 2613 }; 2614 2615 static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt) 2616 { 2617 unsigned long __user usymbol; 2618 const char **syms = NULL; 2619 char *buf = NULL, *p; 2620 int err = -ENOMEM; 2621 unsigned int i; 2622 2623 syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL); 2624 if (!syms) 2625 goto error; 2626 2627 buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL); 2628 if (!buf) 2629 goto error; 2630 2631 for (p = buf, i = 0; i < cnt; i++) { 2632 if (__get_user(usymbol, usyms + i)) { 2633 err = -EFAULT; 2634 goto error; 2635 } 2636 err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN); 2637 if (err == 
KSYM_NAME_LEN) 2638 err = -E2BIG; 2639 if (err < 0) 2640 goto error; 2641 syms[i] = p; 2642 p += err + 1; 2643 } 2644 2645 us->syms = syms; 2646 us->buf = buf; 2647 return 0; 2648 2649 error: 2650 if (err) { 2651 kvfree(syms); 2652 kvfree(buf); 2653 } 2654 return err; 2655 } 2656 2657 static void kprobe_multi_put_modules(struct module **mods, u32 cnt) 2658 { 2659 u32 i; 2660 2661 for (i = 0; i < cnt; i++) 2662 module_put(mods[i]); 2663 } 2664 2665 static void free_user_syms(struct user_syms *us) 2666 { 2667 kvfree(us->syms); 2668 kvfree(us->buf); 2669 } 2670 2671 static void bpf_kprobe_multi_link_release(struct bpf_link *link) 2672 { 2673 struct bpf_kprobe_multi_link *kmulti_link; 2674 2675 kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); 2676 unregister_fprobe(&kmulti_link->fp); 2677 kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt); 2678 } 2679 2680 static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link) 2681 { 2682 struct bpf_kprobe_multi_link *kmulti_link; 2683 2684 kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); 2685 kvfree(kmulti_link->addrs); 2686 kvfree(kmulti_link->cookies); 2687 kfree(kmulti_link->mods); 2688 kfree(kmulti_link); 2689 } 2690 2691 static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link, 2692 struct bpf_link_info *info) 2693 { 2694 u64 __user *ucookies = u64_to_user_ptr(info->kprobe_multi.cookies); 2695 u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs); 2696 struct bpf_kprobe_multi_link *kmulti_link; 2697 u32 ucount = info->kprobe_multi.count; 2698 int err = 0, i; 2699 2700 if (!uaddrs ^ !ucount) 2701 return -EINVAL; 2702 if (ucookies && !ucount) 2703 return -EINVAL; 2704 2705 kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); 2706 info->kprobe_multi.count = kmulti_link->cnt; 2707 info->kprobe_multi.flags = kmulti_link->flags; 2708 info->kprobe_multi.missed = kmulti_link->fp.nmissed; 2709 2710 if (!uaddrs) 2711 return 0; 2712 if (ucount < kmulti_link->cnt) 2713 err = -ENOSPC; 2714 else 2715 ucount = kmulti_link->cnt; 2716 2717 if (ucookies) { 2718 if (kmulti_link->cookies) { 2719 if (copy_to_user(ucookies, kmulti_link->cookies, ucount * sizeof(u64))) 2720 return -EFAULT; 2721 } else { 2722 for (i = 0; i < ucount; i++) { 2723 if (put_user(0, ucookies + i)) 2724 return -EFAULT; 2725 } 2726 } 2727 } 2728 2729 if (kallsyms_show_value(current_cred())) { 2730 if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64))) 2731 return -EFAULT; 2732 } else { 2733 for (i = 0; i < ucount; i++) { 2734 if (put_user(0, uaddrs + i)) 2735 return -EFAULT; 2736 } 2737 } 2738 return err; 2739 } 2740 2741 static const struct bpf_link_ops bpf_kprobe_multi_link_lops = { 2742 .release = bpf_kprobe_multi_link_release, 2743 .dealloc_deferred = bpf_kprobe_multi_link_dealloc, 2744 .fill_link_info = bpf_kprobe_multi_link_fill_link_info, 2745 }; 2746 2747 static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv) 2748 { 2749 const struct bpf_kprobe_multi_link *link = priv; 2750 unsigned long *addr_a = a, *addr_b = b; 2751 u64 *cookie_a, *cookie_b; 2752 2753 cookie_a = link->cookies + (addr_a - link->addrs); 2754 cookie_b = link->cookies + (addr_b - link->addrs); 2755 2756 /* swap addr_a/addr_b and cookie_a/cookie_b values */ 2757 swap(*addr_a, *addr_b); 2758 swap(*cookie_a, *cookie_b); 2759 } 2760 2761 static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b) 2762 { 2763 const unsigned long *addr_a = a, *addr_b = b; 2764 2765 if 
(*addr_a == *addr_b) 2766 return 0; 2767 return *addr_a < *addr_b ? -1 : 1; 2768 } 2769 2770 static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv) 2771 { 2772 return bpf_kprobe_multi_addrs_cmp(a, b); 2773 } 2774 2775 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx) 2776 { 2777 struct bpf_kprobe_multi_run_ctx *run_ctx; 2778 struct bpf_kprobe_multi_link *link; 2779 u64 *cookie, entry_ip; 2780 unsigned long *addr; 2781 2782 if (WARN_ON_ONCE(!ctx)) 2783 return 0; 2784 run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx); 2785 link = run_ctx->link; 2786 if (!link->cookies) 2787 return 0; 2788 entry_ip = run_ctx->entry_ip; 2789 addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip), 2790 bpf_kprobe_multi_addrs_cmp); 2791 if (!addr) 2792 return 0; 2793 cookie = link->cookies + (addr - link->addrs); 2794 return *cookie; 2795 } 2796 2797 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx) 2798 { 2799 struct bpf_kprobe_multi_run_ctx *run_ctx; 2800 2801 run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx); 2802 return run_ctx->entry_ip; 2803 } 2804 2805 static int 2806 kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link, 2807 unsigned long entry_ip, struct pt_regs *regs) 2808 { 2809 struct bpf_kprobe_multi_run_ctx run_ctx = { 2810 .link = link, 2811 .entry_ip = entry_ip, 2812 }; 2813 struct bpf_run_ctx *old_run_ctx; 2814 int err; 2815 2816 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { 2817 bpf_prog_inc_misses_counter(link->link.prog); 2818 err = 0; 2819 goto out; 2820 } 2821 2822 migrate_disable(); 2823 rcu_read_lock(); 2824 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); 2825 err = bpf_prog_run(link->link.prog, regs); 2826 bpf_reset_run_ctx(old_run_ctx); 2827 rcu_read_unlock(); 2828 migrate_enable(); 2829 2830 out: 2831 __this_cpu_dec(bpf_prog_active); 2832 return err; 2833 } 2834 2835 static int 2836 kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip, 2837 unsigned long ret_ip, struct pt_regs *regs, 2838 void *data) 2839 { 2840 struct bpf_kprobe_multi_link *link; 2841 2842 link = container_of(fp, struct bpf_kprobe_multi_link, fp); 2843 kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs); 2844 return 0; 2845 } 2846 2847 static void 2848 kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip, 2849 unsigned long ret_ip, struct pt_regs *regs, 2850 void *data) 2851 { 2852 struct bpf_kprobe_multi_link *link; 2853 2854 link = container_of(fp, struct bpf_kprobe_multi_link, fp); 2855 kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs); 2856 } 2857 2858 static int symbols_cmp_r(const void *a, const void *b, const void *priv) 2859 { 2860 const char **str_a = (const char **) a; 2861 const char **str_b = (const char **) b; 2862 2863 return strcmp(*str_a, *str_b); 2864 } 2865 2866 struct multi_symbols_sort { 2867 const char **funcs; 2868 u64 *cookies; 2869 }; 2870 2871 static void symbols_swap_r(void *a, void *b, int size, const void *priv) 2872 { 2873 const struct multi_symbols_sort *data = priv; 2874 const char **name_a = a, **name_b = b; 2875 2876 swap(*name_a, *name_b); 2877 2878 /* If defined, also swap the related cookies.
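 * Cookies are paired with the funcs array by index, so a cookie must follow its symbol whenever sort_r() reorders the entries.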
*/ 2879 if (data->cookies) { 2880 u64 *cookie_a, *cookie_b; 2881 2882 cookie_a = data->cookies + (name_a - data->funcs); 2883 cookie_b = data->cookies + (name_b - data->funcs); 2884 swap(*cookie_a, *cookie_b); 2885 } 2886 } 2887 2888 struct modules_array { 2889 struct module **mods; 2890 int mods_cnt; 2891 int mods_cap; 2892 }; 2893 2894 static int add_module(struct modules_array *arr, struct module *mod) 2895 { 2896 struct module **mods; 2897 2898 if (arr->mods_cnt == arr->mods_cap) { 2899 arr->mods_cap = max(16, arr->mods_cap * 3 / 2); 2900 mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL); 2901 if (!mods) 2902 return -ENOMEM; 2903 arr->mods = mods; 2904 } 2905 2906 arr->mods[arr->mods_cnt] = mod; 2907 arr->mods_cnt++; 2908 return 0; 2909 } 2910 2911 static bool has_module(struct modules_array *arr, struct module *mod) 2912 { 2913 int i; 2914 2915 for (i = arr->mods_cnt - 1; i >= 0; i--) { 2916 if (arr->mods[i] == mod) 2917 return true; 2918 } 2919 return false; 2920 } 2921 2922 static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt) 2923 { 2924 struct modules_array arr = {}; 2925 u32 i; int err = 0; 2926 2927 for (i = 0; i < addrs_cnt; i++) { 2928 struct module *mod; 2929 2930 preempt_disable(); 2931 mod = __module_address(addrs[i]); 2932 /* Either no module, or it's already stored */ 2933 if (!mod || has_module(&arr, mod)) { 2934 preempt_enable(); 2935 continue; 2936 } 2937 if (!try_module_get(mod)) 2938 err = -EINVAL; 2939 preempt_enable(); 2940 if (err) 2941 break; 2942 err = add_module(&arr, mod); 2943 if (err) { 2944 module_put(mod); 2945 break; 2946 } 2947 } 2948 2949 /* We return either err < 0 in case of error, ... */ 2950 if (err) { 2951 kprobe_multi_put_modules(arr.mods, arr.mods_cnt); 2952 kfree(arr.mods); 2953 return err; 2954 } 2955 2956 /* or number of modules found if everything is ok.
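 * The module references taken with try_module_get() are handed to the caller and dropped later through kprobe_multi_put_modules().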
*/ 2957 *mods = arr.mods; 2958 return arr.mods_cnt; 2959 } 2960 2961 static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt) 2962 { 2963 u32 i; 2964 2965 for (i = 0; i < cnt; i++) { 2966 if (!within_error_injection_list(addrs[i])) 2967 return -EINVAL; 2968 } 2969 return 0; 2970 } 2971 2972 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 2973 { 2974 struct bpf_kprobe_multi_link *link = NULL; 2975 struct bpf_link_primer link_primer; 2976 void __user *ucookies; 2977 unsigned long *addrs; 2978 u32 flags, cnt, size; 2979 void __user *uaddrs; 2980 u64 *cookies = NULL; 2981 void __user *usyms; 2982 int err; 2983 2984 /* no support for 32bit archs yet */ 2985 if (sizeof(u64) != sizeof(void *)) 2986 return -EOPNOTSUPP; 2987 2988 if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI) 2989 return -EINVAL; 2990 2991 flags = attr->link_create.kprobe_multi.flags; 2992 if (flags & ~BPF_F_KPROBE_MULTI_RETURN) 2993 return -EINVAL; 2994 2995 uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs); 2996 usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms); 2997 if (!!uaddrs == !!usyms) 2998 return -EINVAL; 2999 3000 cnt = attr->link_create.kprobe_multi.cnt; 3001 if (!cnt) 3002 return -EINVAL; 3003 if (cnt > MAX_KPROBE_MULTI_CNT) 3004 return -E2BIG; 3005 3006 size = cnt * sizeof(*addrs); 3007 addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL); 3008 if (!addrs) 3009 return -ENOMEM; 3010 3011 ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies); 3012 if (ucookies) { 3013 cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL); 3014 if (!cookies) { 3015 err = -ENOMEM; 3016 goto error; 3017 } 3018 if (copy_from_user(cookies, ucookies, size)) { 3019 err = -EFAULT; 3020 goto error; 3021 } 3022 } 3023 3024 if (uaddrs) { 3025 if (copy_from_user(addrs, uaddrs, size)) { 3026 err = -EFAULT; 3027 goto error; 3028 } 3029 } else { 3030 struct multi_symbols_sort data = { 3031 .cookies = cookies, 3032 }; 3033 struct user_syms us; 3034 3035 err = copy_user_syms(&us, usyms, cnt); 3036 if (err) 3037 goto error; 3038 3039 if (cookies) 3040 data.funcs = us.syms; 3041 3042 sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r, 3043 symbols_swap_r, &data); 3044 3045 err = ftrace_lookup_symbols(us.syms, cnt, addrs); 3046 free_user_syms(&us); 3047 if (err) 3048 goto error; 3049 } 3050 3051 if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) { 3052 err = -EINVAL; 3053 goto error; 3054 } 3055 3056 link = kzalloc(sizeof(*link), GFP_KERNEL); 3057 if (!link) { 3058 err = -ENOMEM; 3059 goto error; 3060 } 3061 3062 bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI, 3063 &bpf_kprobe_multi_link_lops, prog); 3064 3065 err = bpf_link_prime(&link->link, &link_primer); 3066 if (err) 3067 goto error; 3068 3069 if (flags & BPF_F_KPROBE_MULTI_RETURN) 3070 link->fp.exit_handler = kprobe_multi_link_exit_handler; 3071 else 3072 link->fp.entry_handler = kprobe_multi_link_handler; 3073 3074 link->addrs = addrs; 3075 link->cookies = cookies; 3076 link->cnt = cnt; 3077 link->flags = flags; 3078 3079 if (cookies) { 3080 /* 3081 * Sorting addresses will trigger sorting cookies as well 3082 * (check bpf_kprobe_multi_cookie_swap). This way we can 3083 * find cookie based on the address in bpf_get_attach_cookie 3084 * helper. 
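 * (bpf_kprobe_multi_cookie() above bsearch()es link->addrs for the entry IP and returns the cookie stored at the matching index.)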
3085 */ 3086 sort_r(addrs, cnt, sizeof(*addrs), 3087 bpf_kprobe_multi_cookie_cmp, 3088 bpf_kprobe_multi_cookie_swap, 3089 link); 3090 } 3091 3092 err = get_modules_for_addrs(&link->mods, addrs, cnt); 3093 if (err < 0) { 3094 bpf_link_cleanup(&link_primer); 3095 return err; 3096 } 3097 link->mods_cnt = err; 3098 3099 err = register_fprobe_ips(&link->fp, addrs, cnt); 3100 if (err) { 3101 kprobe_multi_put_modules(link->mods, link->mods_cnt); 3102 bpf_link_cleanup(&link_primer); 3103 return err; 3104 } 3105 3106 return bpf_link_settle(&link_primer); 3107 3108 error: 3109 kfree(link); 3110 kvfree(addrs); 3111 kvfree(cookies); 3112 return err; 3113 } 3114 #else /* !CONFIG_FPROBE */ 3115 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3116 { 3117 return -EOPNOTSUPP; 3118 } 3119 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx) 3120 { 3121 return 0; 3122 } 3123 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx) 3124 { 3125 return 0; 3126 } 3127 #endif 3128 3129 #ifdef CONFIG_UPROBES 3130 struct bpf_uprobe_multi_link; 3131 3132 struct bpf_uprobe { 3133 struct bpf_uprobe_multi_link *link; 3134 loff_t offset; 3135 unsigned long ref_ctr_offset; 3136 u64 cookie; 3137 struct uprobe_consumer consumer; 3138 }; 3139 3140 struct bpf_uprobe_multi_link { 3141 struct path path; 3142 struct bpf_link link; 3143 u32 cnt; 3144 u32 flags; 3145 struct bpf_uprobe *uprobes; 3146 struct task_struct *task; 3147 }; 3148 3149 struct bpf_uprobe_multi_run_ctx { 3150 struct bpf_run_ctx run_ctx; 3151 unsigned long entry_ip; 3152 struct bpf_uprobe *uprobe; 3153 }; 3154 3155 static void bpf_uprobe_unregister(struct path *path, struct bpf_uprobe *uprobes, 3156 u32 cnt) 3157 { 3158 u32 i; 3159 3160 for (i = 0; i < cnt; i++) { 3161 uprobe_unregister(d_real_inode(path->dentry), uprobes[i].offset, 3162 &uprobes[i].consumer); 3163 } 3164 } 3165 3166 static void bpf_uprobe_multi_link_release(struct bpf_link *link) 3167 { 3168 struct bpf_uprobe_multi_link *umulti_link; 3169 3170 umulti_link = container_of(link, struct bpf_uprobe_multi_link, link); 3171 bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt); 3172 if (umulti_link->task) 3173 put_task_struct(umulti_link->task); 3174 path_put(&umulti_link->path); 3175 } 3176 3177 static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link) 3178 { 3179 struct bpf_uprobe_multi_link *umulti_link; 3180 3181 umulti_link = container_of(link, struct bpf_uprobe_multi_link, link); 3182 kvfree(umulti_link->uprobes); 3183 kfree(umulti_link); 3184 } 3185 3186 static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link, 3187 struct bpf_link_info *info) 3188 { 3189 u64 __user *uref_ctr_offsets = u64_to_user_ptr(info->uprobe_multi.ref_ctr_offsets); 3190 u64 __user *ucookies = u64_to_user_ptr(info->uprobe_multi.cookies); 3191 u64 __user *uoffsets = u64_to_user_ptr(info->uprobe_multi.offsets); 3192 u64 __user *upath = u64_to_user_ptr(info->uprobe_multi.path); 3193 u32 upath_size = info->uprobe_multi.path_size; 3194 struct bpf_uprobe_multi_link *umulti_link; 3195 u32 ucount = info->uprobe_multi.count; 3196 int err = 0, i; 3197 long left; 3198 3199 if (!upath ^ !upath_size) 3200 return -EINVAL; 3201 3202 if ((uoffsets || uref_ctr_offsets || ucookies) && !ucount) 3203 return -EINVAL; 3204 3205 umulti_link = container_of(link, struct bpf_uprobe_multi_link, link); 3206 info->uprobe_multi.count = umulti_link->cnt; 3207 info->uprobe_multi.flags = umulti_link->flags; 3208 info->uprobe_multi.pid = 
umulti_link->task ? 3209 task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0; 3210 3211 if (upath) { 3212 char *p, *buf; 3213 3214 upath_size = min_t(u32, upath_size, PATH_MAX); 3215 3216 buf = kmalloc(upath_size, GFP_KERNEL); 3217 if (!buf) 3218 return -ENOMEM; 3219 p = d_path(&umulti_link->path, buf, upath_size); 3220 if (IS_ERR(p)) { 3221 kfree(buf); 3222 return PTR_ERR(p); 3223 } 3224 upath_size = buf + upath_size - p; 3225 left = copy_to_user(upath, p, upath_size); 3226 kfree(buf); 3227 if (left) 3228 return -EFAULT; 3229 info->uprobe_multi.path_size = upath_size; 3230 } 3231 3232 if (!uoffsets && !ucookies && !uref_ctr_offsets) 3233 return 0; 3234 3235 if (ucount < umulti_link->cnt) 3236 err = -ENOSPC; 3237 else 3238 ucount = umulti_link->cnt; 3239 3240 for (i = 0; i < ucount; i++) { 3241 if (uoffsets && 3242 put_user(umulti_link->uprobes[i].offset, uoffsets + i)) 3243 return -EFAULT; 3244 if (uref_ctr_offsets && 3245 put_user(umulti_link->uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) 3246 return -EFAULT; 3247 if (ucookies && 3248 put_user(umulti_link->uprobes[i].cookie, ucookies + i)) 3249 return -EFAULT; 3250 } 3251 3252 return err; 3253 } 3254 3255 static const struct bpf_link_ops bpf_uprobe_multi_link_lops = { 3256 .release = bpf_uprobe_multi_link_release, 3257 .dealloc_deferred = bpf_uprobe_multi_link_dealloc, 3258 .fill_link_info = bpf_uprobe_multi_link_fill_link_info, 3259 }; 3260 3261 static int uprobe_prog_run(struct bpf_uprobe *uprobe, 3262 unsigned long entry_ip, 3263 struct pt_regs *regs) 3264 { 3265 struct bpf_uprobe_multi_link *link = uprobe->link; 3266 struct bpf_uprobe_multi_run_ctx run_ctx = { 3267 .entry_ip = entry_ip, 3268 .uprobe = uprobe, 3269 }; 3270 struct bpf_prog *prog = link->link.prog; 3271 bool sleepable = prog->sleepable; 3272 struct bpf_run_ctx *old_run_ctx; 3273 int err = 0; 3274 3275 if (link->task && current != link->task) 3276 return 0; 3277 3278 if (sleepable) 3279 rcu_read_lock_trace(); 3280 else 3281 rcu_read_lock(); 3282 3283 migrate_disable(); 3284 3285 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); 3286 err = bpf_prog_run(link->link.prog, regs); 3287 bpf_reset_run_ctx(old_run_ctx); 3288 3289 migrate_enable(); 3290 3291 if (sleepable) 3292 rcu_read_unlock_trace(); 3293 else 3294 rcu_read_unlock(); 3295 return err; 3296 } 3297 3298 static bool 3299 uprobe_multi_link_filter(struct uprobe_consumer *con, enum uprobe_filter_ctx ctx, 3300 struct mm_struct *mm) 3301 { 3302 struct bpf_uprobe *uprobe; 3303 3304 uprobe = container_of(con, struct bpf_uprobe, consumer); 3305 return uprobe->link->task->mm == mm; 3306 } 3307 3308 static int 3309 uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs) 3310 { 3311 struct bpf_uprobe *uprobe; 3312 3313 uprobe = container_of(con, struct bpf_uprobe, consumer); 3314 return uprobe_prog_run(uprobe, instruction_pointer(regs), regs); 3315 } 3316 3317 static int 3318 uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs) 3319 { 3320 struct bpf_uprobe *uprobe; 3321 3322 uprobe = container_of(con, struct bpf_uprobe, consumer); 3323 return uprobe_prog_run(uprobe, func, regs); 3324 } 3325 3326 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx) 3327 { 3328 struct bpf_uprobe_multi_run_ctx *run_ctx; 3329 3330 run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx); 3331 return run_ctx->entry_ip; 3332 } 3333 3334 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx) 3335 { 3336 struct 
bpf_uprobe_multi_run_ctx *run_ctx; 3337 3338 run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx); 3339 return run_ctx->uprobe->cookie; 3340 } 3341 3342 int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3343 { 3344 struct bpf_uprobe_multi_link *link = NULL; 3345 unsigned long __user *uref_ctr_offsets; 3346 struct bpf_link_primer link_primer; 3347 struct bpf_uprobe *uprobes = NULL; 3348 struct task_struct *task = NULL; 3349 unsigned long __user *uoffsets; 3350 u64 __user *ucookies; 3351 void __user *upath; 3352 u32 flags, cnt, i; 3353 struct path path; 3354 char *name; 3355 pid_t pid; 3356 int err; 3357 3358 /* no support for 32bit archs yet */ 3359 if (sizeof(u64) != sizeof(void *)) 3360 return -EOPNOTSUPP; 3361 3362 if (prog->expected_attach_type != BPF_TRACE_UPROBE_MULTI) 3363 return -EINVAL; 3364 3365 flags = attr->link_create.uprobe_multi.flags; 3366 if (flags & ~BPF_F_UPROBE_MULTI_RETURN) 3367 return -EINVAL; 3368 3369 /* 3370 * path, offsets and cnt are mandatory, 3371 * ref_ctr_offsets and cookies are optional 3372 */ 3373 upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path); 3374 uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets); 3375 cnt = attr->link_create.uprobe_multi.cnt; 3376 3377 if (!upath || !uoffsets || !cnt) 3378 return -EINVAL; 3379 if (cnt > MAX_UPROBE_MULTI_CNT) 3380 return -E2BIG; 3381 3382 uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets); 3383 ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies); 3384 3385 name = strndup_user(upath, PATH_MAX); 3386 if (IS_ERR(name)) { 3387 err = PTR_ERR(name); 3388 return err; 3389 } 3390 3391 err = kern_path(name, LOOKUP_FOLLOW, &path); 3392 kfree(name); 3393 if (err) 3394 return err; 3395 3396 if (!d_is_reg(path.dentry)) { 3397 err = -EBADF; 3398 goto error_path_put; 3399 } 3400 3401 pid = attr->link_create.uprobe_multi.pid; 3402 if (pid) { 3403 rcu_read_lock(); 3404 task = get_pid_task(find_vpid(pid), PIDTYPE_PID); 3405 rcu_read_unlock(); 3406 if (!task) { 3407 err = -ESRCH; 3408 goto error_path_put; 3409 } 3410 } 3411 3412 err = -ENOMEM; 3413 3414 link = kzalloc(sizeof(*link), GFP_KERNEL); 3415 uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL); 3416 3417 if (!uprobes || !link) 3418 goto error_free; 3419 3420 for (i = 0; i < cnt; i++) { 3421 if (__get_user(uprobes[i].offset, uoffsets + i)) { 3422 err = -EFAULT; 3423 goto error_free; 3424 } 3425 if (uprobes[i].offset < 0) { 3426 err = -EINVAL; 3427 goto error_free; 3428 } 3429 if (uref_ctr_offsets && __get_user(uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) { 3430 err = -EFAULT; 3431 goto error_free; 3432 } 3433 if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) { 3434 err = -EFAULT; 3435 goto error_free; 3436 } 3437 3438 uprobes[i].link = link; 3439 3440 if (flags & BPF_F_UPROBE_MULTI_RETURN) 3441 uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler; 3442 else 3443 uprobes[i].consumer.handler = uprobe_multi_link_handler; 3444 3445 if (pid) 3446 uprobes[i].consumer.filter = uprobe_multi_link_filter; 3447 } 3448 3449 link->cnt = cnt; 3450 link->uprobes = uprobes; 3451 link->path = path; 3452 link->task = task; 3453 link->flags = flags; 3454 3455 bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI, 3456 &bpf_uprobe_multi_link_lops, prog); 3457 3458 for (i = 0; i < cnt; i++) { 3459 err = uprobe_register_refctr(d_real_inode(link->path.dentry), 3460 uprobes[i].offset, 3461 uprobes[i].ref_ctr_offset, 3462 
&uprobes[i].consumer); 3463 if (err) { 3464 bpf_uprobe_unregister(&path, uprobes, i); 3465 goto error_free; 3466 } 3467 } 3468 3469 err = bpf_link_prime(&link->link, &link_primer); 3470 if (err) 3471 goto error_free; 3472 3473 return bpf_link_settle(&link_primer); 3474 3475 error_free: 3476 kvfree(uprobes); 3477 kfree(link); 3478 if (task) 3479 put_task_struct(task); 3480 error_path_put: 3481 path_put(&path); 3482 return err; 3483 } 3484 #else /* !CONFIG_UPROBES */ 3485 int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3486 { 3487 return -EOPNOTSUPP; 3488 } 3489 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx) 3490 { 3491 return 0; 3492 } 3493 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx) 3494 { 3495 return 0; 3496 } 3497 #endif /* CONFIG_UPROBES */ 3498