// SPDX-License-Identifier: GPL-2.0
/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 */
#define pr_fmt(fmt)	"trace_kprobe: " fmt

#include <linux/bpf-cgroup.h>
#include <linux/security.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>
#include <linux/error-injection.h>

#include <asm/setup.h>	/* for COMMAND_LINE_SIZE */

#include "trace_dynevent.h"
#include "trace_kprobe_selftest.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096

/* Kprobe early definition from command line */
static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;

static int __init set_kprobe_boot_events(char *str)
{
	strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
	disable_tracing_selftest("running kprobe events");

	return 1;
}
__setup("kprobe_event=", set_kprobe_boot_events);
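/*
 * Illustrative example (not taken from this file): on the kernel command
 * line, commas stand in for spaces and semicolons separate events, so
 *
 *	kprobe_event=p,vfs_read,$arg1;r,vfs_write
 *
 * would define one kprobe and one kretprobe event at boot. See
 * setup_boot_kprobe_events() below for the parsing.
 */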
static int trace_kprobe_create(const char *raw_command);
static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_kprobe_release(struct dyn_event *ev);
static bool trace_kprobe_is_busy(struct dyn_event *ev);
static bool trace_kprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations trace_kprobe_ops = {
	.create = trace_kprobe_create,
	.show = trace_kprobe_show,
	.is_busy = trace_kprobe_is_busy,
	.free = trace_kprobe_release,
	.match = trace_kprobe_match,
};

/*
 * Kprobe event core functions
 */
struct trace_kprobe {
	struct dyn_event	devent;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long __percpu *nhit;
	const char		*symbol;	/* symbol name */
	struct trace_probe	tp;
};

static bool is_trace_kprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_kprobe_ops;
}

static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_kprobe, devent);
}

/**
 * for_each_trace_kprobe - iterate over the trace_kprobe list
 * @pos:	the struct trace_kprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_kprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))

static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
	return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
	return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
	return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
	return kprobe_gone(&tk->rp.kp);
}

static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
						 struct module *mod)
{
	int len = strlen(module_name(mod));
	const char *name = trace_kprobe_symbol(tk);

	return strncmp(module_name(mod), name, len) == 0 && name[len] == ':';
}
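/*
 * Illustrative note: module symbols are stored with a "MOD:SYM" prefix, so
 * for a probe on, say, "btrfs:btrfs_sync_file" the check above matches the
 * module name "btrfs" followed by the ':' separator.
 */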
static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
{
	char *p;
	bool ret;

	if (!tk->symbol)
		return false;
	p = strchr(tk->symbol, ':');
	if (!p)
		return true;
	*p = '\0';
	rcu_read_lock_sched();
	ret = !!find_module(tk->symbol);
	rcu_read_unlock_sched();
	*p = ':';

	return ret;
}

static bool trace_kprobe_is_busy(struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);

	return trace_probe_is_enabled(&tk->tp);
}

static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
					    int argc, const char **argv)
{
	char buf[MAX_ARGSTR_LEN + 1];

	if (!argc)
		return true;

	if (!tk->symbol)
		snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		snprintf(buf, sizeof(buf), "%s+%u",
			 trace_kprobe_symbol(tk), tk->rp.kp.offset);
	else
		snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
	if (strcmp(buf, argv[0]))
		return false;
	argc--; argv++;

	return trace_probe_match_command_args(&tk->tp, argc, argv);
}

static bool trace_kprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);

	return (event[0] == '\0' ||
		strcmp(trace_probe_name(&tk->tp), event) == 0) &&
	    (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
	    trace_kprobe_match_command_head(tk, argc, argv);
}

static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
{
	unsigned long nhit = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		nhit += *per_cpu_ptr(tk->nhit, cpu);

	return nhit;
}

static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
{
	return !(list_empty(&tk->rp.kp.list) &&
		 hlist_unhashed(&tk->rp.kp.hlist));
}

/* Return 0 if it fails to find the symbol address */
static nokprobe_inline
unsigned long trace_kprobe_address(struct trace_kprobe *tk)
{
	unsigned long addr;

	if (tk->symbol) {
		addr = (unsigned long)
			kallsyms_lookup_name(trace_kprobe_symbol(tk));
		if (addr)
			addr += tk->rp.kp.offset;
	} else {
		addr = (unsigned long)tk->rp.kp.addr;
	}
	return addr;
}

static nokprobe_inline struct trace_kprobe *
trace_kprobe_primary_from_call(struct trace_event_call *call)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return NULL;

	return container_of(tp, struct trace_kprobe, tp);
}

bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);

	return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
			tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
			tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
}

bool trace_kprobe_error_injectable(struct trace_event_call *call)
{
	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);

	return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
	       false;
}
static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

static void free_trace_kprobe(struct trace_kprobe *tk)
{
	if (tk) {
		trace_probe_cleanup(&tk->tp);
		kfree(tk->symbol);
		free_percpu(tk->nhit);
		kfree(tk);
	}
}

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int maxactive,
					     int nargs, bool is_return)
{
	struct trace_kprobe *tk;
	int ret = -ENOMEM;

	tk = kzalloc(struct_size(tk, tp.args, nargs), GFP_KERNEL);
	if (!tk)
		return ERR_PTR(ret);

	tk->nhit = alloc_percpu(unsigned long);
	if (!tk->nhit)
		goto error;

	if (symbol) {
		tk->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tk->symbol)
			goto error;
		tk->rp.kp.symbol_name = tk->symbol;
		tk->rp.kp.offset = offs;
	} else
		tk->rp.kp.addr = addr;

	if (is_return)
		tk->rp.handler = kretprobe_dispatcher;
	else
		tk->rp.kp.pre_handler = kprobe_dispatcher;

	tk->rp.maxactive = maxactive;
	INIT_HLIST_NODE(&tk->rp.kp.hlist);
	INIT_LIST_HEAD(&tk->rp.kp.list);

	ret = trace_probe_init(&tk->tp, event, group, false);
	if (ret < 0)
		goto error;

	dyn_event_init(&tk->devent, &trace_kprobe_ops);
	return tk;
error:
	free_trace_kprobe(tk);
	return ERR_PTR(ret);
}

static struct trace_kprobe *find_trace_kprobe(const char *event,
					      const char *group)
{
	struct dyn_event *pos;
	struct trace_kprobe *tk;

	for_each_trace_kprobe(tk, pos)
		if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
		    strcmp(trace_probe_group_name(&tk->tp), group) == 0)
			return tk;
	return NULL;
}

static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
{
	int ret = 0;

	if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
		if (trace_kprobe_is_return(tk))
			ret = enable_kretprobe(&tk->rp);
		else
			ret = enable_kprobe(&tk->rp.kp);
	}

	return ret;
}

static void __disable_trace_kprobe(struct trace_probe *tp)
{
	struct trace_kprobe *tk;

	list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
		if (!trace_kprobe_is_registered(tk))
			continue;
		if (trace_kprobe_is_return(tk))
			disable_kretprobe(&tk->rp);
		else
			disable_kprobe(&tk->rp.kp);
	}
}

/*
 * Enable trace_probe.
 * If the file is NULL, enable the "perf" handler; otherwise enable the
 * "trace" handler.
 */
static int enable_trace_kprobe(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;
	struct trace_kprobe *tk;
	bool enabled;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This also changes "enabled" state */
	if (file) {
		ret = trace_probe_add_file(tp, file);
		if (ret)
			return ret;
	} else
		trace_probe_set_flag(tp, TP_FLAG_PROFILE);

	if (enabled)
		return 0;

	list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
		if (trace_kprobe_has_gone(tk))
			continue;
		ret = __enable_trace_kprobe(tk);
		if (ret)
			break;
		enabled = true;
	}

	if (ret) {
		/* Failed to enable one of them. Roll back all */
		if (enabled)
			__disable_trace_kprobe(tp);
		if (file)
			trace_probe_remove_file(tp, file);
		else
			trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
	}

	return ret;
}

/*
 * Disable trace_probe.
 * If the file is NULL, disable the "perf" handler; otherwise disable the
 * "trace" handler.
 */
static int disable_trace_kprobe(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	if (file) {
		if (!trace_probe_get_file_link(tp, file))
			return -ENOENT;
		if (!trace_probe_has_single_file(tp))
			goto out;
		trace_probe_clear_flag(tp, TP_FLAG_TRACE);
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	if (!trace_probe_is_enabled(tp))
		__disable_trace_kprobe(tp);

 out:
	if (file)
		/*
		 * Synchronization is done in the function below. For a perf
		 * event, file == NULL and perf_trace_event_unreg() calls
		 * tracepoint_synchronize_unregister() to ensure synchronized
		 * unregistration of the event, so we don't need to care
		 * about it here.
		 */
		trace_probe_remove_file(tp, file);

	return 0;
}
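/*
 * Illustrative note (not part of the original comments): writing
 *
 *	echo 1 > /sys/kernel/tracing/events/kprobes/EVENT/enable
 *
 * reaches enable_trace_kprobe() above via kprobe_register() with a
 * non-NULL file, while a perf event attached to the same probe takes
 * the file == NULL path.
 */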
#if defined(CONFIG_DYNAMIC_FTRACE) && \
	!defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
static bool __within_notrace_func(unsigned long addr)
{
	unsigned long offset, size;

	if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
		return false;

	/* Get the entry address of the target function */
	addr -= offset;

	/*
	 * Since ftrace_location_range() does inclusive range check, we need
	 * to subtract 1 byte from the end address.
	 */
	return !ftrace_location_range(addr, addr + size - 1);
}

static bool within_notrace_func(struct trace_kprobe *tk)
{
	unsigned long addr = trace_kprobe_address(tk);
	char symname[KSYM_NAME_LEN], *p;

	if (!__within_notrace_func(addr))
		return false;

	/* Check if the address is on a suffixed-symbol */
	if (!lookup_symbol_name(addr, symname)) {
		p = strchr(symname, '.');
		if (!p)
			return true;
		*p = '\0';
		addr = (unsigned long)kprobe_lookup_name(symname, 0);
		if (addr)
			return __within_notrace_func(addr);
	}

	return true;
}
#else
#define within_notrace_func(tk)	(false)
#endif

/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
	int i, ret;

	ret = security_locked_down(LOCKDOWN_KPROBES);
	if (ret)
		return ret;

	if (trace_kprobe_is_registered(tk))
		return -EINVAL;

	if (within_notrace_func(tk)) {
		pr_warn("Could not probe notrace function %s\n",
			trace_kprobe_symbol(tk));
		return -EINVAL;
	}

	for (i = 0; i < tk->tp.nr_args; i++) {
		ret = traceprobe_update_arg(&tk->tp.args[i]);
		if (ret)
			return ret;
	}

	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(&tk->tp))
		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_kprobe_is_return(tk))
		ret = register_kretprobe(&tk->rp);
	else
		ret = register_kprobe(&tk->rp.kp);

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
	if (trace_kprobe_is_registered(tk)) {
		if (trace_kprobe_is_return(tk))
			unregister_kretprobe(&tk->rp);
		else
			unregister_kprobe(&tk->rp.kp);
		/* Cleanup kprobe for reuse and mark it unregistered */
		INIT_HLIST_NODE(&tk->rp.kp.hlist);
		INIT_LIST_HEAD(&tk->rp.kp.list);
		if (tk->rp.kp.symbol_name)
			tk->rp.kp.addr = NULL;
	}
}

/* Unregister a trace_probe and probe_event */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
	/* If other probes are on the event, just unregister kprobe */
	if (trace_probe_has_sibling(&tk->tp))
		goto unreg;

	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(&tk->tp))
		return -EBUSY;

	/* If there's a reference to the dynamic event */
	if (trace_event_dyn_busy(trace_probe_event_call(&tk->tp)))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_kprobe_event(tk))
		return -EBUSY;

unreg:
	__unregister_trace_kprobe(tk);
	dyn_event_remove(&tk->devent);
	trace_probe_unlink(&tk->tp);

	return 0;
}
static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
					 struct trace_kprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	int i;

	list_for_each_entry(orig, &tpe->probes, tp.list) {
		if (strcmp(trace_kprobe_symbol(orig),
			   trace_kprobe_symbol(comp)) ||
		    trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument name and type are same. Let's compare comm.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}

static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
{
	int ret;

	ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
	if (ret) {
		/* Note that argument starts index = 2 */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_kprobe_has_same_kprobe(to, tk)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	ret = trace_probe_append(&tk->tp, &to->tp);
	if (ret)
		return ret;

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
		pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
		ret = 0;
	}

	if (ret)
		trace_probe_unlink(&tk->tp);
	else
		dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));

	return ret;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
	struct trace_kprobe *old_tk;
	int ret;

	mutex_lock(&event_mutex);

	old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
				   trace_probe_group_name(&tk->tp));
	if (old_tk) {
		if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, DIFF_PROBE_TYPE);
			ret = -EEXIST;
		} else {
			ret = append_trace_kprobe(tk, old_tk);
		}
		goto end;
	}

	/* Register new event */
	ret = register_kprobe_event(tk);
	if (ret) {
		if (ret == -EEXIST) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, EVENT_EXIST);
		} else
			pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
		pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
		ret = 0;
	}

	if (ret < 0)
		unregister_kprobe_event(tk);
	else
		dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));

end:
	mutex_unlock(&event_mutex);
	return ret;
}

/* Module notifier callback, checking events on the module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
				       unsigned long val, void *data)
{
	struct module *mod = data;
	struct dyn_event *pos;
	struct trace_kprobe *tk;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&event_mutex);
	for_each_trace_kprobe(tk, pos) {
		if (trace_kprobe_within_module(tk, mod)) {
			/* Don't need to check busy - this should have gone. */
			__unregister_trace_kprobe(tk);
			ret = __register_trace_kprobe(tk);
			if (ret)
				pr_warn("Failed to re-register probe %s on %s: %d\n",
					trace_probe_name(&tk->tp),
					module_name(mod), ret);
		}
	}
	mutex_unlock(&event_mutex);

	return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
	.notifier_call = trace_kprobe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

static int __trace_kprobe_create(int argc, const char *argv[])
{
	/*
	 * Argument syntax:
	 *  - Add kprobe:
	 *      p[:[GRP/][EVENT]] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe:
	 *      r[MAXACTIVE][:[GRP/][EVENT]] [MOD:]KSYM[+0] [FETCHARGS]
	 *    Or
	 *      p[:[GRP/][EVENT]] [MOD:]KSYM[+0]%return [FETCHARGS]
	 *
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  $comm	: fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
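	/*
	 * For example (illustrative commands, not taken from this file):
	 *
	 *	p:myprobe do_sys_open dfd=$arg1 mode=$arg3
	 *	r:myretprobe do_sys_open $retval
	 *
	 * written to the "kprobe_events" tracefs file would create a kprobe
	 * event and a kretprobe event on do_sys_open() ($argN requires
	 * CONFIG_HAVE_FUNCTION_ARG_ACCESS_API).
	 */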
	struct trace_kprobe *tk = NULL;
	int i, len, ret = 0;
	bool is_return = false;
	char *symbol = NULL, *tmp = NULL;
	const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
	enum probe_print_type ptype;
	int maxactive = 0;
	long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];
	char gbuf[MAX_EVENT_NAME_LEN];
	unsigned int flags = TPARG_FL_KERNEL;

	switch (argv[0][0]) {
	case 'r':
		is_return = true;
		break;
	case 'p':
		break;
	default:
		return -ECANCELED;
	}
	if (argc < 2)
		return -ECANCELED;

	trace_probe_log_init("trace_kprobe", argc, argv);

	event = strchr(&argv[0][1], ':');
	if (event)
		event++;

	if (isdigit(argv[0][1])) {
		if (!is_return) {
			trace_probe_log_err(1, MAXACT_NO_KPROBE);
			goto parse_error;
		}
		if (event)
			len = event - &argv[0][1] - 1;
		else
			len = strlen(&argv[0][1]);
		if (len > MAX_EVENT_NAME_LEN - 1) {
			trace_probe_log_err(1, BAD_MAXACT);
			goto parse_error;
		}
		memcpy(buf, &argv[0][1], len);
		buf[len] = '\0';
		ret = kstrtouint(buf, 0, &maxactive);
		if (ret || !maxactive) {
			trace_probe_log_err(1, BAD_MAXACT);
			goto parse_error;
		}
		/*
		 * kretprobe instances are iterated over via a list. The
		 * maximum should stay reasonable.
		 */
		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
			trace_probe_log_err(1, MAXACT_TOO_BIG);
			goto parse_error;
		}
	}

	/*
	 * Try to parse an address. If that fails, try to read the
	 * input as a symbol.
	 */
	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
		trace_probe_log_set_index(1);
		/* Check whether a uprobe event is specified */
		if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
			ret = -ECANCELED;
			goto error;
		}
		/* a symbol specified */
		symbol = kstrdup(argv[1], GFP_KERNEL);
		if (!symbol)
			return -ENOMEM;

		tmp = strchr(symbol, '%');
		if (tmp) {
			if (!strcmp(tmp, "%return")) {
				*tmp = '\0';
				is_return = true;
			} else {
				trace_probe_log_err(tmp - symbol, BAD_ADDR_SUFFIX);
				goto parse_error;
			}
		}

		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret || offset < 0 || offset > UINT_MAX) {
			trace_probe_log_err(0, BAD_PROBE_ADDR);
			goto parse_error;
		}
		if (is_return)
			flags |= TPARG_FL_RETURN;
		ret = kprobe_on_func_entry(NULL, symbol, offset);
		if (ret == 0)
			flags |= TPARG_FL_FENTRY;
		/* Defer the ENOENT case until register kprobe */
		if (ret == -EINVAL && is_return) {
			trace_probe_log_err(0, BAD_RETPROBE);
			goto parse_error;
		}
	}

	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, gbuf,
						  event - argv[0]);
		if (ret)
			goto parse_error;
	}

	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		sanitize_event_name(buf);
		event = buf;
	}

	/* setup a probe */
	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
			       argc - 2, is_return);
	if (IS_ERR(tk)) {
		ret = PTR_ERR(tk);
		/* This must return -ENOMEM, else there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto out;	/* We know tk is not allocated */
	}
	argc -= 2; argv += 2;

	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		trace_probe_log_set_index(i + 2);
		ret = traceprobe_parse_probe_arg(&tk->tp, i, argv[i], flags);
		if (ret)
			goto error;	/* This can be -ENOMEM */
	}

	ptype = is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
	ret = traceprobe_set_print_fmt(&tk->tp, ptype);
	if (ret < 0)
		goto error;

	ret = register_trace_kprobe(tk);
	if (ret) {
		trace_probe_log_set_index(1);
		if (ret == -EILSEQ)
			trace_probe_log_err(0, BAD_INSN_BNDRY);
		else if (ret == -ENOENT)
			trace_probe_log_err(0, BAD_PROBE_ADDR);
		else if (ret != -ENOMEM && ret != -EEXIST)
			trace_probe_log_err(0, FAIL_REG_PROBE);
		goto error;
	}

out:
	trace_probe_log_clear();
	kfree(symbol);
	return ret;

parse_error:
	ret = -EINVAL;
error:
	free_trace_kprobe(tk);
	goto out;
}
static int trace_kprobe_create(const char *raw_command)
{
	return trace_probe_create(raw_command, __trace_kprobe_create);
}

static int create_or_delete_trace_kprobe(const char *raw_command)
{
	int ret;

	if (raw_command[0] == '-')
		return dyn_event_release(raw_command, &trace_kprobe_ops);

	ret = trace_kprobe_create(raw_command);
	return ret == -ECANCELED ? -EINVAL : ret;
}

static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
{
	return create_or_delete_trace_kprobe(cmd->seq.buffer);
}

/**
 * kprobe_event_cmd_init - Initialize a kprobe event command object
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @buf: A pointer to the buffer used to build the command
 * @maxlen: The length of the buffer passed in @buf
 *
 * Initialize a kprobe event command object. Use this before
 * calling any of the other kprobe_event functions.
 */
void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
{
	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE,
			  trace_kprobe_run_command);
}
EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
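/*
 * A minimal usage sketch of the command-generation API (illustrative,
 * with error checks elided; compare kernel/trace/kprobe_event_gen_test.c):
 *
 *	struct dynevent_cmd cmd;
 *	char *buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	int ret;
 *
 *	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *	ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test",
 *					 "do_sys_open", "dfd=%ax");
 *	if (!ret)
 *		ret = kprobe_event_add_fields(&cmd, "flags=%cx",
 *					      "mode=+4($stack)");
 *	if (!ret)
 *		ret = kprobe_event_gen_cmd_end(&cmd);
 */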
/**
 * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @kretprobe: Is this a return probe?
 * @name: The name of the kprobe event
 * @loc: The location of the kprobe event
 * @args: Variable number of arg (pairs), one pair for each field
 *
 * NOTE: Users normally won't want to call this function directly, but
 * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
 * adds a NULL to the end of the arg list.  If this function is used
 * directly, make sure the last arg in the variable arg list is NULL.
 *
 * Generate a kprobe event command to be executed by
 * kprobe_event_gen_cmd_end().  This function can be used to generate the
 * complete command or only the first part of it; in the latter case,
 * kprobe_event_add_fields() can be used to add more fields following this.
 *
 * Unlike synth_event_gen_cmd_start(), @loc must be specified. This
 * function returns -EINVAL if @loc == NULL.
 *
 * Return: 0 if successful, error otherwise.
 */
int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
				 const char *name, const char *loc, ...)
{
	char buf[MAX_EVENT_NAME_LEN];
	struct dynevent_arg arg;
	va_list args;
	int ret;

	if (cmd->type != DYNEVENT_TYPE_KPROBE)
		return -EINVAL;

	if (!loc)
		return -EINVAL;

	if (kretprobe)
		snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
	else
		snprintf(buf, MAX_EVENT_NAME_LEN, "p:kprobes/%s", name);

	ret = dynevent_str_add(cmd, buf);
	if (ret)
		return ret;

	dynevent_arg_init(&arg, 0);
	arg.str = loc;
	ret = dynevent_arg_add(cmd, &arg, NULL);
	if (ret)
		return ret;

	va_start(args, loc);
	for (;;) {
		const char *field;

		field = va_arg(args, const char *);
		if (!field)
			break;

		if (++cmd->n_fields > MAX_TRACE_ARGS) {
			ret = -EINVAL;
			break;
		}

		arg.str = field;
		ret = dynevent_arg_add(cmd, &arg, NULL);
		if (ret)
			break;
	}
	va_end(args);

	return ret;
}
EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);

/**
 * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @args: Variable number of arg (pairs), one pair for each field
 *
 * NOTE: Users normally won't want to call this function directly, but
 * rather use the kprobe_event_add_fields() wrapper, which
 * automatically adds a NULL to the end of the arg list.  If this
 * function is used directly, make sure the last arg in the variable
 * arg list is NULL.
 *
 * Add probe fields to an existing kprobe command using a variable
 * list of args. Fields are added in the same order they're listed.
 *
 * Return: 0 if successful, error otherwise.
 */
int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
{
	struct dynevent_arg arg;
	va_list args;
	int ret = 0;

	if (cmd->type != DYNEVENT_TYPE_KPROBE)
		return -EINVAL;

	dynevent_arg_init(&arg, 0);

	va_start(args, cmd);
	for (;;) {
		const char *field;

		field = va_arg(args, const char *);
		if (!field)
			break;

		if (++cmd->n_fields > MAX_TRACE_ARGS) {
			ret = -EINVAL;
			break;
		}

		arg.str = field;
		ret = dynevent_arg_add(cmd, &arg, NULL);
		if (ret)
			break;
	}
	va_end(args);

	return ret;
}
EXPORT_SYMBOL_GPL(__kprobe_event_add_fields);

/**
 * kprobe_event_delete - Delete a kprobe event
 * @name: The name of the kprobe event to delete
 *
 * Delete a kprobe event with the given @name from kernel code rather
 * than directly from the command line.
 *
 * Return: 0 if successful, error otherwise.
 */
int kprobe_event_delete(const char *name)
{
	char buf[MAX_EVENT_NAME_LEN];

	snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);

	return create_or_delete_trace_kprobe(buf);
}
EXPORT_SYMBOL_GPL(kprobe_event_delete);
static int trace_kprobe_release(struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);
	int ret = unregister_trace_kprobe(tk);

	if (!ret)
		free_trace_kprobe(tk);
	return ret;
}

static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);
	int i;

	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
	if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
		seq_printf(m, "%d", tk->rp.maxactive);
	seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
				trace_probe_name(&tk->tp));

	if (!tk->symbol)
		seq_printf(m, " 0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
			   tk->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_kprobe_symbol(tk));

	for (i = 0; i < tk->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}
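/*
 * For example, a probe defined as "p:myprobe do_sys_open dfd=$arg1" would
 * be shown by the function above roughly as (illustrative):
 *
 *	p:kprobes/myprobe do_sys_open dfd=$arg1
 */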
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_trace_kprobe(ev))
		return 0;

	return trace_kprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&trace_kprobe_ops);
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_or_delete_trace_kprobe);
}

static const struct file_operations kprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_kprobe *tk;
	unsigned long nmissed;

	if (!is_trace_kprobe(ev))
		return 0;

	tk = to_trace_kprobe(ev);
	nmissed = trace_kprobe_is_return(tk) ?
		tk->rp.kp.nmissed + tk->rp.nmissed : tk->rp.kp.nmissed;
	seq_printf(m, "  %-44s %15lu %15lu\n",
		   trace_probe_name(&tk->tp),
		   trace_kprobe_nhit(tk),
		   nmissed);

	return 0;
}
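/*
 * Each "kprobe_profile" line printed above has the shape
 * "EVENT HITS MISSES", e.g. (illustrative):
 *
 *	myprobe                                      5               0
 *
 * For a kretprobe, misses also include instances dropped because
 * maxactive was exceeded.
 */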
static const struct seq_operations profile_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* Kprobe specific fetch functions */

/* Return the length of the string, including the terminating null byte */
static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
	const void __user *uaddr = (__force const void __user *)addr;

	return strnlen_user_nofault(uaddr, MAX_STRING_SIZE);
}

/* Return the length of the string, including the terminating null byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int ret, len = 0;
	u8 c;

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	if (addr < TASK_SIZE)
		return fetch_store_strlen_user(addr);
#endif

	do {
		ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1);
		len++;
	} while (c && ret == 0 && len < MAX_STRING_SIZE);

	return (ret < 0) ? ret : len;
}

/*
 * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
 * with max length and relative data location.
 */
static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
	const void __user *uaddr = (__force const void __user *)addr;
	int maxlen = get_loc_len(*(u32 *)dest);
	void *__dest;
	long ret;

	if (unlikely(!maxlen))
		return -ENOMEM;

	__dest = get_loc_data(dest, base);

	ret = strncpy_from_user_nofault(__dest, uaddr, maxlen);
	if (ret >= 0)
		*(u32 *)dest = make_data_loc(ret, __dest - base);

	return ret;
}

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	int maxlen = get_loc_len(*(u32 *)dest);
	void *__dest;
	long ret;

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	if ((unsigned long)addr < TASK_SIZE)
		return fetch_store_string_user(addr, dest, base);
#endif

	if (unlikely(!maxlen))
		return -ENOMEM;

	__dest = get_loc_data(dest, base);

	/*
	 * Try to get the string again, since the string can be changed while
	 * probing.
	 */
	ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen);
	if (ret >= 0)
		*(u32 *)dest = make_data_loc(ret, __dest - base);

	return ret;
}

static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
	const void __user *uaddr = (__force const void __user *)src;

	return copy_from_user_nofault(dest, uaddr, size);
}

static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	if ((unsigned long)src < TASK_SIZE)
		return probe_mem_read_user(dest, src, size);
#endif
	return copy_from_kernel_nofault(dest, src, size);
}

/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
		   void *base)
{
	struct pt_regs *regs = rec;
	unsigned long val;

retry:
	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = regs_get_kernel_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = kernel_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_COMM:
		val = (unsigned long)current->comm;
		break;
	case FETCH_OP_DATA:
		val = (unsigned long)code->data;
		break;
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
	case FETCH_OP_ARG:
		val = regs_get_kernel_argument(regs, code->param);
		break;
#endif
	case FETCH_NOP_SYMBOL:	/* Ignore a place holder */
		code++;
		goto retry;
	default:
		return -EILSEQ;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)
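/*
 * Illustrative mapping from command syntax to the first-stage ops above
 * (the full decoding lives in trace_probe.c):
 *
 *	$retval		-> FETCH_OP_RETVAL
 *	$stack3		-> FETCH_OP_STACK (code->param == 3)
 *	%ax		-> FETCH_OP_REG
 *	$comm		-> FETCH_OP_COMM
 *
 * Dereferences such as +8($stack) are handled by the second stage,
 * process_fetch_insn_bottom().
 */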
/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
		    struct trace_event_file *trace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
	struct trace_event_buffer fbuffer;
	int dsize;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	dsize = __get_data_size(&tk->tp, regs);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry) + tk->tp.size + dsize);
	if (!entry)
		return;

	fbuffer.regs = regs;
	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	entry->ip = (unsigned long)tk->rp.kp.addr;
	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}

static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct event_file_link *link;

	trace_probe_for_each_link_rcu(link, &tk->tp)
		__kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);

/* Kretprobe handler */
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct trace_event_file *trace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct trace_event_buffer fbuffer;
	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
	int dsize;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	dsize = __get_data_size(&tk->tp, regs);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry) + tk->tp.size + dsize);
	if (!entry)
		return;

	fbuffer.regs = regs;
	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = get_kretprobe_retaddr(ri);
	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}

static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct event_file_link *link;

	trace_probe_for_each_link_rcu(link, &tk->tp)
		__kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);
/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = trace_probe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (WARN_ON_ONCE(!tp))
		goto out;

	trace_seq_printf(s, "%s: (", trace_probe_name(tp));

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	if (print_probe_args(s, tp->args, tp->nr_args,
			     (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}
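/*
 * The printer above yields trace lines of roughly this shape
 * (illustrative):
 *
 *	myprobe: (do_sys_open+0x0/0x230) dfd=0xffffff9c
 *
 * while the kretprobe printer below adds the "RETIP <- FUNC" pair, e.g.
 *
 *	myretprobe: (ksys_open+0x3a/0x60 <- do_sys_open) arg1=0x3
 */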
static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = trace_probe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (WARN_ON_ONCE(!tp))
		goto out;

	trace_seq_printf(s, "%s: (", trace_probe_name(tp));

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_puts(s, " <- ");

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	if (print_probe_args(s, tp->args, tp->nr_args,
			     (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}


static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret;
	struct kprobe_trace_entry_head field;
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(event_call);
	if (WARN_ON_ONCE(!tp))
		return -ENOENT;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);

	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}

static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret;
	struct kretprobe_trace_entry_head field;
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(event_call);
	if (WARN_ON_ONCE(!tp))
		return -ENOENT;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);

	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}
#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static int
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (bpf_prog_array_valid(call)) {
		unsigned long orig_ip = instruction_pointer(regs);
		int ret;

		ret = trace_call_bpf(call, regs);

		/*
		 * We need to check and see if we modified the pc of the
		 * pt_regs, and if so return 1 so that we don't do the
		 * single stepping.
		 */
		if (orig_ip != instruction_pointer(regs))
			return 1;
		if (!ret)
			return 0;
	}

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return 0;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return 0;

	entry->ip = (unsigned long)tk->rp.kp.addr;
	memset(&entry[1], 0, dsize);
	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
	return 0;
}
NOKPROBE_SYMBOL(kprobe_perf_func);

/* Kretprobe profile handler */
static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = get_kretprobe_retaddr(ri);
	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);

int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **symbol, u64 *probe_offset,
			u64 *probe_addr, bool perf_type_tracepoint)
{
	const char *pevent = trace_event_name(event->tp_event);
	const char *group = event->tp_event->class->system;
	struct trace_kprobe *tk;

	if (perf_type_tracepoint)
		tk = find_trace_kprobe(pevent, group);
	else
		tk = trace_kprobe_primary_from_call(event->tp_event);
	if (!tk)
		return -EINVAL;

	*fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
					      : BPF_FD_TYPE_KPROBE;
	if (tk->symbol) {
		*symbol = tk->symbol;
		*probe_offset = tk->rp.kp.offset;
		*probe_addr = 0;
	} else {
		*symbol = NULL;
		*probe_offset = 0;
		*probe_addr = (unsigned long)tk->rp.kp.addr;
	}
	return 0;
}
#endif	/* CONFIG_PERF_EVENTS */
/*
 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
 * lockless, but we can't race with this __init function.
 */
static int kprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_kprobe(event, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_kprobe(event, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_kprobe(event, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_kprobe(event, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
	int ret = 0;

	raw_cpu_inc(*tk->nhit);

	if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
		kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
		ret = kprobe_perf_func(tk, regs);
#endif
	return ret;
}
NOKPROBE_SYMBOL(kprobe_dispatcher);

static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct kretprobe *rp = get_kretprobe(ri);
	struct trace_kprobe *tk;

	/*
	 * There is a small chance that get_kretprobe(ri) returns NULL when
	 * the kretprobe is unregistered on another CPU between the
	 * kretprobe's trampoline_handler and this function.
	 */
	if (unlikely(!rp))
		return 0;

	tk = container_of(rp, struct trace_kprobe, rp);
	raw_cpu_inc(*tk->nhit);

	if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
		kretprobe_trace_func(tk, ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
		kretprobe_perf_func(tk, ri, regs);
#endif
	return 0;	/* We don't tweak kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kretprobe_dispatcher);

static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

static struct trace_event_fields kretprobe_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = kretprobe_event_define_fields },
	{}
};

static struct trace_event_fields kprobe_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = kprobe_event_define_fields },
	{}
};

static inline void init_trace_event_call(struct trace_kprobe *tk)
{
	struct trace_event_call *call = trace_probe_event_call(&tk->tp);

	if (trace_kprobe_is_return(tk)) {
		call->event.funcs = &kretprobe_funcs;
		call->class->fields_array = kretprobe_fields_array;
	} else {
		call->event.funcs = &kprobe_funcs;
		call->class->fields_array = kprobe_fields_array;
	}

	call->flags = TRACE_EVENT_FL_KPROBE;
	call->class->reg = kprobe_register;
}
static int register_kprobe_event(struct trace_kprobe *tk)
{
	init_trace_event_call(tk);

	return trace_probe_register_event_call(&tk->tp);
}

static int unregister_kprobe_event(struct trace_kprobe *tk)
{
	return trace_probe_unregister_event_call(&tk->tp);
}

#ifdef CONFIG_PERF_EVENTS
/* create a trace_kprobe, but don't add it to global lists */
struct trace_event_call *
create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
			  bool is_return)
{
	enum probe_print_type ptype;
	struct trace_kprobe *tk;
	int ret;
	char *event;

	/*
	 * local trace_kprobes are not added to dyn_event, so they are never
	 * searched in find_trace_kprobe(). Therefore, there is no concern of
	 * duplicated name here.
	 */
	event = func ? func : "DUMMY_EVENT";

	tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
				offs, 0 /* maxactive */, 0 /* nargs */,
				is_return);

	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tk));
		return ERR_CAST(tk);
	}

	init_trace_event_call(tk);

	ptype = trace_kprobe_is_return(tk) ?
		PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
	if (traceprobe_set_print_fmt(&tk->tp, ptype) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	ret = __register_trace_kprobe(tk);
	if (ret < 0)
		goto error;

	return trace_probe_event_call(&tk->tp);
error:
	free_trace_kprobe(tk);
	return ERR_PTR(ret);
}

void destroy_local_trace_kprobe(struct trace_event_call *event_call)
{
	struct trace_kprobe *tk;

	tk = trace_kprobe_primary_from_call(event_call);
	if (unlikely(!tk))
		return;

	if (trace_probe_is_enabled(&tk->tp)) {
		WARN_ON(1);
		return;
	}

	__unregister_trace_kprobe(tk);

	free_trace_kprobe(tk);
}
#endif	/* CONFIG_PERF_EVENTS */
static __init void enable_boot_kprobe_events(void)
{
	struct trace_array *tr = top_trace_array();
	struct trace_event_file *file;
	struct trace_kprobe *tk;
	struct dyn_event *pos;

	mutex_lock(&event_mutex);
	for_each_trace_kprobe(tk, pos) {
		list_for_each_entry(file, &tr->events, list)
			if (file->event_call == trace_probe_event_call(&tk->tp))
				trace_event_enable_disable(file, 1, 0);
	}
	mutex_unlock(&event_mutex);
}

static __init void setup_boot_kprobe_events(void)
{
	char *p, *cmd = kprobe_boot_events_buf;
	int ret;

	strreplace(kprobe_boot_events_buf, ',', ' ');

	while (cmd && *cmd != '\0') {
		p = strchr(cmd, ';');
		if (p)
			*p++ = '\0';

		ret = create_or_delete_trace_kprobe(cmd);
		if (ret)
			pr_warn("Failed to add event(%d): %s\n", ret, cmd);

		cmd = p;
	}

	enable_boot_kprobe_events();
}

/*
 * Register dynevent at core_initcall. This allows the kernel to set up
 * kprobe events in postcore_initcall without tracefs.
 */
static __init int init_kprobe_trace_early(void)
{
	int ret;

	ret = dyn_event_register(&trace_kprobe_ops);
	if (ret)
		return ret;

	if (register_module_notifier(&trace_kprobe_module_nb))
		return -EINVAL;

	return 0;
}
core_initcall(init_kprobe_trace_early);

/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	int ret;

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	/* Event list interface */
	trace_create_file("kprobe_events", TRACE_MODE_WRITE,
			  NULL, NULL, &kprobe_events_ops);

	/* Profile interface */
	trace_create_file("kprobe_profile", TRACE_MODE_READ,
			  NULL, NULL, &kprobe_profile_ops);

	setup_boot_kprobe_events();

	return 0;
}
fs_initcall(init_kprobe_trace);

#ifdef CONFIG_FTRACE_STARTUP_TEST
static __init struct trace_event_file *
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
		if (file->event_call == trace_probe_event_call(&tk->tp))
			return file;

	return NULL;
}

/*
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 * stage, so we can do this lockless.
 */
static __init int kprobe_trace_self_tests_init(void)
{
	int ret, warn = 0;
	int (*target)(int, int, int, int, int, int);
	struct trace_kprobe *tk;
	struct trace_event_file *file;

	if (tracing_is_disabled())
		return -ENODEV;

	if (tracing_selftest_disabled)
		return 0;

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = create_or_delete_trace_kprobe("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)");
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(
					trace_probe_event_call(&tk->tp), file);
		}
	}

	ret = create_or_delete_trace_kprobe("r:testprobe2 kprobe_trace_selftest_target $retval");
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting 2nd new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(
					trace_probe_event_call(&tk->tp), file);
		}
	}

	if (warn)
		goto end;

	ret = target(1, 2, 3, 4, 5, 6);

	/*
	 * Not expecting an error here, the check is only to prevent the
	 * optimizer from removing the call to target() as otherwise there
	 * are no side-effects and the call is never performed.
	 */
	if (ret != 21)
		warn++;

	/* Disable trace points before removing them */
	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(
				trace_probe_event_call(&tk->tp), file);
	}

	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting 2nd test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe2 hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(
				trace_probe_event_call(&tk->tp), file);
	}

	ret = create_or_delete_trace_kprobe("-:testprobe");
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

	ret = create_or_delete_trace_kprobe("-:testprobe2");
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

end:
	ret = dyn_events_release_all(&trace_kprobe_ops);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on cleaning up probes.\n");
		warn++;
	}
	/*
	 * Wait for the optimizer work to finish. Otherwise it might fiddle
	 * with probes in already freed __init text.
	 */
	wait_for_kprobe_optimizer();
	if (warn)
		pr_cont("NG: Some tests failed. Please check them.\n");
	else
		pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif