// SPDX-License-Identifier: GPL-2.0
/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 */
#define pr_fmt(fmt)	"trace_kprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>
#include <linux/error-injection.h>
#include <linux/security.h>

#include "trace_dynevent.h"
#include "trace_kprobe_selftest.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096

static int trace_kprobe_create(int argc, const char **argv);
static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_kprobe_release(struct dyn_event *ev);
static bool trace_kprobe_is_busy(struct dyn_event *ev);
static bool trace_kprobe_match(const char *system, const char *event,
			       struct dyn_event *ev);

static struct dyn_event_operations trace_kprobe_ops = {
	.create = trace_kprobe_create,
	.show = trace_kprobe_show,
	.is_busy = trace_kprobe_is_busy,
	.free = trace_kprobe_release,
	.match = trace_kprobe_match,
};

/*
 * Kprobe event core functions
 */
struct trace_kprobe {
	struct dyn_event	devent;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long __percpu *nhit;
	const char		*symbol;	/* symbol name */
	struct trace_probe	tp;
};

static bool is_trace_kprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_kprobe_ops;
}

static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_kprobe, devent);
}

/**
 * for_each_trace_kprobe - iterate over the trace_kprobe list
 * @pos:	the struct trace_kprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_kprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))

#define SIZEOF_TRACE_KPROBE(n)				\
	(offsetof(struct trace_kprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
	return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
	return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
	return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
	return !!(kprobe_gone(&tk->rp.kp));
}

static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
						       struct module *mod)
{
	int len = strlen(mod->name);
	const char *name = trace_kprobe_symbol(tk);
	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}

static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
{
	char *p;
	bool ret;

	if (!tk->symbol)
		return false;
	p = strchr(tk->symbol, ':');
	if (!p)
		return true;
	*p = '\0';
	mutex_lock(&module_mutex);
	ret = !!find_module(tk->symbol);
	mutex_unlock(&module_mutex);
	*p = ':';

	return ret;
}

static bool trace_kprobe_is_busy(struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);

	return trace_probe_is_enabled(&tk->tp);
}

static bool trace_kprobe_match(const char *system, const char *event,
			       struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);

	return strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
	    (!system || strcmp(tk->tp.call.class->system, system) == 0);
}

static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
{
	unsigned long nhit = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		nhit += *per_cpu_ptr(tk->nhit, cpu);

	return nhit;
}

/* Return 0 if it fails to find the symbol address */
static nokprobe_inline
unsigned long trace_kprobe_address(struct trace_kprobe *tk)
{
	unsigned long addr;

	if (tk->symbol) {
		addr = (unsigned long)
			kallsyms_lookup_name(trace_kprobe_symbol(tk));
		if (addr)
			addr += tk->rp.kp.offset;
	} else {
		addr = (unsigned long)tk->rp.kp.addr;
	}
	return addr;
}

bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;

	return kprobe_on_func_entry(tk->rp.kp.addr,
			tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
			tk->rp.kp.addr ? 0 : tk->rp.kp.offset);
}

bool trace_kprobe_error_injectable(struct trace_event_call *call)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;

	return within_error_injection_list(trace_kprobe_address(tk));
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
					       const char *event,
					       void *addr,
					       const char *symbol,
					       unsigned long offs,
					       int maxactive,
					       int nargs, bool is_return)
{
	struct trace_kprobe *tk;
	int ret = -ENOMEM;

	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
	if (!tk)
		return ERR_PTR(ret);

	tk->nhit = alloc_percpu(unsigned long);
	if (!tk->nhit)
		goto error;

	if (symbol) {
		tk->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tk->symbol)
			goto error;
		tk->rp.kp.symbol_name = tk->symbol;
		tk->rp.kp.offset = offs;
	} else
		tk->rp.kp.addr = addr;

	if (is_return)
		tk->rp.handler = kretprobe_dispatcher;
	else
		tk->rp.kp.pre_handler = kprobe_dispatcher;

	tk->rp.maxactive = maxactive;

	if (!event || !group) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.call.class = &tk->tp.class;
	tk->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tk->tp.call.name)
		goto error;

	tk->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tk->tp.class.system)
		goto error;

	dyn_event_init(&tk->devent, &trace_kprobe_ops);
	INIT_LIST_HEAD(&tk->tp.files);
	return tk;
error:
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
	return ERR_PTR(ret);
}

static void free_trace_kprobe(struct trace_kprobe *tk)
{
	int i;

	if (!tk)
		return;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tk->tp.args[i]);

	kfree(tk->tp.call.class->system);
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
}

static struct trace_kprobe *find_trace_kprobe(const char *event,
					      const char *group)
{
	struct dyn_event *pos;
	struct trace_kprobe *tk;

	for_each_trace_kprobe(tk, pos)
		if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
		    strcmp(tk->tp.call.class->system, group) == 0)
			return tk;
	return NULL;
}

static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
{
	int ret = 0;

	if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
		if (trace_kprobe_is_return(tk))
			ret = enable_kretprobe(&tk->rp);
		else
			ret = enable_kprobe(&tk->rp.kp);
	}

	return ret;
}

/*
 * Enable trace_probe.
 * If the file is NULL, enable the "perf" handler; otherwise enable the
 * "trace" handler.
 */
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	struct event_file_link *link;
	int ret = 0;

	if (file) {
		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			ret = -ENOMEM;
			goto out;
		}

		link->file = file;
		list_add_tail_rcu(&link->list, &tk->tp.files);

		tk->tp.flags |= TP_FLAG_TRACE;
		ret = __enable_trace_kprobe(tk);
		if (ret) {
			list_del_rcu(&link->list);
			kfree(link);
			tk->tp.flags &= ~TP_FLAG_TRACE;
		}

	} else {
		tk->tp.flags |= TP_FLAG_PROFILE;
		ret = __enable_trace_kprobe(tk);
		if (ret)
			tk->tp.flags &= ~TP_FLAG_PROFILE;
	}
 out:
	return ret;
}

/*
 * Disable trace_probe.
 * If the file is NULL, disable the "perf" handler; otherwise disable the
 * "trace" handler.
 */
static int
disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	struct event_file_link *link = NULL;
	int wait = 0;
	int ret = 0;

	if (file) {
		link = find_event_file_link(&tk->tp, file);
		if (!link) {
			ret = -EINVAL;
			goto out;
		}

		list_del_rcu(&link->list);
		wait = 1;
		if (!list_empty(&tk->tp.files))
			goto out;

		tk->tp.flags &= ~TP_FLAG_TRACE;
	} else
		tk->tp.flags &= ~TP_FLAG_PROFILE;

	if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			disable_kretprobe(&tk->rp);
		else
			disable_kprobe(&tk->rp.kp);
		wait = 1;
	}

	/*
	 * if tk is not added to any list, it must be a local trace_kprobe
	 * created with perf_event_open. We don't need to wait for these
	 * trace_kprobes
	 */
	if (list_empty(&tk->devent.list))
		wait = 0;
 out:
	if (wait) {
		/*
		 * Synchronize with kprobe_trace_func/kretprobe_trace_func
		 * to ensure disabled (all running handlers are finished).
		 * This is not only for kfree(), but also the caller,
		 * trace_remove_event_call() supposes it for releasing
		 * event_call related objects, which will be accessed in
		 * the kprobe_trace_func/kretprobe_trace_func.
		 */
		synchronize_rcu();
		kfree(link);	/* Ignored if link == NULL */
	}

	return ret;
}

#if defined(CONFIG_KPROBES_ON_FTRACE) && \
	!defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
static bool within_notrace_func(struct trace_kprobe *tk)
{
	unsigned long offset, size, addr;

	addr = trace_kprobe_address(tk);
	if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
		return false;

	/* Get the entry address of the target function */
	addr -= offset;

	/*
	 * Since ftrace_location_range() does inclusive range check, we need
	 * to subtract 1 byte from the end address.
	 */
	return !ftrace_location_range(addr, addr + size - 1);
}
#else
#define within_notrace_func(tk)	(false)
#endif

/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
	int i, ret;

	ret = security_locked_down(LOCKDOWN_KPROBES);
	if (ret)
		return ret;

	if (trace_probe_is_registered(&tk->tp))
		return -EINVAL;

	if (within_notrace_func(tk)) {
		pr_warn("Could not probe notrace function %s\n",
			trace_kprobe_symbol(tk));
		return -EINVAL;
	}

	for (i = 0; i < tk->tp.nr_args; i++) {
		ret = traceprobe_update_arg(&tk->tp.args[i]);
		if (ret)
			return ret;
	}

	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(&tk->tp))
		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_kprobe_is_return(tk))
		ret = register_kretprobe(&tk->rp);
	else
		ret = register_kprobe(&tk->rp.kp);

	if (ret == 0)
		tk->tp.flags |= TP_FLAG_REGISTERED;
	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
	if (trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			unregister_kretprobe(&tk->rp);
		else
			unregister_kprobe(&tk->rp.kp);
		tk->tp.flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse */
		if (tk->rp.kp.symbol_name)
			tk->rp.kp.addr = NULL;
	}
}

/* Unregister a trace_probe and probe_event */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(&tk->tp))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_kprobe_event(tk))
		return -EBUSY;

	__unregister_trace_kprobe(tk);
	dyn_event_remove(&tk->devent);

	return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
	struct trace_kprobe *old_tk;
	int ret;

	mutex_lock(&event_mutex);

	/* Delete old (same name) event if exist */
	old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
				   tk->tp.call.class->system);
	if (old_tk) {
		ret = unregister_trace_kprobe(old_tk);
		if (ret < 0)
			goto end;
		free_trace_kprobe(old_tk);
	}

	/* Register new event */
	ret = register_kprobe_event(tk);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
		pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
		ret = 0;
	}

	if (ret < 0)
		unregister_kprobe_event(tk);
	else
		dyn_event_add(&tk->devent);

end:
	mutex_unlock(&event_mutex);
	return ret;
}

/* Module notifier call back, checking event on the module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct module *mod = data;
	struct dyn_event *pos;
	struct trace_kprobe *tk;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&event_mutex);
	for_each_trace_kprobe(tk, pos) {
		if (trace_kprobe_within_module(tk, mod)) {
			/* Don't need to check busy - this should have gone. */
			__unregister_trace_kprobe(tk);
			ret = __register_trace_kprobe(tk);
			if (ret)
				pr_warn("Failed to re-register probe %s on %s: %d\n",
					trace_event_name(&tk->tp.call),
					mod->name, ret);
		}
	}
	mutex_unlock(&event_mutex);

	return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
	.notifier_call = trace_kprobe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

/* Convert certain expected symbols into '_' when generating event names */
static inline void sanitize_event_name(char *name)
{
	while (*name++ != '\0')
		if (*name == ':' || *name == '.')
			*name = '_';
}

static int trace_kprobe_create(int argc, const char *argv[])
{
	/*
	 * Argument syntax:
	 *  - Add kprobe:
	 *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe:
	 *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  $comm	: fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
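	 *
	 * Illustrative definitions only (the event names "myprobe" and
	 * "myretprobe" are arbitrary; register choices are architecture
	 * dependent), following the syntax above:
	 *  p:myprobe do_sys_open dfd=%ax filename=%dx flags=%cx mode=+4($stack)
	 *  r:myretprobe do_sys_open $retval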
	 */
	struct trace_kprobe *tk = NULL;
	int i, len, ret = 0;
	bool is_return = false;
	char *symbol = NULL, *tmp = NULL;
	const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
	int maxactive = 0;
	long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];
	unsigned int flags = TPARG_FL_KERNEL;

	switch (argv[0][0]) {
	case 'r':
		is_return = true;
		flags |= TPARG_FL_RETURN;
		break;
	case 'p':
		break;
	default:
		return -ECANCELED;
	}
	if (argc < 2)
		return -ECANCELED;

	trace_probe_log_init("trace_kprobe", argc, argv);

	event = strchr(&argv[0][1], ':');
	if (event)
		event++;

	if (isdigit(argv[0][1])) {
		if (!is_return) {
			trace_probe_log_err(1, MAXACT_NO_KPROBE);
			goto parse_error;
		}
		if (event)
			len = event - &argv[0][1] - 1;
		else
			len = strlen(&argv[0][1]);
		if (len > MAX_EVENT_NAME_LEN - 1) {
			trace_probe_log_err(1, BAD_MAXACT);
			goto parse_error;
		}
		memcpy(buf, &argv[0][1], len);
		buf[len] = '\0';
		ret = kstrtouint(buf, 0, &maxactive);
		if (ret || !maxactive) {
			trace_probe_log_err(1, BAD_MAXACT);
			goto parse_error;
		}
		/* kretprobes instances are iterated over via a list. The
		 * maximum should stay reasonable.
		 */
		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
			trace_probe_log_err(1, MAXACT_TOO_BIG);
			goto parse_error;
		}
	}

	/* try to parse an address. if that fails, try to read the
	 * input as a symbol. */
	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
		trace_probe_log_set_index(1);
		/* Check whether uprobe event specified */
		if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
			ret = -ECANCELED;
			goto error;
		}
		/* a symbol specified */
		symbol = kstrdup(argv[1], GFP_KERNEL);
		if (!symbol)
			return -ENOMEM;
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret || offset < 0 || offset > UINT_MAX) {
			trace_probe_log_err(0, BAD_PROBE_ADDR);
			goto parse_error;
		}
		if (kprobe_on_func_entry(NULL, symbol, offset))
			flags |= TPARG_FL_FENTRY;
		if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
			trace_probe_log_err(0, BAD_RETPROBE);
			goto parse_error;
		}
	}

	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, buf,
						  event - argv[0]);
		if (ret)
			goto parse_error;
	} else {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		sanitize_event_name(buf);
		event = buf;
	}

	/* setup a probe */
	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
				argc - 2, is_return);
	if (IS_ERR(tk)) {
		ret = PTR_ERR(tk);
		/* This must return -ENOMEM, else there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto out;	/* We know tk is not allocated */
	}
	argc -= 2; argv += 2;

	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		tmp = kstrdup(argv[i], GFP_KERNEL);
		if (!tmp) {
			ret = -ENOMEM;
			goto error;
		}

		trace_probe_log_set_index(i + 2);
		ret = traceprobe_parse_probe_arg(&tk->tp, i, tmp, flags);
		kfree(tmp);
		if (ret)
			goto error;	/* This can be -ENOMEM */
	}

	ret = register_trace_kprobe(tk);
	if (ret) {
		trace_probe_log_set_index(1);
		if (ret == -EILSEQ)
			trace_probe_log_err(0, BAD_INSN_BNDRY);
		else if (ret == -ENOENT)
			trace_probe_log_err(0, BAD_PROBE_ADDR);
		else if (ret != -ENOMEM)
			trace_probe_log_err(0, FAIL_REG_PROBE);
		goto error;
	}

out:
	trace_probe_log_clear();
	kfree(symbol);
	return ret;

parse_error:
	ret = -EINVAL;
error:
	free_trace_kprobe(tk);
	goto out;
}

static int create_or_delete_trace_kprobe(int argc, char **argv)
{
	int ret;

	if (argv[0][0] == '-')
		return dyn_event_release(argc, argv, &trace_kprobe_ops);

	ret = trace_kprobe_create(argc, (const char **)argv);
	return ret == -ECANCELED ? -EINVAL : ret;
}

static int trace_kprobe_release(struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);
	int ret = unregister_trace_kprobe(tk);

	if (!ret)
		free_trace_kprobe(tk);
	return ret;
}

static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);
	int i;

	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tk->tp.call.class->system,
		   trace_event_name(&tk->tp.call));

	if (!tk->symbol)
		seq_printf(m, " 0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
			   tk->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_kprobe_symbol(tk));

	for (i = 0; i < tk->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_trace_kprobe(ev))
		return 0;

	return trace_kprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
	.start  = dyn_event_seq_start,
	.next   = dyn_event_seq_next,
	.stop   = dyn_event_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&trace_kprobe_ops);
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_or_delete_trace_kprobe);
}

static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_kprobe *tk;

	if (!is_trace_kprobe(ev))
		return 0;

	tk = to_trace_kprobe(ev);
	seq_printf(m, "  %-44s %15lu %15lu\n",
		   trace_event_name(&tk->tp.call),
		   trace_kprobe_nhit(tk),
		   tk->rp.kp.nmissed);

	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start  = dyn_event_seq_start,
	.next   = dyn_event_seq_next,
	.stop   = dyn_event_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/* Kprobe specific fetch functions */

/* Return the length of string -- including null terminal byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int ret, len = 0;
	u8 c;

	do {
		ret = probe_kernel_read(&c, (u8 *)addr + len, 1);
		len++;
	} while (c && ret == 0 && len < MAX_STRING_SIZE);

	return (ret < 0) ? ret : len;
}

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	int maxlen = get_loc_len(*(u32 *)dest);
	u8 *dst = get_loc_data(dest, base);
	long ret;

	if (unlikely(!maxlen))
		return -ENOMEM;
	/*
	 * Try to get string again, since the string can be changed while
	 * probing.
	 */
	ret = strncpy_from_unsafe(dst, (void *)addr, maxlen);

	if (ret >= 0)
		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
	return ret;
}
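
/*
 * For reference: the 32-bit data-location word handled above is assumed to
 * pack the fetched length in its upper 16 bits and the offset of the string
 * data from the entry base in its lower 16 bits; see make_data_loc(),
 * get_loc_len() and get_loc_data() in trace_probe.h.
 */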

static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
	return probe_kernel_read(dest, src, size);
}

/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
		   void *base)
{
	unsigned long val;

retry:
	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = regs_get_kernel_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = kernel_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_COMM:
		val = (unsigned long)current->comm;
		break;
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
	case FETCH_OP_ARG:
		val = regs_get_kernel_argument(regs, code->param);
		break;
#endif
	case FETCH_NOP_SYMBOL:	/* Ignore a place holder */
		code++;
		goto retry;
	default:
		return -EILSEQ;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)

/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
		    struct trace_event_file *trace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tk->rp.kp.addr;
	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);

/* Kretprobe handler */
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct trace_event_file *trace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);

/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	if (print_probe_args(s, tp->args, tp->nr_args,
			     (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}

static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_puts(s, " <- ");

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	if (print_probe_args(s, tp->args, tp->nr_args,
			     (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}


static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret;
	struct kprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);

	return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp);
}

static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret;
	struct kretprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);

	return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp);
}

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static int
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (bpf_prog_array_valid(call)) {
		unsigned long orig_ip = instruction_pointer(regs);
		int ret;

		ret = trace_call_bpf(call, regs);

		/*
		 * We need to check and see if we modified the pc of the
		 * pt_regs, and if so return 1 so that we don't do the
		 * single stepping.
		 */
		if (orig_ip != instruction_pointer(regs))
			return 1;
		if (!ret)
			return 0;
	}

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return 0;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return 0;

	entry->ip = (unsigned long)tk->rp.kp.addr;
	memset(&entry[1], 0, dsize);
	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
	return 0;
}
NOKPROBE_SYMBOL(kprobe_perf_func);

/* Kretprobe profile handler */
static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);

int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **symbol, u64 *probe_offset,
			u64 *probe_addr, bool perf_type_tracepoint)
{
	const char *pevent = trace_event_name(event->tp_event);
	const char *group = event->tp_event->class->system;
	struct trace_kprobe *tk;

	if (perf_type_tracepoint)
		tk = find_trace_kprobe(pevent, group);
	else
		tk = event->tp_event->data;
	if (!tk)
		return -EINVAL;

	*fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
					      : BPF_FD_TYPE_KPROBE;
	if (tk->symbol) {
		*symbol = tk->symbol;
		*probe_offset = tk->rp.kp.offset;
		*probe_addr = 0;
	} else {
		*symbol = NULL;
		*probe_offset = 0;
		*probe_addr = (unsigned long)tk->rp.kp.addr;
	}
	return 0;
}
#endif	/* CONFIG_PERF_EVENTS */

/*
 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
 * lockless, but we can't race with this __init function.
 */
static int kprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_kprobe(tk, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_kprobe(tk, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
	int ret = 0;

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		ret = kprobe_perf_func(tk, regs);
#endif
	return ret;
}
NOKPROBE_SYMBOL(kprobe_dispatcher);

static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kretprobe_trace_func(tk, ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(tk, ri, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kretprobe_dispatcher);

static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

static inline void init_trace_event_call(struct trace_kprobe *tk,
					 struct trace_event_call *call)
{
	INIT_LIST_HEAD(&call->class->fields);
	if (trace_kprobe_is_return(tk)) {
		call->event.funcs = &kretprobe_funcs;
		call->class->define_fields = kretprobe_event_define_fields;
	} else {
		call->event.funcs = &kprobe_funcs;
		call->class->define_fields = kprobe_event_define_fields;
	}

	call->flags = TRACE_EVENT_FL_KPROBE;
	call->class->reg = kprobe_register;
	call->data = tk;
}

static int register_kprobe_event(struct trace_kprobe *tk)
{
	struct trace_event_call *call = &tk->tp.call;
	int ret = 0;

	init_trace_event_call(tk, call);

	if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
		return -ENOMEM;
	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}
	return ret;
}

static int unregister_kprobe_event(struct trace_kprobe *tk)
{
	int ret;

	/* tp->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tk->tp.call);
	if (!ret)
		kfree(tk->tp.call.print_fmt);
	return ret;
}

#ifdef CONFIG_PERF_EVENTS
/* create a trace_kprobe, but don't add it to global lists */
struct trace_event_call *
create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
			  bool is_return)
{
	struct trace_kprobe *tk;
	int ret;
	char *event;

	/*
	 * local trace_kprobes are not added to dyn_event, so they are never
	 * searched in find_trace_kprobe(). Therefore, there is no concern of
	 * duplicated name here.
	 */
	event = func ? func : "DUMMY_EVENT";

	tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
				offs, 0 /* maxactive */, 0 /* nargs */,
				is_return);

	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tk));
		return ERR_CAST(tk);
	}

	init_trace_event_call(tk, &tk->tp.call);

	if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	ret = __register_trace_kprobe(tk);
	if (ret < 0) {
		kfree(tk->tp.call.print_fmt);
		goto error;
	}

	return &tk->tp.call;
error:
	free_trace_kprobe(tk);
	return ERR_PTR(ret);
}

void destroy_local_trace_kprobe(struct trace_event_call *event_call)
{
	struct trace_kprobe *tk;

	tk = container_of(event_call, struct trace_kprobe, tp.call);

	if (trace_probe_is_enabled(&tk->tp)) {
		WARN_ON(1);
		return;
	}

	__unregister_trace_kprobe(tk);

	kfree(tk->tp.call.print_fmt);
	free_trace_kprobe(tk);
}
#endif /* CONFIG_PERF_EVENTS */

/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;
	int ret;

	ret = dyn_event_register(&trace_kprobe_ops);
	if (ret)
		return ret;

	if (register_module_notifier(&trace_kprobe_module_nb))
		return -EINVAL;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_events' entry\n");

	/* Profile interface */
	entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST
static __init struct trace_event_file *
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
		if (file->event_call == &tk->tp.call)
			return file;

	return NULL;
}

/*
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 * stage, we can do this lockless.
 */
static __init int kprobe_trace_self_tests_init(void)
{
	int ret, warn = 0;
	int (*target)(int, int, int, int, int, int);
	struct trace_kprobe *tk;
	struct trace_event_file *file;

	if (tracing_is_disabled())
		return -ENODEV;

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = trace_run_command("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)",
				create_or_delete_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target $retval",
				create_or_delete_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting 2nd new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	if (warn)
		goto end;

	ret = target(1, 2, 3, 4, 5, 6);

	/*
	 * Not expecting an error here, the check is only to prevent the
	 * optimizer from removing the call to target() as otherwise there
	 * are no side-effects and the call is never performed.
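	 * (The selftest target is expected to simply sum its six arguments,
	 * hence 1 + 2 + 3 + 4 + 5 + 6 == 21; see trace_kprobe_selftest.h.)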
	 */
	if (ret != 21)
		warn++;

	/* Disable trace points before removing it */
	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting 2nd test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe2 hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	ret = trace_run_command("-:testprobe", create_or_delete_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

	ret = trace_run_command("-:testprobe2", create_or_delete_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

end:
	ret = dyn_events_release_all(&trace_kprobe_ops);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on cleaning up probes.\n");
		warn++;
	}
	/*
	 * Wait for the optimizer work to finish. Otherwise it might fiddle
	 * with probes in already freed __init text.
	 */
	wait_for_kprobe_optimizer();
	if (warn)
		pr_cont("NG: Some tests are failed. Please check them.\n");
	else
		pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif