/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/uaccess.h>

#include "trace_probe.h"

#define KPROBE_EVENT_SYSTEM "kprobes"

/**
 * Kprobe event core functions
 */
struct trace_kprobe {
	struct list_head	list;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long __percpu *nhit;
	const char		*symbol;	/* symbol name */
	struct trace_probe	tp;
};

#define SIZEOF_TRACE_KPROBE(n)				\
	(offsetof(struct trace_kprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))


static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
	return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
	return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
	return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
	return !!(kprobe_gone(&tk->rp.kp));
}

static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
						       struct module *mod)
{
	int len = strlen(mod->name);
	const char *name = trace_kprobe_symbol(tk);
	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}

static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
{
	return !!strchr(trace_kprobe_symbol(tk), ':');
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

/* Memory fetching by symbol */
struct symbol_cache {
	char		*symbol;
	long		offset;
	unsigned long	addr;
};

unsigned long update_symbol_cache(struct symbol_cache *sc)
{
	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);

	if (sc->addr)
		sc->addr += sc->offset;

	return sc->addr;
}

void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}

struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
	struct symbol_cache *sc;

	if (!sym || strlen(sym) == 0)
		return NULL;

	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->symbol = kstrdup(sym, GFP_KERNEL);
	if (!sc->symbol) {
		kfree(sc);
		return NULL;
	}
	sc->offset = offset;
	update_symbol_cache(sc);

	return sc;
}

/*
 * Kprobes-specific fetch functions
 */
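
/*
 * Each DEFINE_FETCH_<method>(type) macro below expands, through
 * FETCH_FUNC_NAME() (see trace_probe.h), into a fetch_<method>_<type>()
 * helper; DEFINE_BASIC_FETCH_FUNCS() instantiates it for the basic
 * integer widths, and the resulting helpers are collected in
 * kprobes_fetch_type_table further down.
 */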
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					  void *offset, void *dest)	\
{									\
	*(type *)dest = (type)regs_get_kernel_stack_nth(regs,		\
				(unsigned int)((unsigned long)offset)); \
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));

DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL

#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,	\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	if (probe_kernel_address(addr, retval))				\
		*(type *)dest = 0;					\
	else								\
		*(type *)dest = retval;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));

DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	int maxlen = get_rloc_len(*(u32 *)dest);
	u8 *dst = get_rloc_data(dest);
	long ret;

	if (!maxlen)
		return;

	/*
	 * Try to get string again, since the string can be changed while
	 * probing.
	 */
	ret = strncpy_from_unsafe(dst, addr, maxlen);

	if (ret < 0) {	/* Failed to fetch string */
		dst[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest));
	}
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));

/* Return the length of string -- including null terminal byte */
static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	mm_segment_t old_fs;
	int ret, len = 0;
	u8 c;

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	pagefault_disable();

	do {
		ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
		len++;
	} while (c && ret == 0 && len < MAX_STRING_SIZE);

	pagefault_enable();
	set_fs(old_fs);

	if (ret < 0)	/* Failed to check the length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));

#define DEFINE_FETCH_symbol(type)					\
void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
{									\
	struct symbol_cache *sc = data;					\
	if (sc->addr)							\
		fetch_memory_##type(regs, (void *)sc->addr, dest);	\
	else								\
		*(type *)dest = 0;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));

DEFINE_BASIC_FETCH_FUNCS(symbol)
DEFINE_FETCH_symbol(string)
DEFINE_FETCH_symbol(string_size)

/* kprobes don't support file_offset fetch methods */
#define fetch_file_offset_u8		NULL
#define fetch_file_offset_u16		NULL
#define fetch_file_offset_u32		NULL
#define fetch_file_offset_u64		NULL
#define fetch_file_offset_string	NULL
#define fetch_file_offset_string_size	NULL

/* Fetch type information table */
static const struct fetch_type kprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),

	ASSIGN_FETCH_TYPE_END
};

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
					       const char *event,
					       void *addr,
					       const char *symbol,
					       unsigned long offs,
					       int nargs, bool is_return)
{
	struct trace_kprobe *tk;
	int ret = -ENOMEM;

	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
	if (!tk)
		return ERR_PTR(ret);

	tk->nhit = alloc_percpu(unsigned long);
	if (!tk->nhit)
		goto error;

	if (symbol) {
		tk->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tk->symbol)
			goto error;
		tk->rp.kp.symbol_name = tk->symbol;
		tk->rp.kp.offset = offs;
	} else
		tk->rp.kp.addr = addr;

	if (is_return)
		tk->rp.handler = kretprobe_dispatcher;
	else
		tk->rp.kp.pre_handler = kprobe_dispatcher;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.call.class = &tk->tp.class;
	tk->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tk->tp.call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tk->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tk->list);
	INIT_LIST_HEAD(&tk->tp.files);
	return tk;
error:
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
	return ERR_PTR(ret);
}

static void free_trace_kprobe(struct trace_kprobe *tk)
{
	int i;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tk->tp.args[i]);

	kfree(tk->tp.call.class->system);
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
}

static struct trace_kprobe *find_trace_kprobe(const char *event,
					      const char *group)
{
	struct trace_kprobe *tk;

	list_for_each_entry(tk, &probe_list, list)
		if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
		    strcmp(tk->tp.call.class->system, group) == 0)
			return tk;
	return NULL;
}

/*
 * Enable trace_probe:
 * if file is NULL, enable the "perf" handler; otherwise enable the
 * "trace" handler for the given trace_event_file.
 */
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	int ret = 0;

	if (file) {
		struct event_file_link *link;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			ret = -ENOMEM;
			goto out;
		}

		link->file = file;
		list_add_tail_rcu(&link->list, &tk->tp.files);

		tk->tp.flags |= TP_FLAG_TRACE;
	} else
		tk->tp.flags |= TP_FLAG_PROFILE;

	if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
		if (trace_kprobe_is_return(tk))
			ret = enable_kretprobe(&tk->rp);
		else
			ret = enable_kprobe(&tk->rp.kp);
	}
 out:
	return ret;
}

/*
 * Disable trace_probe:
 * if file is NULL, disable the "perf" handler; otherwise disable the
 * "trace" handler for the given trace_event_file.
 */
static int
disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	struct event_file_link *link = NULL;
	int wait = 0;
	int ret = 0;

	if (file) {
		link = find_event_file_link(&tk->tp, file);
		if (!link) {
			ret = -EINVAL;
			goto out;
		}

		list_del_rcu(&link->list);
		wait = 1;
		if (!list_empty(&tk->tp.files))
			goto out;

		tk->tp.flags &= ~TP_FLAG_TRACE;
	} else
		tk->tp.flags &= ~TP_FLAG_PROFILE;

	if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			disable_kretprobe(&tk->rp);
		else
			disable_kprobe(&tk->rp.kp);
		wait = 1;
	}
 out:
	if (wait) {
		/*
		 * Synchronize with kprobe_trace_func/kretprobe_trace_func
		 * to ensure the probe is disabled (all running handlers
		 * have finished). This is not only for kfree(); the caller,
		 * trace_remove_event_call(), also relies on it for releasing
		 * event_call related objects, which will be accessed in
		 * kprobe_trace_func/kretprobe_trace_func.
		 */
		synchronize_sched();
		kfree(link);	/* Ignored if link == NULL */
	}

	return ret;
}

/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
	int i, ret;

	if (trace_probe_is_registered(&tk->tp))
		return -EINVAL;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_update_arg(&tk->tp.args[i]);

	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(&tk->tp))
		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_kprobe_is_return(tk))
		ret = register_kretprobe(&tk->rp);
	else
		ret = register_kprobe(&tk->rp.kp);

	if (ret == 0)
		tk->tp.flags |= TP_FLAG_REGISTERED;
	else {
		pr_warn("Could not insert probe at %s+%lu: %d\n",
			trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
		if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
			pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
			ret = 0;
		} else if (ret == -EILSEQ) {
			pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
				tk->rp.kp.addr);
			ret = -EINVAL;
		}
	}

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
	if (trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			unregister_kretprobe(&tk->rp);
		else
			unregister_kprobe(&tk->rp.kp);
		tk->tp.flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse */
		if (tk->rp.kp.symbol_name)
			tk->rp.kp.addr = NULL;
	}
}

/* Unregister a trace_probe and probe_event: must be called with probe_lock held */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
	/* Enabled event cannot be unregistered */
	if (trace_probe_is_enabled(&tk->tp))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_kprobe_event(tk))
		return -EBUSY;

	__unregister_trace_kprobe(tk);
	list_del(&tk->list);

	return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
	struct trace_kprobe *old_tk;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete old (same name) event if it exists */
	old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
				   tk->tp.call.class->system);
	if (old_tk) {
		ret = unregister_trace_kprobe(old_tk);
		if (ret < 0)
			goto end;
		free_trace_kprobe(old_tk);
	}

	/* Register new event */
	ret = register_kprobe_event(tk);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret < 0)
		unregister_kprobe_event(tk);
	else
		list_add_tail(&tk->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}

/* Module notifier callback, checking events on the module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct module *mod = data;
	struct trace_kprobe *tk;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&probe_lock);
	list_for_each_entry(tk, &probe_list, list) {
		if (trace_kprobe_within_module(tk, mod)) {
			/* Don't need to check busy - this should have gone. */
			__unregister_trace_kprobe(tk);
			ret = __register_trace_kprobe(tk);
			if (ret)
				pr_warn("Failed to re-register probe %s on %s: %d\n",
					trace_event_name(&tk->tp.call),
					mod->name, ret);
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
	.notifier_call = trace_kprobe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

static int create_trace_kprobe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
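	/*
	 * For example (illustrative command strings only; the register
	 * names below are x86-specific, see
	 * Documentation/trace/kprobetrace.txt for details):
	 *   p:myprobe do_sys_open dfd=%ax filename=%dx flags=%cx mode=+4($stack)
	 *   r:myretprobe do_sys_open $retval
	 *   -:myprobe
	 */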
	struct trace_kprobe *tk;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	char *arg;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tk = find_trace_kprobe(event, group);
		if (!tk) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_kprobe(tk);
		if (ret == 0)
			free_trace_kprobe(tk);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		if (is_return) {
			pr_info("Return probe point must be a symbol.\n");
			return -EINVAL;
		}
		/* an address specified */
		ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
		if (ret) {
			pr_info("Failed to parse address.\n");
			return ret;
		}
	} else {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse symbol.\n");
			return ret;
		}
		if (offset && is_return) {
			pr_info("Return probe must be used without offset.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, argc,
				is_return);
	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tk));
		return PTR_ERR(tk);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		/* Increment count for freeing args in error case */
		tk->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name,
						   tk->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
						 is_return, true,
						 kprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_kprobe(tk);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_kprobe(tk);
	return ret;
}

static int release_all_trace_kprobes(void)
{
	struct trace_kprobe *tk;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tk, &probe_list, list)
		if (trace_probe_is_enabled(&tk->tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tk = list_entry(probe_list.next, struct trace_kprobe, list);
		ret = unregister_trace_kprobe(tk);
		if (ret)
			goto end;
		free_trace_kprobe(tk);
	}

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;
	int i;

	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tk->tp.call.class->system,
		   trace_event_name(&tk->tp.call));

	if (!tk->symbol)
		seq_printf(m, " 0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
			   tk->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_kprobe_symbol(tk));

	for (i = 0; i < tk->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_kprobes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos,
				       create_trace_kprobe);
}

static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write          = probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;
	unsigned long nhit = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		nhit += *per_cpu_ptr(tk->nhit, cpu);

	seq_printf(m, " %-44s %15lu %15lu\n",
		   trace_event_name(&tk->tp.call), nhit,
		   tk->rp.kp.nmissed);

	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
		    struct trace_event_file *trace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tk->rp.kp.addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);

/* Kretprobe handler */
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct trace_event_file *trace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);

/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}

static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_puts(s, " <- ");

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}


static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}
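
/*
 * Note: the perf ("profile") handlers below are compiled only when
 * CONFIG_PERF_EVENTS is enabled; the kprobe/kretprobe dispatchers call
 * them when TP_FLAG_PROFILE is set on a probe, in addition to (or
 * instead of) the ftrace ("trace") handlers above.
 */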
#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static void
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct bpf_prog *prog = call->prog;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (prog && !trace_call_bpf(prog, regs))
		return;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = (unsigned long)tk->rp.kp.addr;
	memset(&entry[1], 0, dsize);
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
}
NOKPROBE_SYMBOL(kprobe_perf_func);

/* Kretprobe profile handler */
static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct bpf_prog *prog = call->prog;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (prog && !trace_call_bpf(prog, regs))
		return;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);
#endif	/* CONFIG_PERF_EVENTS */

/*
 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
 * lockless, but we can't race with this __init function.
 */
static int kprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_kprobe(tk, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_kprobe(tk, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kprobe_perf_func(tk, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kprobe_dispatcher);

static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kretprobe_trace_func(tk, ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(tk, ri, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kretprobe_dispatcher);

static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

static int register_kprobe_event(struct trace_kprobe *tk)
{
	struct trace_event_call *call = &tk->tp.call;
	int ret;

	/* Initialize trace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	if (trace_kprobe_is_return(tk)) {
		call->event.funcs = &kretprobe_funcs;
		call->class->define_fields = kretprobe_event_define_fields;
	} else {
		call->event.funcs = &kprobe_funcs;
		call->class->define_fields = kprobe_event_define_fields;
	}
	if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
		return -ENOMEM;
	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	call->flags = TRACE_EVENT_FL_KPROBE;
	call->class->reg = kprobe_register;
	call->data = tk;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}
	return ret;
}

static int unregister_kprobe_event(struct trace_kprobe *tk)
{
	int ret;

	/* tp->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tk->tp.call);
	if (!ret)
		kfree(tk->tp.call.print_fmt);
	return ret;
}

/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	if (register_module_notifier(&trace_kprobe_module_nb))
		return -EINVAL;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_events' entry\n");

	/* Profile interface */
	entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST

/*
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table.
 */
static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
					       int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

static struct trace_event_file *
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
		if (file->event_call == &tk->tp.call)
			return file;

	return NULL;
}

/*
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 * stage, so we can do this lockless.
 */
static __init int kprobe_trace_self_tests_init(void)
{
	int ret, warn = 0;
	int (*target)(int, int, int, int, int, int);
	struct trace_kprobe *tk;
	struct trace_event_file *file;

	if (tracing_is_disabled())
		return -ENODEV;

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
				 "$stack $stack0 +0($stack)",
				 create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
				 "$retval", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting 2nd new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	if (warn)
		goto end;

	ret = target(1, 2, 3, 4, 5, 6);

	/* Disable trace points before removing them */
	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting test probe.\n");
		warn++;
	} else {
		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting 2nd test probe.\n");
		warn++;
	} else {
		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	ret = traceprobe_command("-:testprobe", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

	ret = traceprobe_command("-:testprobe2", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

end:
	release_all_trace_kprobes();
	if (warn)
		pr_cont("NG: Some tests failed. Please check them.\n");
	else
		pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif