/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/debugfs.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/ptrace.h>
#include <linux/perf_event.h>

#include "trace.h"
#include "trace_output.h"

#define MAX_TRACE_ARGS 128
#define MAX_ARGSTR_LEN 63
#define MAX_EVENT_NAME_LEN 64
#define KPROBE_EVENT_SYSTEM "kprobes"

/* Reserved field names */
#define FIELD_STRING_IP "__probe_ip"
#define FIELD_STRING_NARGS "__probe_nargs"
#define FIELD_STRING_RETIP "__probe_ret_ip"
#define FIELD_STRING_FUNC "__probe_func"

const char *reserved_field_names[] = {
	"common_type",
	"common_flags",
	"common_preempt_count",
	"common_pid",
	"common_tgid",
	"common_lock_depth",
	FIELD_STRING_IP,
	FIELD_STRING_NARGS,
	FIELD_STRING_RETIP,
	FIELD_STRING_FUNC,
};

struct fetch_func {
	unsigned long (*func)(struct pt_regs *, void *);
	void *data;
};

static __kprobes unsigned long call_fetch(struct fetch_func *f,
					  struct pt_regs *regs)
{
	return f->func(regs, f->data);
}

/* fetch handlers */
static __kprobes unsigned long fetch_register(struct pt_regs *regs,
					      void *offset)
{
	return regs_get_register(regs, (unsigned int)((unsigned long)offset));
}

static __kprobes unsigned long fetch_stack(struct pt_regs *regs,
					   void *num)
{
	return regs_get_kernel_stack_nth(regs,
					 (unsigned int)((unsigned long)num));
}

static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr)
{
	unsigned long retval;

	if (probe_kernel_address(addr, retval))
		return 0;
	return retval;
}

static __kprobes unsigned long fetch_argument(struct pt_regs *regs, void *num)
{
	return regs_get_argument_nth(regs, (unsigned int)((unsigned long)num));
}

static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs,
					      void *dummy)
{
	return regs_return_value(regs);
}

static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs,
						   void *dummy)
{
	return kernel_stack_pointer(regs);
}
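/*
 * Illustrative example (not from the original source): the fetch-arg
 * parser later in this file pairs one of the handlers above with a
 * pre-decoded argument.  For the fetch-arg string "$arg2" it fills in
 *
 *	ff->func = fetch_argument;
 *	ff->data = (void *)2UL;		(the N of $argN, decoded once)
 *
 * so call_fetch(ff, regs) resolves the value at probe time without
 * re-parsing the string.
 */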
/* Memory fetching by symbol */
struct symbol_cache {
	char *symbol;
	long offset;
	unsigned long addr;
};

static unsigned long update_symbol_cache(struct symbol_cache *sc)
{
	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
	if (sc->addr)
		sc->addr += sc->offset;
	return sc->addr;
}

static void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}

static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
	struct symbol_cache *sc;

	if (!sym || strlen(sym) == 0)
		return NULL;
	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->symbol = kstrdup(sym, GFP_KERNEL);
	if (!sc->symbol) {
		kfree(sc);
		return NULL;
	}
	sc->offset = offset;

	update_symbol_cache(sc);
	return sc;
}

static __kprobes unsigned long fetch_symbol(struct pt_regs *regs, void *data)
{
	struct symbol_cache *sc = data;

	if (sc->addr)
		return fetch_memory(regs, (void *)sc->addr);
	else
		return 0;
}

/* Special indirect memory access interface */
struct indirect_fetch_data {
	struct fetch_func orig;
	long offset;
};

static __kprobes unsigned long fetch_indirect(struct pt_regs *regs, void *data)
{
	struct indirect_fetch_data *ind = data;
	unsigned long addr;

	addr = call_fetch(&ind->orig, regs);
	if (addr) {
		addr += ind->offset;
		return fetch_memory(regs, (void *)addr);
	} else
		return 0;
}

static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data)
{
	if (data->orig.func == fetch_indirect)
		free_indirect_fetch_data(data->orig.data);
	else if (data->orig.func == fetch_symbol)
		free_symbol_cache(data->orig.data);
	kfree(data);
}

/*
 * Kprobe event core functions
 */

struct probe_arg {
	struct fetch_func fetch;
	const char *name;
};

/* Flags for trace_probe */
#define TP_FLAG_TRACE	1
#define TP_FLAG_PROFILE	2

struct trace_probe {
	struct list_head	list;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long		nhit;
	unsigned int		flags;	/* For TP_FLAG_* */
	const char		*symbol;	/* symbol name */
	struct ftrace_event_call	call;
	struct trace_event	event;
	unsigned int		nr_args;
	struct probe_arg	args[];
};

#define SIZEOF_TRACE_PROBE(n)			\
	(offsetof(struct trace_probe, args) +	\
	(sizeof(struct probe_arg) * (n)))
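/*
 * Illustrative note (not from the original source): args[] is a C99
 * flexible array member, so a single kzalloc() of SIZEOF_TRACE_PROBE(n)
 * carries the header and all n probe_args in one block, e.g.
 * SIZEOF_TRACE_PROBE(2) == offsetof(struct trace_probe, args)
 * + 2 * sizeof(struct probe_arg).
 */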
static __kprobes int probe_is_return(struct trace_probe *tp)
{
	return tp->rp.handler != NULL;
}

static __kprobes const char *probe_symbol(struct trace_probe *tp)
{
	return tp->symbol ? tp->symbol : "unknown";
}

static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
{
	int ret = -EINVAL;

	if (ff->func == fetch_argument)
		ret = snprintf(buf, n, "$arg%lu", (unsigned long)ff->data);
	else if (ff->func == fetch_register) {
		const char *name;
		name = regs_query_register_name((unsigned int)((long)ff->data));
		ret = snprintf(buf, n, "%%%s", name);
	} else if (ff->func == fetch_stack)
		ret = snprintf(buf, n, "$stack%lu", (unsigned long)ff->data);
	else if (ff->func == fetch_memory)
		ret = snprintf(buf, n, "@0x%p", ff->data);
	else if (ff->func == fetch_symbol) {
		struct symbol_cache *sc = ff->data;
		if (sc->offset)
			ret = snprintf(buf, n, "@%s%+ld", sc->symbol,
				       sc->offset);
		else
			ret = snprintf(buf, n, "@%s", sc->symbol);
	} else if (ff->func == fetch_retvalue)
		ret = snprintf(buf, n, "$retval");
	else if (ff->func == fetch_stack_address)
		ret = snprintf(buf, n, "$stack");
	else if (ff->func == fetch_indirect) {
		struct indirect_fetch_data *id = ff->data;
		size_t l = 0;
		ret = snprintf(buf, n, "%+ld(", id->offset);
		if (ret >= n)
			goto end;
		l += ret;
		ret = probe_arg_string(buf + l, n - l, &id->orig);
		if (ret < 0)
			goto end;
		l += ret;
		ret = snprintf(buf + l, n - l, ")");
		ret += l;
	}
end:
	if (ret >= n)
		return -ENOSPC;
	return ret;
}
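/*
 * Example (illustrative, not in the original): probe_arg_string() above
 * is the inverse of the fetch-arg parser defined later in this file; it
 * reconstructs the textual form for the kprobe_events listing.  An
 * indirect fetch built from "+8(%ax)" prints back as "+8(%ax)": the
 * "+8(" prefix comes from the indirect_fetch_data offset and the inner
 * "%ax" from the recursive call on id->orig.
 */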
static int register_probe_event(struct trace_probe *tp);
static void unregister_probe_event(struct trace_probe *tp);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_probe *alloc_trace_probe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int nargs, int is_return)
{
	struct trace_probe *tp;

	tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOMEM);

	if (symbol) {
		tp->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tp->symbol)
			goto error;
		tp->rp.kp.symbol_name = tp->symbol;
		tp->rp.kp.offset = offs;
	} else
		tp->rp.kp.addr = addr;

	if (is_return)
		tp->rp.handler = kretprobe_dispatcher;
	else
		tp->rp.kp.pre_handler = kprobe_dispatcher;

	if (!event)
		goto error;
	tp->call.name = kstrdup(event, GFP_KERNEL);
	if (!tp->call.name)
		goto error;

	if (!group)
		goto error;
	tp->call.system = kstrdup(group, GFP_KERNEL);
	if (!tp->call.system)
		goto error;

	INIT_LIST_HEAD(&tp->list);
	return tp;
error:
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
	return ERR_PTR(-ENOMEM);
}

static void free_probe_arg(struct probe_arg *arg)
{
	if (arg->fetch.func == fetch_symbol)
		free_symbol_cache(arg->fetch.data);
	else if (arg->fetch.func == fetch_indirect)
		free_indirect_fetch_data(arg->fetch.data);
	kfree(arg->name);
}

static void free_trace_probe(struct trace_probe *tp)
{
	int i;

	for (i = 0; i < tp->nr_args; i++)
		free_probe_arg(&tp->args[i]);

	kfree(tp->call.system);
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
}

static struct trace_probe *find_probe_event(const char *event,
					    const char *group)
{
	struct trace_probe *tp;

	list_for_each_entry(tp, &probe_list, list)
		if (strcmp(tp->call.name, event) == 0 &&
		    strcmp(tp->call.system, group) == 0)
			return tp;
	return NULL;
}

/* Unregister a trace_probe and probe_event: must be called with probe_lock held */
static void unregister_trace_probe(struct trace_probe *tp)
{
	if (probe_is_return(tp))
		unregister_kretprobe(&tp->rp);
	else
		unregister_kprobe(&tp->rp.kp);
	list_del(&tp->list);
	unregister_probe_event(tp);
}

/* Register a trace_probe and probe_event */
static int register_trace_probe(struct trace_probe *tp)
{
	struct trace_probe *old_tp;
	int ret;

	mutex_lock(&probe_lock);

	/* register as an event */
	old_tp = find_probe_event(tp->call.name, tp->call.system);
	if (old_tp) {
		/* delete old event */
		unregister_trace_probe(old_tp);
		free_trace_probe(old_tp);
	}
	ret = register_probe_event(tp);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
	if (probe_is_return(tp))
		ret = register_kretprobe(&tp->rp);
	else
		ret = register_kprobe(&tp->rp.kp);

	if (ret) {
		pr_warning("Could not insert probe(%d)\n", ret);
		if (ret == -EILSEQ) {
			pr_warning("Probing address(0x%p) is not an "
				   "instruction boundary.\n",
				   tp->rp.kp.addr);
			ret = -EINVAL;
		}
		unregister_probe_event(tp);
	} else
		list_add_tail(&tp->list, &probe_list);
end:
	mutex_unlock(&probe_lock);
	return ret;
}
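/*
 * Example (illustrative): split_symbol_offset() below turns the probe
 * point "vfs_read+8" into symbol "vfs_read" with *offset == 8; a bare
 * "vfs_read" yields *offset == 0.  The '+' is cut out of the string
 * in place.
 */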
/* Split symbol and offset. */
static int split_symbol_offset(char *symbol, unsigned long *offset)
{
	char *tmp;
	int ret;

	if (!offset)
		return -EINVAL;

	tmp = strchr(symbol, '+');
	if (tmp) {
		/* skip sign because strict_strtol doesn't accept '+' */
		ret = strict_strtoul(tmp + 1, 0, offset);
		if (ret)
			return ret;
		*tmp = '\0';
	} else
		*offset = 0;
	return 0;
}

#define PARAM_MAX_ARGS 16
#define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))

static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return)
{
	int ret = 0;
	unsigned long param;

	if (strcmp(arg, "retval") == 0) {
		if (is_return) {
			ff->func = fetch_retvalue;
			ff->data = NULL;
		} else
			ret = -EINVAL;
	} else if (strncmp(arg, "stack", 5) == 0) {
		if (arg[5] == '\0') {
			ff->func = fetch_stack_address;
			ff->data = NULL;
		} else if (isdigit(arg[5])) {
			ret = strict_strtoul(arg + 5, 10, &param);
			if (ret || param > PARAM_MAX_STACK)
				ret = -EINVAL;
			else {
				ff->func = fetch_stack;
				ff->data = (void *)param;
			}
		} else
			ret = -EINVAL;
	} else if (strncmp(arg, "arg", 3) == 0 && isdigit(arg[3])) {
		ret = strict_strtoul(arg + 3, 10, &param);
		if (ret || param > PARAM_MAX_ARGS)
			ret = -EINVAL;
		else {
			ff->func = fetch_argument;
			ff->data = (void *)param;
		}
	} else
		ret = -EINVAL;
	return ret;
}

/* Recursive argument parser */
static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
{
	int ret = 0;
	unsigned long param;
	long offset;
	char *tmp;

	switch (arg[0]) {
	case '$':
		ret = parse_probe_vars(arg + 1, ff, is_return);
		break;
	case '%':	/* named register */
		ret = regs_query_register_offset(arg + 1);
		if (ret >= 0) {
			ff->func = fetch_register;
			ff->data = (void *)(unsigned long)ret;
			ret = 0;
		}
		break;
	case '@':	/* memory or symbol */
		if (isdigit(arg[1])) {
			ret = strict_strtoul(arg + 1, 0, &param);
			if (ret)
				break;
			ff->func = fetch_memory;
			ff->data = (void *)param;
		} else {
			ret = split_symbol_offset(arg + 1, &offset);
			if (ret)
				break;
			ff->data = alloc_symbol_cache(arg + 1, offset);
			if (ff->data)
				ff->func = fetch_symbol;
			else
				ret = -EINVAL;
		}
		break;
	case '+':	/* indirect memory */
	case '-':
		tmp = strchr(arg, '(');
		if (!tmp) {
			ret = -EINVAL;
			break;
		}
		*tmp = '\0';
		ret = strict_strtol(arg + 1, 0, &offset);
		if (ret)
			break;
		if (arg[0] == '-')
			offset = -offset;
		arg = tmp + 1;
		tmp = strrchr(arg, ')');
		if (tmp) {
			struct indirect_fetch_data *id;
			*tmp = '\0';
			id = kzalloc(sizeof(struct indirect_fetch_data),
				     GFP_KERNEL);
			if (!id)
				return -ENOMEM;
			id->offset = offset;
			ret = __parse_probe_arg(arg, &id->orig, is_return);
			if (ret)
				kfree(id);
			else {
				ff->func = fetch_indirect;
				ff->data = (void *)id;
			}
		} else
			ret = -EINVAL;
		break;
	default:
		/* TODO: support custom handler */
		ret = -EINVAL;
	}
	return ret;
}

/* String length checking wrapper */
static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
{
	if (strlen(arg) > MAX_ARGSTR_LEN) {
		pr_info("Argument is too long: %s\n", arg);
		return -ENOSPC;
	}
	return __parse_probe_arg(arg, ff, is_return);
}
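/*
 * Examples of accepted fetch-args (illustrative, derived from the parser
 * above): "$retval" (return probes only), "$stack" and "$stack3",
 * "$arg1", "%ax" (a named register), "@0xffffffff81000000" (an absolute
 * kernel address), "@jiffies+4" (a data symbol plus offset) and
 * "+8($stack)" (dereference: read memory at $stack + 8).  Indirect
 * fetches may nest, e.g. "+0(+8($stack))".
 */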
/* Return 1 if name is reserved or already used by another argument */
static int conflict_field_name(const char *name,
			       struct probe_arg *args, int narg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(reserved_field_names); i++)
		if (strcmp(reserved_field_names[i], name) == 0)
			return 1;
	for (i = 0; i < narg; i++)
		if (strcmp(args[i].name, name) == 0)
			return 1;
	return 0;
}
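/*
 * Usage sketch (illustrative; the event names are hypothetical).  Probes
 * are defined by writing the command syntax documented in
 * create_trace_probe() below to the debugfs file created at init time:
 *
 *	echo 'p:myprobe do_sys_open dfd=$arg0 flags=$arg2' >> \
 *		/sys/kernel/debug/tracing/kprobe_events
 *	echo 'r:myretprobe do_sys_open ret=$retval' >> \
 *		/sys/kernel/debug/tracing/kprobe_events
 *	echo '-:myprobe' >> /sys/kernel/debug/tracing/kprobe_events
 *
 * The first two add an entry probe and a return probe; the last one
 * deletes an event by name.
 */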
static int create_trace_probe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $argN	: fetch Nth of function argument. (N:0-)
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Indirect memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 */
	struct trace_probe *tp;
	int i, ret = 0;
	int is_return = 0, is_delete = 0;
	char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = 0;
	else if (argv[0][0] == 'r')
		is_return = 1;
	else if (argv[0][0] == '-')
		is_delete = 1;
	else {
		pr_info("Probe definition must start with 'p', 'r' or"
			" '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		tp = find_probe_event(event, group);
		if (!tp) {
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		unregister_trace_probe(tp);
		free_trace_probe(tp);
		return 0;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		if (is_return) {
			pr_info("Return probe point must be a symbol.\n");
			return -EINVAL;
		}
		/* an address specified */
		ret = strict_strtoul(argv[1], 0, (unsigned long *)&addr);
		if (ret) {
			pr_info("Failed to parse address.\n");
			return ret;
		}
	} else {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse symbol.\n");
			return ret;
		}
		if (offset && is_return) {
			pr_info("Return probe must be used without offset.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c@%s%+ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c@0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
	tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
			       is_return);
	if (IS_ERR(tp)) {
		pr_info("Failed to allocate trace_probe. (%d)\n",
			(int)PTR_ERR(tp));
		return PTR_ERR(tp);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg)
			*arg++ = '\0';
		else
			arg = argv[i];

		if (conflict_field_name(argv[i], tp->args, i)) {
			pr_info("Argument%d name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
		if (!tp->args[i].name) {
			pr_info("Failed to allocate argument%d name '%s'.\n",
				i, argv[i]);
			ret = -ENOMEM;
			goto error;
		}

		/* Parse fetch argument */
		ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return);
		if (ret) {
			pr_info("Parse error at argument%d. (%d)\n", i, ret);
			kfree(tp->args[i].name);
			goto error;
		}

		tp->nr_args++;
	}

	ret = register_trace_probe(tp);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_probe(tp);
	return ret;
}

static void cleanup_all_probes(void)
{
	struct trace_probe *tp;

	mutex_lock(&probe_lock);
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tp = list_entry(probe_list.next, struct trace_probe, list);
		unregister_trace_probe(tp);
		free_trace_probe(tp);
	}
	mutex_unlock(&probe_lock);
}
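/*
 * Listing sketch (illustrative): reading "kprobe_events" prints one
 * definition per line in the same syntax that was written, reconstructed
 * by probes_seq_show() below, e.g.
 *
 *	p:kprobes/myprobe do_sys_open dfd=$arg0 flags=$arg2
 *	r:kprobes/myretprobe do_sys_open ret=$retval
 */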
/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;
	int i, ret;
	char buf[MAX_ARGSTR_LEN + 1];

	seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tp->call.system, tp->call.name);

	if (!tp->symbol)
		seq_printf(m, " 0x%p", tp->rp.kp.addr);
	else if (tp->rp.kp.offset)
		seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset);
	else
		seq_printf(m, " %s", probe_symbol(tp));

	for (i = 0; i < tp->nr_args; i++) {
		ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch);
		if (ret < 0) {
			pr_warning("Argument%d decoding error(%d).\n", i, ret);
			return ret;
		}
		seq_printf(m, " %s=%s", tp->args[i].name, buf);
	}
	seq_printf(m, "\n");
	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		cleanup_all_probes();

	return seq_open(file, &probes_seq_op);
}

static int command_trace_probe(const char *buf)
{
	char **argv;
	int argc = 0, ret = 0;

	argv = argv_split(GFP_KERNEL, buf, &argc);
	if (!argv)
		return -ENOMEM;

	if (argc)
		ret = create_trace_probe(argc, argv);

	argv_free(argv);
	return ret;
}

#define WRITE_BUFSIZE 128

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	char *kbuf, *tmp;
	int ret;
	size_t done;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = done = 0;
	while (done < count) {
		size = count - done;
		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;
		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		tmp = strchr(kbuf, '\n');
		if (tmp) {
			*tmp = '\0';
			size = tmp - kbuf + 1;
		} else if (done + size < count) {
			pr_warning("Line length is too long: "
				   "should be less than %d.\n", WRITE_BUFSIZE);
			ret = -EINVAL;
			goto out;
		}
		done += size;
		/* Remove comments */
		tmp = strchr(kbuf, '#');
		if (tmp)
			*tmp = '\0';

		ret = command_trace_probe(kbuf);
		if (ret)
			goto out;
	}
	ret = done;
out:
	kfree(kbuf);
	return ret;
}

static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;

	seq_printf(m, " %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
		   tp->rp.kp.nmissed);

	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};
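/*
 * Profiling sketch (illustrative): reading "kprobe_profile" emits one
 * line per probe with the event name, the hit count (nhit) and the
 * kretprobe miss count (rp.kp.nmissed), e.g.
 *
 *	 myprobe                                                 2856               0
 */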
/* Kprobe handler */
static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct kprobe_trace_entry *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, i, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	tp->nhit++;

	local_save_flags(irq_flags);
	pc = preempt_count();

	size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);

	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
						  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->nargs = tp->nr_args;
	entry->ip = (unsigned long)kp->addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
	return 0;
}

/* Kretprobe handler */
static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
					  struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct kretprobe_trace_entry *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, i, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	local_save_flags(irq_flags);
	pc = preempt_count();

	size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);

	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
						  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->nargs = tp->nr_args;
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);

	return 0;
}
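/*
 * Output sketch (illustrative, with hypothetical event and argument
 * names): the printers below format one ring-buffer record per probe
 * hit, so in the "trace" file the result looks like
 *
 *	myprobe: (do_sys_open+0x0/0xd6) dfd=ffffff9c flags=8241
 *	myretprobe: (sys_open+0x1b/0x25 <- do_sys_open) ret=3
 *
 * Argument values are printed in hex (%lx).
 */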
/* Event entry printers */
enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags)
{
	struct kprobe_trace_entry *field;
	struct trace_seq *s = &iter->seq;
	struct trace_event *event;
	struct trace_probe *tp;
	int i;

	field = (struct kprobe_trace_entry *)iter->ent;
	event = ftrace_find_event(field->ent.type);
	tp = container_of(event, struct trace_probe, event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	for (i = 0; i < field->nargs; i++)
		if (!trace_seq_printf(s, " %s=%lx",
				      tp->args[i].name, field->args[i]))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags)
{
	struct kretprobe_trace_entry *field;
	struct trace_seq *s = &iter->seq;
	struct trace_event *event;
	struct trace_probe *tp;
	int i;

	field = (struct kretprobe_trace_entry *)iter->ent;
	event = ftrace_find_event(field->ent.type);
	tp = container_of(event, struct trace_probe, event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, " <- "))
		goto partial;

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	for (i = 0; i < field->nargs; i++)
		if (!trace_seq_printf(s, " %s=%lx",
				      tp->args[i].name, field->args[i]))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static int probe_event_enable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags |= TP_FLAG_TRACE;
	if (probe_is_return(tp))
		return enable_kretprobe(&tp->rp);
	else
		return enable_kprobe(&tp->rp.kp);
}

static void probe_event_disable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags &= ~TP_FLAG_TRACE;
	if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
		if (probe_is_return(tp))
			disable_kretprobe(&tp->rp);
		else
			disable_kprobe(&tp->rp.kp);
	}
}

static int probe_event_raw_init(struct ftrace_event_call *event_call)
{
	INIT_LIST_HEAD(&event_call->fields);

	return 0;
}

#undef DEFINE_FIELD
#define DEFINE_FIELD(type, item, name, is_signed)			\
	do {								\
		ret = trace_define_field(event_call, #type, name,	\
					 offsetof(typeof(field), item),	\
					 sizeof(field.item), is_signed,	\
					 FILTER_OTHER);			\
		if (ret)						\
			return ret;					\
	} while (0)

static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	ret = trace_define_common_fields(event_call);
	if (ret)
		return ret;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++)
		DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
	return 0;
}

static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	ret = trace_define_common_fields(event_call);
	if (ret)
		return ret;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++)
		DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
	return 0;
}

static int __probe_event_show_format(struct trace_seq *s,
				     struct trace_probe *tp, const char *fmt,
				     const char *arg)
{
	int i;

	/* Show format */
	if (!trace_seq_printf(s, "\nprint fmt: \"%s", fmt))
		return 0;

	for (i = 0; i < tp->nr_args; i++)
		if (!trace_seq_printf(s, " %s=%%lx", tp->args[i].name))
			return 0;

	if (!trace_seq_printf(s, "\", %s", arg))
		return 0;

	for (i = 0; i < tp->nr_args; i++)
		if (!trace_seq_printf(s, ", REC->%s", tp->args[i].name))
			return 0;

	return trace_seq_puts(s, "\n");
}
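/*
 * Format-file sketch (illustrative): for a kprobe event with one
 * argument named "dfd", kprobe_event_show_format() below combines with
 * __probe_event_show_format() above to emit
 *
 *	print fmt: "(%lx) dfd=%lx", REC->__probe_ip, REC->dfd
 *
 * which userspace tools parse from the event's "format" file.
 */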
#undef SHOW_FIELD
#define SHOW_FIELD(type, item, name)					\
	do {								\
		ret = trace_seq_printf(s, "\tfield: " #type " %s;\t"	\
				"offset:%u;\tsize:%u;\n", name,		\
				(unsigned int)offsetof(typeof(field), item),\
				(unsigned int)sizeof(type));		\
		if (!ret)						\
			return 0;					\
	} while (0)

static int kprobe_event_show_format(struct ftrace_event_call *call,
				    struct trace_seq *s)
{
	struct kprobe_trace_entry field __attribute__((unused));
	int ret, i;
	struct trace_probe *tp = (struct trace_probe *)call->data;

	SHOW_FIELD(unsigned long, ip, FIELD_STRING_IP);
	SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);

	/* Show fields */
	for (i = 0; i < tp->nr_args; i++)
		SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
	trace_seq_puts(s, "\n");

	return __probe_event_show_format(s, tp, "(%lx)",
					 "REC->" FIELD_STRING_IP);
}

static int kretprobe_event_show_format(struct ftrace_event_call *call,
				       struct trace_seq *s)
{
	struct kretprobe_trace_entry field __attribute__((unused));
	int ret, i;
	struct trace_probe *tp = (struct trace_probe *)call->data;

	SHOW_FIELD(unsigned long, func, FIELD_STRING_FUNC);
	SHOW_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP);
	SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);

	/* Show fields */
	for (i = 0; i < tp->nr_args; i++)
		SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
	trace_seq_puts(s, "\n");

	return __probe_event_show_format(s, tp, "(%lx <- %lx)",
					 "REC->" FIELD_STRING_FUNC
					 ", REC->" FIELD_STRING_RETIP);
}
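/*
 * Sizing note for the perf handlers below (illustrative): each record is
 * prefixed by a u32 size field and the total must stay u64-aligned,
 * hence size = ALIGN(__size + sizeof(u32), sizeof(u64)) - sizeof(u32).
 * E.g. __size == 36 gives ALIGN(40, 8) == 40 and size == 36, while
 * __size == 34 gives ALIGN(38, 8) == 40 and size == 36, i.e. two pad
 * bytes, which the handlers zero so no stack data leaks to userspace.
 */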
#ifdef CONFIG_EVENT_PROFILE

/* Kprobe profile handler */
static __kprobes int kprobe_profile_func(struct kprobe *kp,
					 struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct ftrace_event_call *call = &tp->call;
	struct kprobe_trace_entry *entry;
	struct trace_entry *ent;
	int size, __size, i, pc, __cpu;
	unsigned long irq_flags;
	char *trace_buf;
	char *raw_data;
	int rctx;

	pc = preempt_count();
	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "profile buffer not large enough"))
		return 0;

	/*
	 * Protect the non-NMI buffer.
	 * This also protects the rcu read side.
	 */
	local_irq_save(irq_flags);

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	__cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, __cpu);

	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
	entry = (struct kprobe_trace_entry *)raw_data;
	ent = &entry->ent;

	tracing_generic_entry_update(ent, irq_flags, pc);
	ent->type = call->id;
	entry->nargs = tp->nr_args;
	entry->ip = (unsigned long)kp->addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
	perf_tp_event(call->id, entry->ip, 1, entry, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(irq_flags);

	return 0;
}

/* Kretprobe profile handler */
static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
					    struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct ftrace_event_call *call = &tp->call;
	struct kretprobe_trace_entry *entry;
	struct trace_entry *ent;
	int size, __size, i, pc, __cpu;
	unsigned long irq_flags;
	char *trace_buf;
	char *raw_data;
	int rctx;

	pc = preempt_count();
	__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "profile buffer not large enough"))
		return 0;

	/*
	 * Protect the non-NMI buffer.
	 * This also protects the rcu read side.
	 */
	local_irq_save(irq_flags);

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	__cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, __cpu);

	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
	entry = (struct kretprobe_trace_entry *)raw_data;
	ent = &entry->ent;

	tracing_generic_entry_update(ent, irq_flags, pc);
	ent->type = call->id;
	entry->nargs = tp->nr_args;
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
	perf_tp_event(call->id, entry->ret_ip, 1, entry, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(irq_flags);

	return 0;
}

static int probe_profile_enable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags |= TP_FLAG_PROFILE;

	if (probe_is_return(tp))
		return enable_kretprobe(&tp->rp);
	else
		return enable_kprobe(&tp->rp.kp);
}

static void probe_profile_disable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags &= ~TP_FLAG_PROFILE;

	if (!(tp->flags & TP_FLAG_TRACE)) {
		if (probe_is_return(tp))
			disable_kretprobe(&tp->rp);
		else
			disable_kprobe(&tp->rp.kp);
	}
}
#endif	/* CONFIG_EVENT_PROFILE */


static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);

	if (tp->flags & TP_FLAG_TRACE)
		kprobe_trace_func(kp, regs);
#ifdef CONFIG_EVENT_PROFILE
	if (tp->flags & TP_FLAG_PROFILE)
		kprobe_profile_func(kp, regs);
#endif	/* CONFIG_EVENT_PROFILE */
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);

	if (tp->flags & TP_FLAG_TRACE)
		kretprobe_trace_func(ri, regs);
#ifdef CONFIG_EVENT_PROFILE
	if (tp->flags & TP_FLAG_PROFILE)
		kretprobe_profile_func(ri, regs);
#endif	/* CONFIG_EVENT_PROFILE */
	return 0;	/* We don't tweak the kernel, so just return 0 */
}

static int register_probe_event(struct trace_probe *tp)
{
	struct ftrace_event_call *call = &tp->call;
	int ret;

	/* Initialize ftrace_event_call */
	if (probe_is_return(tp)) {
		tp->event.trace = print_kretprobe_event;
		call->raw_init = probe_event_raw_init;
		call->show_format = kretprobe_event_show_format;
		call->define_fields = kretprobe_event_define_fields;
	} else {
		tp->event.trace = print_kprobe_event;
		call->raw_init = probe_event_raw_init;
		call->show_format = kprobe_event_show_format;
		call->define_fields = kprobe_event_define_fields;
	}
	call->event = &tp->event;
	call->id = register_ftrace_event(&tp->event);
	if (!call->id)
		return -ENODEV;
	call->enabled = 0;
	call->regfunc = probe_event_enable;
	call->unregfunc = probe_event_disable;

#ifdef CONFIG_EVENT_PROFILE
	atomic_set(&call->profile_count, -1);
	call->profile_enable = probe_profile_enable;
	call->profile_disable = probe_profile_disable;
#endif
	call->data = tp;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n", call->name);
		unregister_ftrace_event(&tp->event);
	}
	return ret;
}

static void unregister_probe_event(struct trace_probe *tp)
{
	/* tp->event is unregistered in trace_remove_event_call() */
	trace_remove_event_call(&tp->call);
}

/* Make a debugfs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_events' entry\n");

	/* Profile interface */
	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST

static int kprobe_trace_selftest_target(int a1, int a2, int a3,
					int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

static __init int kprobe_trace_self_tests_init(void)
{
	int ret;
	int (*target)(int, int, int, int, int, int);

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target "
				  "$arg1 $arg2 $arg3 $arg4 $stack $stack0");
	if (WARN_ON_ONCE(ret))
		pr_warning("error enabling function entry\n");

	ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
				  "$retval");
	if (WARN_ON_ONCE(ret))
		pr_warning("error enabling function return\n");

	ret = target(1, 2, 3, 4, 5, 6);

	cleanup_all_probes();

	pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif