/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#define pr_fmt(fmt)	"trace_kprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>

#include "trace_probe.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096

/*
 * Kprobe event core functions
 */
struct trace_kprobe {
	struct list_head	list;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long __percpu *nhit;
	const char		*symbol;	/* symbol name */
	struct trace_probe	tp;
};

#define SIZEOF_TRACE_KPROBE(n)				\
	(offsetof(struct trace_kprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

DEFINE_PER_CPU(int, bpf_kprobe_override);

static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
	return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
	return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
	return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
	return !!(kprobe_gone(&tk->rp.kp));
}

static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
						       struct module *mod)
{
	int len = strlen(mod->name);
	const char *name = trace_kprobe_symbol(tk);
	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}

static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
{
	return !!strchr(trace_kprobe_symbol(tk), ':');
}

static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
{
	unsigned long nhit = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		nhit += *per_cpu_ptr(tk->nhit, cpu);

	return nhit;
}

int trace_kprobe_ftrace(struct trace_event_call *call)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
	return kprobe_ftrace(&tk->rp.kp);
}

int trace_kprobe_error_injectable(struct trace_event_call *call)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
	unsigned long addr;

	if (tk->symbol) {
		addr = (unsigned long)
			kallsyms_lookup_name(trace_kprobe_symbol(tk));
		addr += tk->rp.kp.offset;
	} else {
		addr = (unsigned long)tk->rp.kp.addr;
	}
	return within_kprobe_error_injection_list(addr);
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);
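
/*
 * Note on the handler wiring: a trace_kprobe always embeds a full
 * struct kretprobe, but only rp.kp is registered for a plain 'p' probe.
 * alloc_trace_kprobe() (below) sets rp.kp.pre_handler = kprobe_dispatcher
 * for 'p' probes and rp.handler = kretprobe_dispatcher for 'r' probes,
 * which is why trace_kprobe_is_return() only needs to test rp.handler.
 */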

/* Memory fetching by symbol */
struct symbol_cache {
	char		*symbol;
	long		offset;
	unsigned long	addr;
};

unsigned long update_symbol_cache(struct symbol_cache *sc)
{
	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);

	if (sc->addr)
		sc->addr += sc->offset;

	return sc->addr;
}

void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}

struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
	struct symbol_cache *sc;

	if (!sym || strlen(sym) == 0)
		return NULL;

	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->symbol = kstrdup(sym, GFP_KERNEL);
	if (!sc->symbol) {
		kfree(sc);
		return NULL;
	}
	sc->offset = offset;
	update_symbol_cache(sc);

	return sc;
}

/*
 * Kprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					 void *offset, void *dest)	\
{									\
	*(type *)dest = (type)regs_get_kernel_stack_nth(regs,		\
				(unsigned int)((unsigned long)offset));	\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));

DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL

#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,	\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	if (probe_kernel_address(addr, retval))				\
		*(type *)dest = 0;					\
	else								\
		*(type *)dest = retval;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));

DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	int maxlen = get_rloc_len(*(u32 *)dest);
	u8 *dst = get_rloc_data(dest);
	long ret;

	if (!maxlen)
		return;

	/*
	 * Try to get string again, since the string can be changed while
	 * probing.
	 */
	ret = strncpy_from_unsafe(dst, addr, maxlen);

	if (ret < 0) {	/* Failed to fetch string */
		dst[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest));
	}
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));

/* Return the length of the string -- including the terminating null byte */
static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	mm_segment_t old_fs;
	int ret, len = 0;
	u8 c;

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	pagefault_disable();

	do {
		ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
		len++;
	} while (c && ret == 0 && len < MAX_STRING_SIZE);

	pagefault_enable();
	set_fs(old_fs);

	if (ret < 0)	/* Failed to check the length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));

#define DEFINE_FETCH_symbol(type)					\
void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
{									\
	struct symbol_cache *sc = data;					\
	if (sc->addr)							\
		fetch_memory_##type(regs, (void *)sc->addr, dest);	\
	else								\
		*(type *)dest = 0;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));

DEFINE_BASIC_FETCH_FUNCS(symbol)
DEFINE_FETCH_symbol(string)
DEFINE_FETCH_symbol(string_size)

/* kprobes don't support file_offset fetch methods */
#define fetch_file_offset_u8		NULL
#define fetch_file_offset_u16		NULL
#define fetch_file_offset_u32		NULL
#define fetch_file_offset_u64		NULL
#define fetch_file_offset_string	NULL
#define fetch_file_offset_string_size	NULL

/* Fetch type information table */
static const struct fetch_type kprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),
	ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),

	ASSIGN_FETCH_TYPE_END
};
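
/*
 * Illustrative examples of how the table above is used (expansions per
 * FETCH_FUNC_NAME() in trace_probe.h, so treat the exact names as a
 * sketch): an argument spec "@0xffffffff81000000:u32" is fetched with
 * fetch_memory_u32(), "@jiffies:u64" goes through the symbol_cache via
 * fetch_symbol_u64(), and "$stack1:x32" uses fetch_stack_u32(), where the
 * "x32" alias only switches the printed format to hex.
 */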

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
					       const char *event,
					       void *addr,
					       const char *symbol,
					       unsigned long offs,
					       int maxactive,
					       int nargs, bool is_return)
{
	struct trace_kprobe *tk;
	int ret = -ENOMEM;

	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
	if (!tk)
		return ERR_PTR(ret);

	tk->nhit = alloc_percpu(unsigned long);
	if (!tk->nhit)
		goto error;

	if (symbol) {
		tk->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tk->symbol)
			goto error;
		tk->rp.kp.symbol_name = tk->symbol;
		tk->rp.kp.offset = offs;
	} else
		tk->rp.kp.addr = addr;

	if (is_return)
		tk->rp.handler = kretprobe_dispatcher;
	else
		tk->rp.kp.pre_handler = kprobe_dispatcher;

	tk->rp.maxactive = maxactive;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.call.class = &tk->tp.class;
	tk->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tk->tp.call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tk->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tk->list);
	INIT_LIST_HEAD(&tk->tp.files);
	return tk;
error:
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
	return ERR_PTR(ret);
}

static void free_trace_kprobe(struct trace_kprobe *tk)
{
	int i;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tk->tp.args[i]);

	kfree(tk->tp.call.class->system);
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
}

static struct trace_kprobe *find_trace_kprobe(const char *event,
					      const char *group)
{
	struct trace_kprobe *tk;

	list_for_each_entry(tk, &probe_list, list)
		if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
		    strcmp(tk->tp.call.class->system, group) == 0)
			return tk;
	return NULL;
}

/*
 * Enable trace_probe
 * If file is NULL, enable the "perf" handler; otherwise enable the
 * "trace" handler.
 */
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	int ret = 0;

	if (file) {
		struct event_file_link *link;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			ret = -ENOMEM;
			goto out;
		}

		link->file = file;
		list_add_tail_rcu(&link->list, &tk->tp.files);

		tk->tp.flags |= TP_FLAG_TRACE;
	} else
		tk->tp.flags |= TP_FLAG_PROFILE;

	if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
		if (trace_kprobe_is_return(tk))
			ret = enable_kretprobe(&tk->rp);
		else
			ret = enable_kprobe(&tk->rp.kp);
	}
out:
	return ret;
}
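
/*
 * Note: the link added above is walked by kprobe_trace_func() and
 * kretprobe_trace_func() from kprobe handler context, which runs with
 * preemption disabled; that is why enable uses list_add_tail_rcu() and
 * the disable path below can rely on synchronize_sched() before freeing
 * the link.
 */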

/*
 * Disable trace_probe
 * If file is NULL, disable the "perf" handler; otherwise disable the
 * "trace" handler.
 */
static int
disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	struct event_file_link *link = NULL;
	int wait = 0;
	int ret = 0;

	if (file) {
		link = find_event_file_link(&tk->tp, file);
		if (!link) {
			ret = -EINVAL;
			goto out;
		}

		list_del_rcu(&link->list);
		wait = 1;
		if (!list_empty(&tk->tp.files))
			goto out;

		tk->tp.flags &= ~TP_FLAG_TRACE;
	} else
		tk->tp.flags &= ~TP_FLAG_PROFILE;

	if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			disable_kretprobe(&tk->rp);
		else
			disable_kprobe(&tk->rp.kp);
		wait = 1;
	}
out:
	if (wait) {
		/*
		 * Synchronize with kprobe_trace_func/kretprobe_trace_func
		 * to ensure the probe is disabled (all running handlers
		 * have finished). This is not only for kfree(); the caller,
		 * trace_remove_event_call(), also relies on it before
		 * releasing event_call related objects, which are accessed
		 * in kprobe_trace_func/kretprobe_trace_func.
		 */
		synchronize_sched();
		kfree(link);	/* Ignored if link == NULL */
	}

	return ret;
}

/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
	int i, ret;

	if (trace_probe_is_registered(&tk->tp))
		return -EINVAL;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_update_arg(&tk->tp.args[i]);

	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(&tk->tp))
		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_kprobe_is_return(tk))
		ret = register_kretprobe(&tk->rp);
	else
		ret = register_kprobe(&tk->rp.kp);

	if (ret == 0)
		tk->tp.flags |= TP_FLAG_REGISTERED;
	else {
		pr_warn("Could not insert probe at %s+%lu: %d\n",
			trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
		if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
			pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
			ret = 0;
		} else if (ret == -EILSEQ) {
			pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
				tk->rp.kp.addr);
			ret = -EINVAL;
		}
	}

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
	if (trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			unregister_kretprobe(&tk->rp);
		else
			unregister_kprobe(&tk->rp.kp);
		tk->tp.flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse */
		if (tk->rp.kp.symbol_name)
			tk->rp.kp.addr = NULL;
	}
}

/* Unregister a trace_probe and probe_event: call with probe_lock held */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
	/* An enabled event cannot be unregistered */
	if (trace_probe_is_enabled(&tk->tp))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_kprobe_event(tk))
		return -EBUSY;

	__unregister_trace_kprobe(tk);
	list_del(&tk->list);

	return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
	struct trace_kprobe *old_tk;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete the old event if one with the same name exists */
	old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
				   tk->tp.call.class->system);
	if (old_tk) {
		ret = unregister_trace_kprobe(old_tk);
		if (ret < 0)
			goto end;
		free_trace_kprobe(old_tk);
	}

	/* Register new event */
	ret = register_kprobe_event(tk);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret < 0)
		unregister_kprobe_event(tk);
	else
		list_add_tail(&tk->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}

/* Module notifier callback, checking events on the module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct module *mod = data;
	struct trace_kprobe *tk;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&probe_lock);
	list_for_each_entry(tk, &probe_list, list) {
		if (trace_kprobe_within_module(tk, mod)) {
			/* Don't need to check busy - this should have gone. */
			__unregister_trace_kprobe(tk);
			ret = __register_trace_kprobe(tk);
			if (ret)
				pr_warn("Failed to re-register probe %s on %s: %d\n",
					trace_event_name(&tk->tp.call),
					mod->name, ret);
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
	.notifier_call = trace_kprobe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

/* Convert certain expected symbols into '_' when generating event names */
static inline void sanitize_event_name(char *name)
{
	while (*name++ != '\0')
		if (*name == ':' || *name == '.')
			*name = '_';
}

static int create_trace_kprobe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe:
	 *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe:
	 *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth entry of stack (N:0-)
	 *  $comm	: fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
	struct trace_kprobe *tk;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	int maxactive = 0;
	char *arg;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	event = strchr(&argv[0][1], ':');
	if (event) {
		event[0] = '\0';
		event++;
	}
	if (is_return && isdigit(argv[0][1])) {
		ret = kstrtouint(&argv[0][1], 0, &maxactive);
		if (ret) {
			pr_info("Failed to parse maxactive.\n");
			return ret;
		}
		/*
		 * kretprobe instances are iterated over via a list. The
		 * maximum should stay reasonable.
		 */
690 */ 691 if (maxactive > KRETPROBE_MAXACTIVE_MAX) { 692 pr_info("Maxactive is too big (%d > %d).\n", 693 maxactive, KRETPROBE_MAXACTIVE_MAX); 694 return -E2BIG; 695 } 696 } 697 698 if (event) { 699 if (strchr(event, '/')) { 700 group = event; 701 event = strchr(group, '/') + 1; 702 event[-1] = '\0'; 703 if (strlen(group) == 0) { 704 pr_info("Group name is not specified\n"); 705 return -EINVAL; 706 } 707 } 708 if (strlen(event) == 0) { 709 pr_info("Event name is not specified\n"); 710 return -EINVAL; 711 } 712 } 713 if (!group) 714 group = KPROBE_EVENT_SYSTEM; 715 716 if (is_delete) { 717 if (!event) { 718 pr_info("Delete command needs an event name.\n"); 719 return -EINVAL; 720 } 721 mutex_lock(&probe_lock); 722 tk = find_trace_kprobe(event, group); 723 if (!tk) { 724 mutex_unlock(&probe_lock); 725 pr_info("Event %s/%s doesn't exist.\n", group, event); 726 return -ENOENT; 727 } 728 /* delete an event */ 729 ret = unregister_trace_kprobe(tk); 730 if (ret == 0) 731 free_trace_kprobe(tk); 732 mutex_unlock(&probe_lock); 733 return ret; 734 } 735 736 if (argc < 2) { 737 pr_info("Probe point is not specified.\n"); 738 return -EINVAL; 739 } 740 741 /* try to parse an address. if that fails, try to read the 742 * input as a symbol. */ 743 if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) { 744 /* a symbol specified */ 745 symbol = argv[1]; 746 /* TODO: support .init module functions */ 747 ret = traceprobe_split_symbol_offset(symbol, &offset); 748 if (ret) { 749 pr_info("Failed to parse either an address or a symbol.\n"); 750 return ret; 751 } 752 if (offset && is_return && 753 !kprobe_on_func_entry(NULL, symbol, offset)) { 754 pr_info("Given offset is not valid for return probe.\n"); 755 return -EINVAL; 756 } 757 } 758 argc -= 2; argv += 2; 759 760 /* setup a probe */ 761 if (!event) { 762 /* Make a new event name */ 763 if (symbol) 764 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld", 765 is_return ? 'r' : 'p', symbol, offset); 766 else 767 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p", 768 is_return ? 
		sanitize_event_name(buf);
		event = buf;
	}
	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
				argc, is_return);
	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tk));
		return PTR_ERR(tk);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		/* Increment count for freeing args in error case */
		tk->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name,
						   tk->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with another field.\n",
				i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
						 is_return, true,
						 kprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_kprobe(tk);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_kprobe(tk);
	return ret;
}

static int release_all_trace_kprobes(void)
{
	struct trace_kprobe *tk;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tk, &probe_list, list)
		if (trace_probe_is_enabled(&tk->tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tk = list_entry(probe_list.next, struct trace_kprobe, list);
		ret = unregister_trace_kprobe(tk);
		if (ret)
			goto end;
		free_trace_kprobe(tk);
	}

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;
	int i;

	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tk->tp.call.class->system,
		   trace_event_name(&tk->tp.call));

	if (!tk->symbol)
		seq_printf(m, " 0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
			   tk->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_kprobe_symbol(tk));

	for (i = 0; i < tk->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_kprobes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_trace_kprobe);
}

static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;

	seq_printf(m, " %-44s %15lu %15lu\n",
		   trace_event_name(&tk->tp.call),
		   trace_kprobe_nhit(tk),
		   tk->rp.kp.nmissed);

	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
		    struct trace_event_file *trace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tk->rp.kp.addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);
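
/*
 * Record layout sketch for the entry event above: the ring buffer event
 * body is a kprobe_trace_entry_head (just the probed ip), followed by
 * tk->tp.size bytes of fixed-size argument values, followed by dsize
 * bytes of dynamic data (strings), which the string fetchers reference
 * through u32 data-relative locations (make_data_rloc()).
 */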

/* Kretprobe handler */
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct trace_event_file *trace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);

/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto out;

	trace_seq_putc(s, '\n');
out:
	return trace_handle_return(s);
}

static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_puts(s, " <- ");

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto out;

	trace_seq_putc(s, '\n');

out:
	return trace_handle_return(s);
}
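
/*
 * Roughly, the printers above render lines of the following shape (the
 * event names, symbols, and values are illustrative only):
 *
 *   myprobe: (do_sys_open+0x0/0x2c0) dfd=0xffffff9c mode=0x0
 *   myretprobe: (SyS_open+0x1b/0x20 <- do_sys_open) fd=0x3
 */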

static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static int
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (bpf_prog_array_valid(call)) {
		int ret;

		ret = trace_call_bpf(call, regs);

		/*
		 * We need to check and see if we modified the pc of the
		 * pt_regs, and if so clear the kprobe and return 1 so that we
		 * don't do the instruction skipping. Also reset our state so
		 * we are clean the next pass through.
		 */
		if (__this_cpu_read(bpf_kprobe_override)) {
			__this_cpu_write(bpf_kprobe_override, 0);
			reset_current_kprobe();
			return 1;
		}
		if (!ret)
			return 0;
	}

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return 0;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return 0;

	entry->ip = (unsigned long)tk->rp.kp.addr;
	memset(&entry[1], 0, dsize);
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
	return 0;
}
NOKPROBE_SYMBOL(kprobe_perf_func);

/* Kretprobe profile handler */
static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);
#endif	/* CONFIG_PERF_EVENTS */
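
/*
 * Worked example for the perf buffer sizing above: if sizeof(*entry) +
 * tk->tp.size + dsize == 22, then __size + sizeof(u32) == 26 rounds up
 * to 32, so size == 28. perf itself prepends a u32 length word to the
 * raw sample, so the reserved record stays u64-aligned overall.
 */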
1287 */ 1288 static int kprobe_register(struct trace_event_call *event, 1289 enum trace_reg type, void *data) 1290 { 1291 struct trace_kprobe *tk = (struct trace_kprobe *)event->data; 1292 struct trace_event_file *file = data; 1293 1294 switch (type) { 1295 case TRACE_REG_REGISTER: 1296 return enable_trace_kprobe(tk, file); 1297 case TRACE_REG_UNREGISTER: 1298 return disable_trace_kprobe(tk, file); 1299 1300 #ifdef CONFIG_PERF_EVENTS 1301 case TRACE_REG_PERF_REGISTER: 1302 return enable_trace_kprobe(tk, NULL); 1303 case TRACE_REG_PERF_UNREGISTER: 1304 return disable_trace_kprobe(tk, NULL); 1305 case TRACE_REG_PERF_OPEN: 1306 case TRACE_REG_PERF_CLOSE: 1307 case TRACE_REG_PERF_ADD: 1308 case TRACE_REG_PERF_DEL: 1309 return 0; 1310 #endif 1311 } 1312 return 0; 1313 } 1314 1315 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) 1316 { 1317 struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp); 1318 int ret = 0; 1319 1320 raw_cpu_inc(*tk->nhit); 1321 1322 if (tk->tp.flags & TP_FLAG_TRACE) 1323 kprobe_trace_func(tk, regs); 1324 #ifdef CONFIG_PERF_EVENTS 1325 if (tk->tp.flags & TP_FLAG_PROFILE) { 1326 ret = kprobe_perf_func(tk, regs); 1327 /* 1328 * The ftrace kprobe handler leaves it up to us to re-enable 1329 * preemption here before returning if we've modified the ip. 1330 */ 1331 if (ret) 1332 preempt_enable_no_resched(); 1333 } 1334 #endif 1335 return ret; 1336 } 1337 NOKPROBE_SYMBOL(kprobe_dispatcher); 1338 1339 static int 1340 kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) 1341 { 1342 struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp); 1343 1344 raw_cpu_inc(*tk->nhit); 1345 1346 if (tk->tp.flags & TP_FLAG_TRACE) 1347 kretprobe_trace_func(tk, ri, regs); 1348 #ifdef CONFIG_PERF_EVENTS 1349 if (tk->tp.flags & TP_FLAG_PROFILE) 1350 kretprobe_perf_func(tk, ri, regs); 1351 #endif 1352 return 0; /* We don't tweek kernel, so just return 0 */ 1353 } 1354 NOKPROBE_SYMBOL(kretprobe_dispatcher); 1355 1356 static struct trace_event_functions kretprobe_funcs = { 1357 .trace = print_kretprobe_event 1358 }; 1359 1360 static struct trace_event_functions kprobe_funcs = { 1361 .trace = print_kprobe_event 1362 }; 1363 1364 static int register_kprobe_event(struct trace_kprobe *tk) 1365 { 1366 struct trace_event_call *call = &tk->tp.call; 1367 int ret; 1368 1369 /* Initialize trace_event_call */ 1370 INIT_LIST_HEAD(&call->class->fields); 1371 if (trace_kprobe_is_return(tk)) { 1372 call->event.funcs = &kretprobe_funcs; 1373 call->class->define_fields = kretprobe_event_define_fields; 1374 } else { 1375 call->event.funcs = &kprobe_funcs; 1376 call->class->define_fields = kprobe_event_define_fields; 1377 } 1378 if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) 1379 return -ENOMEM; 1380 ret = register_trace_event(&call->event); 1381 if (!ret) { 1382 kfree(call->print_fmt); 1383 return -ENODEV; 1384 } 1385 call->flags = TRACE_EVENT_FL_KPROBE; 1386 call->class->reg = kprobe_register; 1387 call->data = tk; 1388 ret = trace_add_event_call(call); 1389 if (ret) { 1390 pr_info("Failed to register kprobe event: %s\n", 1391 trace_event_name(call)); 1392 kfree(call->print_fmt); 1393 unregister_trace_event(&call->event); 1394 } 1395 return ret; 1396 } 1397 1398 static int unregister_kprobe_event(struct trace_kprobe *tk) 1399 { 1400 int ret; 1401 1402 /* tp->event is unregistered in trace_remove_event_call() */ 1403 ret = trace_remove_event_call(&tk->tp.call); 1404 if (!ret) 1405 kfree(tk->tp.call.print_fmt); 1406 return ret; 1407 } 

/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	if (register_module_notifier(&trace_kprobe_module_nb))
		return -EINVAL;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_events' entry\n");

	/* Profile interface */
	entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table. 'noinline' makes sure that there
 * isn't an inlined version used by the test method below.
 */
static __used __init noinline int
kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

static __init struct trace_event_file *
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
		if (file->event_call == &tk->tp.call)
			return file;

	return NULL;
}

/*
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 * stage, so we can do this lockless.
 */
static __init int kprobe_trace_self_tests_init(void)
{
	int ret, warn = 0;
	int (*target)(int, int, int, int, int, int);
	struct trace_kprobe *tk;
	struct trace_event_file *file;

	if (tracing_is_disabled())
		return -ENODEV;

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = trace_run_command("p:testprobe kprobe_trace_selftest_target "
				"$stack $stack0 +0($stack)",
				create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target "
				"$retval", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting 2nd new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	if (warn)
		goto end;

	ret = target(1, 2, 3, 4, 5, 6);

	/*
	 * Not expecting an error here: the check is only to prevent the
	 * optimizer from removing the call to target(), as otherwise there
	 * are no side-effects and the call is never performed.
	 */
	if (ret != 21)
		warn++;

	/* Disable trace points before removing them */
	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting 2nd test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe2 hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	ret = trace_run_command("-:testprobe", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

	ret = trace_run_command("-:testprobe2", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

end:
	release_all_trace_kprobes();
	/*
	 * Wait for the optimizer work to finish. Otherwise it might fiddle
	 * with probes in already freed __init text.
	 */
	wait_for_kprobe_optimizer();
	if (warn)
		pr_cont("NG: Some tests are failed. Please check them.\n");
	else
		pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif