// SPDX-License-Identifier: GPL-2.0
/*
 * Fprobe-based tracing events
 * Copyright (C) 2022 Google LLC.
 */
#define pr_fmt(fmt) "trace_fprobe: " fmt

#include <linux/fprobe.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/security.h>
#include <linux/tracepoint.h>
#include <linux/uaccess.h>

#include <asm/ptrace.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_kernel.h"
#include "trace_probe_tmpl.h"

#define FPROBE_EVENT_SYSTEM "fprobes"
#define TRACEPOINT_EVENT_SYSTEM "tracepoints"
#define RETHOOK_MAXACTIVE_MAX 4096

static int trace_fprobe_create(const char *raw_command);
static int trace_fprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_fprobe_release(struct dyn_event *ev);
static bool trace_fprobe_is_busy(struct dyn_event *ev);
static bool trace_fprobe_match(const char *system, const char *event,
			       int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations trace_fprobe_ops = {
	.create = trace_fprobe_create,
	.show = trace_fprobe_show,
	.is_busy = trace_fprobe_is_busy,
	.free = trace_fprobe_release,
	.match = trace_fprobe_match,
};

/* List of tracepoint_user */
static LIST_HEAD(tracepoint_user_list);
static DEFINE_MUTEX(tracepoint_user_mutex);

/* While a tracepoint_user is alive, its @tpoint can be NULL while @refcount is still non-zero. */
struct tracepoint_user {
	struct list_head	list;
	const char		*name;
	struct tracepoint	*tpoint;
	unsigned int		refcount;
};

/* NOTE: you must hold tracepoint_user_mutex. */
#define for_each_tracepoint_user(tuser)	\
	list_for_each_entry(tuser, &tracepoint_user_list, list)

static int tracepoint_user_register(struct tracepoint_user *tuser)
{
	struct tracepoint *tpoint = tuser->tpoint;

	if (!tpoint)
		return 0;

	return tracepoint_probe_register_prio_may_exist(tpoint,
					tpoint->probestub, NULL, 0);
}

static void tracepoint_user_unregister(struct tracepoint_user *tuser)
{
	if (!tuser->tpoint)
		return;

	WARN_ON_ONCE(tracepoint_probe_unregister(tuser->tpoint, tuser->tpoint->probestub, NULL));
	tuser->tpoint = NULL;
}

static unsigned long tracepoint_user_ip(struct tracepoint_user *tuser)
{
	if (!tuser->tpoint)
		return 0UL;

	return (unsigned long)tuser->tpoint->probestub;
}

static void __tracepoint_user_free(struct tracepoint_user *tuser)
{
	if (!tuser)
		return;
	kfree(tuser->name);
	kfree(tuser);
}

DEFINE_FREE(tuser_free, struct tracepoint_user *, __tracepoint_user_free(_T))

static struct tracepoint_user *__tracepoint_user_init(const char *name, struct tracepoint *tpoint)
{
	struct tracepoint_user *tuser __free(tuser_free) = NULL;
	int ret;

	tuser = kzalloc(sizeof(*tuser), GFP_KERNEL);
	if (!tuser)
		return NULL;
	tuser->name = kstrdup(name, GFP_KERNEL);
	if (!tuser->name)
		return NULL;

	/* Register the tracepoint if it is already loaded. @tpoint must be set first. */
	if (tpoint) {
		tuser->tpoint = tpoint;
		ret = tracepoint_user_register(tuser);
		if (ret)
			return ERR_PTR(ret);
	}

	tuser->refcount = 1;
	INIT_LIST_HEAD(&tuser->list);
	list_add(&tuser->list, &tracepoint_user_list);

	return_ptr(tuser);
}

static struct tracepoint *find_tracepoint(const char *tp_name,
					  struct module **tp_mod);

/*
 * Get the tracepoint_user if it exists, or allocate and register a new one.
 * If the tracepoint is in a module, take a reference on that module too.
 * Returns an ERR_PTR on error, NULL on allocation failure, or a
 * tracepoint_user (whose @tpoint may still be NULL if the tracepoint is
 * not loaded yet).
 */
static struct tracepoint_user *tracepoint_user_find_get(const char *name, struct module **pmod)
{
	struct module *mod __free(module_put) = NULL;
	struct tracepoint_user *tuser;
	struct tracepoint *tpoint;

	if (!name || !pmod)
		return ERR_PTR(-EINVAL);

	/* Get and lock the module which has the tracepoint. */
	tpoint = find_tracepoint(name, &mod);

	guard(mutex)(&tracepoint_user_mutex);
	/* Search for an existing tracepoint_user. */
	for_each_tracepoint_user(tuser) {
		if (!strcmp(tuser->name, name)) {
			tuser->refcount++;
			*pmod = no_free_ptr(mod);
			return tuser;
		}
	}

	/* The corresponding tracepoint_user was not found. */
	tuser = __tracepoint_user_init(name, tpoint);
	if (!IS_ERR_OR_NULL(tuser))
		*pmod = no_free_ptr(mod);

	return tuser;
}

static void tracepoint_user_put(struct tracepoint_user *tuser)
{
	scoped_guard(mutex, &tracepoint_user_mutex) {
		if (--tuser->refcount > 0)
			return;

		list_del(&tuser->list);
		tracepoint_user_unregister(tuser);
	}

	__tracepoint_user_free(tuser);
}

DEFINE_FREE(tuser_put, struct tracepoint_user *,
	if (!IS_ERR_OR_NULL(_T))
		tracepoint_user_put(_T))
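
/*
 * Typical tracepoint_user usage, summarizing the helpers above (an
 * illustrative sketch, not code called from this file; "sched_switch" is
 * just an example name):
 *
 *	struct module *mod = NULL;
 *	struct tracepoint_user *tuser;
 *
 *	tuser = tracepoint_user_find_get("sched_switch", &mod);
 *	if (IS_ERR(tuser))
 *		return PTR_ERR(tuser);	<- registration error
 *	if (!tuser)
 *		return -ENOMEM;		<- allocation failure
 *	...				<- use e.g. tracepoint_user_ip(tuser)
 *	tracepoint_user_put(tuser);
 *	if (mod)
 *		module_put(mod);	<- drop the module reference
 */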

/*
 * Fprobe event core functions
 */

/*
 * @tprobe is true for a tracepoint probe.
 * @tuser can be NULL if the trace_fprobe is disabled or the tracepoint is
 * not loaded with its module. If @tuser != NULL, this trace_fprobe is enabled.
 */
struct trace_fprobe {
	struct dyn_event	devent;
	struct fprobe		fp;
	const char		*symbol;
	bool			tprobe;
	struct tracepoint_user	*tuser;
	struct trace_probe	tp;
};
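
/*
 * State summary (informally derived from the comment above and the
 * registration code below):
 *
 *	fprobe event ('f'):  tprobe == false; tuser stays NULL.
 *	tprobe event ('t'):  tprobe == true.
 *	    disabled:        tuser == NULL.
 *	    enabled:         tuser != NULL, where tuser->tpoint may still be
 *	                     NULL until the module with the tracepoint loads.
 */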

static bool is_trace_fprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_fprobe_ops;
}

static struct trace_fprobe *to_trace_fprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_fprobe, devent);
}

/**
 * for_each_trace_fprobe - iterate over the trace_fprobe list
 * @pos:	the struct trace_fprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_fprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_fprobe(dpos) && (pos = to_trace_fprobe(dpos)))

static bool trace_fprobe_is_return(struct trace_fprobe *tf)
{
	return tf->fp.exit_handler != NULL;
}

static bool trace_fprobe_is_tracepoint(struct trace_fprobe *tf)
{
	return tf->tprobe;
}

static const char *trace_fprobe_symbol(struct trace_fprobe *tf)
{
	return tf->symbol ? tf->symbol : "unknown";
}

static bool trace_fprobe_is_busy(struct dyn_event *ev)
{
	struct trace_fprobe *tf = to_trace_fprobe(ev);

	return trace_probe_is_enabled(&tf->tp);
}

static bool trace_fprobe_match_command_head(struct trace_fprobe *tf,
					    int argc, const char **argv)
{
	char buf[MAX_ARGSTR_LEN + 1];

	if (!argc)
		return true;

	snprintf(buf, sizeof(buf), "%s", trace_fprobe_symbol(tf));
	if (strcmp(buf, argv[0]))
		return false;
	argc--; argv++;

	return trace_probe_match_command_args(&tf->tp, argc, argv);
}

static bool trace_fprobe_match(const char *system, const char *event,
			       int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_fprobe *tf = to_trace_fprobe(ev);

	if (event[0] != '\0' && strcmp(trace_probe_name(&tf->tp), event))
		return false;

	if (system && strcmp(trace_probe_group_name(&tf->tp), system))
		return false;

	return trace_fprobe_match_command_head(tf, argc, argv);
}

static bool trace_fprobe_is_registered(struct trace_fprobe *tf)
{
	return fprobe_is_registered(&tf->fp);
}

/*
 * Note that we don't verify the fetch_insn code, since it does not come
 * from user space.
 */
static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
		   void *dest, void *base)
{
	struct ftrace_regs *fregs = rec;
	unsigned long val;
	int ret;

retry:
	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_STACK:
		val = ftrace_regs_get_kernel_stack_nth(fregs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = ftrace_regs_get_stack_pointer(fregs);
		break;
	case FETCH_OP_RETVAL:
		val = ftrace_regs_get_return_value(fregs);
		break;
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
	case FETCH_OP_ARG:
		val = ftrace_regs_get_argument(fregs, code->param);
		break;
	case FETCH_OP_EDATA:
		val = *(unsigned long *)((unsigned long)edata + code->offset);
		break;
#endif
	case FETCH_NOP_SYMBOL:	/* Ignore a placeholder */
		code++;
		goto retry;
	default:
		ret = process_common_fetch_insn(code, &val);
		if (ret < 0)
			return ret;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn);
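
/*
 * Illustrative fetch_insn sequence for an argument such as 'count=$arg2'
 * (a sketch only; the actual ops are generated by the trace_probe argument
 * parser in trace_probe.c):
 *
 *	FETCH_OP_ARG, param = 1   <- 1st stage above: val = second argument
 *	(store ops ...)           <- 2nd stage: process_fetch_insn_bottom()
 *	FETCH_OP_END                 writes val into the event buffer
 */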

/* function entry handler */
static nokprobe_inline void
__fentry_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		    struct ftrace_regs *fregs,
		    struct trace_event_file *trace_file)
{
	struct fentry_trace_entry_head *entry;
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	struct trace_event_buffer fbuffer;
	int dsize;

	if (WARN_ON_ONCE(call != trace_file->event_call))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	dsize = __get_data_size(&tf->tp, fregs, NULL);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry) + tf->tp.size + dsize);
	if (!entry)
		return;

	fbuffer.regs = ftrace_get_regs(fregs);
	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	entry->ip = entry_ip;
	store_trace_args(&entry[1], &tf->tp, fregs, NULL, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}

static void
fentry_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		  struct ftrace_regs *fregs)
{
	struct event_file_link *link;

	trace_probe_for_each_link_rcu(link, &tf->tp)
		__fentry_trace_func(tf, entry_ip, fregs, link->file);
}
NOKPROBE_SYMBOL(fentry_trace_func);

static nokprobe_inline
void store_fprobe_entry_data(void *edata, struct trace_probe *tp, struct ftrace_regs *fregs)
{
	struct probe_entry_arg *earg = tp->entry_arg;
	unsigned long val = 0;
	int i;

	if (!earg)
		return;

	for (i = 0; i < earg->size; i++) {
		struct fetch_insn *code = &earg->code[i];

		switch (code->op) {
		case FETCH_OP_ARG:
			val = ftrace_regs_get_argument(fregs, code->param);
			break;
		case FETCH_OP_ST_EDATA:
			*(unsigned long *)((unsigned long)edata + code->offset) = val;
			break;
		case FETCH_OP_END:
			goto end;
		default:
			break;
		}
	}
end:
	return;
}

/*
 * Function entry handler for fexit events: it only snapshots the entry
 * data (e.g. function arguments) that the exit handler will need.
 */
static int trace_fprobe_entry_handler(struct fprobe *fp, unsigned long entry_ip,
				      unsigned long ret_ip, struct ftrace_regs *fregs,
				      void *entry_data)
{
	struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);

	if (tf->tp.entry_arg)
		store_fprobe_entry_data(entry_data, &tf->tp, fregs);

	return 0;
}
NOKPROBE_SYMBOL(trace_fprobe_entry_handler);

static nokprobe_inline void
__fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		   unsigned long ret_ip, struct ftrace_regs *fregs,
		   void *entry_data, struct trace_event_file *trace_file)
{
	struct fexit_trace_entry_head *entry;
	struct trace_event_buffer fbuffer;
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	int dsize;

	if (WARN_ON_ONCE(call != trace_file->event_call))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	dsize = __get_data_size(&tf->tp, fregs, entry_data);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry) + tf->tp.size + dsize);
	if (!entry)
		return;

	fbuffer.regs = ftrace_get_regs(fregs);
	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	entry->func = entry_ip;
	entry->ret_ip = ret_ip;
	store_trace_args(&entry[1], &tf->tp, fregs, entry_data, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}

static void
fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		 unsigned long ret_ip, struct ftrace_regs *fregs, void *entry_data)
{
	struct event_file_link *link;

	trace_probe_for_each_link_rcu(link, &tf->tp)
		__fexit_trace_func(tf, entry_ip, ret_ip, fregs, entry_data, link->file);
}
NOKPROBE_SYMBOL(fexit_trace_func);
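
/*
 * Ring-buffer record layout shared by the trace functions above and the
 * perf functions below (an informal sketch of what store_trace_args()
 * fills in):
 *
 *	+-------------------------------+ <- entry
 *	| *_trace_entry_head            |    ip (fentry) or func/ret_ip (fexit)
 *	+-------------------------------+ <- &entry[1]
 *	| fixed-size argument area      |    tf->tp.size bytes
 *	+-------------------------------+
 *	| dynamic data (strings etc.)   |    dsize bytes
 *	+-------------------------------+
 */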

#ifdef CONFIG_PERF_EVENTS

static int fentry_perf_func(struct trace_fprobe *tf, unsigned long entry_ip,
			    struct ftrace_regs *fregs)
{
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	struct fentry_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	struct pt_regs *regs;
	int rctx;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return 0;

	dsize = __get_data_size(&tf->tp, fregs, NULL);
	__size = sizeof(*entry) + tf->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, &regs, &rctx);
	if (!entry)
		return 0;

	regs = ftrace_fill_perf_regs(fregs, regs);

	entry->ip = entry_ip;
	memset(&entry[1], 0, dsize);
	store_trace_args(&entry[1], &tf->tp, fregs, NULL, sizeof(*entry), dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
	return 0;
}
NOKPROBE_SYMBOL(fentry_perf_func);

static void
fexit_perf_func(struct trace_fprobe *tf, unsigned long entry_ip,
		unsigned long ret_ip, struct ftrace_regs *fregs,
		void *entry_data)
{
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	struct fexit_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	struct pt_regs *regs;
	int rctx;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tf->tp, fregs, entry_data);
	__size = sizeof(*entry) + tf->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, &regs, &rctx);
	if (!entry)
		return;

	regs = ftrace_fill_perf_regs(fregs, regs);

	entry->func = entry_ip;
	entry->ret_ip = ret_ip;
	store_trace_args(&entry[1], &tf->tp, fregs, entry_data, sizeof(*entry), dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
}
NOKPROBE_SYMBOL(fexit_perf_func);
#endif /* CONFIG_PERF_EVENTS */

static int fentry_dispatcher(struct fprobe *fp, unsigned long entry_ip,
			     unsigned long ret_ip, struct ftrace_regs *fregs,
			     void *entry_data)
{
	struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
	unsigned int flags = trace_probe_load_flag(&tf->tp);
	int ret = 0;

	if (flags & TP_FLAG_TRACE)
		fentry_trace_func(tf, entry_ip, fregs);

#ifdef CONFIG_PERF_EVENTS
	if (flags & TP_FLAG_PROFILE)
		ret = fentry_perf_func(tf, entry_ip, fregs);
#endif
	return ret;
}
NOKPROBE_SYMBOL(fentry_dispatcher);

static void fexit_dispatcher(struct fprobe *fp, unsigned long entry_ip,
			     unsigned long ret_ip, struct ftrace_regs *fregs,
			     void *entry_data)
{
	struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
	unsigned int flags = trace_probe_load_flag(&tf->tp);

	if (flags & TP_FLAG_TRACE)
		fexit_trace_func(tf, entry_ip, ret_ip, fregs, entry_data);
#ifdef CONFIG_PERF_EVENTS
	if (flags & TP_FLAG_PROFILE)
		fexit_perf_func(tf, entry_ip, ret_ip, fregs, entry_data);
#endif
}
NOKPROBE_SYMBOL(fexit_dispatcher);

static void free_trace_fprobe(struct trace_fprobe *tf)
{
	if (tf) {
		trace_probe_cleanup(&tf->tp);
		if (tf->tuser)
			tracepoint_user_put(tf->tuser);
		kfree(tf->symbol);
		kfree(tf);
	}
}

/* Since alloc_trace_fprobe() can return an ERR_PTR, check the pointer is not an error too. */
DEFINE_FREE(free_trace_fprobe, struct trace_fprobe *, if (!IS_ERR_OR_NULL(_T)) free_trace_fprobe(_T))
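
/*
 * The functions below rely on the scope-based cleanup helpers from
 * <linux/cleanup.h>. A minimal sketch of the pattern (illustrative only):
 *
 *	struct trace_fprobe *tf __free(free_trace_fprobe) = NULL;
 *
 *	tf = alloc_trace_fprobe(...);
 *	if (IS_ERR(tf))
 *		return PTR_ERR(tf);	<- cleanup is a no-op for ERR_PTRs
 *	...
 *	return_ptr(tf);			<- hand ownership to the caller and
 *					   disarm the automatic cleanup
 */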

/*
 * Allocate new trace_probe and initialize it (including fprobe).
 */
static struct trace_fprobe *alloc_trace_fprobe(const char *group,
					       const char *event,
					       const char *symbol,
					       int nargs, bool is_return,
					       bool is_tracepoint)
{
	struct trace_fprobe *tf __free(free_trace_fprobe) = NULL;
	int ret = -ENOMEM;

	tf = kzalloc(struct_size(tf, tp.args, nargs), GFP_KERNEL);
	if (!tf)
		return ERR_PTR(ret);

	tf->symbol = kstrdup(symbol, GFP_KERNEL);
	if (!tf->symbol)
		return ERR_PTR(-ENOMEM);

	if (is_return)
		tf->fp.exit_handler = fexit_dispatcher;
	else
		tf->fp.entry_handler = fentry_dispatcher;

	tf->tprobe = is_tracepoint;

	ret = trace_probe_init(&tf->tp, event, group, false, nargs);
	if (ret < 0)
		return ERR_PTR(ret);

	dyn_event_init(&tf->devent, &trace_fprobe_ops);
	return_ptr(tf);
}

static struct trace_fprobe *find_trace_fprobe(const char *event,
					      const char *group)
{
	struct dyn_event *pos;
	struct trace_fprobe *tf;

	for_each_trace_fprobe(tf, pos)
		if (strcmp(trace_probe_name(&tf->tp), event) == 0 &&
		    strcmp(trace_probe_group_name(&tf->tp), group) == 0)
			return tf;
	return NULL;
}

/* Event entry printers */
static enum print_line_t
print_fentry_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct fentry_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;

	field = (struct fentry_trace_entry_head *)iter->ent;
	tp = trace_probe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (WARN_ON_ONCE(!tp))
		goto out;

	trace_seq_printf(s, "%s: (", trace_probe_name(tp));

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	if (trace_probe_print_args(s, tp->args, tp->nr_args,
				   (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');
out:
	return trace_handle_return(s);
}

static enum print_line_t
print_fexit_event(struct trace_iterator *iter, int flags,
		  struct trace_event *event)
{
	struct fexit_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;

	field = (struct fexit_trace_entry_head *)iter->ent;
	tp = trace_probe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (WARN_ON_ONCE(!tp))
		goto out;

	trace_seq_printf(s, "%s: (", trace_probe_name(tp));

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_puts(s, " <- ");

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	if (trace_probe_print_args(s, tp->args, tp->nr_args,
				   (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');

out:
	return trace_handle_return(s);
}
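
/*
 * Example of what the printers above emit (a sketch; event names, symbols
 * and values are hypothetical):
 *
 *	myevent: (vfs_read+0x4/0x120) count=256
 *	myretevent: (ksys_read+0x65/0xe0 <- vfs_read) ret=16
 */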

static int fentry_event_define_fields(struct trace_event_call *event_call)
{
	int ret;
	struct fentry_trace_entry_head field;
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(event_call);
	if (WARN_ON_ONCE(!tp))
		return -ENOENT;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);

	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}

static int fexit_event_define_fields(struct trace_event_call *event_call)
{
	int ret;
	struct fexit_trace_entry_head field;
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(event_call);
	if (WARN_ON_ONCE(!tp))
		return -ENOENT;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);

	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}

static struct trace_event_functions fentry_funcs = {
	.trace = print_fentry_event
};

static struct trace_event_functions fexit_funcs = {
	.trace = print_fexit_event
};

static struct trace_event_fields fentry_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = fentry_event_define_fields },
	{}
};

static struct trace_event_fields fexit_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = fexit_event_define_fields },
	{}
};

static int fprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data);

static inline void init_trace_event_call(struct trace_fprobe *tf)
{
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);

	if (trace_fprobe_is_return(tf)) {
		call->event.funcs = &fexit_funcs;
		call->class->fields_array = fexit_fields_array;
	} else {
		call->event.funcs = &fentry_funcs;
		call->class->fields_array = fentry_fields_array;
	}

	call->flags = TRACE_EVENT_FL_FPROBE;
	call->class->reg = fprobe_register;
}

static int register_fprobe_event(struct trace_fprobe *tf)
{
	init_trace_event_call(tf);

	return trace_probe_register_event_call(&tf->tp);
}

static int unregister_fprobe_event(struct trace_fprobe *tf)
{
	return trace_probe_unregister_event_call(&tf->tp);
}

static int __register_tracepoint_fprobe(struct trace_fprobe *tf)
{
	struct tracepoint_user *tuser __free(tuser_put) = NULL;
	struct module *mod __free(module_put) = NULL;
	unsigned long ip;
	int ret;

	if (WARN_ON_ONCE(tf->tuser))
		return -EINVAL;

	/*
	 * If the tracepoint is in a module, hold a reference on that module
	 * until the end of this function.
	 */
	tuser = tracepoint_user_find_get(tf->symbol, &mod);
	if (IS_ERR(tuser))
		return PTR_ERR(tuser);
	if (!tuser)
		return -ENOMEM;

	/* Register the fprobe only if the tracepoint is already loaded. */
	if (tuser->tpoint) {
		ip = tracepoint_user_ip(tuser);
		if (WARN_ON_ONCE(!ip))
			return -ENOENT;

		ret = register_fprobe_ips(&tf->fp, &ip, 1);
		if (ret < 0)
			return ret;
	}

	tf->tuser = no_free_ptr(tuser);
	return 0;
}

/* Returns 0, or an error if the target function is not available. */
static int trace_fprobe_verify_target(struct trace_fprobe *tf)
{
	int ret;

	/* A tracepoint always has its stub function, so no need to verify it. */
	if (trace_fprobe_is_tracepoint(tf))
		return 0;

	/*
	 * Note: since we don't lock the module, even if this succeeds,
	 * register_fprobe() can still fail later.
	 */
	ret = fprobe_count_ips_from_filter(tf->symbol, NULL);
	return (ret < 0) ? ret : 0;
}

/* Internal register function - just handle fprobe and flags */
static int __register_trace_fprobe(struct trace_fprobe *tf)
{
	int i, ret;

	/* Should we need a new LOCKDOWN flag for fprobe? */
	ret = security_locked_down(LOCKDOWN_KPROBES);
	if (ret)
		return ret;

	if (trace_fprobe_is_registered(tf))
		return -EINVAL;

	for (i = 0; i < tf->tp.nr_args; i++) {
		ret = traceprobe_update_arg(&tf->tp.args[i]);
		if (ret)
			return ret;
	}

	tf->fp.flags &= ~FPROBE_FL_DISABLED;

	if (trace_fprobe_is_tracepoint(tf))
		return __register_tracepoint_fprobe(tf);

	/* TODO: handle filter, nofilter or symbol list */
	return register_fprobe(&tf->fp, tf->symbol, NULL);
}

/* Internal unregister function - just handle fprobe and flags */
static void __unregister_trace_fprobe(struct trace_fprobe *tf)
{
	if (trace_fprobe_is_registered(tf))
		unregister_fprobe(&tf->fp);
	if (tf->tuser) {
		tracepoint_user_put(tf->tuser);
		tf->tuser = NULL;
	}
}

/* TODO: make this a common trace_*probe function */
/* Unregister a trace_probe and probe_event */
static int unregister_trace_fprobe(struct trace_fprobe *tf)
{
	/* If other probes are on the event, just unregister the fprobe. */
	if (trace_probe_has_sibling(&tf->tp))
		goto unreg;

	/* An enabled event cannot be unregistered. */
	if (trace_probe_is_enabled(&tf->tp))
		return -EBUSY;

	/* If there's a reference to the dynamic event */
	if (trace_event_dyn_busy(trace_probe_event_call(&tf->tp)))
		return -EBUSY;

	/* Will fail if the probe is being used by ftrace or perf. */
	if (unregister_fprobe_event(tf))
		return -EBUSY;

unreg:
	__unregister_trace_fprobe(tf);
	dyn_event_remove(&tf->devent);
	trace_probe_unlink(&tf->tp);

	return 0;
}

static bool trace_fprobe_has_same_fprobe(struct trace_fprobe *orig,
					 struct trace_fprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	int i;

	list_for_each_entry(orig, &tpe->probes, tp.list) {
		if (strcmp(trace_fprobe_symbol(orig),
			   trace_fprobe_symbol(comp)))
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument name and type are the same. Let's compare comm.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}

static int append_trace_fprobe_event(struct trace_fprobe *tf, struct trace_fprobe *to)
{
	int ret;

	if (trace_fprobe_is_return(tf) != trace_fprobe_is_return(to) ||
	    trace_fprobe_is_tracepoint(tf) != trace_fprobe_is_tracepoint(to)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, DIFF_PROBE_TYPE);
		return -EEXIST;
	}
	ret = trace_probe_compare_arg_type(&tf->tp, &to->tp);
	if (ret) {
		/* Note that arguments start at index 2. */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_fprobe_has_same_fprobe(to, tf)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to the existing event */
	ret = trace_probe_append(&tf->tp, &to->tp);
	if (ret)
		return ret;

	ret = trace_fprobe_verify_target(tf);
	if (ret)
		trace_probe_unlink(&tf->tp);
	else
		dyn_event_add(&tf->devent, trace_probe_event_call(&tf->tp));

	return ret;
}
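
/*
 * For example, two probes can share one event if their probe types and
 * argument types match (a hypothetical tracefs dynamic_events session;
 * see Documentation/trace/fprobetrace.rst for the interface):
 *
 *	echo 'f:myevent vfs_read $arg1' >> dynamic_events
 *	echo 'f:myevent vfs_write $arg1' >> dynamic_events
 *
 * The second command takes the append path above; a mismatch in probe type
 * or argument types is rejected with -EEXIST.
 */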

/* Register a trace_probe and probe_event, and check that the fprobe is available. */
static int register_trace_fprobe_event(struct trace_fprobe *tf)
{
	struct trace_fprobe *old_tf;
	int ret;

	guard(mutex)(&event_mutex);

	old_tf = find_trace_fprobe(trace_probe_name(&tf->tp),
				   trace_probe_group_name(&tf->tp));
	if (old_tf)
		return append_trace_fprobe_event(tf, old_tf);

	/* Register a new event */
	ret = register_fprobe_event(tf);
	if (ret) {
		if (ret == -EEXIST) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, EVENT_EXIST);
		} else
			pr_warn("Failed to register probe event(%d)\n", ret);
		return ret;
	}

	/* Verify the fprobe is sane. */
	ret = trace_fprobe_verify_target(tf);
	if (ret < 0)
		unregister_fprobe_event(tf);
	else
		dyn_event_add(&tf->devent, trace_probe_event_call(&tf->tp));

	return ret;
}

struct __find_tracepoint_cb_data {
	const char *tp_name;
	struct tracepoint *tpoint;
	struct module *mod;
};

static void __find_tracepoint_module_cb(struct tracepoint *tp, struct module *mod, void *priv)
{
	struct __find_tracepoint_cb_data *data = priv;

	if (!data->tpoint && !strcmp(data->tp_name, tp->name)) {
		/* If the module is not specified, try to take a module refcount. */
		if (!data->mod && mod) {
			/* If taking the refcount fails, ignore this tracepoint. */
			if (!try_module_get(mod))
				return;

			data->mod = mod;
		}
		data->tpoint = tp;
	}
}

static void __find_tracepoint_cb(struct tracepoint *tp, void *priv)
{
	struct __find_tracepoint_cb_data *data = priv;

	if (!data->tpoint && !strcmp(data->tp_name, tp->name))
		data->tpoint = tp;
}

/*
 * Find a tracepoint in the kernel and in modules. If the tracepoint is in a
 * module, the module's refcount is incremented and the module is returned as
 * *@tp_mod. Thus, if *@tp_mod is not NULL, the caller must call
 * module_put(*tp_mod) after using the tracepoint.
 */
static struct tracepoint *find_tracepoint(const char *tp_name,
					  struct module **tp_mod)
{
	struct __find_tracepoint_cb_data data = {
		.tp_name = tp_name,
		.mod = NULL,
	};

	for_each_kernel_tracepoint(__find_tracepoint_cb, &data);

	if (!data.tpoint && IS_ENABLED(CONFIG_MODULES)) {
		for_each_module_tracepoint(__find_tracepoint_module_cb, &data);
		*tp_mod = data.mod;
	}

	return data.tpoint;
}

#ifdef CONFIG_MODULES
/*
 * Find a tracepoint in the specified module. In this case, this does not take
 * the module's refcount. The caller must ensure the module is not freed.
 */
static struct tracepoint *find_tracepoint_in_module(struct module *mod,
						    const char *tp_name)
{
	struct __find_tracepoint_cb_data data = {
		.tp_name = tp_name,
		.mod = mod,
	};

	for_each_tracepoint_in_module(mod, __find_tracepoint_module_cb, &data);
	return data.tpoint;
}

/* These are CONFIG_MODULES=y specific functions. */
static bool tracepoint_user_within_module(struct tracepoint_user *tuser,
					  struct module *mod)
{
	return within_module(tracepoint_user_ip(tuser), mod);
}

static int tracepoint_user_register_again(struct tracepoint_user *tuser,
					  struct tracepoint *tpoint)
{
	tuser->tpoint = tpoint;
	return tracepoint_user_register(tuser);
}

static void tracepoint_user_unregister_clear(struct tracepoint_user *tuser)
{
	tracepoint_user_unregister(tuser);
	tuser->tpoint = NULL;
}

/* module callback for tracepoint_user */
static int __tracepoint_probe_module_cb(struct notifier_block *self,
					unsigned long val, void *data)
{
	struct tp_module *tp_mod = data;
	struct tracepoint_user *tuser;
	struct tracepoint *tpoint;

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	mutex_lock(&tracepoint_user_mutex);
	for_each_tracepoint_user(tuser) {
		if (val == MODULE_STATE_COMING) {
			/* If this is not a tracepoint in this module, skip it. */
			tpoint = find_tracepoint_in_module(tp_mod->mod, tuser->name);
			if (!tpoint)
				continue;
			WARN_ON_ONCE(tracepoint_user_register_again(tuser, tpoint));
		} else if (val == MODULE_STATE_GOING &&
			   tracepoint_user_within_module(tuser, tp_mod->mod)) {
			/* Unregister all tracepoint_user in this module. */
			tracepoint_user_unregister_clear(tuser);
		}
	}
	mutex_unlock(&tracepoint_user_mutex);

	return NOTIFY_DONE;
}

static struct notifier_block tracepoint_module_nb = {
	.notifier_call = __tracepoint_probe_module_cb,
};

/* module callback for tprobe events */
static int __tprobe_event_module_cb(struct notifier_block *self,
				    unsigned long val, void *data)
{
	struct trace_fprobe *tf;
	struct dyn_event *pos;
	struct module *mod = data;

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	mutex_lock(&event_mutex);
	for_each_trace_fprobe(tf, pos) {
		/* Skip fprobe events and disabled tprobe events. */
		if (!trace_fprobe_is_tracepoint(tf) || !tf->tuser)
			continue;

		/* By the time of this notification, the tracepoint notifier has already run. */
		if (val == MODULE_STATE_COMING &&
		    tracepoint_user_within_module(tf->tuser, mod)) {
			unsigned long ip = tracepoint_user_ip(tf->tuser);

			WARN_ON_ONCE(register_fprobe_ips(&tf->fp, &ip, 1));
		} else if (val == MODULE_STATE_GOING &&
			   /*
			    * tracepoint_user_within_module() does not work here
			    * because the tracepoint_user is already unregistered
			    * and its tpoint has been cleared. Instead, check
			    * whether the fprobe is registered while the tpoint
			    * is cleared (unregistered). Such unbalanced probes
			    * must be adjusted anyway.
			    */
			   trace_fprobe_is_registered(tf) &&
			   !tf->tuser->tpoint) {
			unregister_fprobe(&tf->fp);
		}
	}
	mutex_unlock(&event_mutex);

	return NOTIFY_DONE;
}
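
/*
 * Resulting notifier ordering on a module event (an informal summary of
 * the two callbacks above):
 *
 *	MODULE_STATE_COMING:
 *	  1. __tracepoint_probe_module_cb() re-registers matching
 *	     tracepoint_user(s), setting their tpoint.
 *	  2. __tprobe_event_module_cb() re-attaches the fprobe on the new
 *	     tracepoint address.
 *	MODULE_STATE_GOING:
 *	  1. __tracepoint_probe_module_cb() unregisters tracepoint_user(s)
 *	     and clears their tpoint.
 *	  2. __tprobe_event_module_cb() unregisters fprobes left with a
 *	     cleared tpoint.
 */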

/* NOTE: this must be registered after the tracepoint callback. */
static struct notifier_block tprobe_event_module_nb = {
	.notifier_call = __tprobe_event_module_cb,
	/* Make sure this runs later than the tracepoint module notifier. */
	.priority = -10,
};
#endif /* CONFIG_MODULES */

static int parse_symbol_and_return(int argc, const char *argv[],
				   char **symbol, bool *is_return,
				   bool is_tracepoint)
{
	char *tmp = strchr(argv[1], '%');
	int i;

	if (tmp) {
		int len = tmp - argv[1];

		if (!is_tracepoint && !strcmp(tmp, "%return")) {
			*is_return = true;
		} else {
			trace_probe_log_err(len, BAD_ADDR_SUFFIX);
			return -EINVAL;
		}
		*symbol = kmemdup_nul(argv[1], len, GFP_KERNEL);
	} else
		*symbol = kstrdup(argv[1], GFP_KERNEL);
	if (!*symbol)
		return -ENOMEM;

	if (*is_return)
		return 0;

	if (is_tracepoint) {
		tmp = *symbol;
		while (*tmp && (isalnum(*tmp) || *tmp == '_'))
			tmp++;
		if (*tmp) {
			/* Found an invalid character. */
			trace_probe_log_err(tmp - *symbol, BAD_TP_NAME);
			kfree(*symbol);
			*symbol = NULL;
			return -EINVAL;
		}
	}

	/* If there is a $retval, this should be a return fprobe. */
	for (i = 2; i < argc; i++) {
		tmp = strstr(argv[i], "$retval");
		if (tmp && !isalnum(tmp[7]) && tmp[7] != '_') {
			if (is_tracepoint) {
				trace_probe_log_set_index(i);
				trace_probe_log_err(tmp - argv[i], RETVAL_ON_PROBE);
				kfree(*symbol);
				*symbol = NULL;
				return -EINVAL;
			}
			*is_return = true;
			break;
		}
	}
	return 0;
}

static int trace_fprobe_create_internal(int argc, const char *argv[],
					struct traceprobe_parse_context *ctx)
{
	/*
	 * Argument syntax:
	 *  - Add fentry probe:
	 *      f[:[GRP/][EVENT]] [MOD:]KSYM [FETCHARGS]
	 *  - Add fexit probe:
	 *      f[N][:[GRP/][EVENT]] [MOD:]KSYM%return [FETCHARGS]
	 *  - Add tracepoint probe:
	 *      t[:[GRP/][EVENT]] TRACEPOINT [FETCHARGS]
	 *
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth entry of stack (N:0-)
	 *  $argN	: fetch Nth argument (N:1-)
	 *  $comm	: fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
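	 *
	 * For example, the following (hypothetical) commands written to the
	 * tracefs dynamic_events file create an fentry, an fexit, and a
	 * tracepoint probe event, respectively:
	 *
	 *   echo 'f:myentry vfs_read count=$arg2' >> dynamic_events
	 *   echo 'f:myexit vfs_read%return ret=$retval' >> dynamic_events
	 *   echo 't:mytp sched_switch' >> dynamic_events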
	 */
	struct trace_fprobe *tf __free(free_trace_fprobe) = NULL;
	const char *event = NULL, *group = FPROBE_EVENT_SYSTEM;
	struct module *mod __free(module_put) = NULL;
	const char **new_argv __free(kfree) = NULL;
	char *symbol __free(kfree) = NULL;
	char *ebuf __free(kfree) = NULL;
	char *gbuf __free(kfree) = NULL;
	char *sbuf __free(kfree) = NULL;
	char *abuf __free(kfree) = NULL;
	char *dbuf __free(kfree) = NULL;
	int i, new_argc = 0, ret = 0;
	bool is_tracepoint = false;
	bool is_return = false;

	if ((argv[0][0] != 'f' && argv[0][0] != 't') || argc < 2)
		return -ECANCELED;

	if (argv[0][0] == 't') {
		is_tracepoint = true;
		group = TRACEPOINT_EVENT_SYSTEM;
	}

	if (argv[0][1] != '\0') {
		if (argv[0][1] != ':') {
			trace_probe_log_set_index(0);
			trace_probe_log_err(1, BAD_MAXACT);
			return -EINVAL;
		}
		event = &argv[0][2];
	}

	trace_probe_log_set_index(1);

	/* a symbol (or tracepoint) must be specified */
	ret = parse_symbol_and_return(argc, argv, &symbol, &is_return, is_tracepoint);
	if (ret < 0)
		return -EINVAL;

	trace_probe_log_set_index(0);
	if (event) {
		gbuf = kmalloc(MAX_EVENT_NAME_LEN, GFP_KERNEL);
		if (!gbuf)
			return -ENOMEM;
		ret = traceprobe_parse_event_name(&event, &group, gbuf,
						  event - argv[0]);
		if (ret)
			return -EINVAL;
	}

	if (!event) {
		ebuf = kmalloc(MAX_EVENT_NAME_LEN, GFP_KERNEL);
		if (!ebuf)
			return -ENOMEM;
		/* Make a new event name */
		if (is_tracepoint)
			snprintf(ebuf, MAX_EVENT_NAME_LEN, "%s%s",
				 isdigit(*symbol) ? "_" : "", symbol);
		else
			snprintf(ebuf, MAX_EVENT_NAME_LEN, "%s__%s", symbol,
				 is_return ? "exit" : "entry");
		sanitize_event_name(ebuf);
		event = ebuf;
	}

	if (is_return)
		ctx->flags |= TPARG_FL_RETURN;
	else
		ctx->flags |= TPARG_FL_FENTRY;

	ctx->funcname = NULL;
	if (is_tracepoint) {
		/* Get the tracepoint and lock its module until the end of the registration. */
		struct tracepoint *tpoint;

		ctx->flags |= TPARG_FL_TPOINT;
		mod = NULL;
		tpoint = find_tracepoint(symbol, &mod);
		if (tpoint) {
			sbuf = kmalloc(KSYM_NAME_LEN, GFP_KERNEL);
			if (!sbuf)
				return -ENOMEM;
			ctx->funcname = kallsyms_lookup((unsigned long)tpoint->probestub,
							NULL, NULL, NULL, sbuf);
		}
	}
	if (!ctx->funcname)
		ctx->funcname = symbol;

	abuf = kmalloc(MAX_BTF_ARGS_LEN, GFP_KERNEL);
	if (!abuf)
		return -ENOMEM;
	argc -= 2; argv += 2;
	new_argv = traceprobe_expand_meta_args(argc, argv, &new_argc,
					       abuf, MAX_BTF_ARGS_LEN, ctx);
	if (IS_ERR(new_argv))
		return PTR_ERR(new_argv);
	if (new_argv) {
		argc = new_argc;
		argv = new_argv;
	}
	if (argc > MAX_TRACE_ARGS) {
		trace_probe_log_set_index(2);
		trace_probe_log_err(0, TOO_MANY_ARGS);
		return -E2BIG;
	}

	ret = traceprobe_expand_dentry_args(argc, argv, &dbuf);
	if (ret)
		return ret;

	/* Set up a probe */
	tf = alloc_trace_fprobe(group, event, symbol, argc, is_return, is_tracepoint);
	if (IS_ERR(tf)) {
		ret = PTR_ERR(tf);
		/* This must return -ENOMEM, else there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		return ret;
	}

	/* Parse arguments */
	for (i = 0; i < argc; i++) {
		trace_probe_log_set_index(i + 2);
		ctx->offset = 0;
		ret = traceprobe_parse_probe_arg(&tf->tp, i, argv[i], ctx);
		if (ret)
			return ret;	/* This can be -ENOMEM */
	}

	if (is_return && tf->tp.entry_arg) {
		tf->fp.entry_handler = trace_fprobe_entry_handler;
		tf->fp.entry_data_size = traceprobe_get_entry_data_size(&tf->tp);
		if (ALIGN(tf->fp.entry_data_size, sizeof(long)) > MAX_FPROBE_DATA_SIZE) {
			trace_probe_log_set_index(2);
			trace_probe_log_err(0, TOO_MANY_EARGS);
			return -E2BIG;
		}
	}

	ret = traceprobe_set_print_fmt(&tf->tp,
				       is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL);
	if (ret < 0)
		return ret;

	ret = register_trace_fprobe_event(tf);
	if (ret) {
		trace_probe_log_set_index(1);
		if (ret == -EILSEQ)
			trace_probe_log_err(0, BAD_INSN_BNDRY);
		else if (ret == -ENOENT)
			trace_probe_log_err(0, BAD_PROBE_ADDR);
		else if (ret != -ENOMEM && ret != -EEXIST)
			trace_probe_log_err(0, FAIL_REG_PROBE);
		return -EINVAL;
	}

	/* 'tf' was successfully registered; set it to NULL to avoid the automatic free. */
	tf = NULL;

	return 0;
}

static int trace_fprobe_create_cb(int argc, const char *argv[])
{
	struct traceprobe_parse_context *ctx __free(traceprobe_parse_context) = NULL;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->flags = TPARG_FL_KERNEL | TPARG_FL_FPROBE;

	trace_probe_log_init("trace_fprobe", argc, argv);
	ret = trace_fprobe_create_internal(argc, argv, ctx);
	trace_probe_log_clear();
	return ret;
}

static int trace_fprobe_create(const char *raw_command)
{
	return trace_probe_create(raw_command, trace_fprobe_create_cb);
}

static int trace_fprobe_release(struct dyn_event *ev)
{
	struct trace_fprobe *tf = to_trace_fprobe(ev);
	int ret = unregister_trace_fprobe(tf);

	if (!ret)
		free_trace_fprobe(tf);
	return ret;
}

static int trace_fprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_fprobe *tf = to_trace_fprobe(ev);
	int i;

	if (trace_fprobe_is_tracepoint(tf))
		seq_putc(m, 't');
	else
		seq_putc(m, 'f');
	seq_printf(m, ":%s/%s", trace_probe_group_name(&tf->tp),
		   trace_probe_name(&tf->tp));

	seq_printf(m, " %s%s", trace_fprobe_symbol(tf),
		   trace_fprobe_is_return(tf) ? "%return" : "");

	for (i = 0; i < tf->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tf->tp.args[i].name, tf->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}

/*
 * Enable a trace_probe.
 * If @file is NULL, enable the "perf" handler; otherwise enable the "trace"
 * handler.
 */
static int enable_trace_fprobe(struct trace_event_call *call,
			       struct trace_event_file *file)
{
	struct trace_probe *tp;
	struct trace_fprobe *tf;
	bool enabled;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This also changes the "enabled" state. */
	if (file) {
		ret = trace_probe_add_file(tp, file);
		if (ret)
			return ret;
	} else
		trace_probe_set_flag(tp, TP_FLAG_PROFILE);

	if (!enabled) {
		list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
			ret = __register_trace_fprobe(tf);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}

/*
 * Disable a trace_probe.
 * If @file is NULL, disable the "perf" handler; otherwise disable the "trace"
 * handler.
 */
static int disable_trace_fprobe(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_fprobe *tf;
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	if (file) {
		if (!trace_probe_get_file_link(tp, file))
			return -ENOENT;
		if (!trace_probe_has_single_file(tp))
			goto out;
		trace_probe_clear_flag(tp, TP_FLAG_TRACE);
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	if (!trace_probe_is_enabled(tp)) {
		list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
			unregister_fprobe(&tf->fp);
		}
	}

out:
	if (file)
		/*
		 * Synchronization is done in the function below. For the perf
		 * event case, @file == NULL and perf_trace_event_unreg() calls
		 * tracepoint_synchronize_unregister() to synchronize the
		 * event, so we don't need to care about it here.
		 */
		trace_probe_remove_file(tp, file);

	return 0;
}

/*
 * Called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 */
static int fprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_fprobe(event, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_fprobe(event, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_fprobe(event, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_fprobe(event, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

/*
 * Register the dynevent at core_initcall. This allows the kernel to set up
 * fprobe events in postcore_initcall, without tracefs.
 */
static __init int init_fprobe_trace_early(void)
{
	int ret;

	ret = dyn_event_register(&trace_fprobe_ops);
	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = register_tracepoint_module_notifier(&tracepoint_module_nb);
	if (ret)
		return ret;
	ret = register_module_notifier(&tprobe_event_module_nb);
	if (ret)
		return ret;
#endif

	return 0;
}
core_initcall(init_fprobe_trace_early);