1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * event tracer 4 * 5 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> 6 * 7 * - Added format output of fields of the trace point. 8 * This was based off of work by Tom Zanussi <tzanussi@gmail.com>. 9 * 10 */ 11 12 #define pr_fmt(fmt) fmt 13 14 #include <linux/workqueue.h> 15 #include <linux/security.h> 16 #include <linux/spinlock.h> 17 #include <linux/kthread.h> 18 #include <linux/tracefs.h> 19 #include <linux/uaccess.h> 20 #include <linux/module.h> 21 #include <linux/ctype.h> 22 #include <linux/sort.h> 23 #include <linux/slab.h> 24 #include <linux/delay.h> 25 26 #include <trace/events/sched.h> 27 #include <trace/syscall.h> 28 29 #include <asm/setup.h> 30 31 #include "trace_output.h" 32 33 #undef TRACE_SYSTEM 34 #define TRACE_SYSTEM "TRACE_SYSTEM" 35 36 DEFINE_MUTEX(event_mutex); 37 38 LIST_HEAD(ftrace_events); 39 static LIST_HEAD(ftrace_generic_fields); 40 static LIST_HEAD(ftrace_common_fields); 41 static bool eventdir_initialized; 42 43 static LIST_HEAD(module_strings); 44 45 struct module_string { 46 struct list_head next; 47 struct module *module; 48 char *str; 49 }; 50 51 #define GFP_TRACE (GFP_KERNEL | __GFP_ZERO) 52 53 static struct kmem_cache *field_cachep; 54 static struct kmem_cache *file_cachep; 55 56 static inline int system_refcount(struct event_subsystem *system) 57 { 58 return system->ref_count; 59 } 60 61 static int system_refcount_inc(struct event_subsystem *system) 62 { 63 return system->ref_count++; 64 } 65 66 static int system_refcount_dec(struct event_subsystem *system) 67 { 68 return --system->ref_count; 69 } 70 71 /* Double loops, do not use break, only goto's work */ 72 #define do_for_each_event_file(tr, file) \ 73 list_for_each_entry(tr, &ftrace_trace_arrays, list) { \ 74 list_for_each_entry(file, &tr->events, list) 75 76 #define do_for_each_event_file_safe(tr, file) \ 77 list_for_each_entry(tr, &ftrace_trace_arrays, list) { \ 78 struct trace_event_file *___n; \ 79 list_for_each_entry_safe(file, ___n, &tr->events, list) 80 81 #define while_for_each_event_file() \ 82 } 83 84 static struct ftrace_event_field * 85 __find_event_field(struct list_head *head, char *name) 86 { 87 struct ftrace_event_field *field; 88 89 list_for_each_entry(field, head, link) { 90 if (!strcmp(field->name, name)) 91 return field; 92 } 93 94 return NULL; 95 } 96 97 struct ftrace_event_field * 98 trace_find_event_field(struct trace_event_call *call, char *name) 99 { 100 struct ftrace_event_field *field; 101 struct list_head *head; 102 103 head = trace_get_fields(call); 104 field = __find_event_field(head, name); 105 if (field) 106 return field; 107 108 field = __find_event_field(&ftrace_generic_fields, name); 109 if (field) 110 return field; 111 112 return __find_event_field(&ftrace_common_fields, name); 113 } 114 115 static int __trace_define_field(struct list_head *head, const char *type, 116 const char *name, int offset, int size, 117 int is_signed, int filter_type, int len) 118 { 119 struct ftrace_event_field *field; 120 121 field = kmem_cache_alloc(field_cachep, GFP_TRACE); 122 if (!field) 123 return -ENOMEM; 124 125 field->name = name; 126 field->type = type; 127 128 if (filter_type == FILTER_OTHER) 129 field->filter_type = filter_assign_type(type); 130 else 131 field->filter_type = filter_type; 132 133 field->offset = offset; 134 field->size = size; 135 field->is_signed = is_signed; 136 field->len = len; 137 138 list_add(&field->link, head); 139 140 return 0; 141 } 142 143 int trace_define_field(struct trace_event_call *call, const char *type, 144 const char *name, int offset, int size, int is_signed, 145 int filter_type) 146 { 147 struct list_head *head; 148 149 if (WARN_ON(!call->class)) 150 return 0; 151 152 head = trace_get_fields(call); 153 return __trace_define_field(head, type, name, offset, size, 154 is_signed, filter_type, 0); 155 } 156 EXPORT_SYMBOL_GPL(trace_define_field); 157 158 static int trace_define_field_ext(struct trace_event_call *call, const char *type, 159 const char *name, int offset, int size, int is_signed, 160 int filter_type, int len) 161 { 162 struct list_head *head; 163 164 if (WARN_ON(!call->class)) 165 return 0; 166 167 head = trace_get_fields(call); 168 return __trace_define_field(head, type, name, offset, size, 169 is_signed, filter_type, len); 170 } 171 172 #define __generic_field(type, item, filter_type) \ 173 ret = __trace_define_field(&ftrace_generic_fields, #type, \ 174 #item, 0, 0, is_signed_type(type), \ 175 filter_type, 0); \ 176 if (ret) \ 177 return ret; 178 179 #define __common_field(type, item) \ 180 ret = __trace_define_field(&ftrace_common_fields, #type, \ 181 "common_" #item, \ 182 offsetof(typeof(ent), item), \ 183 sizeof(ent.item), \ 184 is_signed_type(type), FILTER_OTHER, 0); \ 185 if (ret) \ 186 return ret; 187 188 static int trace_define_generic_fields(void) 189 { 190 int ret; 191 192 __generic_field(int, CPU, FILTER_CPU); 193 __generic_field(int, cpu, FILTER_CPU); 194 __generic_field(int, common_cpu, FILTER_CPU); 195 __generic_field(char *, COMM, FILTER_COMM); 196 __generic_field(char *, comm, FILTER_COMM); 197 __generic_field(char *, stacktrace, FILTER_STACKTRACE); 198 __generic_field(char *, STACKTRACE, FILTER_STACKTRACE); 199 200 return ret; 201 } 202 203 static int trace_define_common_fields(void) 204 { 205 int ret; 206 struct trace_entry ent; 207 208 __common_field(unsigned short, type); 209 __common_field(unsigned char, flags); 210 /* Holds both preempt_count and migrate_disable */ 211 __common_field(unsigned char, preempt_count); 212 __common_field(int, pid); 213 214 return ret; 215 } 216 217 static void trace_destroy_fields(struct trace_event_call *call) 218 { 219 struct ftrace_event_field *field, *next; 220 struct list_head *head; 221 222 head = trace_get_fields(call); 223 list_for_each_entry_safe(field, next, head, link) { 224 list_del(&field->link); 225 kmem_cache_free(field_cachep, field); 226 } 227 } 228 229 /* 230 * run-time version of trace_event_get_offsets_<call>() that returns the last 231 * accessible offset of trace fields excluding __dynamic_array bytes 232 */ 233 int trace_event_get_offsets(struct trace_event_call *call) 234 { 235 struct ftrace_event_field *tail; 236 struct list_head *head; 237 238 head = trace_get_fields(call); 239 /* 240 * head->next points to the last field with the largest offset, 241 * since it was added last by trace_define_field() 242 */ 243 tail = list_first_entry(head, struct ftrace_event_field, link); 244 return tail->offset + tail->size; 245 } 246 247 /* 248 * Check if the referenced field is an array and return true, 249 * as arrays are OK to dereference. 250 */ 251 static bool test_field(const char *fmt, struct trace_event_call *call) 252 { 253 struct trace_event_fields *field = call->class->fields_array; 254 const char *array_descriptor; 255 const char *p = fmt; 256 int len; 257 258 if (!(len = str_has_prefix(fmt, "REC->"))) 259 return false; 260 fmt += len; 261 for (p = fmt; *p; p++) { 262 if (!isalnum(*p) && *p != '_') 263 break; 264 } 265 len = p - fmt; 266 267 for (; field->type; field++) { 268 if (strncmp(field->name, fmt, len) || 269 field->name[len]) 270 continue; 271 array_descriptor = strchr(field->type, '['); 272 /* This is an array and is OK to dereference. */ 273 return array_descriptor != NULL; 274 } 275 return false; 276 } 277 278 /* 279 * Examine the print fmt of the event looking for unsafe dereference 280 * pointers using %p* that could be recorded in the trace event and 281 * much later referenced after the pointer was freed. Dereferencing 282 * pointers are OK, if it is dereferenced into the event itself. 283 */ 284 static void test_event_printk(struct trace_event_call *call) 285 { 286 u64 dereference_flags = 0; 287 bool first = true; 288 const char *fmt, *c, *r, *a; 289 int parens = 0; 290 char in_quote = 0; 291 int start_arg = 0; 292 int arg = 0; 293 int i; 294 295 fmt = call->print_fmt; 296 297 if (!fmt) 298 return; 299 300 for (i = 0; fmt[i]; i++) { 301 switch (fmt[i]) { 302 case '\\': 303 i++; 304 if (!fmt[i]) 305 return; 306 continue; 307 case '"': 308 case '\'': 309 /* 310 * The print fmt starts with a string that 311 * is processed first to find %p* usage, 312 * then after the first string, the print fmt 313 * contains arguments that are used to check 314 * if the dereferenced %p* usage is safe. 315 */ 316 if (first) { 317 if (fmt[i] == '\'') 318 continue; 319 if (in_quote) { 320 arg = 0; 321 first = false; 322 /* 323 * If there was no %p* uses 324 * the fmt is OK. 325 */ 326 if (!dereference_flags) 327 return; 328 } 329 } 330 if (in_quote) { 331 if (in_quote == fmt[i]) 332 in_quote = 0; 333 } else { 334 in_quote = fmt[i]; 335 } 336 continue; 337 case '%': 338 if (!first || !in_quote) 339 continue; 340 i++; 341 if (!fmt[i]) 342 return; 343 switch (fmt[i]) { 344 case '%': 345 continue; 346 case 'p': 347 /* Find dereferencing fields */ 348 switch (fmt[i + 1]) { 349 case 'B': case 'R': case 'r': 350 case 'b': case 'M': case 'm': 351 case 'I': case 'i': case 'E': 352 case 'U': case 'V': case 'N': 353 case 'a': case 'd': case 'D': 354 case 'g': case 't': case 'C': 355 case 'O': case 'f': 356 if (WARN_ONCE(arg == 63, 357 "Too many args for event: %s", 358 trace_event_name(call))) 359 return; 360 dereference_flags |= 1ULL << arg; 361 } 362 break; 363 default: 364 { 365 bool star = false; 366 int j; 367 368 /* Increment arg if %*s exists. */ 369 for (j = 0; fmt[i + j]; j++) { 370 if (isdigit(fmt[i + j]) || 371 fmt[i + j] == '.') 372 continue; 373 if (fmt[i + j] == '*') { 374 star = true; 375 continue; 376 } 377 if ((fmt[i + j] == 's') && star) 378 arg++; 379 break; 380 } 381 break; 382 } /* default */ 383 384 } /* switch */ 385 arg++; 386 continue; 387 case '(': 388 if (in_quote) 389 continue; 390 parens++; 391 continue; 392 case ')': 393 if (in_quote) 394 continue; 395 parens--; 396 if (WARN_ONCE(parens < 0, 397 "Paren mismatch for event: %s\narg='%s'\n%*s", 398 trace_event_name(call), 399 fmt + start_arg, 400 (i - start_arg) + 5, "^")) 401 return; 402 continue; 403 case ',': 404 if (in_quote || parens) 405 continue; 406 i++; 407 while (isspace(fmt[i])) 408 i++; 409 start_arg = i; 410 if (!(dereference_flags & (1ULL << arg))) 411 goto next_arg; 412 413 /* Find the REC-> in the argument */ 414 c = strchr(fmt + i, ','); 415 r = strstr(fmt + i, "REC->"); 416 if (r && (!c || r < c)) { 417 /* 418 * Addresses of events on the buffer, 419 * or an array on the buffer is 420 * OK to dereference. 421 * There's ways to fool this, but 422 * this is to catch common mistakes, 423 * not malicious code. 424 */ 425 a = strchr(fmt + i, '&'); 426 if ((a && (a < r)) || test_field(r, call)) 427 dereference_flags &= ~(1ULL << arg); 428 } else if ((r = strstr(fmt + i, "__get_dynamic_array(")) && 429 (!c || r < c)) { 430 dereference_flags &= ~(1ULL << arg); 431 } else if ((r = strstr(fmt + i, "__get_sockaddr(")) && 432 (!c || r < c)) { 433 dereference_flags &= ~(1ULL << arg); 434 } 435 436 next_arg: 437 i--; 438 arg++; 439 } 440 } 441 442 /* 443 * If you triggered the below warning, the trace event reported 444 * uses an unsafe dereference pointer %p*. As the data stored 445 * at the trace event time may no longer exist when the trace 446 * event is printed, dereferencing to the original source is 447 * unsafe. The source of the dereference must be copied into the 448 * event itself, and the dereference must access the copy instead. 449 */ 450 if (WARN_ON_ONCE(dereference_flags)) { 451 arg = 1; 452 while (!(dereference_flags & 1)) { 453 dereference_flags >>= 1; 454 arg++; 455 } 456 pr_warn("event %s has unsafe dereference of argument %d\n", 457 trace_event_name(call), arg); 458 pr_warn("print_fmt: %s\n", fmt); 459 } 460 } 461 462 int trace_event_raw_init(struct trace_event_call *call) 463 { 464 int id; 465 466 id = register_trace_event(&call->event); 467 if (!id) 468 return -ENODEV; 469 470 test_event_printk(call); 471 472 return 0; 473 } 474 EXPORT_SYMBOL_GPL(trace_event_raw_init); 475 476 bool trace_event_ignore_this_pid(struct trace_event_file *trace_file) 477 { 478 struct trace_array *tr = trace_file->tr; 479 struct trace_array_cpu *data; 480 struct trace_pid_list *no_pid_list; 481 struct trace_pid_list *pid_list; 482 483 pid_list = rcu_dereference_raw(tr->filtered_pids); 484 no_pid_list = rcu_dereference_raw(tr->filtered_no_pids); 485 486 if (!pid_list && !no_pid_list) 487 return false; 488 489 data = this_cpu_ptr(tr->array_buffer.data); 490 491 return data->ignore_pid; 492 } 493 EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid); 494 495 void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer, 496 struct trace_event_file *trace_file, 497 unsigned long len) 498 { 499 struct trace_event_call *event_call = trace_file->event_call; 500 501 if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) && 502 trace_event_ignore_this_pid(trace_file)) 503 return NULL; 504 505 /* 506 * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables 507 * preemption (adding one to the preempt_count). Since we are 508 * interested in the preempt_count at the time the tracepoint was 509 * hit, we need to subtract one to offset the increment. 510 */ 511 fbuffer->trace_ctx = tracing_gen_ctx_dec(); 512 fbuffer->trace_file = trace_file; 513 514 fbuffer->event = 515 trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file, 516 event_call->event.type, len, 517 fbuffer->trace_ctx); 518 if (!fbuffer->event) 519 return NULL; 520 521 fbuffer->regs = NULL; 522 fbuffer->entry = ring_buffer_event_data(fbuffer->event); 523 return fbuffer->entry; 524 } 525 EXPORT_SYMBOL_GPL(trace_event_buffer_reserve); 526 527 int trace_event_reg(struct trace_event_call *call, 528 enum trace_reg type, void *data) 529 { 530 struct trace_event_file *file = data; 531 532 WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT)); 533 switch (type) { 534 case TRACE_REG_REGISTER: 535 return tracepoint_probe_register(call->tp, 536 call->class->probe, 537 file); 538 case TRACE_REG_UNREGISTER: 539 tracepoint_probe_unregister(call->tp, 540 call->class->probe, 541 file); 542 return 0; 543 544 #ifdef CONFIG_PERF_EVENTS 545 case TRACE_REG_PERF_REGISTER: 546 return tracepoint_probe_register(call->tp, 547 call->class->perf_probe, 548 call); 549 case TRACE_REG_PERF_UNREGISTER: 550 tracepoint_probe_unregister(call->tp, 551 call->class->perf_probe, 552 call); 553 return 0; 554 case TRACE_REG_PERF_OPEN: 555 case TRACE_REG_PERF_CLOSE: 556 case TRACE_REG_PERF_ADD: 557 case TRACE_REG_PERF_DEL: 558 return 0; 559 #endif 560 } 561 return 0; 562 } 563 EXPORT_SYMBOL_GPL(trace_event_reg); 564 565 void trace_event_enable_cmd_record(bool enable) 566 { 567 struct trace_event_file *file; 568 struct trace_array *tr; 569 570 lockdep_assert_held(&event_mutex); 571 572 do_for_each_event_file(tr, file) { 573 574 if (!(file->flags & EVENT_FILE_FL_ENABLED)) 575 continue; 576 577 if (enable) { 578 tracing_start_cmdline_record(); 579 set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); 580 } else { 581 tracing_stop_cmdline_record(); 582 clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); 583 } 584 } while_for_each_event_file(); 585 } 586 587 void trace_event_enable_tgid_record(bool enable) 588 { 589 struct trace_event_file *file; 590 struct trace_array *tr; 591 592 lockdep_assert_held(&event_mutex); 593 594 do_for_each_event_file(tr, file) { 595 if (!(file->flags & EVENT_FILE_FL_ENABLED)) 596 continue; 597 598 if (enable) { 599 tracing_start_tgid_record(); 600 set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags); 601 } else { 602 tracing_stop_tgid_record(); 603 clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, 604 &file->flags); 605 } 606 } while_for_each_event_file(); 607 } 608 609 static int __ftrace_event_enable_disable(struct trace_event_file *file, 610 int enable, int soft_disable) 611 { 612 struct trace_event_call *call = file->event_call; 613 struct trace_array *tr = file->tr; 614 int ret = 0; 615 int disable; 616 617 switch (enable) { 618 case 0: 619 /* 620 * When soft_disable is set and enable is cleared, the sm_ref 621 * reference counter is decremented. If it reaches 0, we want 622 * to clear the SOFT_DISABLED flag but leave the event in the 623 * state that it was. That is, if the event was enabled and 624 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED 625 * is set we do not want the event to be enabled before we 626 * clear the bit. 627 * 628 * When soft_disable is not set but the SOFT_MODE flag is, 629 * we do nothing. Do not disable the tracepoint, otherwise 630 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work. 631 */ 632 if (soft_disable) { 633 if (atomic_dec_return(&file->sm_ref) > 0) 634 break; 635 disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED; 636 clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags); 637 /* Disable use of trace_buffered_event */ 638 trace_buffered_event_disable(); 639 } else 640 disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE); 641 642 if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) { 643 clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags); 644 if (file->flags & EVENT_FILE_FL_RECORDED_CMD) { 645 tracing_stop_cmdline_record(); 646 clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); 647 } 648 649 if (file->flags & EVENT_FILE_FL_RECORDED_TGID) { 650 tracing_stop_tgid_record(); 651 clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags); 652 } 653 654 call->class->reg(call, TRACE_REG_UNREGISTER, file); 655 } 656 /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */ 657 if (file->flags & EVENT_FILE_FL_SOFT_MODE) 658 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); 659 else 660 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); 661 break; 662 case 1: 663 /* 664 * When soft_disable is set and enable is set, we want to 665 * register the tracepoint for the event, but leave the event 666 * as is. That means, if the event was already enabled, we do 667 * nothing (but set SOFT_MODE). If the event is disabled, we 668 * set SOFT_DISABLED before enabling the event tracepoint, so 669 * it still seems to be disabled. 670 */ 671 if (!soft_disable) 672 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); 673 else { 674 if (atomic_inc_return(&file->sm_ref) > 1) 675 break; 676 set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags); 677 /* Enable use of trace_buffered_event */ 678 trace_buffered_event_enable(); 679 } 680 681 if (!(file->flags & EVENT_FILE_FL_ENABLED)) { 682 bool cmd = false, tgid = false; 683 684 /* Keep the event disabled, when going to SOFT_MODE. */ 685 if (soft_disable) 686 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); 687 688 if (tr->trace_flags & TRACE_ITER_RECORD_CMD) { 689 cmd = true; 690 tracing_start_cmdline_record(); 691 set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); 692 } 693 694 if (tr->trace_flags & TRACE_ITER_RECORD_TGID) { 695 tgid = true; 696 tracing_start_tgid_record(); 697 set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags); 698 } 699 700 ret = call->class->reg(call, TRACE_REG_REGISTER, file); 701 if (ret) { 702 if (cmd) 703 tracing_stop_cmdline_record(); 704 if (tgid) 705 tracing_stop_tgid_record(); 706 pr_info("event trace: Could not enable event " 707 "%s\n", trace_event_name(call)); 708 break; 709 } 710 set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags); 711 712 /* WAS_ENABLED gets set but never cleared. */ 713 set_bit(EVENT_FILE_FL_WAS_ENABLED_BIT, &file->flags); 714 } 715 break; 716 } 717 718 return ret; 719 } 720 721 int trace_event_enable_disable(struct trace_event_file *file, 722 int enable, int soft_disable) 723 { 724 return __ftrace_event_enable_disable(file, enable, soft_disable); 725 } 726 727 static int ftrace_event_enable_disable(struct trace_event_file *file, 728 int enable) 729 { 730 return __ftrace_event_enable_disable(file, enable, 0); 731 } 732 733 static void ftrace_clear_events(struct trace_array *tr) 734 { 735 struct trace_event_file *file; 736 737 mutex_lock(&event_mutex); 738 list_for_each_entry(file, &tr->events, list) { 739 ftrace_event_enable_disable(file, 0); 740 } 741 mutex_unlock(&event_mutex); 742 } 743 744 static void 745 event_filter_pid_sched_process_exit(void *data, struct task_struct *task) 746 { 747 struct trace_pid_list *pid_list; 748 struct trace_array *tr = data; 749 750 pid_list = rcu_dereference_raw(tr->filtered_pids); 751 trace_filter_add_remove_task(pid_list, NULL, task); 752 753 pid_list = rcu_dereference_raw(tr->filtered_no_pids); 754 trace_filter_add_remove_task(pid_list, NULL, task); 755 } 756 757 static void 758 event_filter_pid_sched_process_fork(void *data, 759 struct task_struct *self, 760 struct task_struct *task) 761 { 762 struct trace_pid_list *pid_list; 763 struct trace_array *tr = data; 764 765 pid_list = rcu_dereference_sched(tr->filtered_pids); 766 trace_filter_add_remove_task(pid_list, self, task); 767 768 pid_list = rcu_dereference_sched(tr->filtered_no_pids); 769 trace_filter_add_remove_task(pid_list, self, task); 770 } 771 772 void trace_event_follow_fork(struct trace_array *tr, bool enable) 773 { 774 if (enable) { 775 register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork, 776 tr, INT_MIN); 777 register_trace_prio_sched_process_free(event_filter_pid_sched_process_exit, 778 tr, INT_MAX); 779 } else { 780 unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork, 781 tr); 782 unregister_trace_sched_process_free(event_filter_pid_sched_process_exit, 783 tr); 784 } 785 } 786 787 static void 788 event_filter_pid_sched_switch_probe_pre(void *data, bool preempt, 789 struct task_struct *prev, 790 struct task_struct *next, 791 unsigned int prev_state) 792 { 793 struct trace_array *tr = data; 794 struct trace_pid_list *no_pid_list; 795 struct trace_pid_list *pid_list; 796 bool ret; 797 798 pid_list = rcu_dereference_sched(tr->filtered_pids); 799 no_pid_list = rcu_dereference_sched(tr->filtered_no_pids); 800 801 /* 802 * Sched switch is funny, as we only want to ignore it 803 * in the notrace case if both prev and next should be ignored. 804 */ 805 ret = trace_ignore_this_task(NULL, no_pid_list, prev) && 806 trace_ignore_this_task(NULL, no_pid_list, next); 807 808 this_cpu_write(tr->array_buffer.data->ignore_pid, ret || 809 (trace_ignore_this_task(pid_list, NULL, prev) && 810 trace_ignore_this_task(pid_list, NULL, next))); 811 } 812 813 static void 814 event_filter_pid_sched_switch_probe_post(void *data, bool preempt, 815 struct task_struct *prev, 816 struct task_struct *next, 817 unsigned int prev_state) 818 { 819 struct trace_array *tr = data; 820 struct trace_pid_list *no_pid_list; 821 struct trace_pid_list *pid_list; 822 823 pid_list = rcu_dereference_sched(tr->filtered_pids); 824 no_pid_list = rcu_dereference_sched(tr->filtered_no_pids); 825 826 this_cpu_write(tr->array_buffer.data->ignore_pid, 827 trace_ignore_this_task(pid_list, no_pid_list, next)); 828 } 829 830 static void 831 event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task) 832 { 833 struct trace_array *tr = data; 834 struct trace_pid_list *no_pid_list; 835 struct trace_pid_list *pid_list; 836 837 /* Nothing to do if we are already tracing */ 838 if (!this_cpu_read(tr->array_buffer.data->ignore_pid)) 839 return; 840 841 pid_list = rcu_dereference_sched(tr->filtered_pids); 842 no_pid_list = rcu_dereference_sched(tr->filtered_no_pids); 843 844 this_cpu_write(tr->array_buffer.data->ignore_pid, 845 trace_ignore_this_task(pid_list, no_pid_list, task)); 846 } 847 848 static void 849 event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task) 850 { 851 struct trace_array *tr = data; 852 struct trace_pid_list *no_pid_list; 853 struct trace_pid_list *pid_list; 854 855 /* Nothing to do if we are not tracing */ 856 if (this_cpu_read(tr->array_buffer.data->ignore_pid)) 857 return; 858 859 pid_list = rcu_dereference_sched(tr->filtered_pids); 860 no_pid_list = rcu_dereference_sched(tr->filtered_no_pids); 861 862 /* Set tracing if current is enabled */ 863 this_cpu_write(tr->array_buffer.data->ignore_pid, 864 trace_ignore_this_task(pid_list, no_pid_list, current)); 865 } 866 867 static void unregister_pid_events(struct trace_array *tr) 868 { 869 unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr); 870 unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr); 871 872 unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr); 873 unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr); 874 875 unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr); 876 unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr); 877 878 unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr); 879 unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr); 880 } 881 882 static void __ftrace_clear_event_pids(struct trace_array *tr, int type) 883 { 884 struct trace_pid_list *pid_list; 885 struct trace_pid_list *no_pid_list; 886 struct trace_event_file *file; 887 int cpu; 888 889 pid_list = rcu_dereference_protected(tr->filtered_pids, 890 lockdep_is_held(&event_mutex)); 891 no_pid_list = rcu_dereference_protected(tr->filtered_no_pids, 892 lockdep_is_held(&event_mutex)); 893 894 /* Make sure there's something to do */ 895 if (!pid_type_enabled(type, pid_list, no_pid_list)) 896 return; 897 898 if (!still_need_pid_events(type, pid_list, no_pid_list)) { 899 unregister_pid_events(tr); 900 901 list_for_each_entry(file, &tr->events, list) { 902 clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags); 903 } 904 905 for_each_possible_cpu(cpu) 906 per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false; 907 } 908 909 if (type & TRACE_PIDS) 910 rcu_assign_pointer(tr->filtered_pids, NULL); 911 912 if (type & TRACE_NO_PIDS) 913 rcu_assign_pointer(tr->filtered_no_pids, NULL); 914 915 /* Wait till all users are no longer using pid filtering */ 916 tracepoint_synchronize_unregister(); 917 918 if ((type & TRACE_PIDS) && pid_list) 919 trace_pid_list_free(pid_list); 920 921 if ((type & TRACE_NO_PIDS) && no_pid_list) 922 trace_pid_list_free(no_pid_list); 923 } 924 925 static void ftrace_clear_event_pids(struct trace_array *tr, int type) 926 { 927 mutex_lock(&event_mutex); 928 __ftrace_clear_event_pids(tr, type); 929 mutex_unlock(&event_mutex); 930 } 931 932 static void __put_system(struct event_subsystem *system) 933 { 934 struct event_filter *filter = system->filter; 935 936 WARN_ON_ONCE(system_refcount(system) == 0); 937 if (system_refcount_dec(system)) 938 return; 939 940 list_del(&system->list); 941 942 if (filter) { 943 kfree(filter->filter_string); 944 kfree(filter); 945 } 946 kfree_const(system->name); 947 kfree(system); 948 } 949 950 static void __get_system(struct event_subsystem *system) 951 { 952 WARN_ON_ONCE(system_refcount(system) == 0); 953 system_refcount_inc(system); 954 } 955 956 static void __get_system_dir(struct trace_subsystem_dir *dir) 957 { 958 WARN_ON_ONCE(dir->ref_count == 0); 959 dir->ref_count++; 960 __get_system(dir->subsystem); 961 } 962 963 static void __put_system_dir(struct trace_subsystem_dir *dir) 964 { 965 WARN_ON_ONCE(dir->ref_count == 0); 966 /* If the subsystem is about to be freed, the dir must be too */ 967 WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1); 968 969 __put_system(dir->subsystem); 970 if (!--dir->ref_count) 971 kfree(dir); 972 } 973 974 static void put_system(struct trace_subsystem_dir *dir) 975 { 976 mutex_lock(&event_mutex); 977 __put_system_dir(dir); 978 mutex_unlock(&event_mutex); 979 } 980 981 static void remove_subsystem(struct trace_subsystem_dir *dir) 982 { 983 if (!dir) 984 return; 985 986 if (!--dir->nr_events) { 987 tracefs_remove(dir->entry); 988 list_del(&dir->list); 989 __put_system_dir(dir); 990 } 991 } 992 993 static void remove_event_file_dir(struct trace_event_file *file) 994 { 995 struct dentry *dir = file->dir; 996 struct dentry *child; 997 998 if (dir) { 999 spin_lock(&dir->d_lock); /* probably unneeded */ 1000 list_for_each_entry(child, &dir->d_subdirs, d_child) { 1001 if (d_really_is_positive(child)) /* probably unneeded */ 1002 d_inode(child)->i_private = NULL; 1003 } 1004 spin_unlock(&dir->d_lock); 1005 1006 tracefs_remove(dir); 1007 } 1008 1009 list_del(&file->list); 1010 remove_subsystem(file->system); 1011 free_event_filter(file->filter); 1012 kmem_cache_free(file_cachep, file); 1013 } 1014 1015 /* 1016 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events. 1017 */ 1018 static int 1019 __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match, 1020 const char *sub, const char *event, int set) 1021 { 1022 struct trace_event_file *file; 1023 struct trace_event_call *call; 1024 const char *name; 1025 int ret = -EINVAL; 1026 int eret = 0; 1027 1028 list_for_each_entry(file, &tr->events, list) { 1029 1030 call = file->event_call; 1031 name = trace_event_name(call); 1032 1033 if (!name || !call->class || !call->class->reg) 1034 continue; 1035 1036 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) 1037 continue; 1038 1039 if (match && 1040 strcmp(match, name) != 0 && 1041 strcmp(match, call->class->system) != 0) 1042 continue; 1043 1044 if (sub && strcmp(sub, call->class->system) != 0) 1045 continue; 1046 1047 if (event && strcmp(event, name) != 0) 1048 continue; 1049 1050 ret = ftrace_event_enable_disable(file, set); 1051 1052 /* 1053 * Save the first error and return that. Some events 1054 * may still have been enabled, but let the user 1055 * know that something went wrong. 1056 */ 1057 if (ret && !eret) 1058 eret = ret; 1059 1060 ret = eret; 1061 } 1062 1063 return ret; 1064 } 1065 1066 static int __ftrace_set_clr_event(struct trace_array *tr, const char *match, 1067 const char *sub, const char *event, int set) 1068 { 1069 int ret; 1070 1071 mutex_lock(&event_mutex); 1072 ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set); 1073 mutex_unlock(&event_mutex); 1074 1075 return ret; 1076 } 1077 1078 int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) 1079 { 1080 char *event = NULL, *sub = NULL, *match; 1081 int ret; 1082 1083 if (!tr) 1084 return -ENOENT; 1085 /* 1086 * The buf format can be <subsystem>:<event-name> 1087 * *:<event-name> means any event by that name. 1088 * :<event-name> is the same. 1089 * 1090 * <subsystem>:* means all events in that subsystem 1091 * <subsystem>: means the same. 1092 * 1093 * <name> (no ':') means all events in a subsystem with 1094 * the name <name> or any event that matches <name> 1095 */ 1096 1097 match = strsep(&buf, ":"); 1098 if (buf) { 1099 sub = match; 1100 event = buf; 1101 match = NULL; 1102 1103 if (!strlen(sub) || strcmp(sub, "*") == 0) 1104 sub = NULL; 1105 if (!strlen(event) || strcmp(event, "*") == 0) 1106 event = NULL; 1107 } 1108 1109 ret = __ftrace_set_clr_event(tr, match, sub, event, set); 1110 1111 /* Put back the colon to allow this to be called again */ 1112 if (buf) 1113 *(buf - 1) = ':'; 1114 1115 return ret; 1116 } 1117 1118 /** 1119 * trace_set_clr_event - enable or disable an event 1120 * @system: system name to match (NULL for any system) 1121 * @event: event name to match (NULL for all events, within system) 1122 * @set: 1 to enable, 0 to disable 1123 * 1124 * This is a way for other parts of the kernel to enable or disable 1125 * event recording. 1126 * 1127 * Returns 0 on success, -EINVAL if the parameters do not match any 1128 * registered events. 1129 */ 1130 int trace_set_clr_event(const char *system, const char *event, int set) 1131 { 1132 struct trace_array *tr = top_trace_array(); 1133 1134 if (!tr) 1135 return -ENODEV; 1136 1137 return __ftrace_set_clr_event(tr, NULL, system, event, set); 1138 } 1139 EXPORT_SYMBOL_GPL(trace_set_clr_event); 1140 1141 /** 1142 * trace_array_set_clr_event - enable or disable an event for a trace array. 1143 * @tr: concerned trace array. 1144 * @system: system name to match (NULL for any system) 1145 * @event: event name to match (NULL for all events, within system) 1146 * @enable: true to enable, false to disable 1147 * 1148 * This is a way for other parts of the kernel to enable or disable 1149 * event recording. 1150 * 1151 * Returns 0 on success, -EINVAL if the parameters do not match any 1152 * registered events. 1153 */ 1154 int trace_array_set_clr_event(struct trace_array *tr, const char *system, 1155 const char *event, bool enable) 1156 { 1157 int set; 1158 1159 if (!tr) 1160 return -ENOENT; 1161 1162 set = (enable == true) ? 1 : 0; 1163 return __ftrace_set_clr_event(tr, NULL, system, event, set); 1164 } 1165 EXPORT_SYMBOL_GPL(trace_array_set_clr_event); 1166 1167 /* 128 should be much more than enough */ 1168 #define EVENT_BUF_SIZE 127 1169 1170 static ssize_t 1171 ftrace_event_write(struct file *file, const char __user *ubuf, 1172 size_t cnt, loff_t *ppos) 1173 { 1174 struct trace_parser parser; 1175 struct seq_file *m = file->private_data; 1176 struct trace_array *tr = m->private; 1177 ssize_t read, ret; 1178 1179 if (!cnt) 1180 return 0; 1181 1182 ret = tracing_update_buffers(); 1183 if (ret < 0) 1184 return ret; 1185 1186 if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1)) 1187 return -ENOMEM; 1188 1189 read = trace_get_user(&parser, ubuf, cnt, ppos); 1190 1191 if (read >= 0 && trace_parser_loaded((&parser))) { 1192 int set = 1; 1193 1194 if (*parser.buffer == '!') 1195 set = 0; 1196 1197 ret = ftrace_set_clr_event(tr, parser.buffer + !set, set); 1198 if (ret) 1199 goto out_put; 1200 } 1201 1202 ret = read; 1203 1204 out_put: 1205 trace_parser_put(&parser); 1206 1207 return ret; 1208 } 1209 1210 static void * 1211 t_next(struct seq_file *m, void *v, loff_t *pos) 1212 { 1213 struct trace_event_file *file = v; 1214 struct trace_event_call *call; 1215 struct trace_array *tr = m->private; 1216 1217 (*pos)++; 1218 1219 list_for_each_entry_continue(file, &tr->events, list) { 1220 call = file->event_call; 1221 /* 1222 * The ftrace subsystem is for showing formats only. 1223 * They can not be enabled or disabled via the event files. 1224 */ 1225 if (call->class && call->class->reg && 1226 !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) 1227 return file; 1228 } 1229 1230 return NULL; 1231 } 1232 1233 static void *t_start(struct seq_file *m, loff_t *pos) 1234 { 1235 struct trace_event_file *file; 1236 struct trace_array *tr = m->private; 1237 loff_t l; 1238 1239 mutex_lock(&event_mutex); 1240 1241 file = list_entry(&tr->events, struct trace_event_file, list); 1242 for (l = 0; l <= *pos; ) { 1243 file = t_next(m, file, &l); 1244 if (!file) 1245 break; 1246 } 1247 return file; 1248 } 1249 1250 static void * 1251 s_next(struct seq_file *m, void *v, loff_t *pos) 1252 { 1253 struct trace_event_file *file = v; 1254 struct trace_array *tr = m->private; 1255 1256 (*pos)++; 1257 1258 list_for_each_entry_continue(file, &tr->events, list) { 1259 if (file->flags & EVENT_FILE_FL_ENABLED) 1260 return file; 1261 } 1262 1263 return NULL; 1264 } 1265 1266 static void *s_start(struct seq_file *m, loff_t *pos) 1267 { 1268 struct trace_event_file *file; 1269 struct trace_array *tr = m->private; 1270 loff_t l; 1271 1272 mutex_lock(&event_mutex); 1273 1274 file = list_entry(&tr->events, struct trace_event_file, list); 1275 for (l = 0; l <= *pos; ) { 1276 file = s_next(m, file, &l); 1277 if (!file) 1278 break; 1279 } 1280 return file; 1281 } 1282 1283 static int t_show(struct seq_file *m, void *v) 1284 { 1285 struct trace_event_file *file = v; 1286 struct trace_event_call *call = file->event_call; 1287 1288 if (strcmp(call->class->system, TRACE_SYSTEM) != 0) 1289 seq_printf(m, "%s:", call->class->system); 1290 seq_printf(m, "%s\n", trace_event_name(call)); 1291 1292 return 0; 1293 } 1294 1295 static void t_stop(struct seq_file *m, void *p) 1296 { 1297 mutex_unlock(&event_mutex); 1298 } 1299 1300 static void * 1301 __next(struct seq_file *m, void *v, loff_t *pos, int type) 1302 { 1303 struct trace_array *tr = m->private; 1304 struct trace_pid_list *pid_list; 1305 1306 if (type == TRACE_PIDS) 1307 pid_list = rcu_dereference_sched(tr->filtered_pids); 1308 else 1309 pid_list = rcu_dereference_sched(tr->filtered_no_pids); 1310 1311 return trace_pid_next(pid_list, v, pos); 1312 } 1313 1314 static void * 1315 p_next(struct seq_file *m, void *v, loff_t *pos) 1316 { 1317 return __next(m, v, pos, TRACE_PIDS); 1318 } 1319 1320 static void * 1321 np_next(struct seq_file *m, void *v, loff_t *pos) 1322 { 1323 return __next(m, v, pos, TRACE_NO_PIDS); 1324 } 1325 1326 static void *__start(struct seq_file *m, loff_t *pos, int type) 1327 __acquires(RCU) 1328 { 1329 struct trace_pid_list *pid_list; 1330 struct trace_array *tr = m->private; 1331 1332 /* 1333 * Grab the mutex, to keep calls to p_next() having the same 1334 * tr->filtered_pids as p_start() has. 1335 * If we just passed the tr->filtered_pids around, then RCU would 1336 * have been enough, but doing that makes things more complex. 1337 */ 1338 mutex_lock(&event_mutex); 1339 rcu_read_lock_sched(); 1340 1341 if (type == TRACE_PIDS) 1342 pid_list = rcu_dereference_sched(tr->filtered_pids); 1343 else 1344 pid_list = rcu_dereference_sched(tr->filtered_no_pids); 1345 1346 if (!pid_list) 1347 return NULL; 1348 1349 return trace_pid_start(pid_list, pos); 1350 } 1351 1352 static void *p_start(struct seq_file *m, loff_t *pos) 1353 __acquires(RCU) 1354 { 1355 return __start(m, pos, TRACE_PIDS); 1356 } 1357 1358 static void *np_start(struct seq_file *m, loff_t *pos) 1359 __acquires(RCU) 1360 { 1361 return __start(m, pos, TRACE_NO_PIDS); 1362 } 1363 1364 static void p_stop(struct seq_file *m, void *p) 1365 __releases(RCU) 1366 { 1367 rcu_read_unlock_sched(); 1368 mutex_unlock(&event_mutex); 1369 } 1370 1371 static ssize_t 1372 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, 1373 loff_t *ppos) 1374 { 1375 struct trace_event_file *file; 1376 unsigned long flags; 1377 char buf[4] = "0"; 1378 1379 mutex_lock(&event_mutex); 1380 file = event_file_data(filp); 1381 if (likely(file)) 1382 flags = file->flags; 1383 mutex_unlock(&event_mutex); 1384 1385 if (!file) 1386 return -ENODEV; 1387 1388 if (flags & EVENT_FILE_FL_ENABLED && 1389 !(flags & EVENT_FILE_FL_SOFT_DISABLED)) 1390 strcpy(buf, "1"); 1391 1392 if (flags & EVENT_FILE_FL_SOFT_DISABLED || 1393 flags & EVENT_FILE_FL_SOFT_MODE) 1394 strcat(buf, "*"); 1395 1396 strcat(buf, "\n"); 1397 1398 return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf)); 1399 } 1400 1401 static ssize_t 1402 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, 1403 loff_t *ppos) 1404 { 1405 struct trace_event_file *file; 1406 unsigned long val; 1407 int ret; 1408 1409 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 1410 if (ret) 1411 return ret; 1412 1413 ret = tracing_update_buffers(); 1414 if (ret < 0) 1415 return ret; 1416 1417 switch (val) { 1418 case 0: 1419 case 1: 1420 ret = -ENODEV; 1421 mutex_lock(&event_mutex); 1422 file = event_file_data(filp); 1423 if (likely(file)) 1424 ret = ftrace_event_enable_disable(file, val); 1425 mutex_unlock(&event_mutex); 1426 break; 1427 1428 default: 1429 return -EINVAL; 1430 } 1431 1432 *ppos += cnt; 1433 1434 return ret ? ret : cnt; 1435 } 1436 1437 static ssize_t 1438 system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, 1439 loff_t *ppos) 1440 { 1441 const char set_to_char[4] = { '?', '0', '1', 'X' }; 1442 struct trace_subsystem_dir *dir = filp->private_data; 1443 struct event_subsystem *system = dir->subsystem; 1444 struct trace_event_call *call; 1445 struct trace_event_file *file; 1446 struct trace_array *tr = dir->tr; 1447 char buf[2]; 1448 int set = 0; 1449 int ret; 1450 1451 mutex_lock(&event_mutex); 1452 list_for_each_entry(file, &tr->events, list) { 1453 call = file->event_call; 1454 if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) || 1455 !trace_event_name(call) || !call->class || !call->class->reg) 1456 continue; 1457 1458 if (system && strcmp(call->class->system, system->name) != 0) 1459 continue; 1460 1461 /* 1462 * We need to find out if all the events are set 1463 * or if all events or cleared, or if we have 1464 * a mixture. 1465 */ 1466 set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED)); 1467 1468 /* 1469 * If we have a mixture, no need to look further. 1470 */ 1471 if (set == 3) 1472 break; 1473 } 1474 mutex_unlock(&event_mutex); 1475 1476 buf[0] = set_to_char[set]; 1477 buf[1] = '\n'; 1478 1479 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); 1480 1481 return ret; 1482 } 1483 1484 static ssize_t 1485 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, 1486 loff_t *ppos) 1487 { 1488 struct trace_subsystem_dir *dir = filp->private_data; 1489 struct event_subsystem *system = dir->subsystem; 1490 const char *name = NULL; 1491 unsigned long val; 1492 ssize_t ret; 1493 1494 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 1495 if (ret) 1496 return ret; 1497 1498 ret = tracing_update_buffers(); 1499 if (ret < 0) 1500 return ret; 1501 1502 if (val != 0 && val != 1) 1503 return -EINVAL; 1504 1505 /* 1506 * Opening of "enable" adds a ref count to system, 1507 * so the name is safe to use. 1508 */ 1509 if (system) 1510 name = system->name; 1511 1512 ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val); 1513 if (ret) 1514 goto out; 1515 1516 ret = cnt; 1517 1518 out: 1519 *ppos += cnt; 1520 1521 return ret; 1522 } 1523 1524 enum { 1525 FORMAT_HEADER = 1, 1526 FORMAT_FIELD_SEPERATOR = 2, 1527 FORMAT_PRINTFMT = 3, 1528 }; 1529 1530 static void *f_next(struct seq_file *m, void *v, loff_t *pos) 1531 { 1532 struct trace_event_call *call = event_file_data(m->private); 1533 struct list_head *common_head = &ftrace_common_fields; 1534 struct list_head *head = trace_get_fields(call); 1535 struct list_head *node = v; 1536 1537 (*pos)++; 1538 1539 switch ((unsigned long)v) { 1540 case FORMAT_HEADER: 1541 node = common_head; 1542 break; 1543 1544 case FORMAT_FIELD_SEPERATOR: 1545 node = head; 1546 break; 1547 1548 case FORMAT_PRINTFMT: 1549 /* all done */ 1550 return NULL; 1551 } 1552 1553 node = node->prev; 1554 if (node == common_head) 1555 return (void *)FORMAT_FIELD_SEPERATOR; 1556 else if (node == head) 1557 return (void *)FORMAT_PRINTFMT; 1558 else 1559 return node; 1560 } 1561 1562 static int f_show(struct seq_file *m, void *v) 1563 { 1564 struct trace_event_call *call = event_file_data(m->private); 1565 struct ftrace_event_field *field; 1566 const char *array_descriptor; 1567 1568 switch ((unsigned long)v) { 1569 case FORMAT_HEADER: 1570 seq_printf(m, "name: %s\n", trace_event_name(call)); 1571 seq_printf(m, "ID: %d\n", call->event.type); 1572 seq_puts(m, "format:\n"); 1573 return 0; 1574 1575 case FORMAT_FIELD_SEPERATOR: 1576 seq_putc(m, '\n'); 1577 return 0; 1578 1579 case FORMAT_PRINTFMT: 1580 seq_printf(m, "\nprint fmt: %s\n", 1581 call->print_fmt); 1582 return 0; 1583 } 1584 1585 field = list_entry(v, struct ftrace_event_field, link); 1586 /* 1587 * Smartly shows the array type(except dynamic array). 1588 * Normal: 1589 * field:TYPE VAR 1590 * If TYPE := TYPE[LEN], it is shown: 1591 * field:TYPE VAR[LEN] 1592 */ 1593 array_descriptor = strchr(field->type, '['); 1594 1595 if (str_has_prefix(field->type, "__data_loc")) 1596 array_descriptor = NULL; 1597 1598 if (!array_descriptor) 1599 seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n", 1600 field->type, field->name, field->offset, 1601 field->size, !!field->is_signed); 1602 else if (field->len) 1603 seq_printf(m, "\tfield:%.*s %s[%d];\toffset:%u;\tsize:%u;\tsigned:%d;\n", 1604 (int)(array_descriptor - field->type), 1605 field->type, field->name, 1606 field->len, field->offset, 1607 field->size, !!field->is_signed); 1608 else 1609 seq_printf(m, "\tfield:%.*s %s[];\toffset:%u;\tsize:%u;\tsigned:%d;\n", 1610 (int)(array_descriptor - field->type), 1611 field->type, field->name, 1612 field->offset, field->size, !!field->is_signed); 1613 1614 return 0; 1615 } 1616 1617 static void *f_start(struct seq_file *m, loff_t *pos) 1618 { 1619 void *p = (void *)FORMAT_HEADER; 1620 loff_t l = 0; 1621 1622 /* ->stop() is called even if ->start() fails */ 1623 mutex_lock(&event_mutex); 1624 if (!event_file_data(m->private)) 1625 return ERR_PTR(-ENODEV); 1626 1627 while (l < *pos && p) 1628 p = f_next(m, p, &l); 1629 1630 return p; 1631 } 1632 1633 static void f_stop(struct seq_file *m, void *p) 1634 { 1635 mutex_unlock(&event_mutex); 1636 } 1637 1638 static const struct seq_operations trace_format_seq_ops = { 1639 .start = f_start, 1640 .next = f_next, 1641 .stop = f_stop, 1642 .show = f_show, 1643 }; 1644 1645 static int trace_format_open(struct inode *inode, struct file *file) 1646 { 1647 struct seq_file *m; 1648 int ret; 1649 1650 /* Do we want to hide event format files on tracefs lockdown? */ 1651 1652 ret = seq_open(file, &trace_format_seq_ops); 1653 if (ret < 0) 1654 return ret; 1655 1656 m = file->private_data; 1657 m->private = file; 1658 1659 return 0; 1660 } 1661 1662 static ssize_t 1663 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) 1664 { 1665 int id = (long)event_file_data(filp); 1666 char buf[32]; 1667 int len; 1668 1669 if (unlikely(!id)) 1670 return -ENODEV; 1671 1672 len = sprintf(buf, "%d\n", id); 1673 1674 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); 1675 } 1676 1677 static ssize_t 1678 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, 1679 loff_t *ppos) 1680 { 1681 struct trace_event_file *file; 1682 struct trace_seq *s; 1683 int r = -ENODEV; 1684 1685 if (*ppos) 1686 return 0; 1687 1688 s = kmalloc(sizeof(*s), GFP_KERNEL); 1689 1690 if (!s) 1691 return -ENOMEM; 1692 1693 trace_seq_init(s); 1694 1695 mutex_lock(&event_mutex); 1696 file = event_file_data(filp); 1697 if (file) 1698 print_event_filter(file, s); 1699 mutex_unlock(&event_mutex); 1700 1701 if (file) 1702 r = simple_read_from_buffer(ubuf, cnt, ppos, 1703 s->buffer, trace_seq_used(s)); 1704 1705 kfree(s); 1706 1707 return r; 1708 } 1709 1710 static ssize_t 1711 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, 1712 loff_t *ppos) 1713 { 1714 struct trace_event_file *file; 1715 char *buf; 1716 int err = -ENODEV; 1717 1718 if (cnt >= PAGE_SIZE) 1719 return -EINVAL; 1720 1721 buf = memdup_user_nul(ubuf, cnt); 1722 if (IS_ERR(buf)) 1723 return PTR_ERR(buf); 1724 1725 mutex_lock(&event_mutex); 1726 file = event_file_data(filp); 1727 if (file) 1728 err = apply_event_filter(file, buf); 1729 mutex_unlock(&event_mutex); 1730 1731 kfree(buf); 1732 if (err < 0) 1733 return err; 1734 1735 *ppos += cnt; 1736 1737 return cnt; 1738 } 1739 1740 static LIST_HEAD(event_subsystems); 1741 1742 static int subsystem_open(struct inode *inode, struct file *filp) 1743 { 1744 struct trace_subsystem_dir *dir = NULL, *iter_dir; 1745 struct trace_array *tr = NULL, *iter_tr; 1746 struct event_subsystem *system = NULL; 1747 int ret; 1748 1749 if (tracing_is_disabled()) 1750 return -ENODEV; 1751 1752 /* Make sure the system still exists */ 1753 mutex_lock(&event_mutex); 1754 mutex_lock(&trace_types_lock); 1755 list_for_each_entry(iter_tr, &ftrace_trace_arrays, list) { 1756 list_for_each_entry(iter_dir, &iter_tr->systems, list) { 1757 if (iter_dir == inode->i_private) { 1758 /* Don't open systems with no events */ 1759 tr = iter_tr; 1760 dir = iter_dir; 1761 if (dir->nr_events) { 1762 __get_system_dir(dir); 1763 system = dir->subsystem; 1764 } 1765 goto exit_loop; 1766 } 1767 } 1768 } 1769 exit_loop: 1770 mutex_unlock(&trace_types_lock); 1771 mutex_unlock(&event_mutex); 1772 1773 if (!system) 1774 return -ENODEV; 1775 1776 /* Still need to increment the ref count of the system */ 1777 if (trace_array_get(tr) < 0) { 1778 put_system(dir); 1779 return -ENODEV; 1780 } 1781 1782 ret = tracing_open_generic(inode, filp); 1783 if (ret < 0) { 1784 trace_array_put(tr); 1785 put_system(dir); 1786 } 1787 1788 return ret; 1789 } 1790 1791 static int system_tr_open(struct inode *inode, struct file *filp) 1792 { 1793 struct trace_subsystem_dir *dir; 1794 struct trace_array *tr = inode->i_private; 1795 int ret; 1796 1797 /* Make a temporary dir that has no system but points to tr */ 1798 dir = kzalloc(sizeof(*dir), GFP_KERNEL); 1799 if (!dir) 1800 return -ENOMEM; 1801 1802 ret = tracing_open_generic_tr(inode, filp); 1803 if (ret < 0) { 1804 kfree(dir); 1805 return ret; 1806 } 1807 dir->tr = tr; 1808 filp->private_data = dir; 1809 1810 return 0; 1811 } 1812 1813 static int subsystem_release(struct inode *inode, struct file *file) 1814 { 1815 struct trace_subsystem_dir *dir = file->private_data; 1816 1817 trace_array_put(dir->tr); 1818 1819 /* 1820 * If dir->subsystem is NULL, then this is a temporary 1821 * descriptor that was made for a trace_array to enable 1822 * all subsystems. 1823 */ 1824 if (dir->subsystem) 1825 put_system(dir); 1826 else 1827 kfree(dir); 1828 1829 return 0; 1830 } 1831 1832 static ssize_t 1833 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt, 1834 loff_t *ppos) 1835 { 1836 struct trace_subsystem_dir *dir = filp->private_data; 1837 struct event_subsystem *system = dir->subsystem; 1838 struct trace_seq *s; 1839 int r; 1840 1841 if (*ppos) 1842 return 0; 1843 1844 s = kmalloc(sizeof(*s), GFP_KERNEL); 1845 if (!s) 1846 return -ENOMEM; 1847 1848 trace_seq_init(s); 1849 1850 print_subsystem_event_filter(system, s); 1851 r = simple_read_from_buffer(ubuf, cnt, ppos, 1852 s->buffer, trace_seq_used(s)); 1853 1854 kfree(s); 1855 1856 return r; 1857 } 1858 1859 static ssize_t 1860 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, 1861 loff_t *ppos) 1862 { 1863 struct trace_subsystem_dir *dir = filp->private_data; 1864 char *buf; 1865 int err; 1866 1867 if (cnt >= PAGE_SIZE) 1868 return -EINVAL; 1869 1870 buf = memdup_user_nul(ubuf, cnt); 1871 if (IS_ERR(buf)) 1872 return PTR_ERR(buf); 1873 1874 err = apply_subsystem_event_filter(dir, buf); 1875 kfree(buf); 1876 if (err < 0) 1877 return err; 1878 1879 *ppos += cnt; 1880 1881 return cnt; 1882 } 1883 1884 static ssize_t 1885 show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) 1886 { 1887 int (*func)(struct trace_seq *s) = filp->private_data; 1888 struct trace_seq *s; 1889 int r; 1890 1891 if (*ppos) 1892 return 0; 1893 1894 s = kmalloc(sizeof(*s), GFP_KERNEL); 1895 if (!s) 1896 return -ENOMEM; 1897 1898 trace_seq_init(s); 1899 1900 func(s); 1901 r = simple_read_from_buffer(ubuf, cnt, ppos, 1902 s->buffer, trace_seq_used(s)); 1903 1904 kfree(s); 1905 1906 return r; 1907 } 1908 1909 static void ignore_task_cpu(void *data) 1910 { 1911 struct trace_array *tr = data; 1912 struct trace_pid_list *pid_list; 1913 struct trace_pid_list *no_pid_list; 1914 1915 /* 1916 * This function is called by on_each_cpu() while the 1917 * event_mutex is held. 1918 */ 1919 pid_list = rcu_dereference_protected(tr->filtered_pids, 1920 mutex_is_locked(&event_mutex)); 1921 no_pid_list = rcu_dereference_protected(tr->filtered_no_pids, 1922 mutex_is_locked(&event_mutex)); 1923 1924 this_cpu_write(tr->array_buffer.data->ignore_pid, 1925 trace_ignore_this_task(pid_list, no_pid_list, current)); 1926 } 1927 1928 static void register_pid_events(struct trace_array *tr) 1929 { 1930 /* 1931 * Register a probe that is called before all other probes 1932 * to set ignore_pid if next or prev do not match. 1933 * Register a probe this is called after all other probes 1934 * to only keep ignore_pid set if next pid matches. 1935 */ 1936 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre, 1937 tr, INT_MAX); 1938 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post, 1939 tr, 0); 1940 1941 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, 1942 tr, INT_MAX); 1943 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, 1944 tr, 0); 1945 1946 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, 1947 tr, INT_MAX); 1948 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, 1949 tr, 0); 1950 1951 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre, 1952 tr, INT_MAX); 1953 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post, 1954 tr, 0); 1955 } 1956 1957 static ssize_t 1958 event_pid_write(struct file *filp, const char __user *ubuf, 1959 size_t cnt, loff_t *ppos, int type) 1960 { 1961 struct seq_file *m = filp->private_data; 1962 struct trace_array *tr = m->private; 1963 struct trace_pid_list *filtered_pids = NULL; 1964 struct trace_pid_list *other_pids = NULL; 1965 struct trace_pid_list *pid_list; 1966 struct trace_event_file *file; 1967 ssize_t ret; 1968 1969 if (!cnt) 1970 return 0; 1971 1972 ret = tracing_update_buffers(); 1973 if (ret < 0) 1974 return ret; 1975 1976 mutex_lock(&event_mutex); 1977 1978 if (type == TRACE_PIDS) { 1979 filtered_pids = rcu_dereference_protected(tr->filtered_pids, 1980 lockdep_is_held(&event_mutex)); 1981 other_pids = rcu_dereference_protected(tr->filtered_no_pids, 1982 lockdep_is_held(&event_mutex)); 1983 } else { 1984 filtered_pids = rcu_dereference_protected(tr->filtered_no_pids, 1985 lockdep_is_held(&event_mutex)); 1986 other_pids = rcu_dereference_protected(tr->filtered_pids, 1987 lockdep_is_held(&event_mutex)); 1988 } 1989 1990 ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt); 1991 if (ret < 0) 1992 goto out; 1993 1994 if (type == TRACE_PIDS) 1995 rcu_assign_pointer(tr->filtered_pids, pid_list); 1996 else 1997 rcu_assign_pointer(tr->filtered_no_pids, pid_list); 1998 1999 list_for_each_entry(file, &tr->events, list) { 2000 set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags); 2001 } 2002 2003 if (filtered_pids) { 2004 tracepoint_synchronize_unregister(); 2005 trace_pid_list_free(filtered_pids); 2006 } else if (pid_list && !other_pids) { 2007 register_pid_events(tr); 2008 } 2009 2010 /* 2011 * Ignoring of pids is done at task switch. But we have to 2012 * check for those tasks that are currently running. 2013 * Always do this in case a pid was appended or removed. 2014 */ 2015 on_each_cpu(ignore_task_cpu, tr, 1); 2016 2017 out: 2018 mutex_unlock(&event_mutex); 2019 2020 if (ret > 0) 2021 *ppos += ret; 2022 2023 return ret; 2024 } 2025 2026 static ssize_t 2027 ftrace_event_pid_write(struct file *filp, const char __user *ubuf, 2028 size_t cnt, loff_t *ppos) 2029 { 2030 return event_pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS); 2031 } 2032 2033 static ssize_t 2034 ftrace_event_npid_write(struct file *filp, const char __user *ubuf, 2035 size_t cnt, loff_t *ppos) 2036 { 2037 return event_pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS); 2038 } 2039 2040 static int ftrace_event_avail_open(struct inode *inode, struct file *file); 2041 static int ftrace_event_set_open(struct inode *inode, struct file *file); 2042 static int ftrace_event_set_pid_open(struct inode *inode, struct file *file); 2043 static int ftrace_event_set_npid_open(struct inode *inode, struct file *file); 2044 static int ftrace_event_release(struct inode *inode, struct file *file); 2045 2046 static const struct seq_operations show_event_seq_ops = { 2047 .start = t_start, 2048 .next = t_next, 2049 .show = t_show, 2050 .stop = t_stop, 2051 }; 2052 2053 static const struct seq_operations show_set_event_seq_ops = { 2054 .start = s_start, 2055 .next = s_next, 2056 .show = t_show, 2057 .stop = t_stop, 2058 }; 2059 2060 static const struct seq_operations show_set_pid_seq_ops = { 2061 .start = p_start, 2062 .next = p_next, 2063 .show = trace_pid_show, 2064 .stop = p_stop, 2065 }; 2066 2067 static const struct seq_operations show_set_no_pid_seq_ops = { 2068 .start = np_start, 2069 .next = np_next, 2070 .show = trace_pid_show, 2071 .stop = p_stop, 2072 }; 2073 2074 static const struct file_operations ftrace_avail_fops = { 2075 .open = ftrace_event_avail_open, 2076 .read = seq_read, 2077 .llseek = seq_lseek, 2078 .release = seq_release, 2079 }; 2080 2081 static const struct file_operations ftrace_set_event_fops = { 2082 .open = ftrace_event_set_open, 2083 .read = seq_read, 2084 .write = ftrace_event_write, 2085 .llseek = seq_lseek, 2086 .release = ftrace_event_release, 2087 }; 2088 2089 static const struct file_operations ftrace_set_event_pid_fops = { 2090 .open = ftrace_event_set_pid_open, 2091 .read = seq_read, 2092 .write = ftrace_event_pid_write, 2093 .llseek = seq_lseek, 2094 .release = ftrace_event_release, 2095 }; 2096 2097 static const struct file_operations ftrace_set_event_notrace_pid_fops = { 2098 .open = ftrace_event_set_npid_open, 2099 .read = seq_read, 2100 .write = ftrace_event_npid_write, 2101 .llseek = seq_lseek, 2102 .release = ftrace_event_release, 2103 }; 2104 2105 static const struct file_operations ftrace_enable_fops = { 2106 .open = tracing_open_generic, 2107 .read = event_enable_read, 2108 .write = event_enable_write, 2109 .llseek = default_llseek, 2110 }; 2111 2112 static const struct file_operations ftrace_event_format_fops = { 2113 .open = trace_format_open, 2114 .read = seq_read, 2115 .llseek = seq_lseek, 2116 .release = seq_release, 2117 }; 2118 2119 static const struct file_operations ftrace_event_id_fops = { 2120 .read = event_id_read, 2121 .llseek = default_llseek, 2122 }; 2123 2124 static const struct file_operations ftrace_event_filter_fops = { 2125 .open = tracing_open_generic, 2126 .read = event_filter_read, 2127 .write = event_filter_write, 2128 .llseek = default_llseek, 2129 }; 2130 2131 static const struct file_operations ftrace_subsystem_filter_fops = { 2132 .open = subsystem_open, 2133 .read = subsystem_filter_read, 2134 .write = subsystem_filter_write, 2135 .llseek = default_llseek, 2136 .release = subsystem_release, 2137 }; 2138 2139 static const struct file_operations ftrace_system_enable_fops = { 2140 .open = subsystem_open, 2141 .read = system_enable_read, 2142 .write = system_enable_write, 2143 .llseek = default_llseek, 2144 .release = subsystem_release, 2145 }; 2146 2147 static const struct file_operations ftrace_tr_enable_fops = { 2148 .open = system_tr_open, 2149 .read = system_enable_read, 2150 .write = system_enable_write, 2151 .llseek = default_llseek, 2152 .release = subsystem_release, 2153 }; 2154 2155 static const struct file_operations ftrace_show_header_fops = { 2156 .open = tracing_open_generic, 2157 .read = show_header, 2158 .llseek = default_llseek, 2159 }; 2160 2161 static int 2162 ftrace_event_open(struct inode *inode, struct file *file, 2163 const struct seq_operations *seq_ops) 2164 { 2165 struct seq_file *m; 2166 int ret; 2167 2168 ret = security_locked_down(LOCKDOWN_TRACEFS); 2169 if (ret) 2170 return ret; 2171 2172 ret = seq_open(file, seq_ops); 2173 if (ret < 0) 2174 return ret; 2175 m = file->private_data; 2176 /* copy tr over to seq ops */ 2177 m->private = inode->i_private; 2178 2179 return ret; 2180 } 2181 2182 static int ftrace_event_release(struct inode *inode, struct file *file) 2183 { 2184 struct trace_array *tr = inode->i_private; 2185 2186 trace_array_put(tr); 2187 2188 return seq_release(inode, file); 2189 } 2190 2191 static int 2192 ftrace_event_avail_open(struct inode *inode, struct file *file) 2193 { 2194 const struct seq_operations *seq_ops = &show_event_seq_ops; 2195 2196 /* Checks for tracefs lockdown */ 2197 return ftrace_event_open(inode, file, seq_ops); 2198 } 2199 2200 static int 2201 ftrace_event_set_open(struct inode *inode, struct file *file) 2202 { 2203 const struct seq_operations *seq_ops = &show_set_event_seq_ops; 2204 struct trace_array *tr = inode->i_private; 2205 int ret; 2206 2207 ret = tracing_check_open_get_tr(tr); 2208 if (ret) 2209 return ret; 2210 2211 if ((file->f_mode & FMODE_WRITE) && 2212 (file->f_flags & O_TRUNC)) 2213 ftrace_clear_events(tr); 2214 2215 ret = ftrace_event_open(inode, file, seq_ops); 2216 if (ret < 0) 2217 trace_array_put(tr); 2218 return ret; 2219 } 2220 2221 static int 2222 ftrace_event_set_pid_open(struct inode *inode, struct file *file) 2223 { 2224 const struct seq_operations *seq_ops = &show_set_pid_seq_ops; 2225 struct trace_array *tr = inode->i_private; 2226 int ret; 2227 2228 ret = tracing_check_open_get_tr(tr); 2229 if (ret) 2230 return ret; 2231 2232 if ((file->f_mode & FMODE_WRITE) && 2233 (file->f_flags & O_TRUNC)) 2234 ftrace_clear_event_pids(tr, TRACE_PIDS); 2235 2236 ret = ftrace_event_open(inode, file, seq_ops); 2237 if (ret < 0) 2238 trace_array_put(tr); 2239 return ret; 2240 } 2241 2242 static int 2243 ftrace_event_set_npid_open(struct inode *inode, struct file *file) 2244 { 2245 const struct seq_operations *seq_ops = &show_set_no_pid_seq_ops; 2246 struct trace_array *tr = inode->i_private; 2247 int ret; 2248 2249 ret = tracing_check_open_get_tr(tr); 2250 if (ret) 2251 return ret; 2252 2253 if ((file->f_mode & FMODE_WRITE) && 2254 (file->f_flags & O_TRUNC)) 2255 ftrace_clear_event_pids(tr, TRACE_NO_PIDS); 2256 2257 ret = ftrace_event_open(inode, file, seq_ops); 2258 if (ret < 0) 2259 trace_array_put(tr); 2260 return ret; 2261 } 2262 2263 static struct event_subsystem * 2264 create_new_subsystem(const char *name) 2265 { 2266 struct event_subsystem *system; 2267 2268 /* need to create new entry */ 2269 system = kmalloc(sizeof(*system), GFP_KERNEL); 2270 if (!system) 2271 return NULL; 2272 2273 system->ref_count = 1; 2274 2275 /* Only allocate if dynamic (kprobes and modules) */ 2276 system->name = kstrdup_const(name, GFP_KERNEL); 2277 if (!system->name) 2278 goto out_free; 2279 2280 system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL); 2281 if (!system->filter) 2282 goto out_free; 2283 2284 list_add(&system->list, &event_subsystems); 2285 2286 return system; 2287 2288 out_free: 2289 kfree_const(system->name); 2290 kfree(system); 2291 return NULL; 2292 } 2293 2294 static struct dentry * 2295 event_subsystem_dir(struct trace_array *tr, const char *name, 2296 struct trace_event_file *file, struct dentry *parent) 2297 { 2298 struct event_subsystem *system, *iter; 2299 struct trace_subsystem_dir *dir; 2300 struct dentry *entry; 2301 2302 /* First see if we did not already create this dir */ 2303 list_for_each_entry(dir, &tr->systems, list) { 2304 system = dir->subsystem; 2305 if (strcmp(system->name, name) == 0) { 2306 dir->nr_events++; 2307 file->system = dir; 2308 return dir->entry; 2309 } 2310 } 2311 2312 /* Now see if the system itself exists. */ 2313 system = NULL; 2314 list_for_each_entry(iter, &event_subsystems, list) { 2315 if (strcmp(iter->name, name) == 0) { 2316 system = iter; 2317 break; 2318 } 2319 } 2320 2321 dir = kmalloc(sizeof(*dir), GFP_KERNEL); 2322 if (!dir) 2323 goto out_fail; 2324 2325 if (!system) { 2326 system = create_new_subsystem(name); 2327 if (!system) 2328 goto out_free; 2329 } else 2330 __get_system(system); 2331 2332 dir->entry = tracefs_create_dir(name, parent); 2333 if (!dir->entry) { 2334 pr_warn("Failed to create system directory %s\n", name); 2335 __put_system(system); 2336 goto out_free; 2337 } 2338 2339 dir->tr = tr; 2340 dir->ref_count = 1; 2341 dir->nr_events = 1; 2342 dir->subsystem = system; 2343 file->system = dir; 2344 2345 /* the ftrace system is special, do not create enable or filter files */ 2346 if (strcmp(name, "ftrace") != 0) { 2347 2348 entry = tracefs_create_file("filter", TRACE_MODE_WRITE, 2349 dir->entry, dir, 2350 &ftrace_subsystem_filter_fops); 2351 if (!entry) { 2352 kfree(system->filter); 2353 system->filter = NULL; 2354 pr_warn("Could not create tracefs '%s/filter' entry\n", name); 2355 } 2356 2357 trace_create_file("enable", TRACE_MODE_WRITE, dir->entry, dir, 2358 &ftrace_system_enable_fops); 2359 } 2360 2361 list_add(&dir->list, &tr->systems); 2362 2363 return dir->entry; 2364 2365 out_free: 2366 kfree(dir); 2367 out_fail: 2368 /* Only print this message if failed on memory allocation */ 2369 if (!dir || !system) 2370 pr_warn("No memory to create event subsystem %s\n", name); 2371 return NULL; 2372 } 2373 2374 static int 2375 event_define_fields(struct trace_event_call *call) 2376 { 2377 struct list_head *head; 2378 int ret = 0; 2379 2380 /* 2381 * Other events may have the same class. Only update 2382 * the fields if they are not already defined. 2383 */ 2384 head = trace_get_fields(call); 2385 if (list_empty(head)) { 2386 struct trace_event_fields *field = call->class->fields_array; 2387 unsigned int offset = sizeof(struct trace_entry); 2388 2389 for (; field->type; field++) { 2390 if (field->type == TRACE_FUNCTION_TYPE) { 2391 field->define_fields(call); 2392 break; 2393 } 2394 2395 offset = ALIGN(offset, field->align); 2396 ret = trace_define_field_ext(call, field->type, field->name, 2397 offset, field->size, 2398 field->is_signed, field->filter_type, 2399 field->len); 2400 if (WARN_ON_ONCE(ret)) { 2401 pr_err("error code is %d\n", ret); 2402 break; 2403 } 2404 2405 offset += field->size; 2406 } 2407 } 2408 2409 return ret; 2410 } 2411 2412 static int 2413 event_create_dir(struct dentry *parent, struct trace_event_file *file) 2414 { 2415 struct trace_event_call *call = file->event_call; 2416 struct trace_array *tr = file->tr; 2417 struct dentry *d_events; 2418 const char *name; 2419 int ret; 2420 2421 /* 2422 * If the trace point header did not define TRACE_SYSTEM 2423 * then the system would be called "TRACE_SYSTEM". 2424 */ 2425 if (strcmp(call->class->system, TRACE_SYSTEM) != 0) { 2426 d_events = event_subsystem_dir(tr, call->class->system, file, parent); 2427 if (!d_events) 2428 return -ENOMEM; 2429 } else 2430 d_events = parent; 2431 2432 name = trace_event_name(call); 2433 file->dir = tracefs_create_dir(name, d_events); 2434 if (!file->dir) { 2435 pr_warn("Could not create tracefs '%s' directory\n", name); 2436 return -1; 2437 } 2438 2439 if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) 2440 trace_create_file("enable", TRACE_MODE_WRITE, file->dir, file, 2441 &ftrace_enable_fops); 2442 2443 #ifdef CONFIG_PERF_EVENTS 2444 if (call->event.type && call->class->reg) 2445 trace_create_file("id", TRACE_MODE_READ, file->dir, 2446 (void *)(long)call->event.type, 2447 &ftrace_event_id_fops); 2448 #endif 2449 2450 ret = event_define_fields(call); 2451 if (ret < 0) { 2452 pr_warn("Could not initialize trace point events/%s\n", name); 2453 return ret; 2454 } 2455 2456 /* 2457 * Only event directories that can be enabled should have 2458 * triggers or filters. 2459 */ 2460 if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) { 2461 trace_create_file("filter", TRACE_MODE_WRITE, file->dir, 2462 file, &ftrace_event_filter_fops); 2463 2464 trace_create_file("trigger", TRACE_MODE_WRITE, file->dir, 2465 file, &event_trigger_fops); 2466 } 2467 2468 #ifdef CONFIG_HIST_TRIGGERS 2469 trace_create_file("hist", TRACE_MODE_READ, file->dir, file, 2470 &event_hist_fops); 2471 #endif 2472 #ifdef CONFIG_HIST_TRIGGERS_DEBUG 2473 trace_create_file("hist_debug", TRACE_MODE_READ, file->dir, file, 2474 &event_hist_debug_fops); 2475 #endif 2476 trace_create_file("format", TRACE_MODE_READ, file->dir, call, 2477 &ftrace_event_format_fops); 2478 2479 #ifdef CONFIG_TRACE_EVENT_INJECT 2480 if (call->event.type && call->class->reg) 2481 trace_create_file("inject", 0200, file->dir, file, 2482 &event_inject_fops); 2483 #endif 2484 2485 return 0; 2486 } 2487 2488 static void remove_event_from_tracers(struct trace_event_call *call) 2489 { 2490 struct trace_event_file *file; 2491 struct trace_array *tr; 2492 2493 do_for_each_event_file_safe(tr, file) { 2494 if (file->event_call != call) 2495 continue; 2496 2497 remove_event_file_dir(file); 2498 /* 2499 * The do_for_each_event_file_safe() is 2500 * a double loop. After finding the call for this 2501 * trace_array, we use break to jump to the next 2502 * trace_array. 2503 */ 2504 break; 2505 } while_for_each_event_file(); 2506 } 2507 2508 static void event_remove(struct trace_event_call *call) 2509 { 2510 struct trace_array *tr; 2511 struct trace_event_file *file; 2512 2513 do_for_each_event_file(tr, file) { 2514 if (file->event_call != call) 2515 continue; 2516 2517 if (file->flags & EVENT_FILE_FL_WAS_ENABLED) 2518 tr->clear_trace = true; 2519 2520 ftrace_event_enable_disable(file, 0); 2521 /* 2522 * The do_for_each_event_file() is 2523 * a double loop. After finding the call for this 2524 * trace_array, we use break to jump to the next 2525 * trace_array. 2526 */ 2527 break; 2528 } while_for_each_event_file(); 2529 2530 if (call->event.funcs) 2531 __unregister_trace_event(&call->event); 2532 remove_event_from_tracers(call); 2533 list_del(&call->list); 2534 } 2535 2536 static int event_init(struct trace_event_call *call) 2537 { 2538 int ret = 0; 2539 const char *name; 2540 2541 name = trace_event_name(call); 2542 if (WARN_ON(!name)) 2543 return -EINVAL; 2544 2545 if (call->class->raw_init) { 2546 ret = call->class->raw_init(call); 2547 if (ret < 0 && ret != -ENOSYS) 2548 pr_warn("Could not initialize trace events/%s\n", name); 2549 } 2550 2551 return ret; 2552 } 2553 2554 static int 2555 __register_event(struct trace_event_call *call, struct module *mod) 2556 { 2557 int ret; 2558 2559 ret = event_init(call); 2560 if (ret < 0) 2561 return ret; 2562 2563 list_add(&call->list, &ftrace_events); 2564 if (call->flags & TRACE_EVENT_FL_DYNAMIC) 2565 atomic_set(&call->refcnt, 0); 2566 else 2567 call->module = mod; 2568 2569 return 0; 2570 } 2571 2572 static char *eval_replace(char *ptr, struct trace_eval_map *map, int len) 2573 { 2574 int rlen; 2575 int elen; 2576 2577 /* Find the length of the eval value as a string */ 2578 elen = snprintf(ptr, 0, "%ld", map->eval_value); 2579 /* Make sure there's enough room to replace the string with the value */ 2580 if (len < elen) 2581 return NULL; 2582 2583 snprintf(ptr, elen + 1, "%ld", map->eval_value); 2584 2585 /* Get the rest of the string of ptr */ 2586 rlen = strlen(ptr + len); 2587 memmove(ptr + elen, ptr + len, rlen); 2588 /* Make sure we end the new string */ 2589 ptr[elen + rlen] = 0; 2590 2591 return ptr + elen; 2592 } 2593 2594 static void update_event_printk(struct trace_event_call *call, 2595 struct trace_eval_map *map) 2596 { 2597 char *ptr; 2598 int quote = 0; 2599 int len = strlen(map->eval_string); 2600 2601 for (ptr = call->print_fmt; *ptr; ptr++) { 2602 if (*ptr == '\\') { 2603 ptr++; 2604 /* paranoid */ 2605 if (!*ptr) 2606 break; 2607 continue; 2608 } 2609 if (*ptr == '"') { 2610 quote ^= 1; 2611 continue; 2612 } 2613 if (quote) 2614 continue; 2615 if (isdigit(*ptr)) { 2616 /* skip numbers */ 2617 do { 2618 ptr++; 2619 /* Check for alpha chars like ULL */ 2620 } while (isalnum(*ptr)); 2621 if (!*ptr) 2622 break; 2623 /* 2624 * A number must have some kind of delimiter after 2625 * it, and we can ignore that too. 2626 */ 2627 continue; 2628 } 2629 if (isalpha(*ptr) || *ptr == '_') { 2630 if (strncmp(map->eval_string, ptr, len) == 0 && 2631 !isalnum(ptr[len]) && ptr[len] != '_') { 2632 ptr = eval_replace(ptr, map, len); 2633 /* enum/sizeof string smaller than value */ 2634 if (WARN_ON_ONCE(!ptr)) 2635 return; 2636 /* 2637 * No need to decrement here, as eval_replace() 2638 * returns the pointer to the character passed 2639 * the eval, and two evals can not be placed 2640 * back to back without something in between. 2641 * We can skip that something in between. 2642 */ 2643 continue; 2644 } 2645 skip_more: 2646 do { 2647 ptr++; 2648 } while (isalnum(*ptr) || *ptr == '_'); 2649 if (!*ptr) 2650 break; 2651 /* 2652 * If what comes after this variable is a '.' or 2653 * '->' then we can continue to ignore that string. 2654 */ 2655 if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) { 2656 ptr += *ptr == '.' ? 1 : 2; 2657 if (!*ptr) 2658 break; 2659 goto skip_more; 2660 } 2661 /* 2662 * Once again, we can skip the delimiter that came 2663 * after the string. 2664 */ 2665 continue; 2666 } 2667 } 2668 } 2669 2670 static void add_str_to_module(struct module *module, char *str) 2671 { 2672 struct module_string *modstr; 2673 2674 modstr = kmalloc(sizeof(*modstr), GFP_KERNEL); 2675 2676 /* 2677 * If we failed to allocate memory here, then we'll just 2678 * let the str memory leak when the module is removed. 2679 * If this fails to allocate, there's worse problems than 2680 * a leaked string on module removal. 2681 */ 2682 if (WARN_ON_ONCE(!modstr)) 2683 return; 2684 2685 modstr->module = module; 2686 modstr->str = str; 2687 2688 list_add(&modstr->next, &module_strings); 2689 } 2690 2691 static void update_event_fields(struct trace_event_call *call, 2692 struct trace_eval_map *map) 2693 { 2694 struct ftrace_event_field *field; 2695 struct list_head *head; 2696 char *ptr; 2697 char *str; 2698 int len = strlen(map->eval_string); 2699 2700 /* Dynamic events should never have field maps */ 2701 if (WARN_ON_ONCE(call->flags & TRACE_EVENT_FL_DYNAMIC)) 2702 return; 2703 2704 head = trace_get_fields(call); 2705 list_for_each_entry(field, head, link) { 2706 ptr = strchr(field->type, '['); 2707 if (!ptr) 2708 continue; 2709 ptr++; 2710 2711 if (!isalpha(*ptr) && *ptr != '_') 2712 continue; 2713 2714 if (strncmp(map->eval_string, ptr, len) != 0) 2715 continue; 2716 2717 str = kstrdup(field->type, GFP_KERNEL); 2718 if (WARN_ON_ONCE(!str)) 2719 return; 2720 ptr = str + (ptr - field->type); 2721 ptr = eval_replace(ptr, map, len); 2722 /* enum/sizeof string smaller than value */ 2723 if (WARN_ON_ONCE(!ptr)) { 2724 kfree(str); 2725 continue; 2726 } 2727 2728 /* 2729 * If the event is part of a module, then we need to free the string 2730 * when the module is removed. Otherwise, it will stay allocated 2731 * until a reboot. 2732 */ 2733 if (call->module) 2734 add_str_to_module(call->module, str); 2735 2736 field->type = str; 2737 } 2738 } 2739 2740 void trace_event_eval_update(struct trace_eval_map **map, int len) 2741 { 2742 struct trace_event_call *call, *p; 2743 const char *last_system = NULL; 2744 bool first = false; 2745 int last_i; 2746 int i; 2747 2748 down_write(&trace_event_sem); 2749 list_for_each_entry_safe(call, p, &ftrace_events, list) { 2750 /* events are usually grouped together with systems */ 2751 if (!last_system || call->class->system != last_system) { 2752 first = true; 2753 last_i = 0; 2754 last_system = call->class->system; 2755 } 2756 2757 /* 2758 * Since calls are grouped by systems, the likelihood that the 2759 * next call in the iteration belongs to the same system as the 2760 * previous call is high. As an optimization, we skip searching 2761 * for a map[] that matches the call's system if the last call 2762 * was from the same system. That's what last_i is for. If the 2763 * call has the same system as the previous call, then last_i 2764 * will be the index of the first map[] that has a matching 2765 * system. 2766 */ 2767 for (i = last_i; i < len; i++) { 2768 if (call->class->system == map[i]->system) { 2769 /* Save the first system if need be */ 2770 if (first) { 2771 last_i = i; 2772 first = false; 2773 } 2774 update_event_printk(call, map[i]); 2775 update_event_fields(call, map[i]); 2776 } 2777 } 2778 } 2779 up_write(&trace_event_sem); 2780 } 2781 2782 static struct trace_event_file * 2783 trace_create_new_event(struct trace_event_call *call, 2784 struct trace_array *tr) 2785 { 2786 struct trace_pid_list *no_pid_list; 2787 struct trace_pid_list *pid_list; 2788 struct trace_event_file *file; 2789 unsigned int first; 2790 2791 file = kmem_cache_alloc(file_cachep, GFP_TRACE); 2792 if (!file) 2793 return NULL; 2794 2795 pid_list = rcu_dereference_protected(tr->filtered_pids, 2796 lockdep_is_held(&event_mutex)); 2797 no_pid_list = rcu_dereference_protected(tr->filtered_no_pids, 2798 lockdep_is_held(&event_mutex)); 2799 2800 if (!trace_pid_list_first(pid_list, &first) || 2801 !trace_pid_list_first(no_pid_list, &first)) 2802 file->flags |= EVENT_FILE_FL_PID_FILTER; 2803 2804 file->event_call = call; 2805 file->tr = tr; 2806 atomic_set(&file->sm_ref, 0); 2807 atomic_set(&file->tm_ref, 0); 2808 INIT_LIST_HEAD(&file->triggers); 2809 list_add(&file->list, &tr->events); 2810 2811 return file; 2812 } 2813 2814 #define MAX_BOOT_TRIGGERS 32 2815 2816 static struct boot_triggers { 2817 const char *event; 2818 char *trigger; 2819 } bootup_triggers[MAX_BOOT_TRIGGERS]; 2820 2821 static char bootup_trigger_buf[COMMAND_LINE_SIZE]; 2822 static int nr_boot_triggers; 2823 2824 static __init int setup_trace_triggers(char *str) 2825 { 2826 char *trigger; 2827 char *buf; 2828 int i; 2829 2830 strscpy(bootup_trigger_buf, str, COMMAND_LINE_SIZE); 2831 ring_buffer_expanded = true; 2832 disable_tracing_selftest("running event triggers"); 2833 2834 buf = bootup_trigger_buf; 2835 for (i = 0; i < MAX_BOOT_TRIGGERS; i++) { 2836 trigger = strsep(&buf, ","); 2837 if (!trigger) 2838 break; 2839 bootup_triggers[i].event = strsep(&trigger, "."); 2840 bootup_triggers[i].trigger = trigger; 2841 if (!bootup_triggers[i].trigger) 2842 break; 2843 } 2844 2845 nr_boot_triggers = i; 2846 return 1; 2847 } 2848 __setup("trace_trigger=", setup_trace_triggers); 2849 2850 /* Add an event to a trace directory */ 2851 static int 2852 __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr) 2853 { 2854 struct trace_event_file *file; 2855 2856 file = trace_create_new_event(call, tr); 2857 if (!file) 2858 return -ENOMEM; 2859 2860 if (eventdir_initialized) 2861 return event_create_dir(tr->event_dir, file); 2862 else 2863 return event_define_fields(call); 2864 } 2865 2866 static void trace_early_triggers(struct trace_event_file *file, const char *name) 2867 { 2868 int ret; 2869 int i; 2870 2871 for (i = 0; i < nr_boot_triggers; i++) { 2872 if (strcmp(name, bootup_triggers[i].event)) 2873 continue; 2874 mutex_lock(&event_mutex); 2875 ret = trigger_process_regex(file, bootup_triggers[i].trigger); 2876 mutex_unlock(&event_mutex); 2877 if (ret) 2878 pr_err("Failed to register trigger '%s' on event %s\n", 2879 bootup_triggers[i].trigger, 2880 bootup_triggers[i].event); 2881 } 2882 } 2883 2884 /* 2885 * Just create a descriptor for early init. A descriptor is required 2886 * for enabling events at boot. We want to enable events before 2887 * the filesystem is initialized. 2888 */ 2889 static int 2890 __trace_early_add_new_event(struct trace_event_call *call, 2891 struct trace_array *tr) 2892 { 2893 struct trace_event_file *file; 2894 int ret; 2895 2896 file = trace_create_new_event(call, tr); 2897 if (!file) 2898 return -ENOMEM; 2899 2900 ret = event_define_fields(call); 2901 if (ret) 2902 return ret; 2903 2904 trace_early_triggers(file, trace_event_name(call)); 2905 2906 return 0; 2907 } 2908 2909 struct ftrace_module_file_ops; 2910 static void __add_event_to_tracers(struct trace_event_call *call); 2911 2912 /* Add an additional event_call dynamically */ 2913 int trace_add_event_call(struct trace_event_call *call) 2914 { 2915 int ret; 2916 lockdep_assert_held(&event_mutex); 2917 2918 mutex_lock(&trace_types_lock); 2919 2920 ret = __register_event(call, NULL); 2921 if (ret >= 0) 2922 __add_event_to_tracers(call); 2923 2924 mutex_unlock(&trace_types_lock); 2925 return ret; 2926 } 2927 EXPORT_SYMBOL_GPL(trace_add_event_call); 2928 2929 /* 2930 * Must be called under locking of trace_types_lock, event_mutex and 2931 * trace_event_sem. 2932 */ 2933 static void __trace_remove_event_call(struct trace_event_call *call) 2934 { 2935 event_remove(call); 2936 trace_destroy_fields(call); 2937 free_event_filter(call->filter); 2938 call->filter = NULL; 2939 } 2940 2941 static int probe_remove_event_call(struct trace_event_call *call) 2942 { 2943 struct trace_array *tr; 2944 struct trace_event_file *file; 2945 2946 #ifdef CONFIG_PERF_EVENTS 2947 if (call->perf_refcount) 2948 return -EBUSY; 2949 #endif 2950 do_for_each_event_file(tr, file) { 2951 if (file->event_call != call) 2952 continue; 2953 /* 2954 * We can't rely on ftrace_event_enable_disable(enable => 0) 2955 * we are going to do, EVENT_FILE_FL_SOFT_MODE can suppress 2956 * TRACE_REG_UNREGISTER. 2957 */ 2958 if (file->flags & EVENT_FILE_FL_ENABLED) 2959 goto busy; 2960 2961 if (file->flags & EVENT_FILE_FL_WAS_ENABLED) 2962 tr->clear_trace = true; 2963 /* 2964 * The do_for_each_event_file_safe() is 2965 * a double loop. After finding the call for this 2966 * trace_array, we use break to jump to the next 2967 * trace_array. 2968 */ 2969 break; 2970 } while_for_each_event_file(); 2971 2972 __trace_remove_event_call(call); 2973 2974 return 0; 2975 busy: 2976 /* No need to clear the trace now */ 2977 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 2978 tr->clear_trace = false; 2979 } 2980 return -EBUSY; 2981 } 2982 2983 /* Remove an event_call */ 2984 int trace_remove_event_call(struct trace_event_call *call) 2985 { 2986 int ret; 2987 2988 lockdep_assert_held(&event_mutex); 2989 2990 mutex_lock(&trace_types_lock); 2991 down_write(&trace_event_sem); 2992 ret = probe_remove_event_call(call); 2993 up_write(&trace_event_sem); 2994 mutex_unlock(&trace_types_lock); 2995 2996 return ret; 2997 } 2998 EXPORT_SYMBOL_GPL(trace_remove_event_call); 2999 3000 #define for_each_event(event, start, end) \ 3001 for (event = start; \ 3002 (unsigned long)event < (unsigned long)end; \ 3003 event++) 3004 3005 #ifdef CONFIG_MODULES 3006 3007 static void trace_module_add_events(struct module *mod) 3008 { 3009 struct trace_event_call **call, **start, **end; 3010 3011 if (!mod->num_trace_events) 3012 return; 3013 3014 /* Don't add infrastructure for mods without tracepoints */ 3015 if (trace_module_has_bad_taint(mod)) { 3016 pr_err("%s: module has bad taint, not creating trace events\n", 3017 mod->name); 3018 return; 3019 } 3020 3021 start = mod->trace_events; 3022 end = mod->trace_events + mod->num_trace_events; 3023 3024 for_each_event(call, start, end) { 3025 __register_event(*call, mod); 3026 __add_event_to_tracers(*call); 3027 } 3028 } 3029 3030 static void trace_module_remove_events(struct module *mod) 3031 { 3032 struct trace_event_call *call, *p; 3033 struct module_string *modstr, *m; 3034 3035 down_write(&trace_event_sem); 3036 list_for_each_entry_safe(call, p, &ftrace_events, list) { 3037 if ((call->flags & TRACE_EVENT_FL_DYNAMIC) || !call->module) 3038 continue; 3039 if (call->module == mod) 3040 __trace_remove_event_call(call); 3041 } 3042 /* Check for any strings allocade for this module */ 3043 list_for_each_entry_safe(modstr, m, &module_strings, next) { 3044 if (modstr->module != mod) 3045 continue; 3046 list_del(&modstr->next); 3047 kfree(modstr->str); 3048 kfree(modstr); 3049 } 3050 up_write(&trace_event_sem); 3051 3052 /* 3053 * It is safest to reset the ring buffer if the module being unloaded 3054 * registered any events that were used. The only worry is if 3055 * a new module gets loaded, and takes on the same id as the events 3056 * of this module. When printing out the buffer, traced events left 3057 * over from this module may be passed to the new module events and 3058 * unexpected results may occur. 3059 */ 3060 tracing_reset_all_online_cpus_unlocked(); 3061 } 3062 3063 static int trace_module_notify(struct notifier_block *self, 3064 unsigned long val, void *data) 3065 { 3066 struct module *mod = data; 3067 3068 mutex_lock(&event_mutex); 3069 mutex_lock(&trace_types_lock); 3070 switch (val) { 3071 case MODULE_STATE_COMING: 3072 trace_module_add_events(mod); 3073 break; 3074 case MODULE_STATE_GOING: 3075 trace_module_remove_events(mod); 3076 break; 3077 } 3078 mutex_unlock(&trace_types_lock); 3079 mutex_unlock(&event_mutex); 3080 3081 return NOTIFY_OK; 3082 } 3083 3084 static struct notifier_block trace_module_nb = { 3085 .notifier_call = trace_module_notify, 3086 .priority = 1, /* higher than trace.c module notify */ 3087 }; 3088 #endif /* CONFIG_MODULES */ 3089 3090 /* Create a new event directory structure for a trace directory. */ 3091 static void 3092 __trace_add_event_dirs(struct trace_array *tr) 3093 { 3094 struct trace_event_call *call; 3095 int ret; 3096 3097 list_for_each_entry(call, &ftrace_events, list) { 3098 ret = __trace_add_new_event(call, tr); 3099 if (ret < 0) 3100 pr_warn("Could not create directory for event %s\n", 3101 trace_event_name(call)); 3102 } 3103 } 3104 3105 /* Returns any file that matches the system and event */ 3106 struct trace_event_file * 3107 __find_event_file(struct trace_array *tr, const char *system, const char *event) 3108 { 3109 struct trace_event_file *file; 3110 struct trace_event_call *call; 3111 const char *name; 3112 3113 list_for_each_entry(file, &tr->events, list) { 3114 3115 call = file->event_call; 3116 name = trace_event_name(call); 3117 3118 if (!name || !call->class) 3119 continue; 3120 3121 if (strcmp(event, name) == 0 && 3122 strcmp(system, call->class->system) == 0) 3123 return file; 3124 } 3125 return NULL; 3126 } 3127 3128 /* Returns valid trace event files that match system and event */ 3129 struct trace_event_file * 3130 find_event_file(struct trace_array *tr, const char *system, const char *event) 3131 { 3132 struct trace_event_file *file; 3133 3134 file = __find_event_file(tr, system, event); 3135 if (!file || !file->event_call->class->reg || 3136 file->event_call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) 3137 return NULL; 3138 3139 return file; 3140 } 3141 3142 /** 3143 * trace_get_event_file - Find and return a trace event file 3144 * @instance: The name of the trace instance containing the event 3145 * @system: The name of the system containing the event 3146 * @event: The name of the event 3147 * 3148 * Return a trace event file given the trace instance name, trace 3149 * system, and trace event name. If the instance name is NULL, it 3150 * refers to the top-level trace array. 3151 * 3152 * This function will look it up and return it if found, after calling 3153 * trace_array_get() to prevent the instance from going away, and 3154 * increment the event's module refcount to prevent it from being 3155 * removed. 3156 * 3157 * To release the file, call trace_put_event_file(), which will call 3158 * trace_array_put() and decrement the event's module refcount. 3159 * 3160 * Return: The trace event on success, ERR_PTR otherwise. 3161 */ 3162 struct trace_event_file *trace_get_event_file(const char *instance, 3163 const char *system, 3164 const char *event) 3165 { 3166 struct trace_array *tr = top_trace_array(); 3167 struct trace_event_file *file = NULL; 3168 int ret = -EINVAL; 3169 3170 if (instance) { 3171 tr = trace_array_find_get(instance); 3172 if (!tr) 3173 return ERR_PTR(-ENOENT); 3174 } else { 3175 ret = trace_array_get(tr); 3176 if (ret) 3177 return ERR_PTR(ret); 3178 } 3179 3180 mutex_lock(&event_mutex); 3181 3182 file = find_event_file(tr, system, event); 3183 if (!file) { 3184 trace_array_put(tr); 3185 ret = -EINVAL; 3186 goto out; 3187 } 3188 3189 /* Don't let event modules unload while in use */ 3190 ret = trace_event_try_get_ref(file->event_call); 3191 if (!ret) { 3192 trace_array_put(tr); 3193 ret = -EBUSY; 3194 goto out; 3195 } 3196 3197 ret = 0; 3198 out: 3199 mutex_unlock(&event_mutex); 3200 3201 if (ret) 3202 file = ERR_PTR(ret); 3203 3204 return file; 3205 } 3206 EXPORT_SYMBOL_GPL(trace_get_event_file); 3207 3208 /** 3209 * trace_put_event_file - Release a file from trace_get_event_file() 3210 * @file: The trace event file 3211 * 3212 * If a file was retrieved using trace_get_event_file(), this should 3213 * be called when it's no longer needed. It will cancel the previous 3214 * trace_array_get() called by that function, and decrement the 3215 * event's module refcount. 3216 */ 3217 void trace_put_event_file(struct trace_event_file *file) 3218 { 3219 mutex_lock(&event_mutex); 3220 trace_event_put_ref(file->event_call); 3221 mutex_unlock(&event_mutex); 3222 3223 trace_array_put(file->tr); 3224 } 3225 EXPORT_SYMBOL_GPL(trace_put_event_file); 3226 3227 #ifdef CONFIG_DYNAMIC_FTRACE 3228 3229 /* Avoid typos */ 3230 #define ENABLE_EVENT_STR "enable_event" 3231 #define DISABLE_EVENT_STR "disable_event" 3232 3233 struct event_probe_data { 3234 struct trace_event_file *file; 3235 unsigned long count; 3236 int ref; 3237 bool enable; 3238 }; 3239 3240 static void update_event_probe(struct event_probe_data *data) 3241 { 3242 if (data->enable) 3243 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags); 3244 else 3245 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags); 3246 } 3247 3248 static void 3249 event_enable_probe(unsigned long ip, unsigned long parent_ip, 3250 struct trace_array *tr, struct ftrace_probe_ops *ops, 3251 void *data) 3252 { 3253 struct ftrace_func_mapper *mapper = data; 3254 struct event_probe_data *edata; 3255 void **pdata; 3256 3257 pdata = ftrace_func_mapper_find_ip(mapper, ip); 3258 if (!pdata || !*pdata) 3259 return; 3260 3261 edata = *pdata; 3262 update_event_probe(edata); 3263 } 3264 3265 static void 3266 event_enable_count_probe(unsigned long ip, unsigned long parent_ip, 3267 struct trace_array *tr, struct ftrace_probe_ops *ops, 3268 void *data) 3269 { 3270 struct ftrace_func_mapper *mapper = data; 3271 struct event_probe_data *edata; 3272 void **pdata; 3273 3274 pdata = ftrace_func_mapper_find_ip(mapper, ip); 3275 if (!pdata || !*pdata) 3276 return; 3277 3278 edata = *pdata; 3279 3280 if (!edata->count) 3281 return; 3282 3283 /* Skip if the event is in a state we want to switch to */ 3284 if (edata->enable == !(edata->file->flags & EVENT_FILE_FL_SOFT_DISABLED)) 3285 return; 3286 3287 if (edata->count != -1) 3288 (edata->count)--; 3289 3290 update_event_probe(edata); 3291 } 3292 3293 static int 3294 event_enable_print(struct seq_file *m, unsigned long ip, 3295 struct ftrace_probe_ops *ops, void *data) 3296 { 3297 struct ftrace_func_mapper *mapper = data; 3298 struct event_probe_data *edata; 3299 void **pdata; 3300 3301 pdata = ftrace_func_mapper_find_ip(mapper, ip); 3302 3303 if (WARN_ON_ONCE(!pdata || !*pdata)) 3304 return 0; 3305 3306 edata = *pdata; 3307 3308 seq_printf(m, "%ps:", (void *)ip); 3309 3310 seq_printf(m, "%s:%s:%s", 3311 edata->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, 3312 edata->file->event_call->class->system, 3313 trace_event_name(edata->file->event_call)); 3314 3315 if (edata->count == -1) 3316 seq_puts(m, ":unlimited\n"); 3317 else 3318 seq_printf(m, ":count=%ld\n", edata->count); 3319 3320 return 0; 3321 } 3322 3323 static int 3324 event_enable_init(struct ftrace_probe_ops *ops, struct trace_array *tr, 3325 unsigned long ip, void *init_data, void **data) 3326 { 3327 struct ftrace_func_mapper *mapper = *data; 3328 struct event_probe_data *edata = init_data; 3329 int ret; 3330 3331 if (!mapper) { 3332 mapper = allocate_ftrace_func_mapper(); 3333 if (!mapper) 3334 return -ENODEV; 3335 *data = mapper; 3336 } 3337 3338 ret = ftrace_func_mapper_add_ip(mapper, ip, edata); 3339 if (ret < 0) 3340 return ret; 3341 3342 edata->ref++; 3343 3344 return 0; 3345 } 3346 3347 static int free_probe_data(void *data) 3348 { 3349 struct event_probe_data *edata = data; 3350 3351 edata->ref--; 3352 if (!edata->ref) { 3353 /* Remove the SOFT_MODE flag */ 3354 __ftrace_event_enable_disable(edata->file, 0, 1); 3355 trace_event_put_ref(edata->file->event_call); 3356 kfree(edata); 3357 } 3358 return 0; 3359 } 3360 3361 static void 3362 event_enable_free(struct ftrace_probe_ops *ops, struct trace_array *tr, 3363 unsigned long ip, void *data) 3364 { 3365 struct ftrace_func_mapper *mapper = data; 3366 struct event_probe_data *edata; 3367 3368 if (!ip) { 3369 if (!mapper) 3370 return; 3371 free_ftrace_func_mapper(mapper, free_probe_data); 3372 return; 3373 } 3374 3375 edata = ftrace_func_mapper_remove_ip(mapper, ip); 3376 3377 if (WARN_ON_ONCE(!edata)) 3378 return; 3379 3380 if (WARN_ON_ONCE(edata->ref <= 0)) 3381 return; 3382 3383 free_probe_data(edata); 3384 } 3385 3386 static struct ftrace_probe_ops event_enable_probe_ops = { 3387 .func = event_enable_probe, 3388 .print = event_enable_print, 3389 .init = event_enable_init, 3390 .free = event_enable_free, 3391 }; 3392 3393 static struct ftrace_probe_ops event_enable_count_probe_ops = { 3394 .func = event_enable_count_probe, 3395 .print = event_enable_print, 3396 .init = event_enable_init, 3397 .free = event_enable_free, 3398 }; 3399 3400 static struct ftrace_probe_ops event_disable_probe_ops = { 3401 .func = event_enable_probe, 3402 .print = event_enable_print, 3403 .init = event_enable_init, 3404 .free = event_enable_free, 3405 }; 3406 3407 static struct ftrace_probe_ops event_disable_count_probe_ops = { 3408 .func = event_enable_count_probe, 3409 .print = event_enable_print, 3410 .init = event_enable_init, 3411 .free = event_enable_free, 3412 }; 3413 3414 static int 3415 event_enable_func(struct trace_array *tr, struct ftrace_hash *hash, 3416 char *glob, char *cmd, char *param, int enabled) 3417 { 3418 struct trace_event_file *file; 3419 struct ftrace_probe_ops *ops; 3420 struct event_probe_data *data; 3421 const char *system; 3422 const char *event; 3423 char *number; 3424 bool enable; 3425 int ret; 3426 3427 if (!tr) 3428 return -ENODEV; 3429 3430 /* hash funcs only work with set_ftrace_filter */ 3431 if (!enabled || !param) 3432 return -EINVAL; 3433 3434 system = strsep(¶m, ":"); 3435 if (!param) 3436 return -EINVAL; 3437 3438 event = strsep(¶m, ":"); 3439 3440 mutex_lock(&event_mutex); 3441 3442 ret = -EINVAL; 3443 file = find_event_file(tr, system, event); 3444 if (!file) 3445 goto out; 3446 3447 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0; 3448 3449 if (enable) 3450 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops; 3451 else 3452 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops; 3453 3454 if (glob[0] == '!') { 3455 ret = unregister_ftrace_function_probe_func(glob+1, tr, ops); 3456 goto out; 3457 } 3458 3459 ret = -ENOMEM; 3460 3461 data = kzalloc(sizeof(*data), GFP_KERNEL); 3462 if (!data) 3463 goto out; 3464 3465 data->enable = enable; 3466 data->count = -1; 3467 data->file = file; 3468 3469 if (!param) 3470 goto out_reg; 3471 3472 number = strsep(¶m, ":"); 3473 3474 ret = -EINVAL; 3475 if (!strlen(number)) 3476 goto out_free; 3477 3478 /* 3479 * We use the callback data field (which is a pointer) 3480 * as our counter. 3481 */ 3482 ret = kstrtoul(number, 0, &data->count); 3483 if (ret) 3484 goto out_free; 3485 3486 out_reg: 3487 /* Don't let event modules unload while probe registered */ 3488 ret = trace_event_try_get_ref(file->event_call); 3489 if (!ret) { 3490 ret = -EBUSY; 3491 goto out_free; 3492 } 3493 3494 ret = __ftrace_event_enable_disable(file, 1, 1); 3495 if (ret < 0) 3496 goto out_put; 3497 3498 ret = register_ftrace_function_probe(glob, tr, ops, data); 3499 /* 3500 * The above returns on success the # of functions enabled, 3501 * but if it didn't find any functions it returns zero. 3502 * Consider no functions a failure too. 3503 */ 3504 if (!ret) { 3505 ret = -ENOENT; 3506 goto out_disable; 3507 } else if (ret < 0) 3508 goto out_disable; 3509 /* Just return zero, not the number of enabled functions */ 3510 ret = 0; 3511 out: 3512 mutex_unlock(&event_mutex); 3513 return ret; 3514 3515 out_disable: 3516 __ftrace_event_enable_disable(file, 0, 1); 3517 out_put: 3518 trace_event_put_ref(file->event_call); 3519 out_free: 3520 kfree(data); 3521 goto out; 3522 } 3523 3524 static struct ftrace_func_command event_enable_cmd = { 3525 .name = ENABLE_EVENT_STR, 3526 .func = event_enable_func, 3527 }; 3528 3529 static struct ftrace_func_command event_disable_cmd = { 3530 .name = DISABLE_EVENT_STR, 3531 .func = event_enable_func, 3532 }; 3533 3534 static __init int register_event_cmds(void) 3535 { 3536 int ret; 3537 3538 ret = register_ftrace_command(&event_enable_cmd); 3539 if (WARN_ON(ret < 0)) 3540 return ret; 3541 ret = register_ftrace_command(&event_disable_cmd); 3542 if (WARN_ON(ret < 0)) 3543 unregister_ftrace_command(&event_enable_cmd); 3544 return ret; 3545 } 3546 #else 3547 static inline int register_event_cmds(void) { return 0; } 3548 #endif /* CONFIG_DYNAMIC_FTRACE */ 3549 3550 /* 3551 * The top level array and trace arrays created by boot-time tracing 3552 * have already had its trace_event_file descriptors created in order 3553 * to allow for early events to be recorded. 3554 * This function is called after the tracefs has been initialized, 3555 * and we now have to create the files associated to the events. 3556 */ 3557 static void __trace_early_add_event_dirs(struct trace_array *tr) 3558 { 3559 struct trace_event_file *file; 3560 int ret; 3561 3562 3563 list_for_each_entry(file, &tr->events, list) { 3564 ret = event_create_dir(tr->event_dir, file); 3565 if (ret < 0) 3566 pr_warn("Could not create directory for event %s\n", 3567 trace_event_name(file->event_call)); 3568 } 3569 } 3570 3571 /* 3572 * For early boot up, the top trace array and the trace arrays created 3573 * by boot-time tracing require to have a list of events that can be 3574 * enabled. This must be done before the filesystem is set up in order 3575 * to allow events to be traced early. 3576 */ 3577 void __trace_early_add_events(struct trace_array *tr) 3578 { 3579 struct trace_event_call *call; 3580 int ret; 3581 3582 list_for_each_entry(call, &ftrace_events, list) { 3583 /* Early boot up should not have any modules loaded */ 3584 if (!(call->flags & TRACE_EVENT_FL_DYNAMIC) && 3585 WARN_ON_ONCE(call->module)) 3586 continue; 3587 3588 ret = __trace_early_add_new_event(call, tr); 3589 if (ret < 0) 3590 pr_warn("Could not create early event %s\n", 3591 trace_event_name(call)); 3592 } 3593 } 3594 3595 /* Remove the event directory structure for a trace directory. */ 3596 static void 3597 __trace_remove_event_dirs(struct trace_array *tr) 3598 { 3599 struct trace_event_file *file, *next; 3600 3601 list_for_each_entry_safe(file, next, &tr->events, list) 3602 remove_event_file_dir(file); 3603 } 3604 3605 static void __add_event_to_tracers(struct trace_event_call *call) 3606 { 3607 struct trace_array *tr; 3608 3609 list_for_each_entry(tr, &ftrace_trace_arrays, list) 3610 __trace_add_new_event(call, tr); 3611 } 3612 3613 extern struct trace_event_call *__start_ftrace_events[]; 3614 extern struct trace_event_call *__stop_ftrace_events[]; 3615 3616 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata; 3617 3618 static __init int setup_trace_event(char *str) 3619 { 3620 strscpy(bootup_event_buf, str, COMMAND_LINE_SIZE); 3621 ring_buffer_expanded = true; 3622 disable_tracing_selftest("running event tracing"); 3623 3624 return 1; 3625 } 3626 __setup("trace_event=", setup_trace_event); 3627 3628 /* Expects to have event_mutex held when called */ 3629 static int 3630 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr) 3631 { 3632 struct dentry *d_events; 3633 struct dentry *entry; 3634 3635 entry = trace_create_file("set_event", TRACE_MODE_WRITE, parent, 3636 tr, &ftrace_set_event_fops); 3637 if (!entry) 3638 return -ENOMEM; 3639 3640 d_events = tracefs_create_dir("events", parent); 3641 if (!d_events) { 3642 pr_warn("Could not create tracefs 'events' directory\n"); 3643 return -ENOMEM; 3644 } 3645 3646 entry = trace_create_file("enable", TRACE_MODE_WRITE, d_events, 3647 tr, &ftrace_tr_enable_fops); 3648 if (!entry) 3649 return -ENOMEM; 3650 3651 /* There are not as crucial, just warn if they are not created */ 3652 3653 trace_create_file("set_event_pid", TRACE_MODE_WRITE, parent, 3654 tr, &ftrace_set_event_pid_fops); 3655 3656 trace_create_file("set_event_notrace_pid", 3657 TRACE_MODE_WRITE, parent, tr, 3658 &ftrace_set_event_notrace_pid_fops); 3659 3660 /* ring buffer internal formats */ 3661 trace_create_file("header_page", TRACE_MODE_READ, d_events, 3662 ring_buffer_print_page_header, 3663 &ftrace_show_header_fops); 3664 3665 trace_create_file("header_event", TRACE_MODE_READ, d_events, 3666 ring_buffer_print_entry_header, 3667 &ftrace_show_header_fops); 3668 3669 tr->event_dir = d_events; 3670 3671 return 0; 3672 } 3673 3674 /** 3675 * event_trace_add_tracer - add a instance of a trace_array to events 3676 * @parent: The parent dentry to place the files/directories for events in 3677 * @tr: The trace array associated with these events 3678 * 3679 * When a new instance is created, it needs to set up its events 3680 * directory, as well as other files associated with events. It also 3681 * creates the event hierarchy in the @parent/events directory. 3682 * 3683 * Returns 0 on success. 3684 * 3685 * Must be called with event_mutex held. 3686 */ 3687 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr) 3688 { 3689 int ret; 3690 3691 lockdep_assert_held(&event_mutex); 3692 3693 ret = create_event_toplevel_files(parent, tr); 3694 if (ret) 3695 goto out; 3696 3697 down_write(&trace_event_sem); 3698 /* If tr already has the event list, it is initialized in early boot. */ 3699 if (unlikely(!list_empty(&tr->events))) 3700 __trace_early_add_event_dirs(tr); 3701 else 3702 __trace_add_event_dirs(tr); 3703 up_write(&trace_event_sem); 3704 3705 out: 3706 return ret; 3707 } 3708 3709 /* 3710 * The top trace array already had its file descriptors created. 3711 * Now the files themselves need to be created. 3712 */ 3713 static __init int 3714 early_event_add_tracer(struct dentry *parent, struct trace_array *tr) 3715 { 3716 int ret; 3717 3718 mutex_lock(&event_mutex); 3719 3720 ret = create_event_toplevel_files(parent, tr); 3721 if (ret) 3722 goto out_unlock; 3723 3724 down_write(&trace_event_sem); 3725 __trace_early_add_event_dirs(tr); 3726 up_write(&trace_event_sem); 3727 3728 out_unlock: 3729 mutex_unlock(&event_mutex); 3730 3731 return ret; 3732 } 3733 3734 /* Must be called with event_mutex held */ 3735 int event_trace_del_tracer(struct trace_array *tr) 3736 { 3737 lockdep_assert_held(&event_mutex); 3738 3739 /* Disable any event triggers and associated soft-disabled events */ 3740 clear_event_triggers(tr); 3741 3742 /* Clear the pid list */ 3743 __ftrace_clear_event_pids(tr, TRACE_PIDS | TRACE_NO_PIDS); 3744 3745 /* Disable any running events */ 3746 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0); 3747 3748 /* Make sure no more events are being executed */ 3749 tracepoint_synchronize_unregister(); 3750 3751 down_write(&trace_event_sem); 3752 __trace_remove_event_dirs(tr); 3753 tracefs_remove(tr->event_dir); 3754 up_write(&trace_event_sem); 3755 3756 tr->event_dir = NULL; 3757 3758 return 0; 3759 } 3760 3761 static __init int event_trace_memsetup(void) 3762 { 3763 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC); 3764 file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC); 3765 return 0; 3766 } 3767 3768 __init void 3769 early_enable_events(struct trace_array *tr, char *buf, bool disable_first) 3770 { 3771 char *token; 3772 int ret; 3773 3774 while (true) { 3775 token = strsep(&buf, ","); 3776 3777 if (!token) 3778 break; 3779 3780 if (*token) { 3781 /* Restarting syscalls requires that we stop them first */ 3782 if (disable_first) 3783 ftrace_set_clr_event(tr, token, 0); 3784 3785 ret = ftrace_set_clr_event(tr, token, 1); 3786 if (ret) 3787 pr_warn("Failed to enable trace event: %s\n", token); 3788 } 3789 3790 /* Put back the comma to allow this to be called again */ 3791 if (buf) 3792 *(buf - 1) = ','; 3793 } 3794 } 3795 3796 static __init int event_trace_enable(void) 3797 { 3798 struct trace_array *tr = top_trace_array(); 3799 struct trace_event_call **iter, *call; 3800 int ret; 3801 3802 if (!tr) 3803 return -ENODEV; 3804 3805 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) { 3806 3807 call = *iter; 3808 ret = event_init(call); 3809 if (!ret) 3810 list_add(&call->list, &ftrace_events); 3811 } 3812 3813 register_trigger_cmds(); 3814 3815 /* 3816 * We need the top trace array to have a working set of trace 3817 * points at early init, before the debug files and directories 3818 * are created. Create the file entries now, and attach them 3819 * to the actual file dentries later. 3820 */ 3821 __trace_early_add_events(tr); 3822 3823 early_enable_events(tr, bootup_event_buf, false); 3824 3825 trace_printk_start_comm(); 3826 3827 register_event_cmds(); 3828 3829 3830 return 0; 3831 } 3832 3833 /* 3834 * event_trace_enable() is called from trace_event_init() first to 3835 * initialize events and perhaps start any events that are on the 3836 * command line. Unfortunately, there are some events that will not 3837 * start this early, like the system call tracepoints that need 3838 * to set the %SYSCALL_WORK_SYSCALL_TRACEPOINT flag of pid 1. But 3839 * event_trace_enable() is called before pid 1 starts, and this flag 3840 * is never set, making the syscall tracepoint never get reached, but 3841 * the event is enabled regardless (and not doing anything). 3842 */ 3843 static __init int event_trace_enable_again(void) 3844 { 3845 struct trace_array *tr; 3846 3847 tr = top_trace_array(); 3848 if (!tr) 3849 return -ENODEV; 3850 3851 early_enable_events(tr, bootup_event_buf, true); 3852 3853 return 0; 3854 } 3855 3856 early_initcall(event_trace_enable_again); 3857 3858 /* Init fields which doesn't related to the tracefs */ 3859 static __init int event_trace_init_fields(void) 3860 { 3861 if (trace_define_generic_fields()) 3862 pr_warn("tracing: Failed to allocated generic fields"); 3863 3864 if (trace_define_common_fields()) 3865 pr_warn("tracing: Failed to allocate common fields"); 3866 3867 return 0; 3868 } 3869 3870 __init int event_trace_init(void) 3871 { 3872 struct trace_array *tr; 3873 int ret; 3874 3875 tr = top_trace_array(); 3876 if (!tr) 3877 return -ENODEV; 3878 3879 trace_create_file("available_events", TRACE_MODE_READ, 3880 NULL, tr, &ftrace_avail_fops); 3881 3882 ret = early_event_add_tracer(NULL, tr); 3883 if (ret) 3884 return ret; 3885 3886 #ifdef CONFIG_MODULES 3887 ret = register_module_notifier(&trace_module_nb); 3888 if (ret) 3889 pr_warn("Failed to register trace events module notifier\n"); 3890 #endif 3891 3892 eventdir_initialized = true; 3893 3894 return 0; 3895 } 3896 3897 void __init trace_event_init(void) 3898 { 3899 event_trace_memsetup(); 3900 init_ftrace_syscalls(); 3901 event_trace_enable(); 3902 event_trace_init_fields(); 3903 } 3904 3905 #ifdef CONFIG_EVENT_TRACE_STARTUP_TEST 3906 3907 static DEFINE_SPINLOCK(test_spinlock); 3908 static DEFINE_SPINLOCK(test_spinlock_irq); 3909 static DEFINE_MUTEX(test_mutex); 3910 3911 static __init void test_work(struct work_struct *dummy) 3912 { 3913 spin_lock(&test_spinlock); 3914 spin_lock_irq(&test_spinlock_irq); 3915 udelay(1); 3916 spin_unlock_irq(&test_spinlock_irq); 3917 spin_unlock(&test_spinlock); 3918 3919 mutex_lock(&test_mutex); 3920 msleep(1); 3921 mutex_unlock(&test_mutex); 3922 } 3923 3924 static __init int event_test_thread(void *unused) 3925 { 3926 void *test_malloc; 3927 3928 test_malloc = kmalloc(1234, GFP_KERNEL); 3929 if (!test_malloc) 3930 pr_info("failed to kmalloc\n"); 3931 3932 schedule_on_each_cpu(test_work); 3933 3934 kfree(test_malloc); 3935 3936 set_current_state(TASK_INTERRUPTIBLE); 3937 while (!kthread_should_stop()) { 3938 schedule(); 3939 set_current_state(TASK_INTERRUPTIBLE); 3940 } 3941 __set_current_state(TASK_RUNNING); 3942 3943 return 0; 3944 } 3945 3946 /* 3947 * Do various things that may trigger events. 3948 */ 3949 static __init void event_test_stuff(void) 3950 { 3951 struct task_struct *test_thread; 3952 3953 test_thread = kthread_run(event_test_thread, NULL, "test-events"); 3954 msleep(1); 3955 kthread_stop(test_thread); 3956 } 3957 3958 /* 3959 * For every trace event defined, we will test each trace point separately, 3960 * and then by groups, and finally all trace points. 3961 */ 3962 static __init void event_trace_self_tests(void) 3963 { 3964 struct trace_subsystem_dir *dir; 3965 struct trace_event_file *file; 3966 struct trace_event_call *call; 3967 struct event_subsystem *system; 3968 struct trace_array *tr; 3969 int ret; 3970 3971 tr = top_trace_array(); 3972 if (!tr) 3973 return; 3974 3975 pr_info("Running tests on trace events:\n"); 3976 3977 list_for_each_entry(file, &tr->events, list) { 3978 3979 call = file->event_call; 3980 3981 /* Only test those that have a probe */ 3982 if (!call->class || !call->class->probe) 3983 continue; 3984 3985 /* 3986 * Testing syscall events here is pretty useless, but 3987 * we still do it if configured. But this is time consuming. 3988 * What we really need is a user thread to perform the 3989 * syscalls as we test. 3990 */ 3991 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS 3992 if (call->class->system && 3993 strcmp(call->class->system, "syscalls") == 0) 3994 continue; 3995 #endif 3996 3997 pr_info("Testing event %s: ", trace_event_name(call)); 3998 3999 /* 4000 * If an event is already enabled, someone is using 4001 * it and the self test should not be on. 4002 */ 4003 if (file->flags & EVENT_FILE_FL_ENABLED) { 4004 pr_warn("Enabled event during self test!\n"); 4005 WARN_ON_ONCE(1); 4006 continue; 4007 } 4008 4009 ftrace_event_enable_disable(file, 1); 4010 event_test_stuff(); 4011 ftrace_event_enable_disable(file, 0); 4012 4013 pr_cont("OK\n"); 4014 } 4015 4016 /* Now test at the sub system level */ 4017 4018 pr_info("Running tests on trace event systems:\n"); 4019 4020 list_for_each_entry(dir, &tr->systems, list) { 4021 4022 system = dir->subsystem; 4023 4024 /* the ftrace system is special, skip it */ 4025 if (strcmp(system->name, "ftrace") == 0) 4026 continue; 4027 4028 pr_info("Testing event system %s: ", system->name); 4029 4030 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1); 4031 if (WARN_ON_ONCE(ret)) { 4032 pr_warn("error enabling system %s\n", 4033 system->name); 4034 continue; 4035 } 4036 4037 event_test_stuff(); 4038 4039 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0); 4040 if (WARN_ON_ONCE(ret)) { 4041 pr_warn("error disabling system %s\n", 4042 system->name); 4043 continue; 4044 } 4045 4046 pr_cont("OK\n"); 4047 } 4048 4049 /* Test with all events enabled */ 4050 4051 pr_info("Running tests on all trace events:\n"); 4052 pr_info("Testing all events: "); 4053 4054 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1); 4055 if (WARN_ON_ONCE(ret)) { 4056 pr_warn("error enabling all events\n"); 4057 return; 4058 } 4059 4060 event_test_stuff(); 4061 4062 /* reset sysname */ 4063 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0); 4064 if (WARN_ON_ONCE(ret)) { 4065 pr_warn("error disabling all events\n"); 4066 return; 4067 } 4068 4069 pr_cont("OK\n"); 4070 } 4071 4072 #ifdef CONFIG_FUNCTION_TRACER 4073 4074 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable); 4075 4076 static struct trace_event_file event_trace_file __initdata; 4077 4078 static void __init 4079 function_test_events_call(unsigned long ip, unsigned long parent_ip, 4080 struct ftrace_ops *op, struct ftrace_regs *regs) 4081 { 4082 struct trace_buffer *buffer; 4083 struct ring_buffer_event *event; 4084 struct ftrace_entry *entry; 4085 unsigned int trace_ctx; 4086 long disabled; 4087 int cpu; 4088 4089 trace_ctx = tracing_gen_ctx(); 4090 preempt_disable_notrace(); 4091 cpu = raw_smp_processor_id(); 4092 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu)); 4093 4094 if (disabled != 1) 4095 goto out; 4096 4097 event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file, 4098 TRACE_FN, sizeof(*entry), 4099 trace_ctx); 4100 if (!event) 4101 goto out; 4102 entry = ring_buffer_event_data(event); 4103 entry->ip = ip; 4104 entry->parent_ip = parent_ip; 4105 4106 event_trigger_unlock_commit(&event_trace_file, buffer, event, 4107 entry, trace_ctx); 4108 out: 4109 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); 4110 preempt_enable_notrace(); 4111 } 4112 4113 static struct ftrace_ops trace_ops __initdata = 4114 { 4115 .func = function_test_events_call, 4116 }; 4117 4118 static __init void event_trace_self_test_with_function(void) 4119 { 4120 int ret; 4121 4122 event_trace_file.tr = top_trace_array(); 4123 if (WARN_ON(!event_trace_file.tr)) 4124 return; 4125 4126 ret = register_ftrace_function(&trace_ops); 4127 if (WARN_ON(ret < 0)) { 4128 pr_info("Failed to enable function tracer for event tests\n"); 4129 return; 4130 } 4131 pr_info("Running tests again, along with the function tracer\n"); 4132 event_trace_self_tests(); 4133 unregister_ftrace_function(&trace_ops); 4134 } 4135 #else 4136 static __init void event_trace_self_test_with_function(void) 4137 { 4138 } 4139 #endif 4140 4141 static __init int event_trace_self_tests_init(void) 4142 { 4143 if (!tracing_selftest_disabled) { 4144 event_trace_self_tests(); 4145 event_trace_self_test_with_function(); 4146 } 4147 4148 return 0; 4149 } 4150 4151 late_initcall(event_trace_self_tests_init); 4152 4153 #endif 4154